author    Chad Smith <chad.smith@canonical.com>  2018-02-02 11:11:36 -0700
committer Chad Smith <chad.smith@canonical.com>  2018-02-02 11:11:36 -0700
commit    78013bc65030421699b5feb66bc8b7a205abfbc0 (patch)
tree      2ebf7111129f4aaf8a833ba6d226d4513ed59388 /cloudinit
parent    192261fe38a32edbd1f605ba25bbb6f4822a0720 (diff)
parent    f7deaf15acf382d62554e2b1d70daa9a9109d542 (diff)
merge from master at 17.2-30-gf7deaf15
Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/analyze/__main__.py | 4
-rw-r--r--  cloudinit/analyze/dump.py | 8
-rw-r--r--  cloudinit/cmd/clean.py | 103
-rw-r--r--  cloudinit/cmd/main.py | 44
-rw-r--r--  cloudinit/cmd/status.py | 160
-rw-r--r--  cloudinit/cmd/tests/__init__.py | 0
-rw-r--r--  cloudinit/cmd/tests/test_clean.py | 176
-rw-r--r--  cloudinit/cmd/tests/test_status.py | 368
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 5
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 8
-rw-r--r--  cloudinit/config/cc_landscape.py | 8
-rw-r--r--  cloudinit/config/cc_ntp.py | 10
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 1
-rw-r--r--  cloudinit/config/cc_resizefs.py | 12
-rw-r--r--  cloudinit/config/cc_rh_subscription.py | 5
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 10
-rw-r--r--  cloudinit/config/cc_seed_random.py | 3
-rw-r--r--  cloudinit/config/cc_snap_config.py | 7
-rwxr-xr-x  cloudinit/distros/__init__.py | 31
-rw-r--r--  cloudinit/distros/freebsd.py | 11
-rw-r--r--  cloudinit/ec2_utils.py | 39
-rw-r--r--  cloudinit/net/__init__.py | 4
-rwxr-xr-x  cloudinit/net/cmdline.py | 9
-rw-r--r--  cloudinit/net/dhcp.py | 43
-rw-r--r--  cloudinit/net/network_state.py | 20
-rw-r--r--  cloudinit/sources/DataSourceAliYun.py | 1
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 7
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 171
-rw-r--r--  cloudinit/sources/DataSourceBigstep.py | 5
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py | 5
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 5
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 9
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 5
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 65
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 139
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 59
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 5
-rw-r--r--  cloudinit/sources/DataSourceNone.py | 5
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 130
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 122
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 5
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py | 4
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 5
-rw-r--r--  cloudinit/sources/__init__.py | 131
-rw-r--r--  cloudinit/sources/helpers/azure.py | 25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py | 4
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_custom_script.py | 153
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py | 2
-rw-r--r--  cloudinit/sources/tests/__init__.py | 0
-rw-r--r--  cloudinit/sources/tests/test_init.py | 202
-rw-r--r--  cloudinit/temp_utils.py | 11
-rw-r--r--  cloudinit/tests/helpers.py | 42
-rw-r--r--  cloudinit/tests/test_util.py | 46
-rw-r--r--  cloudinit/url_helper.py | 29
-rw-r--r--  cloudinit/util.py | 193
-rw-r--r--  cloudinit/version.py | 2
56 files changed, 2279 insertions(+), 397 deletions(-)
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 69b9e43e..3ba5903f 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -6,6 +6,8 @@ import argparse
import re
import sys
+from cloudinit.util import json_dumps
+
from . import dump
from . import show
@@ -112,7 +114,7 @@ def analyze_show(name, args):
def analyze_dump(name, args):
"""Dump cloud-init events in json format"""
(infh, outfh) = configure_io(args)
- outfh.write(dump.json_dumps(_get_events(infh)) + '\n')
+ outfh.write(json_dumps(_get_events(infh)) + '\n')
def _get_events(infile):
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index ca4da496..b071aa19 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -2,7 +2,6 @@
import calendar
from datetime import datetime
-import json
import sys
from cloudinit import util
@@ -132,11 +131,6 @@ def parse_ci_logline(line):
return event
-def json_dumps(data):
- return json.dumps(data, indent=1, sort_keys=True,
- separators=(',', ': '))
-
-
def dump_events(cisource=None, rawdata=None):
events = []
event = None
@@ -169,7 +163,7 @@ def main():
else:
cisource = sys.stdin
- return json_dumps(dump_events(cisource))
+ return util.json_dumps(dump_events(cisource))
if __name__ == "__main__":
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
new file mode 100644
index 00000000..de22f7f2
--- /dev/null
+++ b/cloudinit/cmd/clean.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2017 Canonical Ltd.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Define 'clean' utility and handler as part of cloud-init commandline."""
+
+import argparse
+import os
+import sys
+
+from cloudinit.stages import Init
+from cloudinit.util import (
+ ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles,
+ is_link, subp)
+
+
+def error(msg):
+ sys.stderr.write("ERROR: " + msg + "\n")
+
+
+def get_parser(parser=None):
+ """Build or extend an arg parser for clean utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ clean subcommand which will be extended to support the args of
+ this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(
+ prog='clean',
+ description=('Remove logs and artifacts so cloud-init re-runs on '
+ 'a clean system'))
+ parser.add_argument(
+ '-l', '--logs', action='store_true', default=False, dest='remove_logs',
+ help='Remove cloud-init logs.')
+ parser.add_argument(
+ '-r', '--reboot', action='store_true', default=False,
+ help='Reboot system after logs are cleaned so cloud-init re-runs.')
+ parser.add_argument(
+ '-s', '--seed', action='store_true', default=False, dest='remove_seed',
+ help='Remove cloud-init seed directory /var/lib/cloud/seed.')
+ return parser
+
+
+def remove_artifacts(remove_logs, remove_seed=False):
+ """Helper which removes artifacts dir and optionally log files.
+
+ @param: remove_logs: Boolean. Set True to delete the cloud_dir path. False
+ preserves them.
+ @param: remove_seed: Boolean. Set True to also delete seed subdir in
+ paths.cloud_dir.
+ @returns: 0 on success, 1 otherwise.
+ """
+ init = Init(ds_deps=[])
+ init.read_cfg()
+ if remove_logs:
+ for log_file in get_config_logfiles(init.cfg):
+ del_file(log_file)
+
+ if not os.path.isdir(init.paths.cloud_dir):
+ return 0 # Artifacts dir already cleaned
+ with chdir(init.paths.cloud_dir):
+ for path in os.listdir('.'):
+ if path == 'seed' and not remove_seed:
+ continue
+ try:
+ if os.path.isdir(path) and not is_link(path):
+ del_dir(path)
+ else:
+ del_file(path)
+ except OSError as e:
+ error('Could not remove {0}: {1}'.format(path, str(e)))
+ return 1
+ return 0
+
+
+def handle_clean_args(name, args):
+ """Handle calls to 'cloud-init clean' as a subcommand."""
+ exit_code = remove_artifacts(args.remove_logs, args.remove_seed)
+ if exit_code == 0 and args.reboot:
+ cmd = ['shutdown', '-r', 'now']
+ try:
+ subp(cmd, capture=False)
+ except ProcessExecutionError as e:
+ error(
+ 'Could not reboot this system using "{0}": {1}'.format(
+ cmd, str(e)))
+ exit_code = 1
+ return exit_code
+
+
+def main():
+    """Tool to remove cloud-init logs and artifacts so it can re-run."""
+ parser = get_parser()
+ sys.exit(handle_clean_args('clean', parser.parse_args()))
+
+
+if __name__ == '__main__':
+ main()
+
+# vi: ts=4 expandtab
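
A minimal usage sketch of the new clean subcommand (illustrative, not part of the patch; the namedtuple stands in for the argparse Namespace that handle_clean_args expects, mirroring the tests below):

    from collections import namedtuple
    from cloudinit.cmd import clean

    MyArgs = namedtuple('MyArgs', 'remove_logs remove_seed reboot')
    args = MyArgs(remove_logs=True, remove_seed=False, reboot=False)
    # Deletes configured log files and /var/lib/cloud contents (seed kept);
    # returns 0 on success, 1 if any artifact could not be removed.
    exit_code = clean.handle_clean_args('clean', args)
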
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 6fb9d9e7..d2f1b778 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -421,7 +421,13 @@ def di_report_warn(datasource, cfg):
LOG.debug("no di_report found in config.")
return
- dicfg = cfg.get('di_report', {})
+ dicfg = cfg['di_report']
+ if dicfg is None:
+ # ds-identify may write 'di_report:\n #comment\n'
+ # which reads as {'di_report': None}
+ LOG.debug("di_report was None.")
+ return
+
if not isinstance(dicfg, dict):
LOG.warning("di_report config not a dictionary: %s", dicfg)
return
@@ -603,7 +609,11 @@ def status_wrapper(name, args, data_d=None, link_d=None):
else:
raise ValueError("unknown name: %s" % name)
- modes = ('init', 'init-local', 'modules-config', 'modules-final')
+ modes = ('init', 'init-local', 'modules-init', 'modules-config',
+ 'modules-final')
+ if mode not in modes:
+ raise ValueError(
+ "Invalid cloud init mode specified '{0}'".format(mode))
status = None
if mode == 'init-local':
@@ -615,16 +625,18 @@ def status_wrapper(name, args, data_d=None, link_d=None):
except Exception:
pass
+ nullstatus = {
+ 'errors': [],
+ 'start': None,
+ 'finished': None,
+ }
if status is None:
- nullstatus = {
- 'errors': [],
- 'start': None,
- 'finished': None,
- }
status = {'v1': {}}
for m in modes:
status['v1'][m] = nullstatus.copy()
status['v1']['datasource'] = None
+ elif mode not in status['v1']:
+ status['v1'][mode] = nullstatus.copy()
v1 = status['v1']
v1['stage'] = mode
@@ -767,6 +779,12 @@ def main(sysv_args=None):
parser_collect_logs = subparsers.add_parser(
'collect-logs', help='Collect and tar all cloud-init debug info')
+ parser_clean = subparsers.add_parser(
+ 'clean', help='Remove logs and artifacts so cloud-init can re-run.')
+
+ parser_status = subparsers.add_parser(
+ 'status', help='Report cloud-init status or wait on completion.')
+
if sysv_args:
# Only load subparsers if subcommand is specified to avoid load cost
if sysv_args[0] == 'analyze':
@@ -783,6 +801,18 @@ def main(sysv_args=None):
logs_parser(parser_collect_logs)
parser_collect_logs.set_defaults(
action=('collect-logs', handle_collect_logs_args))
+ elif sysv_args[0] == 'clean':
+ from cloudinit.cmd.clean import (
+ get_parser as clean_parser, handle_clean_args)
+ clean_parser(parser_clean)
+ parser_clean.set_defaults(
+ action=('clean', handle_clean_args))
+ elif sysv_args[0] == 'status':
+ from cloudinit.cmd.status import (
+ get_parser as status_parser, handle_status_args)
+ status_parser(parser_status)
+ parser_status.set_defaults(
+ action=('status', handle_status_args))
args = parser.parse_args(args=sysv_args)
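
For reference, the shape of the status.json 'v1' payload that status_wrapper maintains (values illustrative; each mode's entry is seeded from nullstatus above):

    status = {
        'v1': {
            'stage': 'init',     # current mode while running, else None
            'datasource': None,  # filled in once a datasource is found
            'init-local': {'errors': [], 'start': 123.45, 'finished': 123.46},
            'init': {'errors': [], 'start': 124.56, 'finished': None},
            'modules-init': {'errors': [], 'start': None, 'finished': None},
            'modules-config': {'errors': [], 'start': None, 'finished': None},
            'modules-final': {'errors': [], 'start': None, 'finished': None},
        }
    }
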
diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py
new file mode 100644
index 00000000..d7aaee9d
--- /dev/null
+++ b/cloudinit/cmd/status.py
@@ -0,0 +1,160 @@
+# Copyright (C) 2017 Canonical Ltd.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Define 'status' utility and handler as part of cloud-init commandline."""
+
+import argparse
+import os
+import sys
+from time import gmtime, strftime, sleep
+
+from cloudinit.distros import uses_systemd
+from cloudinit.stages import Init
+from cloudinit.util import get_cmdline, load_file, load_json
+
+CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled'
+
+# customer visible status messages
+STATUS_ENABLED_NOT_RUN = 'not run'
+STATUS_RUNNING = 'running'
+STATUS_DONE = 'done'
+STATUS_ERROR = 'error'
+STATUS_DISABLED = 'disabled'
+
+
+def get_parser(parser=None):
+ """Build or extend an arg parser for status utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ status subcommand which will be extended to support the args of
+ this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(
+ prog='status',
+ description='Report run status of cloud init')
+ parser.add_argument(
+ '-l', '--long', action='store_true', default=False,
+ help=('Report long format of statuses including run stage name and'
+ ' error messages'))
+ parser.add_argument(
+ '-w', '--wait', action='store_true', default=False,
+ help='Block waiting on cloud-init to complete')
+ return parser
+
+
+def handle_status_args(name, args):
+ """Handle calls to 'cloud-init status' as a subcommand."""
+ # Read configured paths
+ init = Init(ds_deps=[])
+ init.read_cfg()
+
+ status, status_detail, time = _get_status_details(init.paths)
+ if args.wait:
+ while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING):
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ status, status_detail, time = _get_status_details(init.paths)
+ sleep(0.25)
+ sys.stdout.write('\n')
+ if args.long:
+ print('status: {0}'.format(status))
+ if time:
+ print('time: {0}'.format(time))
+ print('detail:\n{0}'.format(status_detail))
+ else:
+ print('status: {0}'.format(status))
+ return 1 if status == STATUS_ERROR else 0
+
+
+def _is_cloudinit_disabled(disable_file, paths):
+ """Report whether cloud-init is disabled.
+
+ @param disable_file: The path to the cloud-init disable file.
+ @param paths: An initialized cloudinit.helpers.Paths object.
+ @returns: A tuple containing (bool, reason) about cloud-init's status and
+ why.
+ """
+ is_disabled = False
+ cmdline_parts = get_cmdline().split()
+ if not uses_systemd():
+ reason = 'Cloud-init enabled on sysvinit'
+ elif 'cloud-init=enabled' in cmdline_parts:
+ reason = 'Cloud-init enabled by kernel command line cloud-init=enabled'
+ elif os.path.exists(disable_file):
+ is_disabled = True
+ reason = 'Cloud-init disabled by {0}'.format(disable_file)
+ elif 'cloud-init=disabled' in cmdline_parts:
+ is_disabled = True
+ reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled'
+ elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')):
+ is_disabled = True
+ reason = 'Cloud-init disabled by cloud-init-generator'
+ else:
+ reason = 'Cloud-init enabled by systemd cloud-init-generator'
+ return (is_disabled, reason)
+
+
+def _get_status_details(paths):
+ """Return a 3-tuple of status, status_details and time of last event.
+
+ @param paths: An initialized cloudinit.helpers.paths object.
+
+ Values are obtained from parsing paths.run_dir/status.json.
+ """
+
+ status = STATUS_ENABLED_NOT_RUN
+ status_detail = ''
+ status_v1 = {}
+
+ status_file = os.path.join(paths.run_dir, 'status.json')
+
+ (is_disabled, reason) = _is_cloudinit_disabled(
+ CLOUDINIT_DISABLED_FILE, paths)
+ if is_disabled:
+ status = STATUS_DISABLED
+ status_detail = reason
+ if os.path.exists(status_file):
+ status_v1 = load_json(load_file(status_file)).get('v1', {})
+ errors = []
+ latest_event = 0
+ for key, value in sorted(status_v1.items()):
+ if key == 'stage':
+ if value:
+ status_detail = 'Running in stage: {0}'.format(value)
+ elif key == 'datasource':
+ status_detail = value
+ elif isinstance(value, dict):
+ errors.extend(value.get('errors', []))
+ start = value.get('start') or 0
+ finished = value.get('finished') or 0
+ if finished == 0 and start != 0:
+ status = STATUS_RUNNING
+ event_time = max(start, finished)
+ if event_time > latest_event:
+ latest_event = event_time
+ if errors:
+ status = STATUS_ERROR
+ status_detail = '\n'.join(errors)
+ elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0:
+ status = STATUS_DONE
+ if latest_event:
+ time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event))
+ else:
+ time = ''
+ return status, status_detail, time
+
+
+def main():
+ """Tool to report status of cloud-init."""
+ parser = get_parser()
+ sys.exit(handle_status_args('status', parser.parse_args()))
+
+
+if __name__ == '__main__':
+ main()
+
+# vi: ts=4 expandtab
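
A usage sketch of the status helpers (illustrative; obtains an initialized Paths object the same way handle_status_args does):

    from cloudinit.cmd import status
    from cloudinit.stages import Init

    init = Init(ds_deps=[])
    init.read_cfg()
    # st is one of: 'not run', 'running', 'done', 'error', 'disabled'
    st, detail, last_event = status._get_status_details(init.paths)
    print('status: {0} ({1})'.format(st, detail))
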
diff --git a/cloudinit/cmd/tests/__init__.py b/cloudinit/cmd/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/cmd/tests/__init__.py
diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py
new file mode 100644
index 00000000..6713af4f
--- /dev/null
+++ b/cloudinit/cmd/tests/test_clean.py
@@ -0,0 +1,176 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.cmd import clean
+from cloudinit.util import ensure_dir, sym_link, write_file
+from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock
+from collections import namedtuple
+import os
+from six import StringIO
+
+mypaths = namedtuple('MyPaths', 'cloud_dir')
+
+
+class TestClean(CiTestCase):
+
+ def setUp(self):
+ super(TestClean, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.artifact_dir = self.tmp_path('artifacts', self.new_root)
+ self.log1 = self.tmp_path('cloud-init.log', self.new_root)
+ self.log2 = self.tmp_path('cloud-init-output.log', self.new_root)
+
+ class FakeInit(object):
+ cfg = {'def_log_file': self.log1,
+ 'output': {'all': '|tee -a {0}'.format(self.log2)}}
+ paths = mypaths(cloud_dir=self.artifact_dir)
+
+ def __init__(self, ds_deps):
+ pass
+
+ def read_cfg(self):
+ pass
+
+ self.init_class = FakeInit
+
+ def test_remove_artifacts_removes_logs(self):
+ """remove_artifacts removes logs when remove_logs is True."""
+ write_file(self.log1, 'cloud-init-log')
+ write_file(self.log2, 'cloud-init-output-log')
+
+ self.assertFalse(
+ os.path.exists(self.artifact_dir), 'Unexpected artifacts dir')
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.clean',
+ {'Init': {'side_effect': self.init_class}},
+ clean.remove_artifacts, remove_logs=True)
+ self.assertFalse(os.path.exists(self.log1), 'Unexpected file')
+ self.assertFalse(os.path.exists(self.log2), 'Unexpected file')
+ self.assertEqual(0, retcode)
+
+ def test_remove_artifacts_preserves_logs(self):
+ """remove_artifacts leaves logs when remove_logs is False."""
+ write_file(self.log1, 'cloud-init-log')
+ write_file(self.log2, 'cloud-init-output-log')
+
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.clean',
+ {'Init': {'side_effect': self.init_class}},
+ clean.remove_artifacts, remove_logs=False)
+ self.assertTrue(os.path.exists(self.log1), 'Missing expected file')
+ self.assertTrue(os.path.exists(self.log2), 'Missing expected file')
+ self.assertEqual(0, retcode)
+
+ def test_remove_artifacts_removes_unlinks_symlinks(self):
+ """remove_artifacts cleans artifacts dir unlinking any symlinks."""
+ dir1 = os.path.join(self.artifact_dir, 'dir1')
+ ensure_dir(dir1)
+ symlink = os.path.join(self.artifact_dir, 'mylink')
+ sym_link(dir1, symlink)
+
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.clean',
+ {'Init': {'side_effect': self.init_class}},
+ clean.remove_artifacts, remove_logs=False)
+ self.assertEqual(0, retcode)
+ for path in (dir1, symlink):
+ self.assertFalse(
+ os.path.exists(path),
+ 'Unexpected {0} dir'.format(path))
+
+ def test_remove_artifacts_removes_artifacts_skipping_seed(self):
+ """remove_artifacts cleans artifacts dir with exception of seed dir."""
+ dirs = [
+ self.artifact_dir,
+ os.path.join(self.artifact_dir, 'seed'),
+ os.path.join(self.artifact_dir, 'dir1'),
+ os.path.join(self.artifact_dir, 'dir2')]
+ for _dir in dirs:
+ ensure_dir(_dir)
+
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.clean',
+ {'Init': {'side_effect': self.init_class}},
+ clean.remove_artifacts, remove_logs=False)
+ self.assertEqual(0, retcode)
+ for expected_dir in dirs[:2]:
+ self.assertTrue(
+ os.path.exists(expected_dir),
+ 'Missing {0} dir'.format(expected_dir))
+ for deleted_dir in dirs[2:]:
+ self.assertFalse(
+ os.path.exists(deleted_dir),
+ 'Unexpected {0} dir'.format(deleted_dir))
+
+ def test_remove_artifacts_removes_artifacts_removes_seed(self):
+ """remove_artifacts removes seed dir when remove_seed is True."""
+ dirs = [
+ self.artifact_dir,
+ os.path.join(self.artifact_dir, 'seed'),
+ os.path.join(self.artifact_dir, 'dir1'),
+ os.path.join(self.artifact_dir, 'dir2')]
+ for _dir in dirs:
+ ensure_dir(_dir)
+
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.clean',
+ {'Init': {'side_effect': self.init_class}},
+ clean.remove_artifacts, remove_logs=False, remove_seed=True)
+ self.assertEqual(0, retcode)
+ self.assertTrue(
+ os.path.exists(self.artifact_dir), 'Missing artifact dir')
+ for deleted_dir in dirs[1:]:
+ self.assertFalse(
+ os.path.exists(deleted_dir),
+ 'Unexpected {0} dir'.format(deleted_dir))
+
+ def test_remove_artifacts_returns_one_on_errors(self):
+ """remove_artifacts returns non-zero on failure and prints an error."""
+ ensure_dir(self.artifact_dir)
+ ensure_dir(os.path.join(self.artifact_dir, 'dir1'))
+
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.clean',
+ {'del_dir': {'side_effect': OSError('oops')},
+ 'Init': {'side_effect': self.init_class}},
+ clean.remove_artifacts, remove_logs=False)
+ self.assertEqual(1, retcode)
+ self.assertEqual(
+ 'ERROR: Could not remove dir1: oops\n', m_stderr.getvalue())
+
+ def test_handle_clean_args_reboots(self):
+        """handle_clean_args reboots the system when the reboot arg is set."""
+
+ called_cmds = []
+
+ def fake_subp(cmd, capture):
+ called_cmds.append((cmd, capture))
+ return '', ''
+
+ myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot')
+ cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True)
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.clean',
+ {'subp': {'side_effect': fake_subp},
+ 'Init': {'side_effect': self.init_class}},
+ clean.handle_clean_args, name='does not matter', args=cmdargs)
+ self.assertEqual(0, retcode)
+ self.assertEqual(
+ [(['shutdown', '-r', 'now'], False)], called_cmds)
+
+ def test_status_main(self):
+ '''clean.main can be run as a standalone script.'''
+ write_file(self.log1, 'cloud-init-log')
+ with self.assertRaises(SystemExit) as context_manager:
+ wrap_and_call(
+ 'cloudinit.cmd.clean',
+ {'Init': {'side_effect': self.init_class},
+ 'sys.argv': {'new': ['clean', '--logs']}},
+ clean.main)
+
+ self.assertRaisesCodeEqual(0, context_manager.exception.code)
+ self.assertFalse(
+ os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1))
+
+
+# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
new file mode 100644
index 00000000..a7c0a91a
--- /dev/null
+++ b/cloudinit/cmd/tests/test_status.py
@@ -0,0 +1,368 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from collections import namedtuple
+import os
+from six import StringIO
+from textwrap import dedent
+
+from cloudinit.atomic_helper import write_json
+from cloudinit.cmd import status
+from cloudinit.util import write_file
+from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock
+
+mypaths = namedtuple('MyPaths', 'run_dir')
+myargs = namedtuple('MyArgs', 'long wait')
+
+
+class TestStatus(CiTestCase):
+
+ def setUp(self):
+ super(TestStatus, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.status_file = self.tmp_path('status.json', self.new_root)
+ self.disable_file = self.tmp_path('cloudinit-disable', self.new_root)
+ self.paths = mypaths(run_dir=self.new_root)
+
+ class FakeInit(object):
+ paths = self.paths
+
+ def __init__(self, ds_deps):
+ pass
+
+ def read_cfg(self):
+ pass
+
+ self.init_class = FakeInit
+
+ def test__is_cloudinit_disabled_false_on_sysvinit(self):
+ '''When not in an environment using systemd, return False.'''
+ write_file(self.disable_file, '') # Create the ignored disable file
+ (is_disabled, reason) = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'uses_systemd': False},
+ status._is_cloudinit_disabled, self.disable_file, self.paths)
+ self.assertFalse(
+ is_disabled, 'expected enabled cloud-init on sysvinit')
+ self.assertEqual('Cloud-init enabled on sysvinit', reason)
+
+ def test__is_cloudinit_disabled_true_on_disable_file(self):
+ '''When using systemd and disable_file is present return disabled.'''
+ write_file(self.disable_file, '') # Create observed disable file
+ (is_disabled, reason) = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'uses_systemd': True},
+ status._is_cloudinit_disabled, self.disable_file, self.paths)
+ self.assertTrue(is_disabled, 'expected disabled cloud-init')
+ self.assertEqual(
+ 'Cloud-init disabled by {0}'.format(self.disable_file), reason)
+
+ def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self):
+ '''Not disabled when using systemd and enabled via commandline.'''
+ write_file(self.disable_file, '') # Create ignored disable file
+ (is_disabled, reason) = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'uses_systemd': True,
+ 'get_cmdline': 'something cloud-init=enabled else'},
+ status._is_cloudinit_disabled, self.disable_file, self.paths)
+ self.assertFalse(is_disabled, 'expected enabled cloud-init')
+ self.assertEqual(
+ 'Cloud-init enabled by kernel command line cloud-init=enabled',
+ reason)
+
+ def test__is_cloudinit_disabled_true_on_kernel_cmdline(self):
+ '''When using systemd and disable_file is present return disabled.'''
+ (is_disabled, reason) = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'uses_systemd': True,
+ 'get_cmdline': 'something cloud-init=disabled else'},
+ status._is_cloudinit_disabled, self.disable_file, self.paths)
+ self.assertTrue(is_disabled, 'expected disabled cloud-init')
+ self.assertEqual(
+ 'Cloud-init disabled by kernel parameter cloud-init=disabled',
+ reason)
+
+ def test__is_cloudinit_disabled_true_when_generator_disables(self):
+        '''When cloud-init-generator does not write an enabled file, return True.'''
+ enabled_file = os.path.join(self.paths.run_dir, 'enabled')
+ self.assertFalse(os.path.exists(enabled_file))
+ (is_disabled, reason) = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'uses_systemd': True,
+ 'get_cmdline': 'something'},
+ status._is_cloudinit_disabled, self.disable_file, self.paths)
+ self.assertTrue(is_disabled, 'expected disabled cloud-init')
+ self.assertEqual('Cloud-init disabled by cloud-init-generator', reason)
+
+ def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self):
+ '''Report enabled when systemd generator creates the enabled file.'''
+ enabled_file = os.path.join(self.paths.run_dir, 'enabled')
+ write_file(enabled_file, '')
+ (is_disabled, reason) = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'uses_systemd': True,
+ 'get_cmdline': 'something ignored'},
+ status._is_cloudinit_disabled, self.disable_file, self.paths)
+ self.assertFalse(is_disabled, 'expected enabled cloud-init')
+ self.assertEqual(
+ 'Cloud-init enabled by systemd cloud-init-generator', reason)
+
+ def test_status_returns_not_run(self):
+ '''When status.json does not exist yet, return 'not run'.'''
+ self.assertFalse(
+ os.path.exists(self.status_file), 'Unexpected status.json found')
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'_is_cloudinit_disabled': (False, ''),
+ 'Init': {'side_effect': self.init_class}},
+ status.handle_status_args, 'ignored', cmdargs)
+ self.assertEqual(0, retcode)
+ self.assertEqual('status: not run\n', m_stdout.getvalue())
+
+ def test_status_returns_disabled_long_on_presence_of_disable_file(self):
+ '''When cloudinit is disabled, return disabled reason.'''
+
+ checked_files = []
+
+ def fakeexists(filepath):
+ checked_files.append(filepath)
+ status_file = os.path.join(self.paths.run_dir, 'status.json')
+ return bool(not filepath == status_file)
+
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'os.path.exists': {'side_effect': fakeexists},
+ '_is_cloudinit_disabled': (True, 'disabled for some reason'),
+ 'Init': {'side_effect': self.init_class}},
+ status.handle_status_args, 'ignored', cmdargs)
+ self.assertEqual(0, retcode)
+ self.assertEqual(
+ [os.path.join(self.paths.run_dir, 'status.json')],
+ checked_files)
+ expected = dedent('''\
+ status: disabled
+ detail:
+ disabled for some reason
+ ''')
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_returns_running(self):
+ '''Report running when status exists with an unfinished stage.'''
+ write_json(self.status_file,
+ {'v1': {'init': {'start': 1, 'finished': None}}})
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'_is_cloudinit_disabled': (False, ''),
+ 'Init': {'side_effect': self.init_class}},
+ status.handle_status_args, 'ignored', cmdargs)
+ self.assertEqual(0, retcode)
+ self.assertEqual('status: running\n', m_stdout.getvalue())
+
+ def test_status_returns_done(self):
+ '''Reports done when stage is None and all stages are finished.'''
+ write_json(
+ self.status_file,
+ {'v1': {'stage': None,
+ 'datasource': (
+ 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
+ '[dsmode=net]'),
+ 'blah': {'finished': 123.456},
+ 'init': {'errors': [], 'start': 124.567,
+ 'finished': 125.678},
+ 'init-local': {'start': 123.45, 'finished': 123.46}}})
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'_is_cloudinit_disabled': (False, ''),
+ 'Init': {'side_effect': self.init_class}},
+ status.handle_status_args, 'ignored', cmdargs)
+ self.assertEqual(0, retcode)
+ self.assertEqual('status: done\n', m_stdout.getvalue())
+
+ def test_status_returns_done_long(self):
+ '''Long format of done status includes datasource info.'''
+ write_json(
+ self.status_file,
+ {'v1': {'stage': None,
+ 'datasource': (
+ 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
+ '[dsmode=net]'),
+ 'init': {'start': 124.567, 'finished': 125.678},
+ 'init-local': {'start': 123.45, 'finished': 123.46}}})
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'_is_cloudinit_disabled': (False, ''),
+ 'Init': {'side_effect': self.init_class}},
+ status.handle_status_args, 'ignored', cmdargs)
+ self.assertEqual(0, retcode)
+ expected = dedent('''\
+ status: done
+ time: Thu, 01 Jan 1970 00:02:05 +0000
+ detail:
+ DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net]
+ ''')
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_on_errors(self):
+ '''Reports error when any stage has errors.'''
+ write_json(
+ self.status_file,
+ {'v1': {'stage': None,
+ 'blah': {'errors': [], 'finished': 123.456},
+ 'init': {'errors': ['error1'], 'start': 124.567,
+ 'finished': 125.678},
+ 'init-local': {'start': 123.45, 'finished': 123.46}}})
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'_is_cloudinit_disabled': (False, ''),
+ 'Init': {'side_effect': self.init_class}},
+ status.handle_status_args, 'ignored', cmdargs)
+ self.assertEqual(1, retcode)
+ self.assertEqual('status: error\n', m_stdout.getvalue())
+
+ def test_status_on_errors_long(self):
+ '''Long format of error status includes all error messages.'''
+ write_json(
+ self.status_file,
+ {'v1': {'stage': None,
+ 'datasource': (
+ 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
+ '[dsmode=net]'),
+ 'init': {'errors': ['error1'], 'start': 124.567,
+ 'finished': 125.678},
+ 'init-local': {'errors': ['error2', 'error3'],
+ 'start': 123.45, 'finished': 123.46}}})
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'_is_cloudinit_disabled': (False, ''),
+ 'Init': {'side_effect': self.init_class}},
+ status.handle_status_args, 'ignored', cmdargs)
+ self.assertEqual(1, retcode)
+ expected = dedent('''\
+ status: error
+ time: Thu, 01 Jan 1970 00:02:05 +0000
+ detail:
+ error1
+ error2
+ error3
+ ''')
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_returns_running_long_format(self):
+ '''Long format reports the stage in which we are running.'''
+ write_json(
+ self.status_file,
+ {'v1': {'stage': 'init',
+ 'init': {'start': 124.456, 'finished': None},
+ 'init-local': {'start': 123.45, 'finished': 123.46}}})
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'_is_cloudinit_disabled': (False, ''),
+ 'Init': {'side_effect': self.init_class}},
+ status.handle_status_args, 'ignored', cmdargs)
+ self.assertEqual(0, retcode)
+ expected = dedent('''\
+ status: running
+ time: Thu, 01 Jan 1970 00:02:04 +0000
+ detail:
+ Running in stage: init
+ ''')
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_wait_blocks_until_done(self):
+ '''Specifying wait will poll every 1/4 second until done state.'''
+ running_json = {
+ 'v1': {'stage': 'init',
+ 'init': {'start': 124.456, 'finished': None},
+ 'init-local': {'start': 123.45, 'finished': 123.46}}}
+ done_json = {
+ 'v1': {'stage': None,
+ 'init': {'start': 124.456, 'finished': 125.678},
+ 'init-local': {'start': 123.45, 'finished': 123.46}}}
+
+ self.sleep_calls = 0
+
+ def fake_sleep(interval):
+ self.assertEqual(0.25, interval)
+ self.sleep_calls += 1
+ if self.sleep_calls == 2:
+ write_json(self.status_file, running_json)
+ elif self.sleep_calls == 3:
+ write_json(self.status_file, done_json)
+
+ cmdargs = myargs(long=False, wait=True)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'sleep': {'side_effect': fake_sleep},
+ '_is_cloudinit_disabled': (False, ''),
+ 'Init': {'side_effect': self.init_class}},
+ status.handle_status_args, 'ignored', cmdargs)
+ self.assertEqual(0, retcode)
+ self.assertEqual(4, self.sleep_calls)
+ self.assertEqual('....\nstatus: done\n', m_stdout.getvalue())
+
+ def test_status_wait_blocks_until_error(self):
+ '''Specifying wait will poll every 1/4 second until error state.'''
+ running_json = {
+ 'v1': {'stage': 'init',
+ 'init': {'start': 124.456, 'finished': None},
+ 'init-local': {'start': 123.45, 'finished': 123.46}}}
+ error_json = {
+ 'v1': {'stage': None,
+ 'init': {'errors': ['error1'], 'start': 124.456,
+ 'finished': 125.678},
+ 'init-local': {'start': 123.45, 'finished': 123.46}}}
+
+ self.sleep_calls = 0
+
+ def fake_sleep(interval):
+ self.assertEqual(0.25, interval)
+ self.sleep_calls += 1
+ if self.sleep_calls == 2:
+ write_json(self.status_file, running_json)
+ elif self.sleep_calls == 3:
+ write_json(self.status_file, error_json)
+
+ cmdargs = myargs(long=False, wait=True)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'sleep': {'side_effect': fake_sleep},
+ '_is_cloudinit_disabled': (False, ''),
+ 'Init': {'side_effect': self.init_class}},
+ status.handle_status_args, 'ignored', cmdargs)
+ self.assertEqual(1, retcode)
+ self.assertEqual(4, self.sleep_calls)
+ self.assertEqual('....\nstatus: error\n', m_stdout.getvalue())
+
+ def test_status_main(self):
+ '''status.main can be run as a standalone script.'''
+ write_json(self.status_file,
+ {'v1': {'init': {'start': 1, 'finished': None}}})
+ with self.assertRaises(SystemExit) as context_manager:
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ wrap_and_call(
+ 'cloudinit.cmd.status',
+ {'sys.argv': {'new': ['status']},
+ '_is_cloudinit_disabled': (False, ''),
+ 'Init': {'side_effect': self.init_class}},
+ status.main)
+ self.assertRaisesCodeEqual(0, context_manager.exception.code)
+ self.assertEqual('status: running\n', m_stdout.getvalue())
+
+# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 177cbcf7..5b9cbca0 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -275,8 +275,9 @@ def handle(name, ocfg, cloud, log, _):
cfg = ocfg.get('apt', {})
if not isinstance(cfg, dict):
- raise ValueError("Expected dictionary for 'apt' config, found %s",
- type(cfg))
+ raise ValueError(
+ "Expected dictionary for 'apt' config, found {config_type}".format(
+ config_type=type(cfg)))
apply_debconf_selections(cfg, target)
apply_apt(cfg, cloud, target)
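
The bug fixed here (and in several hunks below) is worth spelling out: unlike LOG.debug(), exception constructors do not %-interpolate their arguments, so the extra argument just rides along in args and the message prints as a tuple. A small demonstration (illustrative):

    # Broken: '%s' is never substituted; str() shows the raw args tuple:
    print(str(ValueError("Expected dict, found %s", dict)))
    # -> ('Expected dict, found %s', <class 'dict'>)

    # Fixed: format the message before constructing the exception:
    print(str(ValueError("Expected dict, found {0}".format(dict))))
    # -> Expected dict, found <class 'dict'>
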
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index c2b83aea..c3e8c484 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -788,7 +788,8 @@ def mkpart(device, definition):
# This prevents you from overwriting the device
LOG.debug("Checking if device %s is a valid device", device)
if not is_device_valid(device):
- raise Exception("Device %s is not a disk device!", device)
+ raise Exception(
+ 'Device {device} is not a disk device!'.format(device=device))
# Remove the partition table entries
if isinstance(layout, str) and layout.lower() == "remove":
@@ -945,8 +946,9 @@ def mkfs(fs_cfg):
# Check that we can create the FS
if not (fs_type or fs_cmd):
- raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd "
- "must be set.", label)
+ raise Exception(
+ "No way to create filesystem '{label}'. fs_type or fs_cmd "
+ "must be set.".format(label=label))
# Create the commands
shell = False
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 8f9f1abd..eaf1e940 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -94,10 +94,10 @@ def handle(_name, cfg, cloud, log, _args):
ls_cloudcfg = cfg.get("landscape", {})
if not isinstance(ls_cloudcfg, (dict)):
- raise RuntimeError(("'landscape' key existed in config,"
- " but not a dictionary type,"
- " is a %s instead"),
- type_utils.obj_name(ls_cloudcfg))
+ raise RuntimeError(
+ "'landscape' key existed in config, but not a dictionary type,"
+ " is a {_type} instead".format(
+ _type=type_utils.obj_name(ls_cloudcfg)))
if not ls_cloudcfg:
return
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index f50bcb35..cbd0237d 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -106,9 +106,9 @@ def handle(name, cfg, cloud, log, _args):
# TODO drop this when validate_cloudconfig_schema is strict=True
if not isinstance(ntp_cfg, (dict)):
- raise RuntimeError(("'ntp' key existed in config,"
- " but not a dictionary type,"
- " is a %s %instead"), type_utils.obj_name(ntp_cfg))
+ raise RuntimeError(
+ "'ntp' key existed in config, but not a dictionary type,"
+ " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
validate_cloudconfig_schema(cfg, schema)
if ntp_installable():
@@ -206,8 +206,8 @@ def write_ntp_config_template(cfg, cloud, path, template=None):
if not template_fn:
template_fn = cloud.get_template_filename('ntp.conf')
if not template_fn:
- raise RuntimeError(("No template found, "
- "not rendering %s"), path)
+ raise RuntimeError(
+ 'No template found, not rendering {path}'.format(path=path))
templater.render_to_file(template_fn, path, params)
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index eba58b02..4da3a588 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -194,6 +194,7 @@ def doexit(sysexit):
def execmd(exe_args, output=None, data_in=None):
+ ret = 1
try:
proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
stdout=output, stderr=subprocess.STDOUT)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 0d282e63..cec22bb7 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -59,7 +59,17 @@ __doc__ = get_schema_doc(schema) # Supplement python help()
def _resize_btrfs(mount_point, devpth):
- return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
+ # If "/" is ro resize will fail. However it should be allowed since resize
+ # makes everything bigger and subvolumes that are not ro will benefit.
+ # Use a subvolume that is not ro to trick the resize operation to do the
+    # "right" thing. The use of ".snapshots" is specific to "snapper"; a
+    # generic solution would be to walk the subvolumes and find a rw mounted
+    # subvolume.
+ if (not util.mount_is_read_write(mount_point) and
+ os.path.isdir("%s/.snapshots" % mount_point)):
+ return ('btrfs', 'filesystem', 'resize', 'max',
+ '%s/.snapshots' % mount_point)
+ else:
+ return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
def _resize_ext(mount_point, devpth):
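
The two branches above return the same btrfs command, differing only in the target path; for example (illustrative, snapper layout assumed):

    from cloudinit.config.cc_resizefs import _resize_btrfs

    cmd = _resize_btrfs('/', '/dev/sda1')
    # ('btrfs', 'filesystem', 'resize', 'max', '/') on a rw root, or
    # ('btrfs', 'filesystem', 'resize', 'max', '/.snapshots') on a ro root
    # that has a writable .snapshots subvolume.
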
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index a9d21e78..530808ce 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -276,9 +276,8 @@ class SubscriptionManager(object):
cmd = ['attach', '--auto']
try:
return_out, return_err = self._sub_man_cli(cmd)
- except util.ProcessExecutionError:
- self.log_warn("Auto-attach failed with: "
- "{0}]".format(return_err.strip()))
+ except util.ProcessExecutionError as e:
+ self.log_warn("Auto-attach failed with: {0}".format(e))
return False
for line in return_out.split("\n"):
if line is not "":
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 50ff9e35..af08788c 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -20,15 +20,15 @@ which defaults to ``20-cloud-config.conf``. The rsyslog config directory to
write config files to may be specified in ``config_dir``, which defaults to
``/etc/rsyslog.d``.
-A list of configurations for for rsyslog can be specified under the ``configs``
-key in the ``rsyslog`` config. Each entry in ``configs`` is either a string or
-a dictionary. Each config entry contains a configuration string and a file to
+A list of configurations for rsyslog can be specified under the ``configs`` key
+in the ``rsyslog`` config. Each entry in ``configs`` is either a string or a
+dictionary. Each config entry contains a configuration string and a file to
write it to. For config entries that are a dictionary, ``filename`` sets the
target filename and ``content`` specifies the config string to write. For
config entries that are only a string, the string is used as the config string
to write. If the filename to write the config to is not specified, the value of
-the ``config_filename`` key is used. A file with the selected filename will
-be written inside the directory specified by ``config_dir``.
+the ``config_filename`` key is used. A file with the selected filename will be
+written inside the directory specified by ``config_dir``.
The command to use to reload the rsyslog service after the config has been
updated can be specified in ``service_reload_command``. If this is set to
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index e76b9c09..65f6e777 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -95,7 +95,8 @@ def handle_random_seed_command(command, required, env=None):
cmd = command[0]
if not util.which(cmd):
if required:
- raise ValueError("command '%s' not found but required=true", cmd)
+ raise ValueError(
+ "command '{cmd}' not found but required=true".format(cmd=cmd))
else:
LOG.debug("command '%s' not found for seed_command", cmd)
return
diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py
index fe0cc73e..e82c0811 100644
--- a/cloudinit/config/cc_snap_config.py
+++ b/cloudinit/config/cc_snap_config.py
@@ -87,7 +87,9 @@ def add_assertions(assertions=None):
assertions = []
if not isinstance(assertions, list):
- raise ValueError('assertion parameter was not a list: %s', assertions)
+ raise ValueError(
+ 'assertion parameter was not a list: {assertions}'.format(
+ assertions=assertions))
snap_cmd = [SNAPPY_CMD, 'ack']
combined = "\n".join(assertions)
@@ -115,7 +117,8 @@ def add_snap_user(cfg=None):
cfg = {}
if not isinstance(cfg, dict):
- raise ValueError('configuration parameter was not a dict: %s', cfg)
+ raise ValueError(
+ 'configuration parameter was not a dict: {cfg}'.format(cfg=cfg))
snapuser = cfg.get('email', None)
if not snapuser:
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index d5becd12..55260eae 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -45,6 +45,10 @@ OSFAMILIES = {
LOG = logging.getLogger(__name__)
+# This is a best guess regex, based on current EC2 AZs on 2017-12-11.
+# It could break when Amazon adds new regions and new AZs.
+_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
+
@six.add_metaclass(abc.ABCMeta)
class Distro(object):
@@ -102,11 +106,8 @@ class Distro(object):
self._apply_hostname(writeable_hostname)
def uses_systemd(self):
- try:
- res = os.lstat('/run/systemd/system')
- return stat.S_ISDIR(res.st_mode)
- except Exception:
- return False
+ """Wrapper to report whether this distro uses systemd or sysvinit."""
+ return uses_systemd()
@abc.abstractmethod
def package_command(self, cmd, args=None, pkgs=None):
@@ -686,18 +687,13 @@ def _get_package_mirror_info(mirror_info, data_source=None,
if not mirror_info:
mirror_info = {}
- # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
- # the region is us-east-1. so region = az[0:-1]
- directions_re = '|'.join([
- 'central', 'east', 'north', 'northeast', 'northwest',
- 'south', 'southeast', 'southwest', 'west'])
- ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re)
-
subst = {}
if data_source and data_source.availability_zone:
subst['availability_zone'] = data_source.availability_zone
- if re.match(ec2_az_re, data_source.availability_zone):
+ # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
+ # the region is us-east-1. so region = az[0:-1]
+ if _EC2_AZ_RE.match(data_source.availability_zone):
subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1]
if data_source and data_source.region:
@@ -761,4 +757,13 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
util.copy(tz_file, tz_local)
return
+
+def uses_systemd():
+ try:
+ res = os.lstat('/run/systemd/system')
+ return stat.S_ISDIR(res.st_mode)
+ except Exception:
+ return False
+
+
# vi: ts=4 expandtab
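
The hoisted regex in action (a worked example; the AZ-to-region rule is the one documented in the inline comment above):

    import re
    _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')

    az = 'us-east-1b'
    assert _EC2_AZ_RE.match(az)
    region = az[0:-1]  # 'us-east-1': the region is the AZ minus its letter
    # A GCE-style zone such as 'us-east1-b' does not match, so no
    # ec2_region substitution is made for it.
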
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index bad112fe..aa468bca 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -116,6 +116,7 @@ class Distro(distros.Distro):
(out, err) = util.subp(['ifconfig', '-a'])
ifconfigoutput = [x for x in (out.strip()).splitlines()
if len(x.split()) > 0]
+ bsddev = 'NOT_FOUND'
for line in ifconfigoutput:
m = re.match('^\w+', line)
if m:
@@ -347,15 +348,9 @@ class Distro(distros.Distro):
bymac[Distro.get_interface_mac(n)] = {
'name': n, 'up': self.is_up(n), 'downable': None}
+ nics_with_addresses = set()
if check_downable:
- nics_with_addresses = set()
- ipv6 = self.get_ipv6()
- ipv4 = self.get_ipv4()
- for bytes_out in (ipv6, ipv4):
- for i in ipv6:
- nics_with_addresses.update(i)
- for i in ipv4:
- nics_with_addresses.update(i)
+ nics_with_addresses = set(self.get_ipv4() + self.get_ipv6())
for d in bymac.values():
d['downable'] = (d['up'] is False or
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 723d6bd6..d6c61e4c 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -1,6 +1,8 @@
# Copyright (C) 2012 Yahoo! Inc.
+# Copyright (C) 2014 Amazon.com, Inc. or its affiliates.
#
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Andrew Jorgensen <ajorgens@amazon.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -164,14 +166,11 @@ def get_instance_userdata(api_version='latest',
return user_data
-def get_instance_metadata(api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None):
- md_url = url_helper.combine_url(metadata_address, api_version)
- # Note, 'meta-data' explicitly has trailing /.
- # this is required for CloudStack (LP: #1356855)
- md_url = url_helper.combine_url(md_url, 'meta-data/')
+def _get_instance_metadata(tree, api_version='latest',
+ metadata_address='http://169.254.169.254',
+ ssl_details=None, timeout=5, retries=5,
+ leaf_decoder=None):
+ md_url = url_helper.combine_url(metadata_address, api_version, tree)
caller = functools.partial(util.read_file_or_url,
ssl_details=ssl_details, timeout=timeout,
retries=retries)
@@ -189,7 +188,29 @@ def get_instance_metadata(api_version='latest',
md = {}
return md
except Exception:
- util.logexc(LOG, "Failed fetching metadata from url %s", md_url)
+ util.logexc(LOG, "Failed fetching %s from url %s", tree, md_url)
return {}
+
+def get_instance_metadata(api_version='latest',
+ metadata_address='http://169.254.169.254',
+ ssl_details=None, timeout=5, retries=5,
+ leaf_decoder=None):
+ # Note, 'meta-data' explicitly has trailing /.
+ # this is required for CloudStack (LP: #1356855)
+ return _get_instance_metadata(tree='meta-data/', api_version=api_version,
+ metadata_address=metadata_address,
+ ssl_details=ssl_details, timeout=timeout,
+ retries=retries, leaf_decoder=leaf_decoder)
+
+
+def get_instance_identity(api_version='latest',
+ metadata_address='http://169.254.169.254',
+ ssl_details=None, timeout=5, retries=5,
+ leaf_decoder=None):
+ return _get_instance_metadata(tree='dynamic/instance-identity',
+ api_version=api_version,
+ metadata_address=metadata_address,
+ ssl_details=ssl_details, timeout=timeout,
+ retries=retries, leaf_decoder=leaf_decoder)
# vi: ts=4 expandtab
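
Both public helpers now delegate to _get_instance_metadata and differ only in the tree they crawl; a sketch of equivalent calls (only meaningful on an instance that can reach the default IMDS address):

    from cloudinit import ec2_utils

    metadata = ec2_utils.get_instance_metadata()  # crawls meta-data/
    identity = ec2_utils.get_instance_identity()  # crawls dynamic/instance-identity
    # On EC2, identity typically carries a 'document' leaf with region,
    # instanceId and similar fields.
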
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index a1b0db10..c015e793 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -18,7 +18,7 @@ SYS_CLASS_NET = "/sys/class/net/"
DEFAULT_PRIMARY_INTERFACE = 'eth0'
-def _natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
+def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
"""Sorting for Humans: natural sort order. Can be use as the key to sort
functions.
This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as
@@ -224,7 +224,7 @@ def find_fallback_nic(blacklist_drivers=None):
# if eth0 exists use it above anything else, otherwise get the interface
# that we can read 'first' (using the sorted definition of first).
- names = list(sorted(potential_interfaces, key=_natural_sort_key))
+ names = list(sorted(potential_interfaces, key=natural_sort_key))
if DEFAULT_PRIMARY_INTERFACE in names:
names.remove(DEFAULT_PRIMARY_INTERFACE)
names.insert(0, DEFAULT_PRIMARY_INTERFACE)
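
natural_sort_key is now public; a quick check of the ordering its docstring promises (runnable as-is):

    from cloudinit.net import natural_sort_key

    nics = ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0']
    print(sorted(nics, key=natural_sort_key))
    # -> ['ens0', 'ens3', 'ens8', 'ens10', 'ens12', 'eth0']
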
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 38b27a52..7b2cc9db 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -116,10 +116,11 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None):
prev = names[name]['entry']
if prev.get('mac_address') != entry.get('mac_address'):
raise ValueError(
- "device '%s' was defined multiple times (%s)"
- " but had differing mac addresses: %s -> %s.",
- (name, ' '.join(names[name]['files']),
- prev.get('mac_address'), entry.get('mac_address')))
+ "device '{name}' was defined multiple times ({files})"
+ " but had differing mac addresses: {old} -> {new}.".format(
+ name=name, files=' '.join(names[name]['files']),
+ old=prev.get('mac_address'),
+ new=entry.get('mac_address')))
prev['subnets'].extend(entry['subnets'])
names[name]['files'].append(cfg_file)
else:
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 875a4609..087c0c03 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -10,7 +10,9 @@ import os
import re
import signal
-from cloudinit.net import find_fallback_nic, get_devicelist
+from cloudinit.net import (
+ EphemeralIPv4Network, find_fallback_nic, get_devicelist)
+from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip
from cloudinit import temp_utils
from cloudinit import util
from six import StringIO
@@ -29,6 +31,45 @@ class InvalidDHCPLeaseFileError(Exception):
pass
+class NoDHCPLeaseError(Exception):
+ """Raised when unable to get a DHCP lease."""
+ pass
+
+
+class EphemeralDHCPv4(object):
+ def __init__(self, iface=None):
+ self.iface = iface
+ self._ephipv4 = None
+
+ def __enter__(self):
+ try:
+ leases = maybe_perform_dhcp_discovery(self.iface)
+ except InvalidDHCPLeaseFileError:
+ raise NoDHCPLeaseError()
+ if not leases:
+ raise NoDHCPLeaseError()
+ lease = leases[-1]
+ LOG.debug("Received dhcp lease on %s for %s/%s",
+ lease['interface'], lease['fixed-address'],
+ lease['subnet-mask'])
+ nmap = {'interface': 'interface', 'ip': 'fixed-address',
+ 'prefix_or_mask': 'subnet-mask',
+ 'broadcast': 'broadcast-address',
+ 'router': 'routers'}
+ kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()])
+ if not kwargs['broadcast']:
+ kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip'])
+ ephipv4 = EphemeralIPv4Network(**kwargs)
+ ephipv4.__enter__()
+ self._ephipv4 = ephipv4
+ return lease
+
+ def __exit__(self, excp_type, excp_value, excp_traceback):
+ if not self._ephipv4:
+ return
+ self._ephipv4.__exit__(excp_type, excp_value, excp_traceback)
+
+
def maybe_perform_dhcp_discovery(nic=None):
"""Perform dhcp discovery if nic valid and dhclient command exists.
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index e9e2cf4e..fe667d88 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -474,8 +474,9 @@ class NetworkStateInterpreter(object):
elif bridge_stp in ['off', '0', 0]:
bridge_stp = False
else:
- raise ValueError("Cannot convert bridge_stp value"
- "(%s) to boolean", bridge_stp)
+ raise ValueError(
+ 'Cannot convert bridge_stp value ({stp}) to'
+ ' boolean'.format(stp=bridge_stp))
iface.update({'bridge_stp': bridge_stp})
interfaces.update({iface['name']: iface})
@@ -692,7 +693,8 @@ class NetworkStateInterpreter(object):
elif cmd_type == "bond":
self.handle_bond(v1_cmd)
else:
- raise ValueError('Unknown command type: %s', cmd_type)
+ raise ValueError('Unknown command type: {cmd_type}'.format(
+ cmd_type=cmd_type))
def _v2_to_v1_ipcfg(self, cfg):
"""Common ipconfig extraction from v2 to v1 subnets array."""
@@ -959,4 +961,16 @@ def mask_to_net_prefix(mask):
return ipv4_mask_to_net_prefix(mask)
+def mask_and_ipv4_to_bcast_addr(mask, ip):
+ """Calculate the broadcast address from the subnet mask and ip addr.
+
+ Supports ipv4 only."""
+ ip_bin = int(''.join([bin(int(x) + 256)[3:] for x in ip.split('.')]), 2)
+ mask_dec = ipv4_mask_to_net_prefix(mask)
+ bcast_bin = ip_bin | (2**(32 - mask_dec) - 1)
+ bcast_str = '.'.join([str(bcast_bin >> (i << 3) & 0xFF)
+ for i in range(4)[::-1]])
+ return bcast_str
+
+
# vi: ts=4 expandtab
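
A worked example of the new broadcast helper (the host bits of the address are set to all ones):

    from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr

    print(mask_and_ipv4_to_bcast_addr('255.255.255.0', '192.168.1.10'))
    # -> 192.168.1.255 (24-bit prefix, so the last octet becomes 255)
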
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 43a7e42c..7ac8288d 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -11,6 +11,7 @@ ALIYUN_PRODUCT = "Alibaba Cloud ECS"
class DataSourceAliYun(EC2.DataSourceEc2):
+ dsname = 'AliYun'
metadata_urls = ['http://100.100.100.200']
# The minimum supported metadata_version from the ec2 metadata apis
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index c78ad9eb..e1d0055b 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -74,6 +74,9 @@ def read_user_data_callback(mount_dir):
class DataSourceAltCloud(sources.DataSource):
+
+ dsname = 'AltCloud'
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
@@ -112,7 +115,7 @@ class DataSourceAltCloud(sources.DataSource):
return 'UNKNOWN'
- def get_data(self):
+ def _get_data(self):
'''
Description:
User Data is passed to the launching instance which
@@ -142,7 +145,7 @@ class DataSourceAltCloud(sources.DataSource):
else:
cloud_type = self.get_cloud_type()
- LOG.debug('cloud_type: ' + str(cloud_type))
+ LOG.debug('cloud_type: %s', str(cloud_type))
if 'RHEV' in cloud_type:
if self.user_data_rhevm():
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 14367e9c..4bcbf3a4 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -11,13 +11,16 @@ from functools import partial
import os
import os.path
import re
+from time import time
from xml.dom import minidom
import xml.etree.ElementTree as ET
from cloudinit import log as logging
from cloudinit import net
+from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
from cloudinit.sources.helpers.azure import get_metadata_from_fabric
+from cloudinit.url_helper import readurl, wait_for_url, UrlError
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -26,10 +29,16 @@ DS_NAME = 'Azure'
DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
AGENT_START = ['service', 'walinuxagent', 'start']
AGENT_START_BUILTIN = "__builtin__"
-BOUNCE_COMMAND = [
+BOUNCE_COMMAND_IFUP = [
'sh', '-xc',
"i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"
]
+BOUNCE_COMMAND_FREEBSD = [
+ 'sh', '-xc',
+ ("i=$interface; x=0; ifconfig down $i || x=$?; "
+ "ifconfig up $i || x=$?; exit $x")
+]
+
# azure systems will always have a resource disk, and 66-azure-ephemeral.rules
# ensures that it gets linked to this path.
RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource'
@@ -38,6 +47,9 @@ LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases'
DEFAULT_FS = 'ext4'
# DMI chassis-asset-tag is set static for all azure instances
AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
+REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
+IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
+IMDS_RETRIES = 5
def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
@@ -177,11 +189,6 @@ if util.is_FreeBSD():
RESOURCE_DISK_PATH = "/dev/" + res_disk
else:
LOG.debug("resource disk is None")
- BOUNCE_COMMAND = [
- 'sh', '-xc',
- ("i=$interface; x=0; ifconfig down $i || x=$?; "
- "ifconfig up $i || x=$?; exit $x")
- ]
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START_BUILTIN,
@@ -190,7 +197,7 @@ BUILTIN_DS_CONFIG = {
'hostname_bounce': {
'interface': DEFAULT_PRIMARY_NIC,
'policy': True,
- 'command': BOUNCE_COMMAND,
+ 'command': 'builtin',
'hostname_command': 'hostname',
},
'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
@@ -246,6 +253,8 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
class DataSourceAzure(sources.DataSource):
+
+ dsname = 'Azure'
_negotiated = False
def __init__(self, sys_cfg, distro, paths):
@@ -273,19 +282,20 @@ class DataSourceAzure(sources.DataSource):
with temporary_hostname(azure_hostname, self.ds_cfg,
hostname_command=hostname_command) \
- as previous_hostname:
- if (previous_hostname is not None and
+ as previous_hn:
+ if (previous_hn is not None and
util.is_true(self.ds_cfg.get('set_hostname'))):
cfg = self.ds_cfg['hostname_bounce']
# "Bouncing" the network
try:
- perform_hostname_bounce(hostname=azure_hostname,
- cfg=cfg,
- prev_hostname=previous_hostname)
+ return perform_hostname_bounce(hostname=azure_hostname,
+ cfg=cfg,
+ prev_hostname=previous_hn)
except Exception as e:
LOG.warning("Failed publishing hostname: %s", e)
util.logexc(LOG, "handling set_hostname failed")
+ return False
def get_metadata_from_agent(self):
temp_hostname = self.metadata.get('local-hostname')
@@ -330,7 +340,7 @@ class DataSourceAzure(sources.DataSource):
metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
return metadata
- def get_data(self):
+ def _get_data(self):
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
@@ -342,15 +352,20 @@ class DataSourceAzure(sources.DataSource):
ddir = self.ds_cfg['data_dir']
candidates = [self.seed_dir]
+ if os.path.isfile(REPROVISION_MARKER_FILE):
+ candidates.insert(0, "IMDS")
candidates.extend(list_possible_azure_ds_devs())
if ddir:
candidates.append(ddir)
found = None
-
+ reprovision = False
for cdev in candidates:
try:
- if cdev.startswith("/dev/"):
+ if cdev == "IMDS":
+ ret = None
+ reprovision = True
+ elif cdev.startswith("/dev/"):
if util.is_FreeBSD():
ret = util.mount_cb(cdev, load_azure_ds_dir,
mtype="udf", sync=False)
@@ -367,6 +382,8 @@ class DataSourceAzure(sources.DataSource):
LOG.warning("%s was not mountable", cdev)
continue
+ if reprovision or self._should_reprovision(ret):
+ ret = self._reprovision()
(md, self.userdata_raw, cfg, files) = ret
self.seed = cdev
self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
@@ -425,6 +442,83 @@ class DataSourceAzure(sources.DataSource):
LOG.debug("negotiating already done for %s",
self.get_instance_id())
+ def _poll_imds(self, report_ready=True):
+ """Poll IMDS for the new provisioning data until we get a valid
+ response. Then return the returned JSON object."""
+ url = IMDS_URL + "?api-version=2017-04-02"
+ headers = {"Metadata": "true"}
+ LOG.debug("Start polling IMDS")
+
+ def sleep_cb(response, loop_n):
+ return 1
+
+ def exception_cb(msg, exception):
+ if isinstance(exception, UrlError) and exception.code == 404:
+ return
+ LOG.warning("Exception during polling. Will try DHCP.",
+ exc_info=True)
+
+ # If we get an exception while trying to call IMDS, we
+ # call DHCP and setup the ephemeral network to acquire the new IP.
+ raise exception
+
+ need_report = report_ready
+ for i in range(IMDS_RETRIES):
+ try:
+ with EphemeralDHCPv4() as lease:
+ if need_report:
+ self._report_ready(lease=lease)
+ need_report = False
+ wait_for_url([url], max_wait=None, timeout=60,
+ status_cb=LOG.info,
+ headers_cb=lambda url: headers, sleep_time=1,
+ exception_cb=exception_cb,
+ sleep_time_cb=sleep_cb)
+ return str(readurl(url, headers=headers))
+ except Exception:
+ LOG.debug("Exception during polling-retrying dhcp" +
+ " %d more time(s).", (IMDS_RETRIES - i),
+ exc_info=True)
+
+ def _report_ready(self, lease):
+ """Tells the fabric provisioning has completed
+ before we go into our polling loop."""
+ try:
+ get_metadata_from_fabric(None, lease['unknown-245'])
+ except Exception:
+ LOG.warning(
+ "Error communicating with Azure fabric; you may experience "
+ "connectivity issues.", exc_info=True)
+
+ def _should_reprovision(self, ret):
+ """Whether or not we should poll IMDS for reprovisioning data.
+ Also sets a marker file to poll IMDS.
+
+ The marker file is used for the following scenario: the VM boots into
+ this polling loop, which is expected to run indefinitely until
+ the VM is picked. If for whatever reason the platform moves us to a
+ new host (for instance a hardware issue), we need to keep polling.
+ However, since the VM reports ready to the Fabric, we will not attach
+ the ISO, thus cloud-init needs to have a way of knowing that it should
+ jump back into the polling loop in order to retrieve the ovf_env."""
+ if not ret:
+ return False
+ (md, self.userdata_raw, cfg, files) = ret
+ path = REPROVISION_MARKER_FILE
+ if (cfg.get('PreprovisionedVm') is True or
+ os.path.isfile(path)):
+ if not os.path.isfile(path):
+ LOG.info("Creating a marker file to poll imds")
+ util.write_file(path, "%s: %s\n" % (os.getpid(), time()))
+ return True
+ return False
+
+ def _reprovision(self):
+ """Initiate the reprovisioning workflow."""
+ contents = self._poll_imds()
+ md, ud, cfg = read_azure_ovf(contents)
+ return (md, ud, cfg, {'ovf-env.xml': contents})
+
def _negotiate(self):
"""Negotiate with fabric and return data from it.
@@ -450,7 +544,7 @@ class DataSourceAzure(sources.DataSource):
"Error communicating with Azure fabric; You may experience."
"connectivity issues.", exc_info=True)
return False
-
+ util.del_file(REPROVISION_MARKER_FILE)
return fabric_data
def activate(self, cfg, is_new_instance):
@@ -580,18 +674,19 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
if os.path.exists(sempath):
try:
os.unlink(sempath)
- LOG.debug(bmsg + " removed.")
+ LOG.debug('%s removed.', bmsg)
except Exception as e:
# python3 throws FileNotFoundError, python2 throws OSError
- LOG.warning(bmsg + ": remove failed! (%s)", e)
+ LOG.warning('%s: remove failed! (%s)', bmsg, e)
else:
- LOG.debug(bmsg + " did not exist.")
+ LOG.debug('%s did not exist.', bmsg)
return
def perform_hostname_bounce(hostname, cfg, prev_hostname):
# set the hostname to 'hostname' if it is not already set to that.
# then, if policy is not off, bounce the interface using command
+ # Returns True if the network was bounced, False otherwise.
command = cfg['command']
interface = cfg['interface']
policy = cfg['policy']
@@ -604,8 +699,15 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
env['old_hostname'] = prev_hostname
if command == "builtin":
- command = BOUNCE_COMMAND
-
+ if util.is_FreeBSD():
+ command = BOUNCE_COMMAND_FREEBSD
+ elif util.which('ifup'):
+ command = BOUNCE_COMMAND_IFUP
+ else:
+ LOG.debug(
+ "Skipping network bounce: ifupdown utils aren't present.")
+ # Don't bounce as networkd handles hostname DDNS updates
+ return False
LOG.debug("pubhname: publishing hostname [%s]", msg)
shell = not isinstance(command, (list, tuple))
# capture=False, see comments in bug 1202758 and bug 1206164.
@@ -613,6 +715,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
get_uptime=True, func=util.subp,
kwargs={'args': command, 'shell': shell, 'capture': False,
'env': env})
+ return True
def crtfile_to_pubkey(fname, data=None):
@@ -829,9 +932,35 @@ def read_azure_ovf(contents):
if 'ssh_pwauth' not in cfg and password:
cfg['ssh_pwauth'] = True
+ cfg['PreprovisionedVm'] = _extract_preprovisioned_vm_setting(dom)
+
return (md, ud, cfg)
+def _extract_preprovisioned_vm_setting(dom):
+ """Read the preprovision flag from the ovf. It should not
+ exist unless true."""
+ platform_settings_section = find_child(
+ dom.documentElement,
+ lambda n: n.localName == "PlatformSettingsSection")
+ if not platform_settings_section or len(platform_settings_section) == 0:
+ LOG.debug("PlatformSettingsSection not found")
+ return False
+ platform_settings = find_child(
+ platform_settings_section[0],
+ lambda n: n.localName == "PlatformSettings")
+ if not platform_settings or len(platform_settings) == 0:
+ LOG.debug("PlatformSettings not found")
+ return False
+ preprovisionedVm = find_child(
+ platform_settings[0],
+ lambda n: n.localName == "PreprovisionedVm")
+ if not preprovisionedVm or len(preprovisionedVm) == 0:
+ LOG.debug("PreprovisionedVm not found")
+ return False
+ return util.translate_bool(preprovisionedVm[0].firstChild.nodeValue)
+
+
def encrypt_pass(password, salt_id="$6$"):
return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
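
For reference, the PreprovisionedVm lookup above walks PlatformSettingsSection/PlatformSettings in the OVF environment. A standalone sketch follows; it assumes a namespace-free XML fragment and uses a plain tag lookup instead of the patch's find_child helper, which matches on localName:

    # Sketch only: locate PreprovisionedVm in a minimal, namespace-free
    # ovf-env fragment.
    from xml.dom import minidom

    OVF = ('<Environment><PlatformSettingsSection><PlatformSettings>'
           '<PreprovisionedVm>true</PreprovisionedVm>'
           '</PlatformSettings></PlatformSettingsSection></Environment>')

    nodes = minidom.parseString(OVF).getElementsByTagName('PreprovisionedVm')
    print(nodes[0].firstChild.nodeValue if nodes else 'false')  # -> true
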
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index d7fcd45a..699a85b5 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -16,13 +16,16 @@ LOG = logging.getLogger(__name__)
class DataSourceBigstep(sources.DataSource):
+
+ dsname = 'Bigstep'
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.metadata = {}
self.vendordata_raw = ""
self.userdata_raw = ""
- def get_data(self, apply_filter=False):
+ def _get_data(self, apply_filter=False):
url = get_url_from_file()
if url is None:
return False
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 19df16b1..4eaad475 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -23,6 +23,9 @@ class DataSourceCloudSigma(sources.DataSource):
For more information about CloudSigma's Server Context:
http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
"""
+
+ dsname = 'CloudSigma'
+
def __init__(self, sys_cfg, distro, paths):
self.cepko = Cepko()
self.ssh_public_key = ''
@@ -46,7 +49,7 @@ class DataSourceCloudSigma(sources.DataSource):
LOG.warning("failed to query dmi data for system product name")
return False
- def get_data(self):
+ def _get_data(self):
"""
Metadata is the whole server context and /meta/cloud-config is used
as userdata.
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 9dc473fc..0df545fc 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -65,6 +65,9 @@ class CloudStackPasswordServerClient(object):
class DataSourceCloudStack(sources.DataSource):
+
+ dsname = 'CloudStack'
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'cs')
@@ -117,7 +120,7 @@ class DataSourceCloudStack(sources.DataSource):
def get_config_obj(self):
return self.cfg
- def get_data(self):
+ def _get_data(self):
seed_ret = {}
if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
self.userdata_raw = seed_ret['user-data']
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index ef374f3f..b8db6267 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -25,13 +25,16 @@ DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
}
FS_TYPES = ('vfat', 'iso9660')
-LABEL_TYPES = ('config-2',)
+LABEL_TYPES = ('config-2', 'CONFIG-2')
POSSIBLE_MOUNTS = ('sr', 'cd')
OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
for i in range(0, 2)))
class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
+
+ dsname = 'ConfigDrive'
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
self.source = None
@@ -50,7 +53,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
mstr += "[source=%s]" % (self.source)
return mstr
- def get_data(self):
+ def _get_data(self):
found = None
md = {}
results = {}
@@ -221,7 +224,7 @@ def find_candidate_devs(probe_optical=True):
config drive v2:
Disk should be:
* either vfat or iso9660 formated
- * labeled with 'config-2'
+ * labeled with 'config-2' or 'CONFIG-2'
"""
# query optical drive to get it in blkid cache for 2.6 kernels
if probe_optical:
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 5e7e66be..e0ef665e 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -27,6 +27,9 @@ MD_USE_IPV4LL = True
class DataSourceDigitalOcean(sources.DataSource):
+
+ dsname = 'DigitalOcean'
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
@@ -44,7 +47,7 @@ class DataSourceDigitalOcean(sources.DataSource):
def _get_sysinfo(self):
return do_helper.read_sysinfo()
- def get_data(self):
+ def _get_data(self):
(is_do, droplet_id) = self._get_sysinfo()
# only proceed if we know we are on DigitalOcean
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 7bbbfb63..e14553b3 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -14,7 +14,7 @@ import time
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
from cloudinit import net
-from cloudinit.net import dhcp
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
from cloudinit import sources
from cloudinit import url_helper as uhelp
from cloudinit import util
@@ -31,6 +31,7 @@ _unset = "_unset"
class Platforms(object):
+ # TODO Rename and move to cloudinit.cloud.CloudNames
ALIYUN = "AliYun"
AWS = "AWS"
BRIGHTBOX = "Brightbox"
@@ -45,6 +46,7 @@ class Platforms(object):
class DataSourceEc2(sources.DataSource):
+ dsname = 'Ec2'
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
@@ -68,11 +70,15 @@ class DataSourceEc2(sources.DataSource):
_fallback_interface = None
def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
self.seed_dir = os.path.join(paths.seed_dir, "ec2")
- def get_data(self):
+ def _get_cloud_name(self):
+ """Return the cloud name as identified during _get_data."""
+ return self.cloud_platform
+
+ def _get_data(self):
seed_ret = {}
if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
self.userdata_raw = seed_ret['user-data']
@@ -96,22 +102,13 @@ class DataSourceEc2(sources.DataSource):
if util.is_FreeBSD():
LOG.debug("FreeBSD doesn't support running dhclient with -sf")
return False
- dhcp_leases = dhcp.maybe_perform_dhcp_discovery(
- self.fallback_interface)
- if not dhcp_leases:
- # DataSourceEc2Local failed in init-local stage. DataSourceEc2
- # will still run in init-network stage.
+ try:
+ with EphemeralDHCPv4(self.fallback_interface):
+ return util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self._crawl_metadata)
+ except NoDHCPLeaseError:
return False
- dhcp_opts = dhcp_leases[-1]
- net_params = {'interface': dhcp_opts.get('interface'),
- 'ip': dhcp_opts.get('fixed-address'),
- 'prefix_or_mask': dhcp_opts.get('subnet-mask'),
- 'broadcast': dhcp_opts.get('broadcast-address'),
- 'router': dhcp_opts.get('routers')}
- with net.EphemeralIPv4Network(**net_params):
- return util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self._crawl_metadata)
else:
return self._crawl_metadata()
@@ -148,7 +145,12 @@ class DataSourceEc2(sources.DataSource):
return self.min_metadata_version
def get_instance_id(self):
- return self.metadata['instance-id']
+ if self.cloud_platform == Platforms.AWS:
+ # Prefer the ID from the instance identity document; fall back to metadata.
+ return self.identity.get(
+ 'instanceId', self.metadata['instance-id'])
+ else:
+ return self.metadata['instance-id']
def _get_url_settings(self):
mcfg = self.ds_cfg
@@ -262,19 +264,31 @@ class DataSourceEc2(sources.DataSource):
@property
def availability_zone(self):
try:
- return self.metadata['placement']['availability-zone']
+ if self.cloud_platform == Platforms.AWS:
+ return self.identity.get(
+ 'availabilityZone',
+ self.metadata['placement']['availability-zone'])
+ else:
+ return self.metadata['placement']['availability-zone']
except KeyError:
return None
@property
def region(self):
- az = self.availability_zone
- if az is not None:
- return az[:-1]
+ if self.cloud_platform == Platforms.AWS:
+ region = self.identity.get('region')
+ # Fall back to trimming the availability zone if region is missing
+ if self.availability_zone and not region:
+ region = self.availability_zone[:-1]
+ return region
+ else:
+ az = self.availability_zone
+ if az is not None:
+ return az[:-1]
return None
@property
- def cloud_platform(self):
+ def cloud_platform(self): # TODO rename cloud_name
if self._cloud_platform is None:
self._cloud_platform = identify_platform()
return self._cloud_platform
@@ -351,6 +365,9 @@ class DataSourceEc2(sources.DataSource):
api_version, self.metadata_address)
self.metadata = ec2.get_instance_metadata(
api_version, self.metadata_address)
+ if self.cloud_platform == Platforms.AWS:
+ self.identity = ec2.get_instance_identity(
+ api_version, self.metadata_address).get('document', {})
except Exception:
util.logexc(
LOG, "Failed reading from metadata address %s",
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index ccae4200..2da34a99 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -2,8 +2,12 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import datetime
+import json
+
from base64 import b64decode
+from cloudinit.distros import ug_util
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper
@@ -17,16 +21,18 @@ REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
class GoogleMetadataFetcher(object):
- headers = {'X-Google-Metadata-Request': 'True'}
+ headers = {'Metadata-Flavor': 'Google'}
def __init__(self, metadata_address):
self.metadata_address = metadata_address
- def get_value(self, path, is_text):
+ def get_value(self, path, is_text, is_recursive=False):
value = None
try:
- resp = url_helper.readurl(url=self.metadata_address + path,
- headers=self.headers)
+ url = self.metadata_address + path
+ if is_recursive:
+ url += '/?recursive=True'
+ resp = url_helper.readurl(url=url, headers=self.headers)
except url_helper.UrlError as exc:
msg = "url %s raised exception %s"
LOG.debug(msg, path, exc)
@@ -35,22 +41,29 @@ class GoogleMetadataFetcher(object):
if is_text:
value = util.decode_binary(resp.contents)
else:
- value = resp.contents
+ value = resp.contents.decode('utf-8')
else:
LOG.debug("url %s returned code %s", path, resp.code)
return value
class DataSourceGCE(sources.DataSource):
+
+ dsname = 'GCE'
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.default_user = None
+ if distro:
+ (users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro)
+ (self.default_user, _user_config) = ug_util.extract_default(users)
self.metadata = dict()
self.ds_cfg = util.mergemanydict([
util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
BUILTIN_DS_CONFIG])
self.metadata_address = self.ds_cfg['metadata_url']
- def get_data(self):
+ def _get_data(self):
ret = util.log_time(
LOG.debug, 'Crawl of GCE metadata service',
read_md, kwargs={'address': self.metadata_address})
@@ -67,17 +80,18 @@ class DataSourceGCE(sources.DataSource):
@property
def launch_index(self):
- # GCE does not provide lauch_index property
+ # GCE does not provide launch_index property.
return None
def get_instance_id(self):
return self.metadata['instance-id']
def get_public_ssh_keys(self):
- return self.metadata['public-keys']
+ public_keys_data = self.metadata['public-keys-data']
+ return _parse_public_keys(public_keys_data, self.default_user)
def get_hostname(self, fqdn=False, resolve_ip=False):
- # GCE has long FDQN's and has asked for short hostnames
+ # GCE has long FQDNs and has asked for short hostnames.
return self.metadata['local-hostname'].split('.')[0]
@property
@@ -89,15 +103,58 @@ class DataSourceGCE(sources.DataSource):
return self.availability_zone.rsplit('-', 1)[0]
-def _trim_key(public_key):
- # GCE takes sshKeys attribute in the format of '<user>:<public_key>'
- # so we have to trim each key to remove the username part
+def _has_expired(public_key):
+ # Check whether an SSH key is expired. Public key input is a single SSH
+ # public key in the GCE specific key format documented here:
+ # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat
+ try:
+ # Check for the Google-specific schema identifier.
+ schema, json_str = public_key.split(None, 3)[2:]
+ except (ValueError, AttributeError):
+ return False
+
+ # Do not expire keys if they do not have the expected schema identifier.
+ if schema != 'google-ssh':
+ return False
+
+ try:
+ json_obj = json.loads(json_str)
+ except ValueError:
+ return False
+
+ # Do not expire keys if there is no expiration timestamp.
+ if 'expireOn' not in json_obj:
+ return False
+
+ expire_str = json_obj['expireOn']
+ format_str = '%Y-%m-%dT%H:%M:%S+0000'
try:
- index = public_key.index(':')
- if index > 0:
- return public_key[(index + 1):]
- except Exception:
- return public_key
+ expire_time = datetime.datetime.strptime(expire_str, format_str)
+ except ValueError:
+ return False
+
+ # Expire the key if and only if we have exceeded the expiration timestamp.
+ return datetime.datetime.utcnow() > expire_time
+
+
+def _parse_public_keys(public_keys_data, default_user=None):
+ # Parse the SSH key data for the default user account. Public keys input is
+ # a list containing SSH public keys in the GCE specific key format
+ # documented here:
+ # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat
+ public_keys = []
+ if not public_keys_data:
+ return public_keys
+ for public_key in public_keys_data:
+ if not public_key or not all(ord(c) < 128 for c in public_key):
+ continue
+ split_public_key = public_key.split(':', 1)
+ if len(split_public_key) != 2:
+ continue
+ user, key = split_public_key
+ if user in ('cloudinit', default_user) and not _has_expired(key):
+ public_keys.append(key)
+ return public_keys
def read_md(address=None, platform_check=True):
@@ -113,31 +170,28 @@ def read_md(address=None, platform_check=True):
ret['reason'] = "Not running on GCE."
return ret
- # if we cannot resolve the metadata server, then no point in trying
+ # If we cannot resolve the metadata server, then no point in trying.
if not util.is_resolvable_url(address):
LOG.debug("%s is not resolvable", address)
ret['reason'] = 'address "%s" is not resolvable' % address
return ret
- # url_map: (our-key, path, required, is_text)
+ # url_map: (our-key, path, required, is_text, is_recursive)
url_map = [
- ('instance-id', ('instance/id',), True, True),
- ('availability-zone', ('instance/zone',), True, True),
- ('local-hostname', ('instance/hostname',), True, True),
- ('public-keys', ('project/attributes/sshKeys',
- 'instance/attributes/ssh-keys'), False, True),
- ('user-data', ('instance/attributes/user-data',), False, False),
- ('user-data-encoding', ('instance/attributes/user-data-encoding',),
- False, True),
+ ('instance-id', ('instance/id',), True, True, False),
+ ('availability-zone', ('instance/zone',), True, True, False),
+ ('local-hostname', ('instance/hostname',), True, True, False),
+ ('instance-data', ('instance/attributes',), False, False, True),
+ ('project-data', ('project/attributes',), False, False, True),
]
metadata_fetcher = GoogleMetadataFetcher(address)
md = {}
- # iterate over url_map keys to get metadata items
- for (mkey, paths, required, is_text) in url_map:
+ # Iterate over url_map keys to get metadata items.
+ for (mkey, paths, required, is_text, is_recursive) in url_map:
value = None
for path in paths:
- new_value = metadata_fetcher.get_value(path, is_text)
+ new_value = metadata_fetcher.get_value(path, is_text, is_recursive)
if new_value is not None:
value = new_value
if required and value is None:
@@ -146,17 +200,23 @@ def read_md(address=None, platform_check=True):
return ret
md[mkey] = value
- if md['public-keys']:
- lines = md['public-keys'].splitlines()
- md['public-keys'] = [_trim_key(k) for k in lines]
+ instance_data = json.loads(md['instance-data'] or '{}')
+ project_data = json.loads(md['project-data'] or '{}')
+ valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
+ block_project = instance_data.get('block-project-ssh-keys', '').lower()
+ if block_project != 'true' and not instance_data.get('sshKeys'):
+ valid_keys.append(project_data.get('ssh-keys'))
+ valid_keys.append(project_data.get('sshKeys'))
+ public_keys_data = '\n'.join([key for key in valid_keys if key])
+ md['public-keys-data'] = public_keys_data.splitlines()
if md['availability-zone']:
md['availability-zone'] = md['availability-zone'].split('/')[-1]
- encoding = md.get('user-data-encoding')
+ encoding = instance_data.get('user-data-encoding')
if encoding:
if encoding == 'base64':
- md['user-data'] = b64decode(md['user-data'])
+ md['user-data'] = b64decode(instance_data.get('user-data'))
else:
LOG.warning('unknown user-data-encoding: %s, ignoring', encoding)
@@ -185,20 +245,19 @@ def platform_reports_gce():
return False
-# Used to match classes to dependencies
+# Used to match classes to dependencies.
datasources = [
(DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
-# Return a list of data sources that match this set of dependencies
+# Return a list of data sources that match this set of dependencies.
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
if __name__ == "__main__":
import argparse
- import json
import sys
from base64 import b64encode
@@ -214,7 +273,7 @@ if __name__ == "__main__":
data = read_md(address=args.endpoint, platform_check=args.platform_check)
if 'user-data' in data:
# user-data is bytes not string like other things. Handle it specially.
- # if it can be represented as utf-8 then do so. Otherwise print base64
+ # If it can be represented as utf-8 then do so. Otherwise print base64
# encoded value in the key user-data-b64.
try:
data['user-data'] = data['user-data'].decode()
@@ -222,7 +281,7 @@ if __name__ == "__main__":
sys.stderr.write("User-data cannot be decoded. "
"Writing as base64\n")
del data['user-data']
- # b64encode returns a bytes value. decode to get the string.
+ # b64encode returns a bytes value. Decode to get the string.
data['user-data-b64'] = b64encode(data['user-data']).decode()
print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')))
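
The expiry check above splits a 'google-ssh' formatted key into its schema marker and JSON metadata, then compares expireOn against UTC now. A self-contained sketch with a fabricated key string:

    # Sketch only: parse a (fabricated) google-ssh key and test expiry.
    import datetime
    import json

    key = ('ssh-rsa AAAAB3Nza... google-ssh '
           '{"userName": "demo@example.com",'
           ' "expireOn": "2018-01-01T00:00:00+0000"}')
    schema, json_str = key.split(None, 3)[2:]
    expire_on = datetime.datetime.strptime(
        json.loads(json_str)['expireOn'], '%Y-%m-%dT%H:%M:%S+0000')
    print(schema == 'google-ssh' and
          datetime.datetime.utcnow() > expire_on)  # True once past expireOn
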
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 77df5a51..6ac88635 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -8,6 +8,7 @@
from __future__ import print_function
+import hashlib
import os
import time
@@ -39,30 +40,28 @@ class DataSourceMAAS(sources.DataSource):
hostname
vendor-data
"""
+
+ dsname = "MAAS"
+ id_hash = None
+ _oauth_helper = None
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.base_url = None
self.seed_dir = os.path.join(paths.seed_dir, 'maas')
- self.oauth_helper = self._get_helper()
-
- def _get_helper(self):
- mcfg = self.ds_cfg
- # If we are missing token_key, token_secret or consumer_key
- # then just do non-authed requests
- for required in ('token_key', 'token_secret', 'consumer_key'):
- if required not in mcfg:
- return url_helper.OauthUrlHelper()
+ self.id_hash = get_id_from_ds_cfg(self.ds_cfg)
- return url_helper.OauthUrlHelper(
- consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'],
- token_secret=mcfg['token_secret'],
- consumer_secret=mcfg.get('consumer_secret'))
+ @property
+ def oauth_helper(self):
+ if not self._oauth_helper:
+ self._oauth_helper = get_oauth_helper(self.ds_cfg)
+ return self._oauth_helper
def __str__(self):
root = sources.DataSource.__str__(self)
return "%s [%s]" % (root, self.base_url)
- def get_data(self):
+ def _get_data(self):
mcfg = self.ds_cfg
try:
@@ -144,6 +143,36 @@ class DataSourceMAAS(sources.DataSource):
return bool(url)
+ def check_instance_id(self, sys_cfg):
+ """locally check if the current system is the same instance.
+
+ MAAS doesn't provide a real instance-id, and if it did, it is
+ still only available over the network. We need to check based
+ only on local resources. So compute a hash based on Oauth tokens."""
+ if self.id_hash is None:
+ return False
+ ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {})
+ return (self.id_hash == get_id_from_ds_cfg(ncfg))
+
+
+def get_oauth_helper(cfg):
+ """Return an oauth helper instance for values in cfg.
+
+ @raises ValueError from OauthUrlHelper if some required fields have
+ true-ish values but others do not."""
+ keys = ('consumer_key', 'consumer_secret', 'token_key', 'token_secret')
+ kwargs = dict([(r, cfg.get(r)) for r in keys])
+ return url_helper.OauthUrlHelper(**kwargs)
+
+
+def get_id_from_ds_cfg(ds_cfg):
+ """Given a config, generate a unique identifier for this node."""
+ fields = ('consumer_key', 'token_key', 'token_secret')
+ idstr = '\0'.join([ds_cfg.get(f, "") for f in fields])
+ # Store the encoding version as part of the hash so that, if it
+ # ever changes, older versions can still be computed.
+ return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest()
+
def read_maas_seed_dir(seed_d):
if seed_d.startswith("file://"):
@@ -319,7 +348,7 @@ if __name__ == "__main__":
sys.stderr.write("Must provide a url or a config with url.\n")
sys.exit(1)
- oauth_helper = url_helper.OauthUrlHelper(**creds)
+ oauth_helper = get_oauth_helper(creds)
def geturl(url):
# the retry is to ensure that oauth timestamp gets fixed
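
The check_instance_id path above never touches the network: it recomputes a SHA-256 over the configured OAuth fields and compares it with the cached value. A sketch with placeholder credentials:

    # Sketch only: v1 id hash over NUL-joined OAuth fields (placeholders).
    import hashlib

    ds_cfg = {'consumer_key': 'ck', 'token_key': 'tk', 'token_secret': 'ts'}
    idstr = '\0'.join(ds_cfg.get(f, '')
                      for f in ('consumer_key', 'token_key', 'token_secret'))
    print('v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest())
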
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index e641244d..5d3a8ddb 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -20,6 +20,9 @@ LOG = logging.getLogger(__name__)
class DataSourceNoCloud(sources.DataSource):
+
+ dsname = "NoCloud"
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
@@ -32,7 +35,7 @@ class DataSourceNoCloud(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
- def get_data(self):
+ def _get_data(self):
defaults = {
"instance-id": "nocloud",
"dsmode": self.dsmode,
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
index 906bb278..e63a7e39 100644
--- a/cloudinit/sources/DataSourceNone.py
+++ b/cloudinit/sources/DataSourceNone.py
@@ -11,12 +11,15 @@ LOG = logging.getLogger(__name__)
class DataSourceNone(sources.DataSource):
+
+ dsname = "None"
+
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
self.metadata = {}
self.userdata_raw = ''
- def get_data(self):
+ def _get_data(self):
# If the datasource config has any provided 'fallback'
# userdata or metadata, use it...
if 'userdata_raw' in self.ds_cfg:
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index ccebf11a..6e62f984 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -21,6 +21,8 @@ from cloudinit import util
from cloudinit.sources.helpers.vmware.imc.config \
import Config
+from cloudinit.sources.helpers.vmware.imc.config_custom_script \
+ import PreCustomScript, PostCustomScript
from cloudinit.sources.helpers.vmware.imc.config_file \
import ConfigFile
from cloudinit.sources.helpers.vmware.imc.config_nic \
@@ -30,7 +32,7 @@ from cloudinit.sources.helpers.vmware.imc.config_passwd \
from cloudinit.sources.helpers.vmware.imc.guestcust_error \
import GuestCustErrorEnum
from cloudinit.sources.helpers.vmware.imc.guestcust_event \
- import GuestCustEventEnum
+ import GuestCustEventEnum as GuestCustEvent
from cloudinit.sources.helpers.vmware.imc.guestcust_state \
import GuestCustStateEnum
from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
@@ -43,6 +45,9 @@ LOG = logging.getLogger(__name__)
class DataSourceOVF(sources.DataSource):
+
+ dsname = "OVF"
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
@@ -60,7 +65,7 @@ class DataSourceOVF(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
- def get_data(self):
+ def _get_data(self):
found = []
md = {}
ud = ""
@@ -124,17 +129,31 @@ class DataSourceOVF(sources.DataSource):
self._vmware_cust_conf = Config(cf)
(md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
- markerid = self._vmware_cust_conf.marker_id
- markerexists = check_marker_exists(markerid)
+ imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
+ product_marker = self._vmware_cust_conf.marker_id
+ hasmarkerfile = check_marker_exists(
+ product_marker, os.path.join(self.paths.cloud_dir, 'data'))
+ special_customization = product_marker and not hasmarkerfile
+ customscript = self._vmware_cust_conf.custom_script_name
except Exception as e:
- LOG.debug("Error parsing the customization Config File")
- LOG.exception(e)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
- raise e
- finally:
- util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
+ _raise_error_status(
+ "Error parsing the customization Config File",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ vmwareImcConfigFilePath)
+
+ if special_customization:
+ if customscript:
+ try:
+ precust = PreCustomScript(customscript, imcdirpath)
+ precust.execute()
+ except Exception as e:
+ _raise_error_status(
+ "Error executing pre-customization script",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ vmwareImcConfigFilePath)
+
try:
LOG.debug("Preparing the Network configuration")
self._network_config = get_network_config_from_conf(
@@ -143,13 +162,13 @@ class DataSourceOVF(sources.DataSource):
True,
self.distro.osfamily)
except Exception as e:
- LOG.exception(e)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED)
- raise e
+ _raise_error_status(
+ "Error preparing Network Configuration",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
+ vmwareImcConfigFilePath)
- if markerid and not markerexists:
+ if special_customization:
LOG.debug("Applying password customization")
pwdConfigurator = PasswordConfigurator()
adminpwd = self._vmware_cust_conf.admin_password
@@ -161,27 +180,41 @@ class DataSourceOVF(sources.DataSource):
else:
LOG.debug("Changing password is not needed")
except Exception as e:
- LOG.debug("Error applying Password Configuration: %s", e)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
- return False
- if markerid:
- LOG.debug("Handle marker creation")
+ _raise_error_status(
+ "Error applying Password Configuration",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ vmwareImcConfigFilePath)
+
+ if customscript:
+ try:
+ postcust = PostCustomScript(customscript, imcdirpath)
+ postcust.execute()
+ except Exception as e:
+ _raise_error_status(
+ "Error executing post-customization script",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ vmwareImcConfigFilePath)
+
+ if product_marker:
try:
- setup_marker_files(markerid)
+ setup_marker_files(
+ product_marker,
+ os.path.join(self.paths.cloud_dir, 'data'))
except Exception as e:
- LOG.debug("Error creating marker files: %s", e)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
- return False
+ _raise_error_status(
+ "Error creating marker files",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ vmwareImcConfigFilePath)
self._vmware_cust_found = True
found.append('vmware-tools')
# TODO: Need to set the status to DONE only when the
# customization is done successfully.
+ util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
enable_nics(self._vmware_nics_to_enable)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
@@ -536,31 +569,52 @@ def get_datasource_list(depends):
# To check if marker file exists
-def check_marker_exists(markerid):
+def check_marker_exists(markerid, marker_dir):
"""
Check the existence of a marker file.
Presence of marker file determines whether a certain code path is to be
executed. It is needed for partial guest customization in VMware.
+ @param markerid: a unique string representing a particular product
+ marker.
+ @param marker_dir: the directory in which markers exist.
"""
if not markerid:
return False
- markerfile = "/.markerfile-" + markerid
+ markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt")
if os.path.exists(markerfile):
return True
return False
# Create a marker file
-def setup_marker_files(markerid):
+def setup_marker_files(markerid, marker_dir):
"""
Create a new marker file.
Marker files are unique to a full customization workflow in VMware
environment.
+ @param markerid: a unique string representing a particular product
+ marker.
+ @param marker_dir: the directory in which markers exist.
+
"""
- if not markerid:
- return
- markerfile = "/.markerfile-" + markerid
- util.del_file("/.markerfile-*.txt")
+ LOG.debug("Handle marker creation")
+ markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt")
+ for fname in os.listdir(marker_dir):
+ if fname.startswith(".markerfile"):
+ util.del_file(os.path.join(marker_dir, fname))
open(markerfile, 'w').close()
+
+def _raise_error_status(prefix, error, event, config_file):
+ """
+ Raise error and send customization status to the underlying VMware
+ Virtualization Platform. Also, clean up the IMC directory.
+ """
+ LOG.debug('%s: %s', prefix, error)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ event)
+ util.del_dir(os.path.dirname(config_file))
+ raise error
+
# vi: ts=4 expandtab
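
Marker files now live under the cloud data directory rather than the filesystem root, one per product marker. A sketch of the naming scheme, using a hypothetical marker id and a temp directory in place of <cloud_dir>/data:

    # Sketch only: marker path layout; 'product-42' is a hypothetical id.
    import os
    import tempfile

    marker_dir = tempfile.mkdtemp()  # stands in for <cloud_dir>/data
    markerfile = os.path.join(marker_dir, '.markerfile-product-42.txt')
    open(markerfile, 'w').close()
    print(os.path.exists(markerfile))  # True -> customization already done
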
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 5fdac192..ce47b6bd 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -12,6 +12,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import collections
import os
import pwd
import re
@@ -19,6 +20,7 @@ import string
from cloudinit import log as logging
from cloudinit import net
+from cloudinit.net import eni
from cloudinit import sources
from cloudinit import util
@@ -31,6 +33,9 @@ CONTEXT_DISK_FILES = ["context.sh"]
class DataSourceOpenNebula(sources.DataSource):
+
+ dsname = "OpenNebula"
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
@@ -40,7 +45,7 @@ class DataSourceOpenNebula(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
- def get_data(self):
+ def _get_data(self):
defaults = {"instance-id": DEFAULT_IID}
results = None
seed = None
@@ -86,11 +91,18 @@ class DataSourceOpenNebula(sources.DataSource):
return False
self.seed = seed
- self.network_eni = results.get("network_config")
+ self.network_eni = results.get('network-interfaces')
self.metadata = md
self.userdata_raw = results.get('userdata')
return True
+ @property
+ def network_config(self):
+ if self.network_eni is not None:
+ return eni.convert_eni_data(self.network_eni)
+ else:
+ return None
+
def get_hostname(self, fqdn=False, resolve_ip=None):
if resolve_ip is None:
if self.dsmode == sources.DSMODE_NETWORK:
@@ -113,58 +125,53 @@ class OpenNebulaNetwork(object):
self.context = context
if system_nics_by_mac is None:
system_nics_by_mac = get_physical_nics_by_mac()
- self.ifaces = system_nics_by_mac
+ self.ifaces = collections.OrderedDict(
+ [k for k in sorted(system_nics_by_mac.items(),
+ key=lambda k: net.natural_sort_key(k[1]))])
+
+ # OpenNebula 4.14+ provides the MAC address for ETHX in variable ETH_MAC.
+ # context_devname provides {mac.lower():ETHX, mac2.lower():ETHX}
+ self.context_devname = {}
+ for k, v in context.items():
+ m = re.match(r'^(.+)_MAC$', k)
+ if m:
+ self.context_devname[v.lower()] = m.group(1)
def mac2ip(self, mac):
- components = mac.split(':')[2:]
- return [str(int(c, 16)) for c in components]
+ return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]])
- def get_ip(self, dev, components):
- var_name = dev.upper() + '_IP'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '.'.join(components)
+ def mac2network(self, mac):
+ return self.mac2ip(mac).rpartition(".")[0] + ".0"
- def get_mask(self, dev):
- var_name = dev.upper() + '_MASK'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '255.255.255.0'
+ def get_dns(self, dev):
+ return self.get_field(dev, "dns", "").split()
- def get_network(self, dev, components):
- var_name = dev.upper() + '_NETWORK'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '.'.join(components[:-1]) + '.0'
+ def get_domain(self, dev):
+ return self.get_field(dev, "domain")
+
+ def get_ip(self, dev, mac):
+ return self.get_field(dev, "ip", self.mac2ip(mac))
def get_gateway(self, dev):
- var_name = dev.upper() + '_GATEWAY'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
+ return self.get_field(dev, "gateway")
- def get_dns(self, dev):
- var_name = dev.upper() + '_DNS'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
+ def get_mask(self, dev):
+ return self.get_field(dev, "mask", "255.255.255.0")
- def get_domain(self, dev):
- var_name = dev.upper() + '_DOMAIN'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
+ def get_network(self, dev, mac):
+ return self.get_field(dev, "network", self.mac2network(mac))
+
+ def get_field(self, dev, name, default=None):
+ """return the field name in context for device dev.
+
+ context stores <dev>_<NAME> (example: eth0_DOMAIN).
+ an empty string for value will return default."""
+ val = self.context.get('_'.join((dev, name,)).upper())
+ # allow empty string to return the default.
+ return default if val in (None, "") else val
def gen_conf(self):
- global_dns = []
- if 'DNS' in self.context:
- global_dns.append(self.context['DNS'])
+ global_dns = self.context.get('DNS', "").split()
conf = []
conf.append('auto lo')
@@ -172,29 +179,31 @@ class OpenNebulaNetwork(object):
conf.append('')
for mac, dev in self.ifaces.items():
- ip_components = self.mac2ip(mac)
+ mac = mac.lower()
+
+ # c_dev stores name in context 'ETHX' for this device.
+ # dev stores the current system name.
+ c_dev = self.context_devname.get(mac, dev)
conf.append('auto ' + dev)
conf.append('iface ' + dev + ' inet static')
- conf.append(' address ' + self.get_ip(dev, ip_components))
- conf.append(' network ' + self.get_network(dev, ip_components))
- conf.append(' netmask ' + self.get_mask(dev))
+ conf.append(' #hwaddress %s' % mac)
+ conf.append(' address ' + self.get_ip(c_dev, mac))
+ conf.append(' network ' + self.get_network(c_dev, mac))
+ conf.append(' netmask ' + self.get_mask(c_dev))
- gateway = self.get_gateway(dev)
+ gateway = self.get_gateway(c_dev)
if gateway:
conf.append(' gateway ' + gateway)
- domain = self.get_domain(dev)
+ domain = self.get_domain(c_dev)
if domain:
conf.append(' dns-search ' + domain)
# add global DNS servers to all interfaces
- dns = self.get_dns(dev)
+ dns = self.get_dns(c_dev)
if global_dns or dns:
- all_dns = global_dns
- if dns:
- all_dns.append(dns)
- conf.append(' dns-nameservers ' + ' '.join(all_dns))
+ conf.append(' dns-nameservers ' + ' '.join(global_dns + dns))
conf.append('')
@@ -329,8 +338,9 @@ def read_context_disk_dir(source_dir, asuser=None):
try:
pwd.getpwnam(asuser)
except KeyError as e:
- raise BrokenContextDiskDir("configured user '%s' "
- "does not exist", asuser)
+ raise BrokenContextDiskDir(
+ "configured user '{user}' does not exist".format(
+ user=asuser))
try:
path = os.path.join(source_dir, 'context.sh')
content = util.load_file(path)
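
The reworked OpenNebulaNetwork derives addressing from the NIC's MAC when the context lacks explicit values: the last four octets encode the IPv4 address. A sketch with a fabricated MAC:

    # Sketch only: IPv4 address and network derived from a (fabricated) MAC.
    def mac2ip(mac):
        return '.'.join(str(int(c, 16)) for c in mac.split(':')[2:])

    mac = '02:00:0a:00:00:05'
    assert mac2ip(mac) == '10.0.0.5'
    print(mac2ip(mac).rpartition('.')[0] + '.0')  # network: 10.0.0.0
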
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index b64a7f24..e55a7638 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -24,6 +24,9 @@ DEFAULT_METADATA = {
class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
+
+ dsname = "OpenStack"
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
@@ -96,7 +99,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
self.metadata_address = url2base.get(avail_url)
return bool(avail_url)
- def get_data(self):
+ def _get_data(self):
try:
if not self.wait_for_metadata_service():
return False
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 3a8a8e8f..b0b19c93 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -169,6 +169,8 @@ def query_data_api(api_type, api_address, retries, timeout):
class DataSourceScaleway(sources.DataSource):
+ dsname = "Scaleway"
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
@@ -184,7 +186,7 @@ class DataSourceScaleway(sources.DataSource):
self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
- def get_data(self):
+ def _get_data(self):
if not on_scaleway():
return False
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 6c6902fd..86bfa5d8 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -159,6 +159,9 @@ LEGACY_USER_D = "/var/db"
class DataSourceSmartOS(sources.DataSource):
+
+ dsname = "Joyent"
+
_unset = "_unset"
smartos_type = _unset
md_client = _unset
@@ -211,7 +214,7 @@ class DataSourceSmartOS(sources.DataSource):
os.rename('/'.join([svc_path, 'provisioning']),
'/'.join([svc_path, 'provision_success']))
- def get_data(self):
+ def _get_data(self):
self._init()
md = {}
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 9a43fbee..a05ca2f6 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -10,9 +10,11 @@
import abc
import copy
+import json
import os
import six
+from cloudinit.atomic_helper import write_json
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import type_utils
@@ -33,6 +35,12 @@ DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
DS_PREFIX = 'DataSource'
+# File in which instance meta-data, user-data and vendor-data is written
+INSTANCE_JSON_FILE = 'instance-data.json'
+
+# Key which can provide a cloud's official product name to cloud-init
+METADATA_CLOUD_NAME_KEY = 'cloud-name'
+
LOG = logging.getLogger(__name__)
@@ -40,12 +48,39 @@ class DataSourceNotFoundException(Exception):
pass
+def process_base64_metadata(metadata, key_path=''):
+ """Strip ci-b64 prefix and return metadata with base64-encoded-keys set."""
+ md_copy = copy.deepcopy(metadata)
+ md_copy['base64-encoded-keys'] = []
+ for key, val in metadata.items():
+ if key_path:
+ sub_key_path = key_path + '/' + key
+ else:
+ sub_key_path = key
+ if isinstance(val, str) and val.startswith('ci-b64:'):
+ md_copy['base64-encoded-keys'].append(sub_key_path)
+ md_copy[key] = val.replace('ci-b64:', '')
+ if isinstance(val, dict):
+ return_val = process_base64_metadata(val, sub_key_path)
+ md_copy['base64-encoded-keys'].extend(
+ return_val.pop('base64-encoded-keys'))
+ md_copy[key] = return_val
+ return md_copy
+
+
@six.add_metaclass(abc.ABCMeta)
class DataSource(object):
dsmode = DSMODE_NETWORK
default_locale = 'en_US.UTF-8'
+ # Datasource name needs to be set by subclasses to determine which
+ # cloud-config datasource key is loaded
+ dsname = '_undef'
+
+ # Cached cloud_name as determined by _get_cloud_name
+ _cloud_name = None
+
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
@@ -56,17 +91,8 @@ class DataSource(object):
self.vendordata = None
self.vendordata_raw = None
- # find the datasource config name.
- # remove 'DataSource' from classname on front, and remove 'Net' on end.
- # Both Foo and FooNet sources expect config in cfg['sources']['Foo']
- name = type_utils.obj_name(self)
- if name.startswith(DS_PREFIX):
- name = name[len(DS_PREFIX):]
- if name.endswith('Net'):
- name = name[0:-3]
-
- self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
- ("datasource", name), {})
+ self.ds_cfg = util.get_cfg_by_path(
+ self.sys_cfg, ("datasource", self.dsname), {})
if not self.ds_cfg:
self.ds_cfg = {}
@@ -78,6 +104,51 @@ class DataSource(object):
def __str__(self):
return type_utils.obj_name(self)
+ def _get_standardized_metadata(self):
+ """Return a dictionary of standardized metadata keys."""
+ return {'v1': {
+ 'local-hostname': self.get_hostname(),
+ 'instance-id': self.get_instance_id(),
+ 'cloud-name': self.cloud_name,
+ 'region': self.region,
+ 'availability-zone': self.availability_zone}}
+
+ def get_data(self):
+ """Datasources implement _get_data to setup metadata and userdata_raw.
+
+ Minimally, the datasource should return a boolean True on success.
+ """
+ return_value = self._get_data()
+ json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ if not return_value:
+ return return_value
+
+ instance_data = {
+ 'ds': {
+ 'meta-data': self.metadata,
+ 'user-data': self.get_userdata_raw(),
+ 'vendor-data': self.get_vendordata_raw()}}
+ instance_data.update(
+ self._get_standardized_metadata())
+ try:
+ # Process content, base64-encoding any unserializable values
+ content = util.json_dumps(instance_data)
+ # Strip the ci-b64: prefix and record the base64-encoded keys
+ processed_data = process_base64_metadata(json.loads(content))
+ except TypeError as e:
+ LOG.warning('Error persisting instance-data.json: %s', str(e))
+ return return_value
+ except UnicodeDecodeError as e:
+ LOG.warning('Error persisting instance-data.json: %s', str(e))
+ return return_value
+ write_json(json_file, processed_data, mode=0o600)
+ return return_value
+
+ def _get_data(self):
+ raise NotImplementedError(
+ 'Subclasses of DataSource must implement _get_data which'
+ ' sets self.metadata, vendordata_raw and userdata_raw.')
+
def get_userdata(self, apply_filter=False):
if self.userdata is None:
self.userdata = self.ud_proc.process(self.get_userdata_raw())
@@ -91,6 +162,34 @@ class DataSource(object):
return self.vendordata
@property
+ def cloud_name(self):
+ """Return lowercase cloud name as determined by the datasource.
+
+ Datasource can determine or define its own cloud product name in
+ metadata.
+ """
+ if self._cloud_name:
+ return self._cloud_name
+ if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY):
+ cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY)
+ if isinstance(cloud_name, six.string_types):
+ self._cloud_name = cloud_name.lower()
+ else:
+ LOG.debug(
+ 'Ignoring metadata provided key %s: non-string type %s',
+ METADATA_CLOUD_NAME_KEY, type(cloud_name))
+ else:
+ self._cloud_name = self._get_cloud_name().lower()
+ return self._cloud_name
+
+ def _get_cloud_name(self):
+ """Return the datasource name as it frequently matches cloud name.
+
+ Should be overridden in subclasses which can run on multiple
+ cloud names, such as DatasourceEc2.
+ """
+ return self.dsname
+
+ @property
def launch_index(self):
if not self.metadata:
return None
@@ -161,8 +260,11 @@ class DataSource(object):
@property
def availability_zone(self):
- return self.metadata.get('availability-zone',
- self.metadata.get('availability_zone'))
+ top_level_az = self.metadata.get(
+ 'availability-zone', self.metadata.get('availability_zone'))
+ if top_level_az:
+ return top_level_az
+ return self.metadata.get('placement', {}).get('availability-zone')
@property
def region(self):
@@ -346,7 +448,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
# Return an ordered list of classes that match (if any)
def list_sources(cfg_list, depends, pkg_list):
src_list = []
- LOG.debug(("Looking for for data source in: %s,"
+ LOG.debug(("Looking for data source in: %s,"
" via packages %s that matches dependencies %s"),
cfg_list, pkg_list, depends)
for ds_name in cfg_list:
@@ -417,4 +519,5 @@ def list_from_depends(depends, ds_list):
ret_list.append(cls)
return ret_list
+
# vi: ts=4 expandtab
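
The ci-b64 handling above keeps base64 payloads in place and records their key paths so consumers of instance-data.json know what to decode. A simplified sketch that returns the key list separately instead of embedding it in the metadata dict:

    # Sketch only: simplified variant of process_base64_metadata that
    # returns (stripped_metadata, encoded_key_paths) as a tuple.
    def strip_b64(metadata, key_path=''):
        out, encoded = dict(metadata), []
        for key, val in metadata.items():
            path = '/'.join(p for p in (key_path, key) if p)
            if isinstance(val, str) and val.startswith('ci-b64:'):
                out[key] = val[len('ci-b64:'):]
                encoded.append(path)
            elif isinstance(val, dict):
                out[key], sub = strip_b64(val, path)
                encoded.extend(sub)
        return out, encoded

    md = {'keys': 'ssh-rsa AAAA...', 'ud': {'blob': 'ci-b64:aGk='}}
    print(strip_b64(md))  # ({... 'ud': {'blob': 'aGk='}}, ['ud/blob'])
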
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 959b1bda..90c12df1 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -199,10 +199,10 @@ class WALinuxAgentShim(object):
' </Container>',
'</Health>'])
- def __init__(self, fallback_lease_file=None):
+ def __init__(self, fallback_lease_file=None, dhcp_options=None):
LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
fallback_lease_file)
- self.dhcpoptions = None
+ self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
self.values = {}
@@ -220,7 +220,8 @@ class WALinuxAgentShim(object):
@property
def endpoint(self):
if self._endpoint is None:
- self._endpoint = self.find_endpoint(self.lease_file)
+ self._endpoint = self.find_endpoint(self.lease_file,
+ self.dhcpoptions)
return self._endpoint
@staticmethod
@@ -274,7 +275,8 @@ class WALinuxAgentShim(object):
name = os.path.basename(hook_file).replace('.json', '')
dhcp_options[name] = json.loads(util.load_file((hook_file)))
except ValueError:
- raise ValueError("%s is not valid JSON data", hook_file)
+ raise ValueError(
+ '{_file} is not valid JSON data'.format(_file=hook_file))
return dhcp_options
@staticmethod
@@ -291,10 +293,14 @@ class WALinuxAgentShim(object):
return _value
@staticmethod
- def find_endpoint(fallback_lease_file=None):
+ def find_endpoint(fallback_lease_file=None, dhcp245=None):
value = None
- LOG.debug('Finding Azure endpoint from networkd...')
- value = WALinuxAgentShim._networkd_get_value_from_leases()
+ if dhcp245 is not None:
+ value = dhcp245
+ LOG.debug("Using Azure Endpoint from dhcp options")
+ if value is None:
+ LOG.debug('Finding Azure endpoint from networkd...')
+ value = WALinuxAgentShim._networkd_get_value_from_leases()
if value is None:
# Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
# a dhclient exit hook that calls cloud-init-dhclient-hook
@@ -366,8 +372,9 @@ class WALinuxAgentShim(object):
LOG.info('Reported ready to Azure fabric.')
-def get_metadata_from_fabric(fallback_lease_file=None):
- shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file)
+def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None):
+ shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
+ dhcp_options=dhcp_opts)
try:
return shim.register_with_azure_and_fetch_data()
finally:
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 49d441db..2eaeff34 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -100,4 +100,8 @@ class Config(object):
"""Returns marker id."""
return self._configFile.get(Config.MARKERID, None)
+ @property
+ def custom_script_name(self):
+ """Return the name of custom (pre/post) script."""
+ return self._configFile.get(Config.CUSTOM_SCRIPT, None)
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
new file mode 100644
index 00000000..a7d4ad91
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -0,0 +1,153 @@
+# Copyright (C) 2017 Canonical Ltd.
+# Copyright (C) 2017 VMware Inc.
+#
+# Author: Maitreyee Saikia <msaikia@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import stat
+from textwrap import dedent
+
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class CustomScriptNotFound(Exception):
+ pass
+
+
+class CustomScriptConstant(object):
+ RC_LOCAL = "/etc/rc.local"
+ POST_CUST_TMP_DIR = "/root/.customization"
+ POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh"
+ POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR,
+ POST_CUST_RUN_SCRIPT_NAME)
+ POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
+
+
+class RunCustomScript(object):
+ def __init__(self, scriptname, directory):
+ self.scriptname = scriptname
+ self.directory = directory
+ self.scriptpath = os.path.join(directory, scriptname)
+
+ def prepare_script(self):
+ if not os.path.exists(self.scriptpath):
+ raise CustomScriptNotFound("Script %s not found!! "
+ "Cannot execute custom script!"
+ % self.scriptpath)
+ # Strip any CR characters from the decoded script and write it back
+ content = util.load_file(self.scriptpath).replace("\r", "")
+ util.write_file(self.scriptpath, content)
+ st = os.stat(self.scriptpath)
+ os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC)
+
+
+class PreCustomScript(RunCustomScript):
+ def execute(self):
+ """Executing custom script with precustomization argument."""
+ LOG.debug("Executing pre-customization script")
+ self.prepare_script()
+ util.subp(["/bin/sh", self.scriptpath, "precustomization"])
+
+
+class PostCustomScript(RunCustomScript):
+ def __init__(self, scriptname, directory):
+ super(PostCustomScript, self).__init__(scriptname, directory)
+ # Determine when to run custom script. When postreboot is True,
+ # the user uploaded script will run as part of rc.local after
+ # the machine reboots. This is determined by presence of rclocal.
+ # When postreboot is False, script will run as part of cloud-init.
+ self.postreboot = False
+
+ def _install_post_reboot_agent(self, rclocal):
+ """
+ Install post-reboot agent for running custom script after reboot.
+ As part of this process, we are editing the rclocal file to run a
+ VMware script, which in turn is responsible for handling the user
+ script.
+ @param rclocal: path to rc.local.
+ """
+ LOG.debug("Installing post-reboot customization from %s to %s",
+ self.directory, rclocal)
+ if not self.has_previous_agent(rclocal):
+ LOG.info("Adding post-reboot customization agent to rc.local")
+ new_content = dedent("""
+ # Run post-reboot guest customization
+ /bin/sh %s
+ exit 0
+ """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT
+ existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '')
+ st = os.stat(rclocal)
+ # "x" flag should be set
+ mode = st.st_mode | stat.S_IEXEC
+ util.write_file(rclocal, existing_rclocal + new_content, mode)
+
+ else:
+ # We don't need to update the rc.local file every time a
+ # customization is requested; it only needs to be done once.
+ LOG.info("Post-reboot guest customization agent is already "
+ "registered in rc.local")
+ LOG.debug("Installing post-reboot customization agent finished: %s",
+ self.postreboot)
+
+ def has_previous_agent(self, rclocal):
+ searchstring = "# Run post-reboot guest customization"
+ if searchstring in open(rclocal).read():
+ return True
+ return False
+
+ def find_rc_local(self):
+ """
+ Return the resolved path of rc.local if present, else an empty string.
+ """
+ rclocal = ""
+ if os.path.exists(CustomScriptConstant.RC_LOCAL):
+ LOG.debug("rc.local detected.")
+ # resolving in case of symlink
+ rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL)
+ LOG.debug("rc.local resolved to %s", rclocal)
+ else:
+ LOG.warning("Can't find rc.local, post-customization "
+ "will be run before reboot")
+ return rclocal
+
+ def install_agent(self):
+ rclocal = self.find_rc_local()
+ if rclocal:
+ self._install_post_reboot_agent(rclocal)
+ self.postreboot = True
+
+ def execute(self):
+ """
+ Execute the post-customization script either before or after reboot,
+ depending on the presence of rc.local.
+ """
+ self.prepare_script()
+ self.install_agent()
+ if not self.postreboot:
+ LOG.warning("Executing post-customization script inline")
+ util.subp(["/bin/sh", self.scriptpath, "postcustomization"])
+ else:
+ LOG.debug("Scheduling custom script to run post reboot")
+ if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR):
+ os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR)
+ # Script "post-customize-guest.sh" and user uploaded script are
+ # are present in the same directory and needs to copied to a temp
+ # directory to be executed post reboot. User uploaded script is
+ # saved as customize.sh in the temp directory.
+ # post-customize-guest.sh excutes customize.sh after reboot.
+ LOG.debug("Copying post-customization script")
+ util.copy(self.scriptpath,
+ CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh")
+ LOG.debug("Copying script to run post-customization script")
+ util.copy(
+ os.path.join(self.directory,
+ CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME),
+ CustomScriptConstant.POST_CUST_RUN_SCRIPT)
+ LOG.info("Creating post-reboot pending marker")
+ util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER)
+
+# vi: ts=4 expandtab
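
A hedged sketch of how a datasource might drive this new module; the
script name and directory are illustrative stand-ins for values taken
from the parsed customization spec, and the pre/post calls are shown
together only for brevity:

    from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
        CustomScriptNotFound, PostCustomScript, PreCustomScript)

    def run_scripts(scriptname='customize.sh',
                    directory='/var/run/vmware-imc'):
        try:
            PreCustomScript(scriptname, directory).execute()
            # runs inline, or post-reboot via rc.local when it exists
            PostCustomScript(scriptname, directory).execute()
        except CustomScriptNotFound as e:
            print('skipping guest customization: %s' % e)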
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 2fb07c59..2d8900e2 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -161,7 +161,7 @@ class NicConfigurator(object):
if nic.primary and v4.gateways:
self.ipv4PrimaryGateway = v4.gateways[0]
subnet.update({'gateway': self.ipv4PrimaryGateway})
- return [subnet]
+ return ([subnet], route_list)
# Add routes if there is no primary nic
if not self._primaryNic:
diff --git a/cloudinit/sources/tests/__init__.py b/cloudinit/sources/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/sources/tests/__init__.py
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
new file mode 100644
index 00000000..af151154
--- /dev/null
+++ b/cloudinit/sources/tests/test_init.py
@@ -0,0 +1,202 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import six
+import stat
+
+from cloudinit.helpers import Paths
+from cloudinit.sources import (
+ INSTANCE_JSON_FILE, DataSource)
+from cloudinit.tests.helpers import CiTestCase, skipIf
+from cloudinit.user_data import UserDataProcessor
+from cloudinit import util
+
+
+class DataSourceTestSubclassNet(DataSource):
+
+ dsname = 'MyTestSubclass'
+
+ def __init__(self, sys_cfg, distro, paths, custom_userdata=None):
+ super(DataSourceTestSubclassNet, self).__init__(
+ sys_cfg, distro, paths)
+ self._custom_userdata = custom_userdata
+
+ def _get_cloud_name(self):
+ return 'SubclassCloudName'
+
+ def _get_data(self):
+ self.metadata = {'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion'}
+ if self._custom_userdata:
+ self.userdata_raw = self._custom_userdata
+ else:
+ self.userdata_raw = 'userdata_raw'
+ self.vendordata_raw = 'vendordata_raw'
+ return True
+
+
+class InvalidDataSourceTestSubclassNet(DataSource):
+ pass
+
+
+class TestDataSource(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestDataSource, self).setUp()
+ self.sys_cfg = {'datasource': {'_undef': {'key1': False}}}
+ self.distro = 'distrotest' # generally should be a Distro object
+ self.paths = Paths({})
+ self.datasource = DataSource(self.sys_cfg, self.distro, self.paths)
+
+ def test_datasource_init(self):
+ """DataSource initializes metadata attributes, ds_cfg and ud_proc."""
+ self.assertEqual(self.paths, self.datasource.paths)
+ self.assertEqual(self.sys_cfg, self.datasource.sys_cfg)
+ self.assertEqual(self.distro, self.datasource.distro)
+ self.assertIsNone(self.datasource.userdata)
+ self.assertEqual({}, self.datasource.metadata)
+ self.assertIsNone(self.datasource.userdata_raw)
+ self.assertIsNone(self.datasource.vendordata)
+ self.assertIsNone(self.datasource.vendordata_raw)
+ self.assertEqual({'key1': False}, self.datasource.ds_cfg)
+ self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor)
+
+ def test_datasource_init_gets_ds_cfg_using_dsname(self):
+ """Init uses DataSource.dsname for sourcing ds_cfg."""
+ sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
+ distro = 'distrotest' # generally should be a Distro object
+ paths = Paths({})
+ datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths)
+ self.assertEqual({'key2': False}, datasource.ds_cfg)
+
+ def test_str_is_classname(self):
+ """The string representation of the datasource is the classname."""
+ self.assertEqual('DataSource', str(self.datasource))
+ self.assertEqual(
+ 'DataSourceTestSubclassNet',
+ str(DataSourceTestSubclassNet('', '', self.paths)))
+
+ def test__get_data_unimplemented(self):
+ """Raise an error when _get_data is not implemented."""
+ with self.assertRaises(NotImplementedError) as context_manager:
+ self.datasource.get_data()
+ self.assertIn(
+ 'Subclasses of DataSource must implement _get_data',
+ str(context_manager.exception))
+ datasource2 = InvalidDataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, self.paths)
+ with self.assertRaises(NotImplementedError) as context_manager:
+ datasource2.get_data()
+ self.assertIn(
+ 'Subclasses of DataSource must implement _get_data',
+ str(context_manager.exception))
+
+ def test_get_data_calls_subclass__get_data(self):
+ """Datasource.get_data uses the subclass' version of _get_data."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ self.assertTrue(datasource.get_data())
+ self.assertEqual(
+ {'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion'},
+ datasource.metadata)
+ self.assertEqual('userdata_raw', datasource.userdata_raw)
+ self.assertEqual('vendordata_raw', datasource.vendordata_raw)
+
+ def test_get_data_write_json_instance_data(self):
+ """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ expected = {
+ 'base64-encoded-keys': [],
+ 'v1': {
+ 'availability-zone': 'myaz',
+ 'cloud-name': 'subclasscloudname',
+ 'instance-id': 'iid-datasource',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion'},
+ 'ds': {
+ 'meta-data': {'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion'},
+ 'user-data': 'userdata_raw',
+ 'vendor-data': 'vendordata_raw'}}
+ self.assertEqual(expected, util.load_json(content))
+ file_stat = os.stat(json_file)
+ self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
+
+ def test_get_data_handles_redacted_unserializable_content(self):
+ """get_data warns unserializable content in INSTANCE_JSON_FILE."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
+ self.assertTrue(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ expected_userdata = {
+ 'key1': 'val1',
+ 'key2': {
+ 'key2.1': "Warning: redacted unserializable type <class"
+ " 'cloudinit.helpers.Paths'>"}}
+ instance_json = util.load_json(content)
+ self.assertEqual(
+ expected_userdata, instance_json['ds']['user-data'])
+
+ @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes")
+ def test_get_data_base64encodes_unserializable_bytes(self):
+ """On py3, get_data base64encodes any unserializable content."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
+ self.assertTrue(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ instance_json = util.load_json(content)
+ self.assertEqual(
+ ['ds/user-data/key2/key2.1'],
+ instance_json['base64-encoded-keys'])
+ self.assertEqual(
+ {'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
+ instance_json['ds']['user-data'])
+
+ @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes")
+ def test_get_data_handles_bytes_values(self):
+ """On py2 get_data handles bytes values without having to b64encode."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
+ self.assertTrue(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ instance_json = util.load_json(content)
+ self.assertEqual([], instance_json['base64-encoded-keys'])
+ self.assertEqual(
+ {'key1': 'val1', 'key2': {'key2.1': '\x123'}},
+ instance_json['ds']['user-data'])
+
+ @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8")
+ def test_non_utf8_encoding_logs_warning(self):
+ """When non-utf-8 values exist in py2 instance-data is not written."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
+ self.assertTrue(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ self.assertFalse(os.path.exists(json_file))
+ self.assertIn(
+ "WARNING: Error persisting instance-data.json: 'utf8' codec can't"
+ " decode byte 0xaa in position 2: invalid start byte",
+ self.logs.getvalue())
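
The base64 expectations in these tests follow from the
json_serialize_default handler added to cloudinit/util.py later in this
diff; a hedged sketch of the raw py3 behavior:

    # On py3, bytes are not JSON serializable, so the default handler
    # (json_serialize_default, below) emits a 'ci-b64:' marker.
    from cloudinit.util import json_dumps

    print(json_dumps({'key2.1': b'\x123'}))
    # the serialized value becomes "ci-b64:EjM="

The datasource then strips the marker, leaving the bare 'EjM=' in
instance-data and recording the key path under 'base64-encoded-keys',
which is what the test above asserts.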
diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py
index 5d7adf70..c98a1b53 100644
--- a/cloudinit/temp_utils.py
+++ b/cloudinit/temp_utils.py
@@ -28,13 +28,18 @@ def _tempfile_dir_arg(odir=None, needs_exe=False):
if odir is not None:
return odir
+ if needs_exe:
+ tdir = _EXE_ROOT_TMPDIR
+ if not os.path.isdir(tdir):
+ os.makedirs(tdir)
+ os.chmod(tdir, 0o1777)
+ return tdir
+
global _TMPDIR
if _TMPDIR:
return _TMPDIR
- if needs_exe:
- tdir = _EXE_ROOT_TMPDIR
- elif os.getuid() == 0:
+ if os.getuid() == 0:
tdir = _ROOT_TMPDIR
else:
tdir = os.environ.get('TMPDIR', '/tmp')
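
The net effect of the reordering above: callers that need an executable
temp dir always get the dedicated directory, even when _TMPDIR is
already cached (useful when /tmp is mounted noexec). A hedged standalone
sketch; the path is illustrative:

    import os

    def exe_tmpdir(tdir='/var/tmp/cloud-init'):
        # ensure a sticky, world-writable directory, like /tmp (mode 1777)
        if not os.path.isdir(tdir):
            os.makedirs(tdir)
        os.chmod(tdir, 0o1777)
        return tdir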
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 6f88a5b7..0080c729 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -3,7 +3,6 @@
from __future__ import print_function
import functools
-import json
import logging
import os
import shutil
@@ -20,6 +19,11 @@ try:
except ImportError:
from contextlib2 import ExitStack
+try:
+ from configparser import ConfigParser
+except ImportError:
+ from ConfigParser import ConfigParser
+
from cloudinit import helpers as ch
from cloudinit import util
@@ -114,6 +118,16 @@ class TestCase(unittest2.TestCase):
self.addCleanup(m.stop)
setattr(self, attr, p)
+ # prefer python3 read_file over readfp but allow fallback
+ def parse_and_read(self, contents):
+ parser = ConfigParser()
+ if hasattr(parser, 'read_file'):
+ parser.read_file(contents)
+ elif hasattr(parser, 'readfp'):
+ # pylint: disable=W1505
+ parser.readfp(contents)
+ return parser
+
class CiTestCase(TestCase):
"""This is the preferred test case base class unless user
@@ -159,6 +173,18 @@ class CiTestCase(TestCase):
dir = self.tmp_dir()
return os.path.normpath(os.path.abspath(os.path.join(dir, path)))
+ def assertRaisesCodeEqual(self, expected, found):
+ """Handle centos6 having different context manager for assertRaises.
+ with assertRaises(Exception) as e:
+ raise Exception("BOO")
+
+ centos6 will have e.exception as an integer.
+ anything newer will have it as something with a '.code'."""
+ if isinstance(found, int):
+ self.assertEqual(expected, found)
+ else:
+ self.assertEqual(expected, found.code)
+
class ResourceUsingTestCase(CiTestCase):
@@ -337,12 +363,6 @@ def dir2dict(startdir, prefix=None):
return flist
-def json_dumps(data):
- # print data in nicely formatted json.
- return json.dumps(data, indent=1, sort_keys=True,
- separators=(',', ': '))
-
-
def wrap_and_call(prefix, mocks, func, *args, **kwargs):
"""
call func(args, **kwargs) with mocks applied, then unapplies mocks
@@ -402,4 +422,12 @@ if not hasattr(mock.Mock, 'assert_not_called'):
mock.Mock.assert_not_called = __mock_assert_not_called
+# Older unittest2.TestCase (centos6) does not have assertRaisesRegex,
+# and setting assertRaisesRegex to assertRaisesRegexp causes
+# https://github.com/PyCQA/pylint/issues/1653, hence this workaround.
+if not hasattr(unittest2.TestCase, 'assertRaisesRegex'):
+ def _tricky(*args, **kwargs):
+ return unittest2.TestCase.assertRaisesRegexp
+ unittest2.TestCase.assertRaisesRegex = _tricky
+
# vi: ts=4 expandtab
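
The new parse_and_read helper bridges the py2/py3 ConfigParser API split
(read_file versus the deprecated readfp); an equivalent standalone
sketch, with illustrative ini content:

    import io
    try:
        from configparser import ConfigParser
    except ImportError:
        from ConfigParser import ConfigParser  # py2

    def parse_ini(contents):
        parser = ConfigParser()
        if hasattr(parser, 'read_file'):
            parser.read_file(contents)
        else:
            parser.readfp(contents)
        return parser

    cfg = parse_ini(io.StringIO(u"[ntp]\nenabled = true\n"))
    print(cfg.get('ntp', 'enabled'))  # -> 'true'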
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
new file mode 100644
index 00000000..ba6bf699
--- /dev/null
+++ b/cloudinit/tests/test_util.py
@@ -0,0 +1,46 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for cloudinit.util"""
+
+import logging
+
+import cloudinit.util as util
+
+from cloudinit.tests.helpers import CiTestCase, mock
+
+LOG = logging.getLogger(__name__)
+
+MOUNT_INFO = [
+ '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64',
+ '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2'
+]
+
+
+class TestUtil(CiTestCase):
+
+ def test_parse_mount_info_no_opts_no_arg(self):
+ result = util.parse_mount_info('/home', MOUNT_INFO, LOG)
+ self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
+
+ def test_parse_mount_info_no_opts_arg(self):
+ result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False)
+ self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
+
+ def test_parse_mount_info_with_opts(self):
+ result = util.parse_mount_info('/', MOUNT_INFO, LOG, True)
+ self.assertEqual(
+ ('/dev/sda1', 'btrfs', '/', 'ro,relatime'),
+ result
+ )
+
+ @mock.patch('cloudinit.util.get_mount_info')
+ def test_mount_is_rw(self, m_mount_info):
+ m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime')
+ is_rw = util.mount_is_read_write('/')
+ self.assertEqual(is_rw, True)
+
+ @mock.patch('cloudinit.util.get_mount_info')
+ def test_mount_is_ro(self, m_mount_info):
+ m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime')
+ is_rw = util.mount_is_read_write('/')
+ self.assertEqual(is_rw, False)
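
These tests exercise the new get_mnt_opts plumbing end to end; a hedged
runtime sketch of the resulting helpers (assumes '/' appears in
/proc/$$/mountinfo, since get_mount_info may also return None):

    from cloudinit import util

    dev, fstype, mnt, opts = util.get_mount_info('/', get_mnt_opts=True)
    print('%s on %s type %s (%s)' % (dev, mnt, fstype, opts))
    if not util.mount_is_read_write('/'):
        print('root is read-only; skip operations that need to write')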
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 0e0f5b4c..0a5be0b3 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -273,7 +273,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
def wait_for_url(urls, max_wait=None, timeout=None,
status_cb=None, headers_cb=None, sleep_time=1,
- exception_cb=None):
+ exception_cb=None, sleep_time_cb=None):
"""
urls: a list of urls to try
max_wait: roughly the maximum time to wait before giving up
@@ -286,6 +286,8 @@ def wait_for_url(urls, max_wait=None, timeout=None,
for request.
exception_cb: call method with 2 arguments 'msg' (per status_cb) and
'exception', the exception that occurred.
+ sleep_time_cb: call method with 2 arguments (response, loop_n) that
+ generates the next sleep time.
the idea of this routine is to wait for the EC2 metadata service to
come up. On both Eucalyptus and EC2 we have seen the case where
@@ -301,6 +303,8 @@ def wait_for_url(urls, max_wait=None, timeout=None,
service but is not going to find one. It is possible that the instance
data host (169.254.169.254) may be firewalled off entirely for a system,
meaning that the connection will block forever unless a timeout is set.
+
+ A value of None for max_wait will retry indefinitely.
"""
start_time = time.time()
@@ -311,18 +315,24 @@ def wait_for_url(urls, max_wait=None, timeout=None,
status_cb = log_status_cb
def timeup(max_wait, start_time):
- return ((max_wait <= 0 or max_wait is None) or
- (time.time() - start_time > max_wait))
+ if (max_wait is None):
+ return False
+ return ((max_wait <= 0) or (time.time() - start_time > max_wait))
loop_n = 0
+ response = None
while True:
- sleep_time = int(loop_n / 5) + 1
+ if sleep_time_cb is not None:
+ sleep_time = sleep_time_cb(response, loop_n)
+ else:
+ sleep_time = int(loop_n / 5) + 1
for url in urls:
now = time.time()
if loop_n != 0:
if timeup(max_wait, start_time):
break
- if timeout and (now + timeout > (start_time + max_wait)):
+ if (max_wait is not None and
+ timeout and (now + timeout > (start_time + max_wait))):
# shorten timeout to not run way over max_wait
timeout = int((start_time + max_wait) - now)
@@ -354,10 +364,11 @@ def wait_for_url(urls, max_wait=None, timeout=None,
url_exc = e
time_taken = int(time.time() - start_time)
- status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url,
- time_taken,
- max_wait,
- reason)
+ max_wait_str = "%ss" % max_wait if max_wait else "unlimited"
+ status_msg = "Calling '%s' failed [%s/%s]: %s" % (url,
+ time_taken,
+ max_wait_str,
+ reason)
status_cb(status_msg)
if exception_cb:
# This can be used to alter the headers that will be sent
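
A hedged sketch combining the two new knobs: max_wait=None retries
indefinitely, and sleep_time_cb replaces the default linear backoff (the
metadata URL is shown for illustration):

    from cloudinit.url_helper import wait_for_url

    def backoff(_response, loop_n):
        # 1, 2, 4, 8, ... seconds, capped at 32
        return min(2 ** loop_n, 32)

    url = wait_for_url(['http://169.254.169.254/latest/meta-data/'],
                       max_wait=None, timeout=5, sleep_time_cb=backoff)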
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 6c014ba5..338fb971 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -253,12 +253,18 @@ class ProcessExecutionError(IOError):
self.exit_code = exit_code
if not stderr:
- self.stderr = self.empty_attr
+ if stderr is None:
+ self.stderr = self.empty_attr
+ else:
+ self.stderr = stderr
else:
self.stderr = self._indent_text(stderr)
if not stdout:
- self.stdout = self.empty_attr
+ if stdout is None:
+ self.stdout = self.empty_attr
+ else:
+ self.stdout = stdout
else:
self.stdout = self._indent_text(stdout)
@@ -533,15 +539,6 @@ def multi_log(text, console=True, stderr=True,
log.log(log_level, text)
-def load_json(text, root_types=(dict,)):
- decoded = json.loads(decode_binary(text))
- if not isinstance(decoded, tuple(root_types)):
- expected_types = ", ".join([str(t) for t in root_types])
- raise TypeError("(%s) root types expected, got %s instead"
- % (expected_types, type(decoded)))
- return decoded
-
-
def is_ipv4(instr):
"""determine if input string is a ipv4 address. return boolean."""
toks = instr.split('.')
@@ -900,17 +897,17 @@ def load_yaml(blob, default=None, allowed=(dict,)):
"of length %s with allowed root types %s",
len(blob), allowed)
converted = safeyaml.load(blob)
- if not isinstance(converted, allowed):
+ if converted is None:
+ LOG.debug("loaded blob returned None, returning default.")
+ converted = default
+ elif not isinstance(converted, allowed):
# Yes this will just be caught, but thats ok for now...
raise TypeError(("Yaml load allows %s root types,"
" but got %s instead") %
(allowed, type_utils.obj_name(converted)))
loaded = converted
except (yaml.YAMLError, TypeError, ValueError):
- if len(blob) == 0:
- LOG.debug("load_yaml given empty string, returning default")
- else:
- logexc(LOG, "Failed loading yaml blob")
+ logexc(LOG, "Failed loading yaml blob")
return loaded
@@ -1398,6 +1395,32 @@ def get_output_cfg(cfg, mode):
return ret
+def get_config_logfiles(cfg):
+ """Return a list of log file paths from the configuration dictionary.
+
+ @param cfg: The cloud-init merged configuration dictionary.
+ """
+ logs = []
+ if not cfg or not isinstance(cfg, dict):
+ return logs
+ default_log = cfg.get('def_log_file')
+ if default_log:
+ logs.append(default_log)
+ for fmt in get_output_cfg(cfg, None):
+ if not fmt:
+ continue
+ match = re.match('(?P<type>\||>+)\s*(?P<target>.*)', fmt)
+ if not match:
+ continue
+ target = match.group('target')
+ parts = target.split()
+ if len(parts) == 1:
+ logs.append(target)
+ elif ['tee', '-a'] == parts[:2]:
+ logs.append(parts[2])
+ return list(set(logs))
+
+
def logexc(log, msg, *args):
# Setting this here allows this to change
# levels easily (not always error level)
@@ -1454,7 +1477,31 @@ def ensure_dirs(dirlist, mode=0o755):
ensure_dir(d, mode)
+def load_json(text, root_types=(dict,)):
+ decoded = json.loads(decode_binary(text))
+ if not isinstance(decoded, tuple(root_types)):
+ expected_types = ", ".join([str(t) for t in root_types])
+ raise TypeError("(%s) root types expected, got %s instead"
+ % (expected_types, type(decoded)))
+ return decoded
+
+
+def json_serialize_default(_obj):
+ """Handler for types which aren't json serializable."""
+ try:
+ return 'ci-b64:{0}'.format(b64e(_obj))
+ except AttributeError:
+ return 'Warning: redacted unserializable type {0}'.format(type(_obj))
+
+
+def json_dumps(data):
+ """Return data in nicely formatted json."""
+ return json.dumps(data, indent=1, sort_keys=True,
+ separators=(',', ': '), default=json_serialize_default)
+
+
def yaml_dumps(obj, explicit_start=True, explicit_end=True):
+ """Return data in nicely formatted yaml."""
return yaml.safe_dump(obj,
line_break="\n",
indent=4,
@@ -1540,6 +1587,10 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
mtypes = list(mtype)
elif mtype is None:
mtypes = None
+ else:
+ raise TypeError(
+ 'Unsupported type provided for mtype parameter: {_type}'.format(
+ _type=type(mtype)))
# clean up 'mtype' input a bit based on platform.
platsys = platform.system().lower()
@@ -1788,58 +1839,60 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
env = env.copy()
env.update(update_env)
- try:
- if target_path(target) != "/":
- args = ['chroot', target] + list(args)
+ if target_path(target) != "/":
+ args = ['chroot', target] + list(args)
- if not logstring:
- LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"), args, rcs, shell, capture)
- else:
- LOG.debug(("Running hidden command to protect sensitive "
- "input/output logstring: %s"), logstring)
-
- stdin = None
- stdout = None
- stderr = None
- if capture:
- stdout = subprocess.PIPE
- stderr = subprocess.PIPE
- if data is None:
- # using devnull assures any reads get null, rather
- # than possibly waiting on input.
- devnull_fp = open(os.devnull)
- stdin = devnull_fp
- else:
- stdin = subprocess.PIPE
- if not isinstance(data, bytes):
- data = data.encode()
+ if not logstring:
+ LOG.debug(("Running command %s with allowed return codes %s"
+ " (shell=%s, capture=%s)"), args, rcs, shell, capture)
+ else:
+ LOG.debug(("Running hidden command to protect sensitive "
+ "input/output logstring: %s"), logstring)
+
+ stdin = None
+ stdout = None
+ stderr = None
+ if capture:
+ stdout = subprocess.PIPE
+ stderr = subprocess.PIPE
+ if data is None:
+ # using devnull assures any reads get null, rather
+ # than possibly waiting on input.
+ devnull_fp = open(os.devnull)
+ stdin = devnull_fp
+ else:
+ stdin = subprocess.PIPE
+ if not isinstance(data, bytes):
+ data = data.encode()
+ try:
sp = subprocess.Popen(args, stdout=stdout,
stderr=stderr, stdin=stdin,
env=env, shell=shell)
(out, err) = sp.communicate(data)
-
- # Just ensure blank instead of none.
- if not out and capture:
- out = b''
- if not err and capture:
- err = b''
- if decode:
- def ldecode(data, m='utf-8'):
- if not isinstance(data, bytes):
- return data
- return data.decode(m, decode)
-
- out = ldecode(out)
- err = ldecode(err)
except OSError as e:
- raise ProcessExecutionError(cmd=args, reason=e,
- errno=e.errno)
+ raise ProcessExecutionError(
+ cmd=args, reason=e, errno=e.errno,
+ stdout="-" if decode else b"-",
+ stderr="-" if decode else b"-")
finally:
if devnull_fp:
devnull_fp.close()
+ # Just ensure blank instead of none.
+ if not out and capture:
+ out = b''
+ if not err and capture:
+ err = b''
+ if decode:
+ def ldecode(data, m='utf-8'):
+ if not isinstance(data, bytes):
+ return data
+ return data.decode(m, decode)
+
+ out = ldecode(out)
+ err = ldecode(err)
+
rc = sp.returncode
if rc not in rcs:
raise ProcessExecutionError(stdout=out, stderr=err,
@@ -2010,7 +2063,7 @@ def expand_package_list(version_fmt, pkgs):
return pkglist
-def parse_mount_info(path, mountinfo_lines, log=LOG):
+def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False):
"""Return the mount information for PATH given the lines from
/proc/$$/mountinfo."""
@@ -2072,11 +2125,16 @@ def parse_mount_info(path, mountinfo_lines, log=LOG):
match_mount_point = mount_point
match_mount_point_elements = mount_point_elements
+ mount_options = parts[5]
- if devpth and fs_type and match_mount_point:
- return (devpth, fs_type, match_mount_point)
+ if get_mnt_opts:
+ if devpth and fs_type and match_mount_point and mount_options:
+ return (devpth, fs_type, match_mount_point, mount_options)
else:
- return None
+ if devpth and fs_type and match_mount_point:
+ return (devpth, fs_type, match_mount_point)
+
+ return None
def parse_mtab(path):
@@ -2146,7 +2204,7 @@ def parse_mount(path):
return None
-def get_mount_info(path, log=LOG):
+def get_mount_info(path, log=LOG, get_mnt_opts=False):
# Use /proc/$$/mountinfo to find the device where path is mounted.
# This is done because with a btrfs filesystem using os.stat(path)
# does not return the ID of the device.
@@ -2178,7 +2236,7 @@ def get_mount_info(path, log=LOG):
mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
if os.path.exists(mountinfo_path):
lines = load_file(mountinfo_path).splitlines()
- return parse_mount_info(path, lines, log)
+ return parse_mount_info(path, lines, log, get_mnt_opts)
elif os.path.exists("/etc/mtab"):
return parse_mtab(path)
else:
@@ -2286,7 +2344,8 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
missing.append(f)
if len(missing):
- raise ValueError("Missing required files: %s", ','.join(missing))
+ raise ValueError(
+ 'Missing required files: {files}'.format(files=','.join(missing)))
return ret
@@ -2563,4 +2622,10 @@ def wait_for_files(flist, maxwait, naplen=.5, log_pre=""):
return need
+def mount_is_read_write(mount_point):
+ """Check whether the given mount point is mounted rw"""
+ result = get_mount_info(mount_point, get_mnt_opts=True)
+ mount_opts = result[-1].split(',')
+ return mount_opts[0] == 'rw'
+
# vi: ts=4 expandtab
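
A hedged sketch of what the new get_config_logfiles extracts from a
merged configuration dict; the paths are illustrative:

    from cloudinit.util import get_config_logfiles

    cfg = {
        'def_log_file': '/var/log/cloud-init.log',
        'output': {'all': '| tee -a /var/log/cloud-init-output.log'},
    }
    print(sorted(get_config_logfiles(cfg)))
    # -> ['/var/log/cloud-init-output.log', '/var/log/cloud-init.log']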
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 3255f399..be6262d6 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "17.1"
+__VERSION__ = "17.2"
FEATURES = [
# supports network config version 1