Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/analyze/__init__.py  0
-rw-r--r--  cloudinit/analyze/__main__.py  155
-rw-r--r--  cloudinit/analyze/dump.py  176
-rw-r--r--  cloudinit/analyze/show.py  207
-rw-r--r--  cloudinit/analyze/tests/test_dump.py  210
-rw-r--r--  cloudinit/apport.py  105
-rw-r--r--  cloudinit/cmd/devel/__init__.py  0
-rw-r--r--  cloudinit/cmd/devel/logs.py  101
-rw-r--r--  cloudinit/cmd/devel/parser.py  26
-rw-r--r--  cloudinit/cmd/devel/tests/__init__.py  0
-rw-r--r--  cloudinit/cmd/devel/tests/test_logs.py  120
-rw-r--r--  cloudinit/cmd/main.py  69
-rw-r--r--  cloudinit/config/cc_bootcmd.py  90
-rw-r--r--  cloudinit/config/cc_chef.py  44
-rw-r--r--  cloudinit/config/cc_landscape.py  4
-rw-r--r--  cloudinit/config/cc_ntp.py  106
-rw-r--r--  cloudinit/config/cc_puppet.py  33
-rw-r--r--  cloudinit/config/cc_resizefs.py  157
-rw-r--r--  cloudinit/config/cc_resolv_conf.py  2
-rw-r--r--  cloudinit/config/cc_runcmd.py  84
-rw-r--r--  cloudinit/config/cc_snappy.py  4
-rwxr-xr-x  cloudinit/config/cc_ssh_authkey_fingerprints.py  4
-rw-r--r--  cloudinit/config/cc_ubuntu_init_switch.py  160
-rw-r--r--  cloudinit/config/cc_zypper_add_repo.py  218
-rw-r--r--  cloudinit/config/schema.py  224
-rwxr-xr-x  cloudinit/distros/__init__.py  9
-rw-r--r--  cloudinit/distros/arch.py  90
-rw-r--r--  cloudinit/distros/debian.py  94
-rw-r--r--  cloudinit/distros/opensuse.py  212
-rw-r--r--  cloudinit/distros/sles.py  160
-rw-r--r--  cloudinit/helpers.py  14
-rw-r--r--  cloudinit/log.py  5
-rw-r--r--  cloudinit/net/__init__.py  51
-rw-r--r--  cloudinit/net/dhcp.py  163
-rw-r--r--  cloudinit/net/eni.py  3
-rw-r--r--  cloudinit/net/netplan.py  40
-rw-r--r--  cloudinit/net/network_state.py  102
-rw-r--r--  cloudinit/net/sysconfig.py  6
-rw-r--r--  cloudinit/net/tests/test_dhcp.py  260
-rw-r--r--  cloudinit/net/tests/test_init.py  4
-rw-r--r--  cloudinit/netinfo.py  8
-rw-r--r--  cloudinit/simpletable.py  62
-rw-r--r--  cloudinit/sources/DataSourceAliYun.py  9
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py  4
-rw-r--r--  cloudinit/sources/DataSourceAzure.py  10
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py  51
-rw-r--r--  cloudinit/sources/DataSourceEc2.py  186
-rw-r--r--  cloudinit/sources/DataSourceGCE.py  198
-rw-r--r--  cloudinit/sources/DataSourceOVF.py  220
-rw-r--r--  cloudinit/sources/__init__.py  9
-rw-r--r--  cloudinit/sources/helpers/azure.py  24
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py  24
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py  201
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_passwd.py  67
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py  12
-rw-r--r--  cloudinit/stages.py  33
-rw-r--r--  cloudinit/temp_utils.py  101
-rw-r--r--  cloudinit/tests/__init__.py  0
-rw-r--r--  cloudinit/tests/helpers.py  405
-rw-r--r--  cloudinit/tests/test_simpletable.py  100
-rw-r--r--  cloudinit/tests/test_temp_utils.py  101
-rw-r--r--  cloudinit/tests/test_url_helper.py  40
-rw-r--r--  cloudinit/url_helper.py  6
-rw-r--r--  cloudinit/util.py  83
-rw-r--r--  cloudinit/version.py  2
65 files changed, 4410 insertions, 1058 deletions
diff --git a/cloudinit/analyze/__init__.py b/cloudinit/analyze/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/analyze/__init__.py
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
new file mode 100644
index 00000000..69b9e43e
--- /dev/null
+++ b/cloudinit/analyze/__main__.py
@@ -0,0 +1,155 @@
+# Copyright (C) 2017 Canonical Ltd.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import argparse
+import re
+import sys
+
+from . import dump
+from . import show
+
+
+def get_parser(parser=None):
+ if not parser:
+ parser = argparse.ArgumentParser(
+ prog='cloudinit-analyze',
+ description='Devel tool: Analyze cloud-init logs and data')
+ subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ subparsers.required = True
+
+ parser_blame = subparsers.add_parser(
+ 'blame', help='Print list of executed stages ordered by time to init')
+ parser_blame.add_argument(
+ '-i', '--infile', action='store', dest='infile',
+ default='/var/log/cloud-init.log',
+ help='specify where to read input.')
+ parser_blame.add_argument(
+ '-o', '--outfile', action='store', dest='outfile', default='-',
+ help='specify where to write output. ')
+ parser_blame.set_defaults(action=('blame', analyze_blame))
+
+ parser_show = subparsers.add_parser(
+ 'show', help='Print list of in-order events during execution')
+ parser_show.add_argument('-f', '--format', action='store',
+ dest='print_format', default='%I%D @%Es +%ds',
+ help='specify formatting of output.')
+ parser_show.add_argument('-i', '--infile', action='store',
+ dest='infile', default='/var/log/cloud-init.log',
+ help='specify where to read input.')
+ parser_show.add_argument('-o', '--outfile', action='store',
+ dest='outfile', default='-',
+ help='specify where to write output.')
+ parser_show.set_defaults(action=('show', analyze_show))
+ parser_dump = subparsers.add_parser(
+ 'dump', help='Dump cloud-init events in JSON format')
+ parser_dump.add_argument('-i', '--infile', action='store',
+ dest='infile', default='/var/log/cloud-init.log',
+ help='specify where to read input. ')
+ parser_dump.add_argument('-o', '--outfile', action='store',
+ dest='outfile', default='-',
+ help='specify where to write output. ')
+ parser_dump.set_defaults(action=('dump', analyze_dump))
+ return parser
+
+
+def analyze_blame(name, args):
+ """Report a list of records sorted by largest time delta.
+
+ For example:
+ 30.210s (init-local) searching for datasource
+ 8.706s (init-network) reading and applying user-data
+ 166ms (modules-config) ....
+ 807us (modules-final) ...
+
+ We generate event records parsing cloud-init logs, formatting the output
+ and sorting by record data ('delta')
+ """
+ (infh, outfh) = configure_io(args)
+ blame_format = ' %ds (%n)'
+ r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
+ for idx, record in enumerate(show.show_events(_get_events(infh),
+ blame_format)):
+ srecs = sorted(filter(r.match, record), reverse=True)
+ outfh.write('-- Boot Record %02d --\n' % (idx + 1))
+ outfh.write('\n'.join(srecs) + '\n')
+ outfh.write('\n')
+ outfh.write('%d boot records analyzed\n' % (idx + 1))
+
+
+def analyze_show(name, args):
+ """Generate output records using the 'standard' format to printing events.
+
+ Example output follows:
+ Starting stage: (init-local)
+ ...
+ Finished stage: (init-local) 0.105195 seconds
+
+ Starting stage: (init-network)
+ ...
+ Finished stage: (init-network) 0.339024 seconds
+
+ Starting stage: (modules-config)
+ ...
+ Finished stage: (modules-config) 0.NNN seconds
+
+ Starting stage: (modules-final)
+ ...
+ Finished stage: (modules-final) 0.NNN seconds
+ """
+ (infh, outfh) = configure_io(args)
+ for idx, record in enumerate(show.show_events(_get_events(infh),
+ args.print_format)):
+ outfh.write('-- Boot Record %02d --\n' % (idx + 1))
+ outfh.write('The total time elapsed since completing an event is'
+ ' printed after the "@" character.\n')
+ outfh.write('The time the event takes is printed after the "+" '
+ 'character.\n\n')
+ outfh.write('\n'.join(record) + '\n')
+ outfh.write('%d boot records analyzed\n' % (idx + 1))
+
+
+def analyze_dump(name, args):
+ """Dump cloud-init events in json format"""
+ (infh, outfh) = configure_io(args)
+ outfh.write(dump.json_dumps(_get_events(infh)) + '\n')
+
+
+def _get_events(infile):
+ rawdata = None
+ events, rawdata = show.load_events(infile, None)
+ if not events:
+ events, _ = dump.dump_events(rawdata=rawdata)
+ return events
+
+
+def configure_io(args):
+ """Common parsing and setup of input/output files"""
+ if args.infile == '-':
+ infh = sys.stdin
+ else:
+ try:
+ infh = open(args.infile, 'r')
+ except OSError:
+ sys.stderr.write('Cannot open file %s\n' % args.infile)
+ sys.exit(1)
+
+ if args.outfile == '-':
+ outfh = sys.stdout
+ else:
+ try:
+ outfh = open(args.outfile, 'w')
+ except OSError:
+ sys.stderr.write('Cannot open file %s\n' % args.outfile)
+ sys.exit(1)
+
+ return (infh, outfh)
+
+
+if __name__ == '__main__':
+ parser = get_parser()
+ args = parser.parse_args()
+ (name, action_functor) = args.action
+ action_functor(name, args)
+
+# vi: ts=4 expandtab
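
Each subparser above stores a (name, handler) tuple via set_defaults, so dispatch is a single unpack of args.action. A minimal sketch of driving the blame subcommand programmatically, using the module's default log path:

    from cloudinit.analyze.__main__ import get_parser

    parser = get_parser()
    # equivalent to: cloud-init analyze blame -i /var/log/cloud-init.log -o -
    args = parser.parse_args(['blame'])
    name, handler = args.action   # ('blame', analyze_blame), from set_defaults
    handler(name, args)           # writes sorted per-boot records to stdout
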
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
new file mode 100644
index 00000000..ca4da496
--- /dev/null
+++ b/cloudinit/analyze/dump.py
@@ -0,0 +1,176 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import calendar
+from datetime import datetime
+import json
+import sys
+
+from cloudinit import util
+
+stage_to_description = {
+ 'finished': 'finished running cloud-init',
+ 'init-local': 'starting search for local datasources',
+ 'init-network': 'searching for network datasources',
+ 'init': 'searching for network datasources',
+ 'modules-config': 'running config modules',
+ 'modules-final': 'finalizing modules',
+ 'modules': 'running modules for',
+ 'single': 'running single module ',
+}
+
+# logger's asctime format
+CLOUD_INIT_ASCTIME_FMT = "%Y-%m-%d %H:%M:%S,%f"
+
+# journalctl -o short-precise
+CLOUD_INIT_JOURNALCTL_FMT = "%b %d %H:%M:%S.%f %Y"
+
+# other
+DEFAULT_FMT = "%b %d %H:%M:%S %Y"
+
+
+def parse_timestamp(timestampstr):
+ # default syslog time does not include the current year
+ months = [calendar.month_abbr[m] for m in range(1, 13)]
+ if timestampstr.split()[0] in months:
+ # Aug 29 22:55:26
+ FMT = DEFAULT_FMT
+ if '.' in timestampstr:
+ FMT = CLOUD_INIT_JOURNALCTL_FMT
+ dt = datetime.strptime(timestampstr + " " +
+ str(datetime.now().year),
+ FMT)
+ timestamp = dt.strftime("%s.%f")
+ elif "," in timestampstr:
+ # 2016-09-12 14:39:20,839
+ dt = datetime.strptime(timestampstr, CLOUD_INIT_ASCTIME_FMT)
+ timestamp = dt.strftime("%s.%f")
+ else:
+ # allow date(1) to handle other formats we don't expect
+ timestamp = parse_timestamp_from_date(timestampstr)
+
+ return float(timestamp)
+
+
+def parse_timestamp_from_date(timestampstr):
+ out, _ = util.subp(['date', '+%s.%3N', '-d', timestampstr])
+ timestamp = out.strip()
+ return float(timestamp)
+
+
+def parse_ci_logline(line):
+ # Stage Starts:
+ # Cloud-init v. 0.7.7 running 'init-local' at \
+ # Fri, 02 Sep 2016 19:28:07 +0000. Up 1.0 seconds.
+ # Cloud-init v. 0.7.7 running 'init' at \
+ # Fri, 02 Sep 2016 19:28:08 +0000. Up 2.0 seconds.
+ # Cloud-init v. 0.7.7 finished at
+ # Aug 29 22:55:26 test1 [CLOUDINIT] handlers.py[DEBUG]: \
+ # finish: modules-final: SUCCESS: running modules for final
+ # 2016-08-30T21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: \
+ # finish: modules-final: SUCCESS: running modules for final
+ #
+ # Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]: \
+ # Cloud-init v. 0.7.8 running 'init-local' at \
+ # Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds.
+ #
+ # 2017-05-22 18:02:01,088 - util.py[DEBUG]: Cloud-init v. 0.7.9 running \
+ # 'init-local' at Mon, 22 May 2017 18:02:01 +0000. Up 2.0 seconds.
+
+ separators = [' - ', ' [CLOUDINIT] ']
+ found = False
+ for sep in separators:
+ if sep in line:
+ found = True
+ break
+
+ if not found:
+ return None
+
+ (timehost, eventstr) = line.split(sep)
+
+ # journalctl -o short-precise
+ if timehost.endswith(":"):
+ timehost = " ".join(timehost.split()[0:-1])
+
+ if "," in timehost:
+ timestampstr, extra = timehost.split(",")
+ timestampstr += ",%s" % extra.split()[0]
+ if ' ' in extra:
+ hostname = extra.split()[-1]
+ else:
+ hostname = timehost.split()[-1]
+ timestampstr = timehost.split(hostname)[0].strip()
+ if 'Cloud-init v.' in eventstr:
+ event_type = 'start'
+ if 'running' in eventstr:
+ stage_and_timestamp = eventstr.split('running')[1].lstrip()
+ event_name, _ = stage_and_timestamp.split(' at ')
+ event_name = event_name.replace("'", "").replace(":", "-")
+ if event_name == "init":
+ event_name = "init-network"
+ else:
+ # don't generate a start for the 'finished at' banner
+ return None
+ event_description = stage_to_description[event_name]
+ else:
+ (pymodloglvl, event_type, event_name) = eventstr.split()[0:3]
+ event_description = eventstr.split(event_name)[1].strip()
+
+ event = {
+ 'name': event_name.rstrip(":"),
+ 'description': event_description,
+ 'timestamp': parse_timestamp(timestampstr),
+ 'origin': 'cloudinit',
+ 'event_type': event_type.rstrip(":"),
+ }
+ if event['event_type'] == "finish":
+ result = event_description.split(":")[0]
+ desc = event_description.split(result)[1].lstrip(':').strip()
+ event['result'] = result
+ event['description'] = desc.strip()
+
+ return event
+
+
+def json_dumps(data):
+ return json.dumps(data, indent=1, sort_keys=True,
+ separators=(',', ': '))
+
+
+def dump_events(cisource=None, rawdata=None):
+ events = []
+ event = None
+ CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.']
+
+ if not any([cisource, rawdata]):
+ raise ValueError('Either cisource or rawdata is required')
+
+ if rawdata:
+ data = rawdata.splitlines()
+ else:
+ data = cisource.readlines()
+
+ for line in data:
+ for match in CI_EVENT_MATCHES:
+ if match in line:
+ try:
+ event = parse_ci_logline(line)
+ except ValueError:
+ sys.stderr.write('Skipping invalid entry\n')
+ if event:
+ events.append(event)
+
+ return events, data
+
+
+def main():
+ if len(sys.argv) > 1:
+ cisource = open(sys.argv[1])
+ else:
+ cisource = sys.stdin
+
+ events, _ = dump_events(cisource)
+ return json_dumps(events)
+
+
+if __name__ == "__main__":
+ print(main())
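
A quick sanity sketch of the timestamp branches handled above; each call returns a float epoch value, and the last (unrecognized) form falls through to date(1). The sample stamps are illustrative:

    from cloudinit.analyze.dump import parse_timestamp

    parse_timestamp('2016-09-12 14:39:20,839')   # logger asctime (',' branch)
    parse_timestamp('Nov 03 06:51:06.074410')    # journalctl short-precise ('.')
    parse_timestamp('Aug 29 22:55:26')           # syslog; current year appended
    parse_timestamp('17:15 08/08')               # fallback: handed to date(1)
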
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
new file mode 100644
index 00000000..3e778b8b
--- /dev/null
+++ b/cloudinit/analyze/show.py
@@ -0,0 +1,207 @@
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Ryan Harper <ryan.harper@canonical.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import datetime
+import json
+import os
+
+from cloudinit import util
+
+# An event:
+'''
+{
+ "description": "executing late commands",
+ "event_type": "start",
+ "level": "INFO",
+ "name": "cmd-install/stage-late"
+ "origin": "cloudinit",
+ "timestamp": 1461164249.1590767,
+},
+
+ {
+ "description": "executing late commands",
+ "event_type": "finish",
+ "level": "INFO",
+ "name": "cmd-install/stage-late",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1461164249.1590767
+ }
+
+'''
+format_key = {
+ '%d': 'delta',
+ '%D': 'description',
+ '%E': 'elapsed',
+ '%e': 'event_type',
+ '%I': 'indent',
+ '%l': 'level',
+ '%n': 'name',
+ '%o': 'origin',
+ '%r': 'result',
+ '%t': 'timestamp',
+ '%T': 'total_time',
+}
+
+formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v)
+ for k, v in format_key.items()])
+
+
+def format_record(msg, event):
+ for i, j in format_key.items():
+ if i in msg:
+ # ensure consistent formatting of time values
+ if j in ['delta', 'elapsed', 'timestamp']:
+ msg = msg.replace(i, "{%s:08.5f}" % j)
+ else:
+ msg = msg.replace(i, "{%s}" % j)
+ return msg.format(**event)
+
+
+def dump_event_files(event):
+ content = dict((k, v) for k, v in event.items() if k not in ['content'])
+ files = content['files']
+ saved = []
+ for f in files:
+ fname = f['path']
+ fn_local = os.path.basename(fname)
+ fcontent = base64.b64decode(f['content']).decode('ascii')
+ util.write_file(fn_local, fcontent)
+ saved.append(fn_local)
+
+ return saved
+
+
+def event_name(event):
+ if event:
+ return event.get('name')
+ return None
+
+
+def event_type(event):
+ if event:
+ return event.get('event_type')
+ return None
+
+
+def event_parent(event):
+ if event:
+ return event_name(event).split("/")[0]
+ return None
+
+
+def event_timestamp(event):
+ return float(event.get('timestamp'))
+
+
+def event_datetime(event):
+ return datetime.datetime.utcfromtimestamp(event_timestamp(event))
+
+
+def delta_seconds(t1, t2):
+ return (t2 - t1).total_seconds()
+
+
+def event_duration(start, finish):
+ return delta_seconds(event_datetime(start), event_datetime(finish))
+
+
+def event_record(start_time, start, finish):
+ record = finish.copy()
+ record.update({
+ 'delta': event_duration(start, finish),
+ 'elapsed': delta_seconds(start_time, event_datetime(start)),
+ 'indent': '|' + ' ' * (event_name(start).count('/') - 1) + '`->',
+ })
+
+ return record
+
+
+def total_time_record(total_time):
+ return 'Total Time: %3.5f seconds\n' % total_time
+
+
+def generate_records(events, blame_sort=False,
+ print_format="(%n) %d seconds in %I%D",
+ dump_files=False, log_datafiles=False):
+
+ sorted_events = sorted(events, key=lambda x: x['timestamp'])
+ records = []
+ start_time = None
+ total_time = 0.0
+ stage_start_time = {}
+ stages_seen = []
+ boot_records = []
+
+ unprocessed = []
+ for e in range(0, len(sorted_events)):
+ event = sorted_events[e]
+ try:
+ next_evt = sorted_events[e + 1]
+ except IndexError:
+ next_evt = None
+
+ if event_type(event) == 'start':
+ if event.get('name') in stages_seen:
+ records.append(total_time_record(total_time))
+ boot_records.append(records)
+ records = []
+ start_time = None
+ total_time = 0.0
+
+ if start_time is None:
+ stages_seen = []
+ start_time = event_datetime(event)
+ stage_start_time[event_parent(event)] = start_time
+
+ # see if we have a pair
+ if event_name(event) == event_name(next_evt):
+ if event_type(next_evt) == 'finish':
+ records.append(format_record(print_format,
+ event_record(start_time,
+ event,
+ next_evt)))
+ else:
+ # This is a parent event
+ records.append("Starting stage: %s" % event.get('name'))
+ unprocessed.append(event)
+ stages_seen.append(event.get('name'))
+ continue
+ else:
+ prev_evt = unprocessed.pop()
+ if event_name(event) == event_name(prev_evt):
+ record = event_record(start_time, prev_evt, event)
+ records.append(format_record("Finished stage: "
+ "(%n) %d seconds ",
+ record) + "\n")
+ total_time += record.get('delta')
+ else:
+ # not a match, put it back
+ unprocessed.append(prev_evt)
+
+ records.append(total_time_record(total_time))
+ boot_records.append(records)
+ return boot_records
+
+
+def show_events(events, print_format):
+ return generate_records(events, print_format=print_format)
+
+
+def load_events(infile, rawdata=None):
+ if rawdata:
+ data = rawdata.read()
+ else:
+ data = infile.read()
+
+ j = None
+ try:
+ j = json.loads(data)
+ except ValueError:
+ pass
+
+ return j, data
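
To make the %-code substitution in format_record concrete, a hand-built record (the values are illustrative):

    from cloudinit.analyze.show import format_record

    record = {'name': 'init-local', 'delta': 0.105195,
              'description': 'searching for datasources', 'indent': '|`->'}
    # time-valued codes (%d, %E, %t) render with a fixed-width float format
    print(format_record('%I(%n) %d seconds: %D', record))
    # e.g. |`->(init-local) 00.10520 seconds: searching for datasources
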
diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
new file mode 100644
index 00000000..f4c42841
--- /dev/null
+++ b/cloudinit/analyze/tests/test_dump.py
@@ -0,0 +1,210 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from datetime import datetime
+from textwrap import dedent
+
+from cloudinit.analyze.dump import (
+ dump_events, parse_ci_logline, parse_timestamp)
+from cloudinit.util import subp, write_file
+from cloudinit.tests.helpers import CiTestCase
+
+
+class TestParseTimestamp(CiTestCase):
+
+ def test_parse_timestamp_handles_cloud_init_default_format(self):
+ """Logs with cloud-init detailed formats will be properly parsed."""
+ trusty_fmt = '%Y-%m-%d %H:%M:%S,%f'
+ trusty_stamp = '2016-09-12 14:39:20,839'
+
+ parsed = parse_timestamp(trusty_stamp)
+
+ # convert ourselves
+ dt = datetime.strptime(trusty_stamp, trusty_fmt)
+ expected = float(dt.strftime('%s.%f'))
+
+ # use date(1)
+ out, _err = subp(['date', '+%s.%3N', '-d', trusty_stamp])
+ timestamp = out.strip()
+ date_ts = float(timestamp)
+
+ self.assertEqual(expected, parsed)
+ self.assertEqual(expected, date_ts)
+ self.assertEqual(date_ts, parsed)
+
+ def test_parse_timestamp_handles_syslog_adding_year(self):
+ """Syslog timestamps lack a year. Add year and properly parse."""
+ syslog_fmt = '%b %d %H:%M:%S %Y'
+ syslog_stamp = 'Aug 08 15:12:51'
+
+ # convert stamp ourselves by adding the missing year value
+ year = datetime.now().year
+ dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt)
+ expected = float(dt.strftime('%s.%f'))
+ parsed = parse_timestamp(syslog_stamp)
+
+ # use date(1)
+ out, _ = subp(['date', '+%s.%3N', '-d', syslog_stamp])
+ timestamp = out.strip()
+ date_ts = float(timestamp)
+
+ self.assertEqual(expected, parsed)
+ self.assertEqual(expected, date_ts)
+ self.assertEqual(date_ts, parsed)
+
+ def test_parse_timestamp_handles_journalctl_format_adding_year(self):
+ """Journalctl precise timestamps lack a year. Add year and parse."""
+ journal_fmt = '%b %d %H:%M:%S.%f %Y'
+ journal_stamp = 'Aug 08 17:15:50.606811'
+
+ # convert stamp ourselves by adding the missing year value
+ year = datetime.now().year
+ dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt)
+ expected = float(dt.strftime('%s.%f'))
+ parsed = parse_timestamp(journal_stamp)
+
+ # use date(1)
+ out, _ = subp(['date', '+%s.%6N', '-d', journal_stamp])
+ timestamp = out.strip()
+ date_ts = float(timestamp)
+
+ self.assertEqual(expected, parsed)
+ self.assertEqual(expected, date_ts)
+ self.assertEqual(date_ts, parsed)
+
+ def test_parse_unexpected_timestamp_format_with_date_command(self):
+ """Dump sends unexpected timestamp formats to data for processing."""
+ new_fmt = '%H:%M %m/%d %Y'
+ new_stamp = '17:15 08/08'
+
+ # convert stamp ourselves by adding the missing year value
+ year = datetime.now().year
+ dt = datetime.strptime(new_stamp + " " + str(year), new_fmt)
+ expected = float(dt.strftime('%s.%f'))
+ parsed = parse_timestamp(new_stamp)
+
+ # use date(1)
+ out, _ = subp(['date', '+%s.%6N', '-d', new_stamp])
+ timestamp = out.strip()
+ date_ts = float(timestamp)
+
+ self.assertEqual(expected, parsed)
+ self.assertEqual(expected, date_ts)
+ self.assertEqual(date_ts, parsed)
+
+
+class TestParseCILogLine(CiTestCase):
+
+ def test_parse_logline_returns_none_without_separators(self):
+ """When no separators are found, parse_ci_logline returns None."""
+ expected_parse_ignores = [
+ '', '-', 'adsf-asdf', '2017-05-22 18:02:01,088', 'CLOUDINIT']
+ for parse_ignores in expected_parse_ignores:
+ self.assertIsNone(parse_ci_logline(parse_ignores))
+
+ def test_parse_logline_returns_event_for_cloud_init_logs(self):
+ """parse_ci_logline returns an event parse from cloud-init format."""
+ line = (
+ "2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9"
+ " running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000. Up"
+ " 6.26 seconds.")
+ dt = datetime.strptime(
+ '2017-08-08 20:05:07,147', '%Y-%m-%d %H:%M:%S,%f')
+ timestamp = float(dt.strftime('%s.%f'))
+ expected = {
+ 'description': 'starting search for local datasources',
+ 'event_type': 'start',
+ 'name': 'init-local',
+ 'origin': 'cloudinit',
+ 'timestamp': timestamp}
+ self.assertEqual(expected, parse_ci_logline(line))
+
+ def test_parse_logline_returns_event_for_journalctl_logs(self):
+ """parse_ci_logline returns an event parse from journalctl format."""
+ line = ("Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]"
+ " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at"
+ " Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds.")
+ year = datetime.now().year
+ dt = datetime.strptime(
+ 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
+ timestamp = float(dt.strftime('%s.%f'))
+ expected = {
+ 'description': 'starting search for local datasources',
+ 'event_type': 'start',
+ 'name': 'init-local',
+ 'origin': 'cloudinit',
+ 'timestamp': timestamp}
+ self.assertEqual(expected, parse_ci_logline(line))
+
+ def test_parse_logline_returns_event_for_finish_events(self):
+ """parse_ci_logline returns a finish event for a parsed log line."""
+ line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]'
+ ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running'
+ ' modules for final')
+ expected = {
+ 'description': 'running modules for final',
+ 'event_type': 'finish',
+ 'name': 'modules-final',
+ 'origin': 'cloudinit',
+ 'result': 'SUCCESS',
+ 'timestamp': 1472594005.972}
+ self.assertEqual(expected, parse_ci_logline(line))
+
+
+SAMPLE_LOGS = dedent("""\
+Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
+ Cloud-init v. 0.7.8 running 'init-local' at Thu, 03 Nov 2016\
+ 06:51:06 +0000. Up 1.0 seconds.
+2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: finish:\
+ modules-final: SUCCESS: running modules for final
+""")
+
+
+class TestDumpEvents(CiTestCase):
+ maxDiff = None
+
+ def test_dump_events_with_rawdata(self):
+ """Rawdata is split and parsed into a tuple of events and data"""
+ events, data = dump_events(rawdata=SAMPLE_LOGS)
+ expected_data = SAMPLE_LOGS.splitlines()
+ year = datetime.now().year
+ dt1 = datetime.strptime(
+ 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
+ timestamp1 = float(dt1.strftime('%s.%f'))
+ expected_events = [{
+ 'description': 'starting search for local datasources',
+ 'event_type': 'start',
+ 'name': 'init-local',
+ 'origin': 'cloudinit',
+ 'timestamp': timestamp1}, {
+ 'description': 'running modules for final',
+ 'event_type': 'finish',
+ 'name': 'modules-final',
+ 'origin': 'cloudinit',
+ 'result': 'SUCCESS',
+ 'timestamp': 1472594005.972}]
+ self.assertEqual(expected_events, events)
+ self.assertEqual(expected_data, data)
+
+ def test_dump_events_with_cisource(self):
+ """Cisource file is read and parsed into a tuple of events and data."""
+ tmpfile = self.tmp_path('logfile')
+ write_file(tmpfile, SAMPLE_LOGS)
+ events, data = dump_events(cisource=open(tmpfile))
+ year = datetime.now().year
+ dt1 = datetime.strptime(
+ 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
+ timestamp1 = float(dt1.strftime('%s.%f'))
+ expected_events = [{
+ 'description': 'starting search for local datasources',
+ 'event_type': 'start',
+ 'name': 'init-local',
+ 'origin': 'cloudinit',
+ 'timestamp': timestamp1}, {
+ 'description': 'running modules for final',
+ 'event_type': 'finish',
+ 'name': 'modules-final',
+ 'origin': 'cloudinit',
+ 'result': 'SUCCESS',
+ 'timestamp': 1472594005.972}]
+ self.assertEqual(expected_events, events)
+ self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data])
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
new file mode 100644
index 00000000..221f341c
--- /dev/null
+++ b/cloudinit/apport.py
@@ -0,0 +1,105 @@
+# Copyright (C) 2017 Canonical Ltd.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+'''Cloud-init apport interface'''
+
+try:
+ from apport.hookutils import (
+ attach_file, attach_root_command_outputs, root_command_output)
+ has_apport = True
+except ImportError:
+ has_apport = False
+
+
+KNOWN_CLOUD_NAMES = [
+ 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma',
+ 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', 'MAAS',
+ 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', 'Scaleway', 'SmartOS',
+ 'VMware', 'Other']
+
+# Potentially clear text collected logs
+CLOUDINIT_LOG = '/var/log/cloud-init.log'
+CLOUDINIT_OUTPUT_LOG = '/var/log/cloud-init-output.log'
+USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional
+
+
+def attach_cloud_init_logs(report, ui=None):
+ '''Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.'''
+ attach_root_command_outputs(report, {
+ 'cloud-init-log-warnings':
+ 'egrep -i "warn|error" /var/log/cloud-init.log',
+ 'cloud-init-output.log.txt': 'cat /var/log/cloud-init-output.log'})
+ root_command_output(
+ ['cloud-init', 'collect-logs', '-t', '/tmp/cloud-init-logs.tgz'])
+ attach_file(report, '/tmp/cloud-init-logs.tgz', 'logs.tgz')
+
+
+def attach_hwinfo(report, ui=None):
+ '''Optionally attach hardware info from lshw.'''
+ prompt = (
+ 'Your device details (lshw) may be useful to developers when'
+ ' addressing this bug, but gathering it requires admin privileges.'
+ ' Would you like to include this info?')
+ if ui and ui.yesno(prompt):
+ attach_root_command_outputs(report, {'lshw.txt': 'lshw'})
+
+
+def attach_cloud_info(report, ui=None):
+ '''Prompt for cloud details if available.'''
+ if ui:
+ prompt = 'Is this machine running in a cloud environment?'
+ response = ui.yesno(prompt)
+ if response is None:
+ raise StopIteration # User cancelled
+ if response:
+ prompt = ('Please select the cloud vendor or environment in which'
+ ' this instance is running')
+ response = ui.choice(prompt, KNOWN_CLOUD_NAMES)
+ if response:
+ report['CloudName'] = KNOWN_CLOUD_NAMES[response[0]]
+ else:
+ report['CloudName'] = 'None'
+
+
+def attach_user_data(report, ui=None):
+ '''Optionally provide user-data if desired.'''
+ if ui:
+ prompt = (
+ 'Your user-data or cloud-config file can optionally be provided'
+ ' from {0} and could be useful to developers when addressing this'
+ ' bug. Do you wish to attach user-data to this bug?'.format(
+ USER_DATA_FILE))
+ response = ui.yesno(prompt)
+ if response is None:
+ raise StopIteration # User cancelled
+ if response:
+ attach_file(report, USER_DATA_FILE, 'user_data.txt')
+
+
+def add_bug_tags(report):
+ '''Add any appropriate tags to the bug.'''
+ if 'JournalErrors' in report.keys():
+ errors = report['JournalErrors']
+ if 'Breaking ordering cycle' in errors:
+ report['Tags'] = 'systemd-ordering'
+
+
+def add_info(report, ui):
+ '''This is an entry point to run cloud-init's apport functionality.
+
+ Distros which want apport support will have a cloud-init package-hook at
+ /usr/share/apport/package-hooks/cloud-init.py which defines an add_info
+ function and returns the result of cloudinit.apport.add_info(report, ui).
+ '''
+ if not has_apport:
+ raise RuntimeError(
+ 'No apport imports discovered. Apport functionality disabled')
+ attach_cloud_init_logs(report, ui)
+ attach_hwinfo(report, ui)
+ attach_cloud_info(report, ui)
+ attach_user_data(report, ui)
+ add_bug_tags(report)
+ return True
+
+# vi: ts=4 expandtab
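
As the add_info docstring notes, a distro opts in by shipping a package hook; a minimal sketch of /usr/share/apport/package-hooks/cloud-init.py consistent with that description:

    # Package hook sketch: delegate to cloud-init's apport module.
    from cloudinit import apport


    def add_info(report, ui):
        return apport.add_info(report, ui)
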
diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/cmd/devel/__init__.py
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
new file mode 100644
index 00000000..35ca478f
--- /dev/null
+++ b/cloudinit/cmd/devel/logs.py
@@ -0,0 +1,101 @@
+# Copyright (C) 2017 Canonical Ltd.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Define 'collect-logs' utility and handler to include in cloud-init cmd."""
+
+import argparse
+from cloudinit.util import (
+ ProcessExecutionError, chdir, copy, ensure_dir, subp, write_file)
+from cloudinit.temp_utils import tempdir
+from datetime import datetime
+import os
+import shutil
+
+
+CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
+CLOUDINIT_RUN_DIR = '/run/cloud-init'
+USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional
+
+
+def get_parser(parser=None):
+ """Build or extend and arg parser for collect-logs utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ collect-logs subcommand which will be extended to support the args of
+ this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(
+ prog='collect-logs',
+ description='Collect and tar all cloud-init debug info')
+ parser.add_argument(
+ "--tarfile", '-t', default='cloud-init.tar.gz',
+ help=('The tarfile to create containing all collected logs.'
+ ' Default: cloud-init.tar.gz'))
+ parser.add_argument(
+ "--include-userdata", '-u', default=False, action='store_true',
+ dest='userdata', help=(
+ 'Optionally include user-data from {0} which could contain'
+ ' sensitive information.'.format(USER_DATA_FILE)))
+ return parser
+
+
+def _write_command_output_to_file(cmd, filename):
+ """Helper which runs a command and writes output or error to filename."""
+ try:
+ out, _ = subp(cmd)
+ except ProcessExecutionError as e:
+ write_file(filename, str(e))
+ else:
+ write_file(filename, out)
+
+
+def collect_logs(tarfile, include_userdata):
+ """Collect all cloud-init logs and tar them up into the provided tarfile.
+
+ @param tarfile: The path of the tar-gzipped file to create.
+ @param include_userdata: Boolean, true means include user-data.
+ """
+ tarfile = os.path.abspath(tarfile)
+ date = datetime.utcnow().date().strftime('%Y-%m-%d')
+ log_dir = 'cloud-init-logs-{0}'.format(date)
+ with tempdir(dir='/tmp') as tmp_dir:
+ log_dir = os.path.join(tmp_dir, log_dir)
+ _write_command_output_to_file(
+ ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
+ os.path.join(log_dir, 'version'))
+ _write_command_output_to_file(
+ ['dmesg'], os.path.join(log_dir, 'dmesg.txt'))
+ _write_command_output_to_file(
+ ['journalctl', '-o', 'short-precise'],
+ os.path.join(log_dir, 'journal.txt'))
+ for log in CLOUDINIT_LOGS:
+ copy(log, log_dir)
+ if include_userdata:
+ copy(USER_DATA_FILE, log_dir)
+ run_dir = os.path.join(log_dir, 'run')
+ ensure_dir(run_dir)
+ shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init'))
+ with chdir(tmp_dir):
+ subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
+
+
+def handle_collect_logs_args(name, args):
+ """Handle calls to 'cloud-init collect-logs' as a subcommand."""
+ collect_logs(args.tarfile, args.userdata)
+
+
+def main():
+ """Tool to collect and tar all cloud-init related logs."""
+ parser = get_parser()
+ handle_collect_logs_args('collect-logs', parser.parse_args())
+ return 0
+
+
+if __name__ == '__main__':
+ main()
+
+# vi: ts=4 expandtab
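
A short programmatic usage sketch of collect_logs (the output path is illustrative; the listed contents follow from the code above):

    from cloudinit.cmd.devel.logs import collect_logs

    # Equivalent to `cloud-init collect-logs -t /tmp/ci-logs.tgz -u`.
    # The tarball holds cloud-init-logs-<date>/ containing: version,
    # dmesg.txt, journal.txt, cloud-init.log, cloud-init-output.log,
    # user-data.txt and run/cloud-init/ (a copy of /run/cloud-init).
    collect_logs('/tmp/ci-logs.tgz', include_userdata=True)
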
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
new file mode 100644
index 00000000..acacc4ed
--- /dev/null
+++ b/cloudinit/cmd/devel/parser.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2017 Canonical Ltd.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Define 'devel' subcommand argument parsers to include in cloud-init cmd."""
+
+import argparse
+from cloudinit.config.schema import (
+ get_parser as schema_parser, handle_schema_args)
+
+
+def get_parser(parser=None):
+ if not parser:
+ parser = argparse.ArgumentParser(
+ prog='cloudinit-devel',
+ description='Run development cloud-init tools')
+ subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ subparsers.required = True
+
+ parser_schema = subparsers.add_parser(
+ 'schema', help='Validate cloud-config files or document schema')
+ # Construct schema subcommand parser
+ schema_parser(parser_schema)
+ parser_schema.set_defaults(action=('schema', handle_schema_args))
+
+ return parser
diff --git a/cloudinit/cmd/devel/tests/__init__.py b/cloudinit/cmd/devel/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/cmd/devel/tests/__init__.py
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
new file mode 100644
index 00000000..dc4947cc
--- /dev/null
+++ b/cloudinit/cmd/devel/tests/test_logs.py
@@ -0,0 +1,120 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.cmd.devel import logs
+from cloudinit.util import ensure_dir, load_file, subp, write_file
+from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call
+from datetime import datetime
+import os
+
+
+class TestCollectLogs(FilesystemMockingTestCase):
+
+ def setUp(self):
+ super(TestCollectLogs, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.run_dir = self.tmp_path('run', self.new_root)
+
+ def test_collect_logs_creates_tarfile(self):
+ """collect-logs creates a tarfile with all related cloud-init info."""
+ log1 = self.tmp_path('cloud-init.log', self.new_root)
+ write_file(log1, 'cloud-init-log')
+ log2 = self.tmp_path('cloud-init-output.log', self.new_root)
+ write_file(log2, 'cloud-init-output-log')
+ ensure_dir(self.run_dir)
+ write_file(self.tmp_path('results.json', self.run_dir), 'results')
+ output_tarfile = self.tmp_path('logs.tgz')
+
+ date = datetime.utcnow().date().strftime('%Y-%m-%d')
+ date_logdir = 'cloud-init-logs-{0}'.format(date)
+
+ expected_subp = {
+ ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
+ '0.7fake\n',
+ ('dmesg',): 'dmesg-out\n',
+ ('journalctl', '-o', 'short-precise'): 'journal-out\n',
+ ('tar', 'czvf', output_tarfile, date_logdir): ''
+ }
+
+ def fake_subp(cmd):
+ cmd_tuple = tuple(cmd)
+ if cmd_tuple not in expected_subp:
+ raise AssertionError(
+ 'Unexpected command provided to subp: {0}'.format(cmd))
+ if cmd == ['tar', 'czvf', output_tarfile, date_logdir]:
+ subp(cmd) # Pass through tar cmd so we can check output
+ return expected_subp[cmd_tuple], ''
+
+ wrap_and_call(
+ 'cloudinit.cmd.devel.logs',
+ {'subp': {'side_effect': fake_subp},
+ 'CLOUDINIT_LOGS': {'new': [log1, log2]},
+ 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}},
+ logs.collect_logs, output_tarfile, include_userdata=False)
+ # unpack the tarfile and check file contents
+ subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root])
+ out_logdir = self.tmp_path(date_logdir, self.new_root)
+ self.assertEqual(
+ '0.7fake\n',
+ load_file(os.path.join(out_logdir, 'version')))
+ self.assertEqual(
+ 'cloud-init-log',
+ load_file(os.path.join(out_logdir, 'cloud-init.log')))
+ self.assertEqual(
+ 'cloud-init-output-log',
+ load_file(os.path.join(out_logdir, 'cloud-init-output.log')))
+ self.assertEqual(
+ 'dmesg-out\n',
+ load_file(os.path.join(out_logdir, 'dmesg.txt')))
+ self.assertEqual(
+ 'journal-out\n',
+ load_file(os.path.join(out_logdir, 'journal.txt')))
+ self.assertEqual(
+ 'results',
+ load_file(
+ os.path.join(out_logdir, 'run', 'cloud-init', 'results.json')))
+
+ def test_collect_logs_includes_optional_userdata(self):
+ """collect-logs include userdata when --include-userdata is set."""
+ log1 = self.tmp_path('cloud-init.log', self.new_root)
+ write_file(log1, 'cloud-init-log')
+ log2 = self.tmp_path('cloud-init-output.log', self.new_root)
+ write_file(log2, 'cloud-init-output-log')
+ userdata = self.tmp_path('user-data.txt', self.new_root)
+ write_file(userdata, 'user-data')
+ ensure_dir(self.run_dir)
+ write_file(self.tmp_path('results.json', self.run_dir), 'results')
+ output_tarfile = self.tmp_path('logs.tgz')
+
+ date = datetime.utcnow().date().strftime('%Y-%m-%d')
+ date_logdir = 'cloud-init-logs-{0}'.format(date)
+
+ expected_subp = {
+ ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
+ '0.7fake',
+ ('dmesg',): 'dmesg-out\n',
+ ('journalctl', '-o', 'short-precise'): 'journal-out\n',
+ ('tar', 'czvf', output_tarfile, date_logdir): ''
+ }
+
+ def fake_subp(cmd):
+ cmd_tuple = tuple(cmd)
+ if cmd_tuple not in expected_subp:
+ raise AssertionError(
+ 'Unexpected command provided to subp: {0}'.format(cmd))
+ if cmd == ['tar', 'czvf', output_tarfile, date_logdir]:
+ subp(cmd) # Pass through tar cmd so we can check output
+ return expected_subp[cmd_tuple], ''
+
+ wrap_and_call(
+ 'cloudinit.cmd.devel.logs',
+ {'subp': {'side_effect': fake_subp},
+ 'CLOUDINIT_LOGS': {'new': [log1, log2]},
+ 'CLOUDINIT_RUN_DIR': {'new': self.run_dir},
+ 'USER_DATA_FILE': {'new': userdata}},
+ logs.collect_logs, output_tarfile, include_userdata=True)
+ # unpack the tarfile and check file contents
+ subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root])
+ out_logdir = self.tmp_path(date_logdir, self.new_root)
+ self.assertEqual(
+ 'user-data',
+ load_file(os.path.join(out_logdir, 'user-data.txt')))
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 139e03b3..6fb9d9e7 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -50,13 +50,6 @@ WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at "
# Module section template
MOD_SECTION_TPL = "cloud_%s_modules"
-# Things u can query on
-QUERY_DATA_TYPES = [
- 'data',
- 'data_raw',
- 'instance_id',
-]
-
# Frequency shortname to full name
# (so users don't have to remember the full name...)
FREQ_SHORT_NAMES = {
@@ -510,11 +503,6 @@ def main_modules(action_name, args):
return run_module_section(mods, name, name)
-def main_query(name, _args):
- raise NotImplementedError(("Action '%s' is not"
- " currently implemented") % (name))
-
-
def main_single(name, args):
# Cloud-init single stage is broken up into the following sub-stages
# 1. Ensure that the init object fetches its config without errors
@@ -688,11 +676,10 @@ def main_features(name, args):
def main(sysv_args=None):
- if sysv_args is not None:
- parser = argparse.ArgumentParser(prog=sysv_args[0])
- sysv_args = sysv_args[1:]
- else:
- parser = argparse.ArgumentParser()
+ if not sysv_args:
+ sysv_args = sys.argv
+ parser = argparse.ArgumentParser(prog=sysv_args[0])
+ sysv_args = sysv_args[1:]
# Top level args
parser.add_argument('--version', '-v', action='version',
@@ -713,7 +700,8 @@ def main(sysv_args=None):
default=False)
parser.set_defaults(reporter=None)
- subparsers = parser.add_subparsers()
+ subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ subparsers.required = True
# Each action and its sub-options (if any)
parser_init = subparsers.add_parser('init',
@@ -737,17 +725,6 @@ def main(sysv_args=None):
choices=('init', 'config', 'final'))
parser_mod.set_defaults(action=('modules', main_modules))
- # These settings are used when you want to query information
- # stored in the cloud-init data objects/directories/files
- parser_query = subparsers.add_parser('query',
- help=('query information stored '
- 'in cloud-init'))
- parser_query.add_argument("--name", '-n', action="store",
- help="item name to query on",
- required=True,
- choices=QUERY_DATA_TYPES)
- parser_query.set_defaults(action=('query', main_query))
-
# This subcommand allows you to run a single module
parser_single = subparsers.add_parser('single',
help=('run a single module '))
@@ -781,15 +758,39 @@ def main(sysv_args=None):
help=('list defined features'))
parser_features.set_defaults(action=('features', main_features))
+ parser_analyze = subparsers.add_parser(
+ 'analyze', help='Devel tool: Analyze cloud-init logs and data')
+
+ parser_devel = subparsers.add_parser(
+ 'devel', help='Run development tools')
+
+ parser_collect_logs = subparsers.add_parser(
+ 'collect-logs', help='Collect and tar all cloud-init debug info')
+
+ if sysv_args:
+ # Only load subparsers if subcommand is specified to avoid load cost
+ if sysv_args[0] == 'analyze':
+ from cloudinit.analyze.__main__ import get_parser as analyze_parser
+ # Construct analyze subcommand parser
+ analyze_parser(parser_analyze)
+ elif sysv_args[0] == 'devel':
+ from cloudinit.cmd.devel.parser import get_parser as devel_parser
+ # Construct devel subcommand parser
+ devel_parser(parser_devel)
+ elif sysv_args[0] == 'collect-logs':
+ from cloudinit.cmd.devel.logs import (
+ get_parser as logs_parser, handle_collect_logs_args)
+ logs_parser(parser_collect_logs)
+ parser_collect_logs.set_defaults(
+ action=('collect-logs', handle_collect_logs_args))
+
args = parser.parse_args(args=sysv_args)
- try:
- (name, functor) = args.action
- except AttributeError:
- parser.error('too few arguments')
+ # Subparsers.required = True and each subparser sets action=(name, functor)
+ (name, functor) = args.action
# Setup basic logging to start (until reinitialized)
- # iff in debug mode...
+ # iff in debug mode.
if args.debug:
logging.setupBasicLogging()
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 604f93b0..233da1ef 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -3,44 +3,73 @@
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Chad Smith <chad.smith@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Bootcmd
--------
-**Summary:** run commands early in boot process
-
-This module runs arbitrary commands very early in the boot process,
-only slightly after a boothook would run. This is very similar to a
-boothook, but more user friendly. The environment variable ``INSTANCE_ID``
-will be set to the current instance id for all run commands. Commands can be
-specified either as lists or strings. For invocation details, see ``runcmd``.
-
-.. note::
- bootcmd should only be used for things that could not be done later in the
- boot process.
-
-**Internal name:** ``cc_bootcmd``
-
-**Module frequency:** per always
-
-**Supported distros:** all
-
-**Config keys**::
-
- bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
- - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
-"""
+"""Bootcmd: run arbitrary commands early in the boot process."""
import os
+from textwrap import dedent
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
+from cloudinit import temp_utils
from cloudinit import util
frequency = PER_ALWAYS
+# The schema definition for each cloud-config module is a strict contract for
+# describing supported configuration parameters for each cloud-config section.
+# It allows cloud-config to validate and alert users to invalid or ignored
+# configuration options before actually attempting to deploy with said
+# configuration.
+
+distros = ['all']
+
+schema = {
+ 'id': 'cc_bootcmd',
+ 'name': 'Bootcmd',
+ 'title': 'Run arbitrary commands early in the boot process',
+ 'description': dedent("""\
+ This module runs arbitrary commands very early in the boot process,
+ only slightly after a boothook would run. This is very similar to a
+ boothook, but more user friendly. The environment variable
+ ``INSTANCE_ID`` will be set to the current instance id for all run
+ commands. Commands can be specified either as lists or strings. For
+ invocation details, see ``runcmd``.
+
+ .. note::
+ bootcmd should only be used for things that could not be done later
+ in the boot process."""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ bootcmd:
+ - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
+ - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
+ """)],
+ 'frequency': PER_ALWAYS,
+ 'type': 'object',
+ 'properties': {
+ 'bootcmd': {
+ 'type': 'array',
+ 'items': {
+ 'oneOf': [
+ {'type': 'array', 'items': {'type': 'string'}},
+ {'type': 'string'}]
+ },
+ 'additionalItems': False, # Reject items of non-string non-list
+ 'additionalProperties': False,
+ 'minItems': 1,
+ 'required': [],
+ 'uniqueItems': True
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
def handle(name, cfg, cloud, log, _args):
@@ -49,13 +78,14 @@ def handle(name, cfg, cloud, log, _args):
" no 'bootcmd' key in configuration"), name)
return
- with util.ExtendedTemporaryFile(suffix=".sh") as tmpf:
+ validate_cloudconfig_schema(cfg, schema)
+ with temp_utils.ExtendedTemporaryFile(suffix=".sh") as tmpf:
try:
content = util.shellify(cfg["bootcmd"])
tmpf.write(util.encode_text(content))
tmpf.flush()
- except Exception:
- util.logexc(log, "Failed to shellify bootcmd")
+ except Exception as e:
+ util.logexc(log, "Failed to shellify bootcmd: %s", str(e))
raise
try:
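
A sketch of what the schema above buys at runtime. It assumes the strict keyword of validate_cloudconfig_schema from cloudinit/config/schema.py, which raises on invalid config instead of only logging a warning:

    from cloudinit.config.cc_bootcmd import schema
    from cloudinit.config.schema import validate_cloudconfig_schema

    # Valid: items are strings or lists of strings.
    validate_cloudconfig_schema({'bootcmd': ['echo hi > /tmp/x']}, schema)
    # Invalid: an integer item matches neither allowed type; with
    # strict=True this raises a schema validation error.
    validate_cloudconfig_schema({'bootcmd': [1]}, schema, strict=True)
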
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 02c70b10..46abedd1 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -58,6 +58,9 @@ file).
log_level:
log_location:
node_name:
+ omnibus_url:
+ omnibus_url_retries:
+ omnibus_version:
pid_file:
server_url:
show_time:
@@ -279,6 +282,31 @@ def run_chef(chef_cfg, log):
util.subp(cmd, capture=False)
+def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
+ """Install an omnibus unified package from url.
+
+ @param url: URL where blob of chef content may be downloaded. Defaults to
+ OMNIBUS_URL.
+ @param retries: Number of retries to perform when attempting to read url.
+ Defaults to OMNIBUS_URL_RETRIES
+ @param omnibus_version: Optional version string to require for omnibus
+ install.
+ """
+ if url is None:
+ url = OMNIBUS_URL
+ if retries is None:
+ retries = OMNIBUS_URL_RETRIES
+
+ if omnibus_version is None:
+ args = []
+ else:
+ args = ['-v', omnibus_version]
+ content = url_helper.readurl(url=url, retries=retries).contents
+ return util.subp_blob_in_tempfile(
+ blob=content, args=args,
+ basename='chef-omnibus-install', capture=False)
+
+
def install_chef(cloud, chef_cfg, log):
# If chef is not installed, we install chef based on 'install_type'
install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
@@ -297,17 +325,11 @@ def install_chef(cloud, chef_cfg, log):
# This will install and run the chef-client from packages
cloud.distro.install_packages(('chef',))
elif install_type == 'omnibus':
- # This will install as a omnibus unified package
- url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
- retries = max(0, util.get_cfg_option_int(chef_cfg,
- "omnibus_url_retries",
- default=OMNIBUS_URL_RETRIES))
- content = url_helper.readurl(url=url, retries=retries).contents
- with util.tempdir() as tmpd:
- # Use tmpdir over tmpfile to avoid 'text file busy' on execute
- tmpf = "%s/chef-omnibus-install" % tmpd
- util.write_file(tmpf, content, mode=0o700)
- util.subp([tmpf], capture=False)
+ omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version")
+ install_chef_from_omnibus(
+ url=util.get_cfg_option_str(chef_cfg, "omnibus_url"),
+ retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
+ omnibus_version=omnibus_version)
else:
log.warn("Unknown chef install type '%s'", install_type)
run = False
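
The refactor makes the omnibus install path callable on its own; a brief usage sketch (the version pin is illustrative):

    from cloudinit.config.cc_chef import install_chef_from_omnibus

    # Fetches OMNIBUS_URL (the default) with 5 retries and executes the
    # installer from a tempfile; omnibus_version is passed as '-v <version>'.
    install_chef_from_omnibus(retries=5, omnibus_version='12.19.36')
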
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 86b71383..8f9f1abd 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -57,7 +57,7 @@ The following default client config is provided, but can be overridden::
import os
-from six import StringIO
+from six import BytesIO
from configobj import ConfigObj
@@ -109,7 +109,7 @@ def handle(_name, cfg, cloud, log, _args):
ls_cloudcfg,
]
merged = merge_together(merge_data)
- contents = StringIO()
+ contents = BytesIO()
merged.write(contents)
util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 31ed64e3..15ae1ecd 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -4,39 +4,10 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-NTP
----
-**Summary:** enable and configure ntp
-
-Handle ntp configuration. If ntp is not installed on the system and ntp
-configuration is specified, ntp will be installed. If there is a default ntp
-config file in the image or one is present in the distro's ntp package, it will
-be copied to ``/etc/ntp.conf.dist`` before any changes are made. A list of ntp
-pools and ntp servers can be provided under the ``ntp`` config key. If no ntp
-servers or pools are provided, 4 pools will be used in the format
-``{0-3}.{distro}.pool.ntp.org``.
-
-**Internal name:** ``cc_ntp``
-
-**Module frequency:** per instance
-
-**Supported distros:** centos, debian, fedora, opensuse, ubuntu
-
-**Config keys**::
-
- ntp:
- pools:
- - 0.company.pool.ntp.org
- - 1.company.pool.ntp.org
- - ntp.myorg.org
- servers:
- - my.ntp.server.local
- - ntp.ubuntu.com
- - 192.168.23.2
-"""
+"""NTP: enable and configure ntp"""
-from cloudinit.config.schema import validate_cloudconfig_schema
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import templater
@@ -50,6 +21,7 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
+TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
NR_POOL_SERVERS = 4
distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
@@ -75,10 +47,13 @@ schema = {
``{0-3}.{distro}.pool.ntp.org``."""),
'distros': distros,
'examples': [
- {'ntp': {'pools': ['0.company.pool.ntp.org', '1.company.pool.ntp.org',
- 'ntp.myorg.org'],
- 'servers': ['my.ntp.server.local', 'ntp.ubuntu.com',
- '192.168.23.2']}}],
+ dedent("""\
+ ntp:
+ pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org]
+ servers:
+ - ntp.server.local
+ - ntp.ubuntu.com
+ - 192.168.23.2""")],
'frequency': PER_INSTANCE,
'type': 'object',
'properties': {
@@ -116,6 +91,8 @@ schema = {
}
}
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
def handle(name, cfg, cloud, log, _args):
"""Enable and configure ntp."""
@@ -132,20 +109,50 @@ def handle(name, cfg, cloud, log, _args):
" is a %s %instead"), type_utils.obj_name(ntp_cfg))
validate_cloudconfig_schema(cfg, schema)
+ if ntp_installable():
+ service_name = 'ntp'
+ confpath = NTP_CONF
+ template_name = None
+ packages = ['ntp']
+ check_exe = 'ntpd'
+ else:
+ service_name = 'systemd-timesyncd'
+ confpath = TIMESYNCD_CONF
+ template_name = 'timesyncd.conf'
+ packages = []
+ check_exe = '/lib/systemd/systemd-timesyncd'
+
rename_ntp_conf()
# ensure when ntp is installed it has a configuration file
# to use instead of starting up with packaged defaults
- write_ntp_config_template(ntp_cfg, cloud)
- install_ntp(cloud.distro.install_packages, packages=['ntp'],
- check_exe="ntpd")
- # if ntp was already installed, it may not have started
+ write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name)
+ install_ntp(cloud.distro.install_packages, packages=packages,
+ check_exe=check_exe)
+
try:
- reload_ntp(systemd=cloud.distro.uses_systemd())
+ reload_ntp(service_name, systemd=cloud.distro.uses_systemd())
except util.ProcessExecutionError as e:
LOG.exception("Failed to reload/start ntp service: %s", e)
raise
+def ntp_installable():
+ """Check if we can install ntp package
+
+ Ubuntu-Core systems do not have an ntp package available, so
+ we always return False. Other systems require package managers to install
+ the ntp package If we fail to find one of the package managers, then we
+ cannot install ntp.
+ """
+ if util.system_is_snappy():
+ return False
+
+ if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])):
+ return True
+
+ return False
+
+
def install_ntp(install_func, packages=None, check_exe="ntpd"):
if util.which(check_exe):
return
@@ -156,7 +163,7 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"):
def rename_ntp_conf(config=None):
- """Rename any existing ntp.conf file and render from template"""
+ """Rename any existing ntp.conf file"""
if config is None: # For testing
config = NTP_CONF
if os.path.exists(config):
@@ -171,7 +178,7 @@ def generate_server_names(distro):
return names
-def write_ntp_config_template(cfg, cloud):
+def write_ntp_config_template(cfg, cloud, path, template=None):
servers = cfg.get('servers', [])
pools = cfg.get('pools', [])
@@ -185,19 +192,20 @@ def write_ntp_config_template(cfg, cloud):
'pools': pools,
}
- template_fn = cloud.get_template_filename('ntp.conf.%s' %
- (cloud.distro.name))
+ if template is None:
+ template = 'ntp.conf.%s' % cloud.distro.name
+
+ template_fn = cloud.get_template_filename(template)
if not template_fn:
template_fn = cloud.get_template_filename('ntp.conf')
if not template_fn:
raise RuntimeError(("No template found, "
- "not rendering %s"), NTP_CONF)
+ "not rendering %s"), path)
- templater.render_to_file(template_fn, NTP_CONF, params)
+ templater.render_to_file(template_fn, path, params)
-def reload_ntp(systemd=False):
- service = 'ntp'
+def reload_ntp(service, systemd=False):
if systemd:
cmd = ['systemctl', 'reload-or-restart', service]
else:
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index dc11561b..28b1d568 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -15,21 +15,23 @@ This module handles puppet installation and configuration. If the ``puppet``
key does not exist in global configuration, no action will be taken. If a
config entry for ``puppet`` is present, then by default the latest version of
puppet will be installed. If ``install`` is set to ``false``, puppet will not
-be installed. However, this may result in an error if puppet is not already
+be installed. However, this will result in an error if puppet is not already
present on the system. The version of puppet to be installed can be specified
under ``version``, and defaults to ``none``, which selects the latest version
in the repos. If the ``puppet`` config key exists in the config archive, this
module will attempt to start puppet even if no installation was performed.
-Puppet configuration can be specified under the ``conf`` key. The configuration
-is specified as a dictionary which is converted into ``<key>=<value>`` format
-and appended to ``puppet.conf`` under the ``[puppetd]`` section. The
+Puppet configuration can be specified under the ``conf`` key. The
+configuration is specified as a dictionary containing high-level ``<section>``
+keys and lists of ``<key>=<value>`` pairs within each section. Each section
+name and ``<key>=<value>`` pair is written directly to ``puppet.conf``. As
+such, section names should be one of: ``main``, ``master``, ``agent`` or
+``user`` and keys should be valid puppet configuration options. The
``certname`` key supports string substitutions for ``%i`` and ``%f``,
corresponding to the instance id and fqdn of the machine respectively.
-If ``ca_cert`` is present under ``conf``, it will not be written to
-``puppet.conf``, but instead will be used as the puppermaster certificate.
-It should be specified in pem format as a multi-line string (using the ``|``
-yaml notation).
+If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
+instead will be used as the puppetmaster certificate. It should be specified
+in pem format as a multi-line string (using the ``|`` yaml notation).
**Internal name:** ``cc_puppet``
@@ -43,12 +45,13 @@ yaml notation).
install: <true/false>
version: <version>
conf:
- server: "puppetmaster.example.org"
- certname: "%i.%f"
- ca_cert: |
- -------BEGIN CERTIFICATE-------
- <cert data>
- -------END CERTIFICATE-------
+ agent:
+ server: "puppetmaster.example.org"
+ certname: "%i.%f"
+ ca_cert: |
+ -------BEGIN CERTIFICATE-------
+ <cert data>
+ -------END CERTIFICATE-------
"""
from six import StringIO
@@ -127,7 +130,7 @@ def handle(name, cfg, cloud, log, _args):
util.write_file(PUPPET_SSL_CERT_PATH, cfg)
util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
else:
- # Iterate throug the config items, we'll use ConfigParser.set
+ # Iterate through the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed
for (o, v) in cfg.items():
if o == 'certname':
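The new conf layout maps each <section> to a block of <key>=<value> pairs written via ConfigParser. A rough standalone sketch of that mapping (section names and values invented; RawConfigParser is used because puppet values may contain '%'):

    # Sketch: render a puppet 'conf' dict into puppet.conf-style INI text.
    import configparser
    import io

    conf = {  # example cloud-config 'conf' value (invented)
        'agent': {'server': 'puppetmaster.example.org', 'certname': '%i.%f'},
    }

    parser = configparser.RawConfigParser()  # raw: no '%' interpolation
    for section, settings in conf.items():
        parser.add_section(section)
        for key, value in settings.items():
            parser.set(section, key, value)

    buf = io.StringIO()
    parser.write(buf)
    print(buf.getvalue())  # [agent] / server = puppetmaster.example.org ...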
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index ceee952b..f774baa3 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -6,31 +6,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Resizefs
---------
-**Summary:** resize filesystem
+"""Resizefs: cloud-config module which resizes the filesystem"""
-Resize a filesystem to use all avaliable space on partition. This module is
-useful along with ``cc_growpart`` and will ensure that if the root partition
-has been resized the root filesystem will be resized along with it. By default,
-``cc_resizefs`` will resize the root partition and will block the boot process
-while the resize command is running. Optionally, the resize operation can be
-performed in the background while cloud-init continues running modules. This
-can be enabled by setting ``resize_rootfs`` to ``true``. This module can be
-disabled altogether by setting ``resize_rootfs`` to ``false``.
-
-**Internal name:** ``cc_resizefs``
-
-**Module frequency:** per always
-
-**Supported distros:** all
-
-**Config keys**::
-
- resize_rootfs: <true/false/"noblock">
- resize_rootfs_tmp: <directory>
-"""
import errno
import getopt
@@ -38,11 +15,47 @@ import os
import re
import shlex
import stat
+from textwrap import dedent
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
from cloudinit import util
+NOBLOCK = "noblock"
+
frequency = PER_ALWAYS
+distros = ['all']
+
+schema = {
+ 'id': 'cc_resizefs',
+ 'name': 'Resizefs',
+ 'title': 'Resize filesystem',
+ 'description': dedent("""\
+ Resize a filesystem to use all available space on the partition. This
+ module is useful along with ``cc_growpart`` and will ensure that if the
+ root partition has been resized the root filesystem will be resized
+ along with it. By default, ``cc_resizefs`` will resize the root
+ partition and will block the boot process while the resize command is
+ running. Optionally, the resize operation can be performed in the
+ background while cloud-init continues running modules. This can be
+ enabled by setting ``resize_rootfs`` to ``true``. This module can be
+ disabled altogether by setting ``resize_rootfs`` to ``false``."""),
+ 'distros': distros,
+ 'examples': [
+ 'resize_rootfs: false # disable root filesystem resize operation'],
+ 'frequency': PER_ALWAYS,
+ 'type': 'object',
+ 'properties': {
+ 'resize_rootfs': {
+ 'enum': [True, False, NOBLOCK],
+ 'description': dedent("""\
+ Whether to resize the root partition. Default: 'true'""")
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
def _resize_btrfs(mount_point, devpth):
@@ -54,7 +67,7 @@ def _resize_ext(mount_point, devpth):
def _resize_xfs(mount_point, devpth):
- return ('xfs_growfs', devpth)
+ return ('xfs_growfs', mount_point)
def _resize_ufs(mount_point, devpth):
@@ -131,8 +144,6 @@ RESIZE_FS_PRECHECK_CMDS = {
'ufs': _can_skip_resize_ufs
}
-NOBLOCK = "noblock"
-
def rootdev_from_cmdline(cmdline):
found = None
@@ -161,71 +172,77 @@ def can_skip_resize(fs_type, resize_what, devpth):
return False
-def handle(name, cfg, _cloud, log, args):
- if len(args) != 0:
- resize_root = args[0]
- else:
- resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
+def is_device_path_writable_block(devpath, info, log):
+ """Return True if devpath is a writable block device.
- if not util.translate_bool(resize_root, addons=[NOBLOCK]):
- log.debug("Skipping module named %s, resizing disabled", name)
- return
-
- # TODO(harlowja) is the directory ok to be used??
- resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
- util.ensure_dir(resize_root_d)
-
- # TODO(harlowja): allow what is to be resized to be configurable??
- resize_what = "/"
- result = util.get_mount_info(resize_what, log)
- if not result:
- log.warn("Could not determine filesystem type of %s", resize_what)
- return
-
- (devpth, fs_type, mount_point) = result
-
- info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
- log.debug("resize_info: %s" % info)
+ @param devpath: Path to the root device we want to resize.
+ @param info: String representing information about the requested device.
+ @param log: Logger to which logs will be added upon error.
+ @returns Boolean True if block device is writable
+ """
container = util.is_container()
# Ensure the path is a block device.
- if (devpth == "/dev/root" and not os.path.exists(devpth) and
+ if (devpath == "/dev/root" and not os.path.exists(devpath) and
not container):
- devpth = util.rootdev_from_cmdline(util.get_cmdline())
- if devpth is None:
+ devpath = util.rootdev_from_cmdline(util.get_cmdline())
+ if devpath is None:
log.warn("Unable to find device '/dev/root'")
- return
- log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)
+ return False
+ log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
+
+ if devpath == 'overlayroot':
+ log.debug("Not attempting to resize devpath '%s': %s", devpath, info)
+ return False
try:
- statret = os.stat(devpth)
+ statret = os.stat(devpath)
except OSError as exc:
if container and exc.errno == errno.ENOENT:
log.debug("Device '%s' did not exist in container. "
- "cannot resize: %s", devpth, info)
+ "cannot resize: %s", devpath, info)
elif exc.errno == errno.ENOENT:
log.warn("Device '%s' did not exist. cannot resize: %s",
- devpth, info)
+ devpath, info)
else:
raise exc
- return
-
- if not os.access(devpth, os.W_OK):
- if container:
- log.debug("'%s' not writable in container. cannot resize: %s",
- devpth, info)
- else:
- log.warn("'%s' not writable. cannot resize: %s", devpth, info)
- return
+ return False
if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
if container:
log.debug("device '%s' not a block device in container."
- " cannot resize: %s" % (devpth, info))
+ " cannot resize: %s" % (devpath, info))
else:
log.warn("device '%s' not a block device. cannot resize: %s" %
- (devpth, info))
+ (devpath, info))
+ return False
+ return True
+
+
+def handle(name, cfg, _cloud, log, args):
+ if len(args) != 0:
+ resize_root = args[0]
+ else:
+ resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
+ validate_cloudconfig_schema(cfg, schema)
+ if not util.translate_bool(resize_root, addons=[NOBLOCK]):
+ log.debug("Skipping module named %s, resizing disabled", name)
+ return
+
+ # TODO(harlowja): allow what is to be resized to be configurable??
+ resize_what = "/"
+ result = util.get_mount_info(resize_what, log)
+ if not result:
+ log.warn("Could not determine filesystem type of %s", resize_what)
+ return
+
+ (devpth, fs_type, mount_point) = result
+
+ info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
+ log.debug("resize_info: %s" % info)
+
+ if not is_device_path_writable_block(devpth, info, log):
return
resizer = None
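The resize_rootfs enum above (true/false/"noblock") can be exercised directly with the jsonschema package, which is what validate_cloudconfig_schema wraps. A minimal sketch, assuming jsonschema is installed:

    # Minimal sketch: validate resize_rootfs against the enum in the schema.
    from jsonschema import Draft4Validator

    schema = {
        'type': 'object',
        'properties': {
            'resize_rootfs': {'enum': [True, False, 'noblock']},
        },
    }

    validator = Draft4Validator(schema)
    for config in ({'resize_rootfs': 'noblock'}, {'resize_rootfs': 'nope'}):
        errors = list(validator.iter_errors(config))
        print(config, '->', 'valid' if not errors else errors[0].message)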
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 2548d1f1..9812562a 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['fedora', 'rhel', 'sles']
+distros = ['fedora', 'opensuse', 'rhel', 'sles']
def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index dfa8cb3d..449872f0 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -6,41 +6,70 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Runcmd
-------
-**Summary:** run commands
+"""Runcmd: run arbitrary commands at rc.local with output to the console"""
-Run arbitrary commands at a rc.local like level with output to the console.
-Each item can be either a list or a string. If the item is a list, it will be
-properly executed as if passed to ``execve()`` (with the first arg as the
-command). If the item is a string, it will be written to a file and interpreted
-using ``sh``.
-
-.. note::
- all commands must be proper yaml, so you have to quote any characters yaml
- would eat (':' can be problematic)
-
-**Internal name:** ``cc_runcmd``
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
-**Module frequency:** per instance
+import os
+from textwrap import dedent
-**Supported distros:** all
-**Config keys**::
+# The schema definition for each cloud-config module is a strict contract for
+# describing supported configuration parameters for each cloud-config section.
+# It allows cloud-config to validate and alert users to invalid or ignored
+# configuration options before actually attempting to deploy with said
+# configuration.
- runcmd:
- - [ ls, -l, / ]
- - [ sh, -xc, "echo $(date) ': hello world!'" ]
- - [ sh, -c, echo "=========hello world'=========" ]
- - ls -l /root
- - [ wget, "http://example.org", -O, /tmp/index.html ]
-"""
+distros = [ALL_DISTROS]
+schema = {
+ 'id': 'cc_runcmd',
+ 'name': 'Runcmd',
+ 'title': 'Run arbitrary commands',
+ 'description': dedent("""\
+ Run arbitrary commands at a rc.local like level with output to the
+ console. Each item can be either a list or a string. If the item is a
+ list, it will be properly executed as if passed to ``execve()`` (with
+ the first arg as the command). If the item is a string, it will be
+        written to a file and interpreted using ``sh``.
-import os
+ .. note::
+ all commands must be proper yaml, so you have to quote any characters
+ yaml would eat (':' can be problematic)"""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ runcmd:
+ - [ ls, -l, / ]
+ - [ sh, -xc, "echo $(date) ': hello world!'" ]
+ - [ sh, -c, echo "=========hello world'=========" ]
+ - ls -l /root
+ - [ wget, "http://example.org", -O, /tmp/index.html ]
+ """)],
+ 'frequency': PER_INSTANCE,
+ 'type': 'object',
+ 'properties': {
+ 'runcmd': {
+ 'type': 'array',
+ 'items': {
+ 'oneOf': [
+ {'type': 'array', 'items': {'type': 'string'}},
+ {'type': 'string'}]
+ },
+ 'additionalItems': False, # Reject items of non-string non-list
+ 'additionalProperties': False,
+ 'minItems': 1,
+ 'required': [],
+ 'uniqueItems': True
+ }
+ }
+}
-from cloudinit import util
+__doc__ = get_schema_doc(schema) # Supplement python help()
def handle(name, cfg, cloud, log, _args):
@@ -49,6 +78,7 @@ def handle(name, cfg, cloud, log, _args):
" no 'runcmd' key in configuration"), name)
return
+ validate_cloudconfig_schema(cfg, schema)
out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd")
cmd = cfg["runcmd"]
try:
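To make the oneOf rule concrete: each runcmd entry must be either a string or a list of strings. A small sketch of just that fragment, again assuming the jsonschema package:

    # Sketch: runcmd items may be a string or a list of strings, nothing else.
    from jsonschema import Draft4Validator

    fragment = {
        'type': 'array',
        'items': {'oneOf': [
            {'type': 'array', 'items': {'type': 'string'}},
            {'type': 'string'}]},
    }

    validator = Draft4Validator(fragment)
    good = [['ls', '-l', '/'], 'ls -l /root']
    bad = [42]  # neither a string nor a list of strings
    print(list(validator.iter_errors(good)))   # []
    print([e.message for e in validator.iter_errors(bad)])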
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index a9682f19..eecb8178 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -63,11 +63,11 @@ is ``auto``. Options are:
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import temp_utils
from cloudinit import util
import glob
import os
-import tempfile
LOG = logging.getLogger(__name__)
@@ -183,7 +183,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
# config
# Note, however, we do not touch config files on disk.
nested_cfg = {'config': {shortname: config}}
- (fd, cfg_tmpf) = tempfile.mkstemp()
+ (fd, cfg_tmpf) = temp_utils.mkstemp()
os.write(fd, util.yaml_dumps(nested_cfg).encode())
os.close(fd)
cfgfile = cfg_tmpf
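The tempfile.mkstemp to temp_utils.mkstemp swap keeps the same call shape; assuming the new helper is call-compatible with the stdlib, the write/close pattern is unchanged. A stdlib-only sketch of that pattern:

    # Sketch of the mkstemp write/close pattern in render_snap_op;
    # cloudinit.temp_utils.mkstemp is assumed to be call-compatible.
    import os
    import tempfile

    fd, cfg_tmpf = tempfile.mkstemp()
    try:
        os.write(fd, b"config: {}\n")  # placeholder YAML payload
    finally:
        os.close(fd)
    print('wrote', cfg_tmpf)
    os.unlink(cfg_tmpf)  # clean up the sketch's temp file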
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 0066e97f..35d8c57f 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -28,7 +28,7 @@ the keys can be specified, but defaults to ``md5``.
import base64
import hashlib
-from prettytable import PrettyTable
+from cloudinit.simpletable import SimpleTable
from cloudinit.distros import ug_util
from cloudinit import ssh_util
@@ -74,7 +74,7 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
return
tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
'Comment']
- tbl = PrettyTable(tbl_fields)
+ tbl = SimpleTable(tbl_fields)
for entry in key_entries:
if _is_printable_key(entry):
row = []
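The PrettyTable to SimpleTable swap relies only on the constructor, add_row() and get_string(); SimpleTable (added elsewhere in this changeset) is assumed to mirror that surface, which is all this module uses. The usage pattern:

    # Usage pattern for the swapped-in table class; API assumed to mirror
    # PrettyTable's add_row()/get_string().
    from cloudinit.simpletable import SimpleTable

    tbl = SimpleTable(['Keytype', 'Fingerprint (md5)', 'Options', 'Comment'])
    tbl.add_row(['ssh-rsa', 'aa:bb:cc', '-', 'example@host'])
    print(tbl.get_string())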
diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py
deleted file mode 100644
index 5dd26901..00000000
--- a/cloudinit/config/cc_ubuntu_init_switch.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright (C) 2014 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-Ubuntu Init Switch
-------------------
-**Summary:** reboot system into another init.
-
-This module provides a way for the user to boot with systemd even if the image
-is set to boot with upstart. It should be run as one of the first
-``cloud_init_modules``, and will switch the init system and then issue a
-reboot. The next boot will come up in the target init system and no action
-will be taken. This should be inert on non-ubuntu systems, and also
-exit quickly.
-
-.. note::
- best effort is made, but it's possible this system will break, and probably
- won't interact well with any other mechanism you've used to switch the init
- system.
-
-**Internal name:** ``cc_ubuntu_init_switch``
-
-**Module frequency:** once per instance
-
-**Supported distros:** ubuntu
-
-**Config keys**::
-
- init_switch:
- target: systemd (can be 'systemd' or 'upstart')
- reboot: true (reboot if a change was made, or false to not reboot)
-"""
-
-from cloudinit.distros import ubuntu
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-import os
-import time
-
-frequency = PER_INSTANCE
-REBOOT_CMD = ["/sbin/reboot", "--force"]
-
-DEFAULT_CONFIG = {
- 'init_switch': {'target': None, 'reboot': True}
-}
-
-SWITCH_INIT = """
-#!/bin/sh
-# switch_init: [upstart | systemd]
-
-is_systemd() {
- [ "$(dpkg-divert --listpackage /sbin/init)" = "systemd-sysv" ]
-}
-debug() { echo "$@" 1>&2; }
-fail() { echo "$@" 1>&2; exit 1; }
-
-if [ "$1" = "systemd" ]; then
- if is_systemd; then
- debug "already systemd, nothing to do"
- else
- [ -f /lib/systemd/systemd ] || fail "no systemd available";
- dpkg-divert --package systemd-sysv --divert /sbin/init.diverted \\
- --rename /sbin/init
- fi
- [ -f /sbin/init ] || ln /lib/systemd/systemd /sbin/init
-elif [ "$1" = "upstart" ]; then
- if is_systemd; then
- rm -f /sbin/init
- dpkg-divert --package systemd-sysv --rename --remove /sbin/init
- else
- debug "already upstart, nothing to do."
- fi
-else
- fail "Error. expect 'upstart' or 'systemd'"
-fi
-"""
-
-distros = ['ubuntu']
-
-
-def handle(name, cfg, cloud, log, args):
- """Handler method activated by cloud-init."""
-
- if not isinstance(cloud.distro, ubuntu.Distro):
- log.debug("%s: distro is '%s', not ubuntu. returning",
- name, cloud.distro.__class__)
- return
-
- cfg = util.mergemanydict([cfg, DEFAULT_CONFIG])
- target = cfg['init_switch']['target']
- reboot = cfg['init_switch']['reboot']
-
- if len(args) != 0:
- target = args[0]
- if len(args) > 1:
- reboot = util.is_true(args[1])
-
- if not target:
- log.debug("%s: target=%s. nothing to do", name, target)
- return
-
- if not util.which('dpkg'):
- log.warn("%s: 'dpkg' not available. Assuming not ubuntu", name)
- return
-
- supported = ('upstart', 'systemd')
- if target not in supported:
- log.warn("%s: target set to %s, expected one of: %s",
- name, target, str(supported))
-
- if os.path.exists("/run/systemd/system"):
- current = "systemd"
- else:
- current = "upstart"
-
- if current == target:
- log.debug("%s: current = target = %s. nothing to do", name, target)
- return
-
- try:
- util.subp(['sh', '-s', target], data=SWITCH_INIT)
- except util.ProcessExecutionError as e:
- log.warn("%s: Failed to switch to init '%s'. %s", name, target, e)
- return
-
- if util.is_false(reboot):
- log.info("%s: switched '%s' to '%s'. reboot=false, not rebooting.",
- name, current, target)
- return
-
- try:
- log.warn("%s: switched '%s' to '%s'. rebooting.",
- name, current, target)
- logging.flushLoggers(log)
- _fire_reboot(log, wait_attempts=4, initial_sleep=4)
- except Exception as e:
- util.logexc(log, "Requested reboot did not happen!")
- raise
-
-
-def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
- start = time.time()
- wait_time = initial_sleep
- for _i in range(0, wait_attempts):
- time.sleep(wait_time)
- wait_time *= backoff
- elapsed = time.time() - start
- log.debug("Rebooted, but still running after %s seconds", int(elapsed))
- # If we got here, not good
- elapsed = time.time() - start
- raise RuntimeError(("Reboot did not happen"
- " after %s seconds!") % (int(elapsed)))
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
new file mode 100644
index 00000000..aba26952
--- /dev/null
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -0,0 +1,218 @@
+#
+# Copyright (C) 2017 SUSE LLC.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""zypper_add_repo: Add zyper repositories to the system"""
+
+import configobj
+import os
+from six import string_types
+from textwrap import dedent
+
+from cloudinit.config.schema import get_schema_doc
+from cloudinit import log as logging
+from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
+
+distros = ['opensuse', 'sles']
+
+schema = {
+ 'id': 'cc_zypper_add_repo',
+ 'name': 'ZypperAddRepo',
+ 'title': 'Configure zypper behavior and add zypper repositories',
+ 'description': dedent("""\
+ Configure zypper behavior by modifying /etc/zypp/zypp.conf. The
+ configuration writer is "dumb" and will simply append the provided
+        configuration options to the configuration file. Duplicate option
+        settings are resolved by the way the zypp.conf file is parsed. The
+        file is in INI format.
+ Add repositories to the system. No validation is performed on the
+ repository file entries, it is assumed the user is familiar with
+ the zypper repository file format."""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ zypper:
+ repos:
+ - id: opensuse-oss
+ name: os-oss
+ baseurl: http://dl.opensuse.org/dist/leap/v/repo/oss/
+ enabled: 1
+ autorefresh: 1
+ - id: opensuse-oss-update
+ name: os-oss-up
+ baseurl: http://dl.opensuse.org/dist/leap/v/update
+ # any setting per
+ # https://en.opensuse.org/openSUSE:Standards_RepoInfo
+ # enable and autorefresh are on by default
+ config:
+ reposdir: /etc/zypp/repos.dir
+ servicesdir: /etc/zypp/services.d
+ download.use_deltarpm: true
+ # any setting in /etc/zypp/zypp.conf
+ """)],
+ 'frequency': PER_ALWAYS,
+ 'type': 'object',
+ 'properties': {
+ 'zypper': {
+ 'type': 'object',
+ 'properties': {
+ 'repos': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The unique id of the repo, used when
+ writing
+ /etc/zypp/repos.d/<id>.repo.""")
+ },
+ 'baseurl': {
+ 'type': 'string',
+ 'format': 'uri', # built-in format type
+                            'description': 'The base repository URL'
+ }
+ },
+ 'required': ['id', 'baseurl'],
+ 'additionalProperties': True
+ },
+ 'minItems': 1
+ },
+ 'config': {
+ 'type': 'object',
+ 'description': dedent("""\
+                        Any supported zypp.conf key is written to
+                        /etc/zypp/zypp.conf""")
+ }
+ },
+ 'required': [],
+ 'minProperties': 1, # Either config or repo must be provided
+ 'additionalProperties': False, # only repos and config allowed
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
+LOG = logging.getLogger(__name__)
+
+
+def _canonicalize_id(repo_id):
+ repo_id = repo_id.replace(" ", "_")
+ return repo_id
+
+
+def _format_repo_value(val):
+ if isinstance(val, bool):
+ # zypp prefers 1/0
+ return 1 if val else 0
+ if isinstance(val, (list, tuple)):
+ return "\n ".join([_format_repo_value(v) for v in val])
+ if not isinstance(val, string_types):
+ return str(val)
+ return val
+
+
+def _format_repository_config(repo_id, repo_config):
+ to_be = configobj.ConfigObj()
+ to_be[repo_id] = {}
+ # Do basic translation of the items -> values
+ for (k, v) in repo_config.items():
+ # For now assume that people using this know the format
+ # of zypper repos and don't verify keys/values further
+ to_be[repo_id][k] = _format_repo_value(v)
+ lines = to_be.write()
+ return "\n".join(lines)
+
+
+def _write_repos(repos, repo_base_path):
+ """Write the user-provided repo definition files
+ @param repos: A list of repo dictionary objects provided by the user's
+ cloud config.
+ @param repo_base_path: The directory path to which repo definitions are
+ written.
+ """
+
+ if not repos:
+ return
+ valid_repos = {}
+ for index, user_repo_config in enumerate(repos):
+ # Skip on absent required keys
+ missing_keys = set(['id', 'baseurl']).difference(set(user_repo_config))
+ if missing_keys:
+ LOG.warning(
+ "Repo config at index %d is missing required config keys: %s",
+ index, ",".join(missing_keys))
+ continue
+ repo_id = user_repo_config.get('id')
+ canon_repo_id = _canonicalize_id(repo_id)
+ repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
+ if os.path.exists(repo_fn_pth):
+ LOG.info("Skipping repo %s, file %s already exists!",
+ repo_id, repo_fn_pth)
+ continue
+ elif repo_id in valid_repos:
+ LOG.info("Skipping repo %s, file %s already pending!",
+ repo_id, repo_fn_pth)
+ continue
+
+ # Do some basic key formatting
+ repo_config = dict(
+ (k.lower().strip().replace("-", "_"), v)
+ for k, v in user_repo_config.items()
+ if k and k != 'id')
+
+ # Set defaults if not present
+ for field in ['enabled', 'autorefresh']:
+ if field not in repo_config:
+ repo_config[field] = '1'
+
+ valid_repos[repo_id] = (repo_fn_pth, repo_config)
+
+ for (repo_id, repo_data) in valid_repos.items():
+ repo_blob = _format_repository_config(repo_id, repo_data[-1])
+ util.write_file(repo_data[0], repo_blob)
+
+
+def _write_zypp_config(zypper_config):
+ """Write to the default zypp configuration file /etc/zypp/zypp.conf"""
+ if not zypper_config:
+ return
+ zypp_config = '/etc/zypp/zypp.conf'
+ zypp_conf_content = util.load_file(zypp_config)
+ new_settings = ['# Added via cloud.cfg']
+ for setting, value in zypper_config.items():
+ if setting == 'configdir':
+ msg = 'Changing the location of the zypper configuration is '
+ msg += 'not supported, skipping "configdir" setting'
+ LOG.warning(msg)
+ continue
+ if value:
+ new_settings.append('%s=%s' % (setting, value))
+ if len(new_settings) > 1:
+ new_config = zypp_conf_content + '\n'.join(new_settings)
+ else:
+ new_config = zypp_conf_content
+ util.write_file(zypp_config, new_config)
+
+
+def handle(name, cfg, _cloud, log, _args):
+ zypper_section = cfg.get('zypper')
+ if not zypper_section:
+ LOG.debug(("Skipping module named %s,"
+ " no 'zypper' relevant configuration found"), name)
+ return
+ repos = zypper_section.get('repos')
+ if not repos:
+ LOG.debug(("Skipping module named %s,"
+ " no 'repos' configuration found"), name)
+ return
+ zypper_config = zypper_section.get('config', {})
+ repo_base_path = zypper_config.get('reposdir', '/etc/zypp/repos.d/')
+
+ _write_zypp_config(zypper_config)
+ _write_repos(repos, repo_base_path)
+
+# vi: ts=4 expandtab
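For orientation, _format_repository_config above reduces to handing configobj a section dict and joining the lines it emits. A sketch with an invented repo (assumes the configobj package is available, as the module imports it):

    # Sketch: how a repo dict becomes the INI blob written to
    # /etc/zypp/repos.d/<id>.repo (values pre-stringified, as
    # _format_repo_value would do).
    import configobj

    repo_id = 'opensuse-oss'
    repo_config = {
        'name': 'os-oss', 'enabled': '1', 'autorefresh': '1',
        'baseurl': 'http://dl.opensuse.org/dist/leap/v/repo/oss/',
    }

    out = configobj.ConfigObj()
    out[repo_id] = repo_config
    print('\n'.join(out.write()))
    # [opensuse-oss]
    # name = os-oss
    # ...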
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 6400f005..bb291ff8 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -3,19 +3,24 @@
from __future__ import print_function
-from cloudinit.util import read_file_or_url
+from cloudinit import importer
+from cloudinit.util import find_modules, read_file_or_url
import argparse
+from collections import defaultdict
+from copy import deepcopy
import logging
import os
+import re
import sys
import yaml
+_YAML_MAP = {True: 'true', False: 'false', None: 'null'}
SCHEMA_UNDEFINED = b'UNDEFINED'
CLOUD_CONFIG_HEADER = b'#cloud-config'
SCHEMA_DOC_TMPL = """
{name}
----
+{title_underbar}
**Summary:** {title}
{description}
@@ -31,6 +36,8 @@ SCHEMA_DOC_TMPL = """
{examples}
"""
SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'
+SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n'
+SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---'
class SchemaValidationError(ValueError):
@@ -83,11 +90,49 @@ def validate_cloudconfig_schema(config, schema, strict=False):
logging.warning('Invalid config:\n%s', '\n'.join(messages))
-def validate_cloudconfig_file(config_path, schema):
+def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
+ """Return contents of the cloud-config file annotated with schema errors.
+
+ @param cloudconfig: YAML-loaded object from the original_content.
+ @param original_content: The contents of a cloud-config file
+ @param schema_errors: List of tuples from a JSONSchemaValidationError. The
+ tuples consist of (schemapath, error_message).
+ """
+ if not schema_errors:
+ return original_content
+ schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content)
+ errors_by_line = defaultdict(list)
+ error_count = 1
+ error_footer = []
+ annotated_content = []
+ for path, msg in schema_errors:
+ errors_by_line[schemapaths[path]].append(msg)
+ error_footer.append('# E{0}: {1}'.format(error_count, msg))
+ error_count += 1
+ lines = original_content.decode().split('\n')
+ error_count = 1
+ for line_number, line in enumerate(lines):
+ errors = errors_by_line[line_number + 1]
+ if errors:
+ error_label = ','.join(
+ ['E{0}'.format(count + error_count)
+ for count in range(0, len(errors))])
+ error_count += len(errors)
+ annotated_content.append(line + '\t\t# ' + error_label)
+ else:
+ annotated_content.append(line)
+ annotated_content.append(
+ '# Errors: -------------\n{0}\n\n'.format('\n'.join(error_footer)))
+ return '\n'.join(annotated_content)
+
+
+def validate_cloudconfig_file(config_path, schema, annotate=False):
"""Validate cloudconfig file adheres to a specific jsonschema.
@param config_path: Path to the yaml cloud-config file to parse.
@param schema: Dict describing a valid jsonschema to validate against.
+ @param annotate: Boolean set True to print original config file with error
+ annotations on the offending lines.
@raises SchemaValidationError containing any of schema_errors encountered.
@raises RuntimeError when config_path does not exist.
@@ -108,18 +153,83 @@ def validate_cloudconfig_file(config_path, schema):
('format', 'File {0} is not valid yaml. {1}'.format(
config_path, str(e))),)
raise SchemaValidationError(errors)
- validate_cloudconfig_schema(
- cloudconfig, schema, strict=True)
+
+ try:
+ validate_cloudconfig_schema(
+ cloudconfig, schema, strict=True)
+ except SchemaValidationError as e:
+ if annotate:
+ print(annotated_cloudconfig_file(
+ cloudconfig, content, e.schema_errors))
+ raise
+
+
+def _schemapath_for_cloudconfig(config, original_content):
+ """Return a dictionary mapping schemapath to original_content line number.
+
+ @param config: The yaml.loaded config dictionary of a cloud-config file.
+ @param original_content: The simple file content of the cloud-config file
+ """
+ # FIXME Doesn't handle multi-line lists or multi-line strings
+ content_lines = original_content.decode().split('\n')
+ schema_line_numbers = {}
+ list_index = 0
+ RE_YAML_INDENT = r'^(\s*)'
+ scopes = []
+ for line_number, line in enumerate(content_lines):
+ indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+ if scopes:
+ previous_depth, path_prefix = scopes[-1]
+ else:
+ previous_depth = -1
+ path_prefix = ''
+ if line.startswith('- '):
+ key = str(list_index)
+ value = line[1:]
+ list_index += 1
+ else:
+ list_index = 0
+ key, value = line.split(':', 1)
+ while indent_depth <= previous_depth:
+ if scopes:
+ previous_depth, path_prefix = scopes.pop()
+ else:
+ previous_depth = -1
+ path_prefix = ''
+ if path_prefix:
+ key = path_prefix + '.' + key
+ scopes.append((indent_depth, key))
+ if value:
+ value = value.strip()
+ if value.startswith('['):
+ scopes.append((indent_depth + 2, key + '.0'))
+ for inner_list_index in range(0, len(yaml.safe_load(value))):
+ list_key = key + '.' + str(inner_list_index)
+ schema_line_numbers[list_key] = line_number + 1
+ schema_line_numbers[key] = line_number + 1
+ return schema_line_numbers
def _get_property_type(property_dict):
"""Return a string representing a property type from a given jsonschema."""
property_type = property_dict.get('type', SCHEMA_UNDEFINED)
+ if property_type == SCHEMA_UNDEFINED and property_dict.get('enum'):
+ property_type = [
+ str(_YAML_MAP.get(k, k)) for k in property_dict['enum']]
if isinstance(property_type, list):
property_type = '/'.join(property_type)
- item_type = property_dict.get('items', {}).get('type')
- if item_type:
- property_type = '{0} of {1}'.format(property_type, item_type)
+ items = property_dict.get('items', {})
+ sub_property_type = items.get('type', '')
+ # Collect each item type
+ for sub_item in items.get('oneOf', {}):
+ if sub_property_type:
+ sub_property_type += '/'
+ sub_property_type += '(' + _get_property_type(sub_item) + ')'
+ if sub_property_type:
+ return '{0} of {1}'.format(property_type, sub_property_type)
return property_type
@@ -146,12 +256,14 @@ def _get_schema_examples(schema, prefix=''):
examples = schema.get('examples')
if not examples:
return ''
- rst_content = '\n**Examples**::\n\n'
- for example in examples:
- example_yaml = yaml.dump(example, default_flow_style=False)
+ rst_content = SCHEMA_EXAMPLES_HEADER
+ for count, example in enumerate(examples):
         # Python2.6 is missing textwrap.indent
- lines = example_yaml.split('\n')
+ lines = example.split('\n')
indented_lines = [' {0}'.format(line) for line in lines]
+ if rst_content != SCHEMA_EXAMPLES_HEADER:
+ indented_lines.insert(
+ 0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1))
rst_content += '\n'.join(indented_lines)
return rst_content
@@ -162,61 +274,87 @@ def get_schema_doc(schema):
@param schema: Dict of jsonschema to render.
@raise KeyError: If schema lacks an expected key.
"""
- schema['property_doc'] = _get_property_doc(schema)
- schema['examples'] = _get_schema_examples(schema)
- schema['distros'] = ', '.join(schema['distros'])
- return SCHEMA_DOC_TMPL.format(**schema)
-
-
-def get_schema(section_key=None):
- """Return a dict of jsonschema defined in any cc_* module.
-
- @param: section_key: Optionally limit schema to a specific top-level key.
- """
- # TODO use util.find_modules in subsequent branch
- from cloudinit.config.cc_ntp import schema
- return schema
+ schema_copy = deepcopy(schema)
+ schema_copy['property_doc'] = _get_property_doc(schema)
+ schema_copy['examples'] = _get_schema_examples(schema)
+ schema_copy['distros'] = ', '.join(schema['distros'])
+ # Need an underbar of the same length as the name
+ schema_copy['title_underbar'] = re.sub(r'.', '-', schema['name'])
+ return SCHEMA_DOC_TMPL.format(**schema_copy)
+
+
+FULL_SCHEMA = None
+
+
+def get_schema():
+ """Return jsonschema coalesced from all cc_* cloud-config module."""
+ global FULL_SCHEMA
+ if FULL_SCHEMA:
+ return FULL_SCHEMA
+ full_schema = {
+ '$schema': 'http://json-schema.org/draft-04/schema#',
+ 'id': 'cloud-config-schema', 'allOf': []}
+
+ configs_dir = os.path.dirname(os.path.abspath(__file__))
+ potential_handlers = find_modules(configs_dir)
+ for (fname, mod_name) in potential_handlers.items():
+ mod_locs, looked_locs = importer.find_module(
+ mod_name, ['cloudinit.config'], ['schema'])
+ if mod_locs:
+ mod = importer.import_module(mod_locs[0])
+ full_schema['allOf'].append(mod.schema)
+ FULL_SCHEMA = full_schema
+ return full_schema
def error(message):
print(message, file=sys.stderr)
- return 1
+ sys.exit(1)
-def get_parser():
+def get_parser(parser=None):
"""Return a parser for supported cmdline arguments."""
- parser = argparse.ArgumentParser()
+ if not parser:
+ parser = argparse.ArgumentParser(
+ prog='cloudconfig-schema',
+ description='Validate cloud-config files or document schema')
parser.add_argument('-c', '--config-file',
help='Path of the cloud-config yaml file to validate')
parser.add_argument('-d', '--doc', action="store_true", default=False,
help='Print schema documentation')
- parser.add_argument('-k', '--key',
- help='Limit validation or docs to a section key')
+ parser.add_argument('--annotate', action="store_true", default=False,
+ help='Annotate existing cloud-config file with errors')
return parser
-def main():
- """Tool to validate schema of a cloud-config file or print schema docs."""
- parser = get_parser()
- args = parser.parse_args()
+def handle_schema_args(name, args):
+ """Handle provided schema args and perform the appropriate actions."""
exclusive_args = [args.config_file, args.doc]
if not any(exclusive_args) or all(exclusive_args):
- return error('Expected either --config-file argument or --doc')
-
- schema = get_schema()
+ error('Expected either --config-file argument or --doc')
+ full_schema = get_schema()
if args.config_file:
try:
- validate_cloudconfig_file(args.config_file, schema)
+ validate_cloudconfig_file(
+ args.config_file, full_schema, args.annotate)
except (SchemaValidationError, RuntimeError) as e:
- return error(str(e))
- print("Valid cloud-config file {0}".format(args.config_file))
+ if not args.annotate:
+ error(str(e))
+ else:
+ print("Valid cloud-config file {0}".format(args.config_file))
if args.doc:
- print(get_schema_doc(schema))
+ for subschema in full_schema['allOf']:
+ print(get_schema_doc(subschema))
+
+
+def main():
+ """Tool to validate schema of a cloud-config file or print schema docs."""
+ parser = get_parser()
+ handle_schema_args('cloudconfig-schema', parser.parse_args())
return 0
if __name__ == '__main__':
sys.exit(main())
-
# vi: ts=4 expandtab
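A compressed illustration of what annotated_cloudconfig_file produces: offending lines get an # E<n> label and a numbered error footer is appended. Simplified sketch with line numbers supplied directly (the real code derives them via _schemapath_for_cloudconfig):

    # Simplified sketch of the annotation pass: label offending lines, then
    # append a numbered error footer (line numbers here are given directly).
    from collections import defaultdict

    content = 'runcmd:\n - 42'
    schema_errors = [(2, "42 is not valid under any of the given schemas")]

    errors_by_line = defaultdict(list)
    footer = []
    for count, (line_no, msg) in enumerate(schema_errors, 1):
        errors_by_line[line_no].append('E%d' % count)
        footer.append('# E%d: %s' % (count, msg))

    annotated = []
    for line_no, line in enumerate(content.split('\n'), 1):
        labels = errors_by_line.get(line_no)
        annotated.append(line + ('\t\t# ' + ','.join(labels) if labels else ''))
    annotated.append('# Errors: -------------\n%s\n' % '\n'.join(footer))
    print('\n'.join(annotated))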
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 1fd48a7b..d5becd12 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -30,12 +30,16 @@ from cloudinit import util
from cloudinit.distros.parsers import hosts
+# Used when a cloud-config module can be run on all cloud-init distributions.
+# The value 'all' is surfaced in module documentation for distro support.
+ALL_DISTROS = 'all'
+
OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
'redhat': ['centos', 'fedora', 'rhel'],
'gentoo': ['gentoo'],
'freebsd': ['freebsd'],
- 'suse': ['sles'],
+ 'suse': ['opensuse', 'sles'],
'arch': ['arch'],
}
@@ -188,6 +192,9 @@ class Distro(object):
def _get_localhost_ip(self):
return "127.0.0.1"
+ def get_locale(self):
+ raise NotImplementedError()
+
@abc.abstractmethod
def _read_hostname(self, filename, default=None):
raise NotImplementedError()
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index b4c0ba72..f87a3432 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -14,6 +14,8 @@ from cloudinit.distros.parsers.hostname import HostnameConf
from cloudinit.settings import PER_INSTANCE
+import os
+
LOG = logging.getLogger(__name__)
@@ -52,31 +54,10 @@ class Distro(distros.Distro):
entries = net_util.translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
- dev_names = entries.keys()
- # Format for netctl
- for (dev, info) in entries.items():
- nameservers = []
- net_fn = self.network_conf_dir + dev
- net_cfg = {
- 'Connection': 'ethernet',
- 'Interface': dev,
- 'IP': info.get('bootproto'),
- 'Address': "('%s/%s')" % (info.get('address'),
- info.get('netmask')),
- 'Gateway': info.get('gateway'),
- 'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '')
- }
- util.write_file(net_fn, convert_netctl(net_cfg))
- if info.get('auto'):
- self._enable_interface(dev)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
-
- if nameservers:
- util.write_file(self.resolve_conf_fn,
- convert_resolv_conf(nameservers))
-
- return dev_names
+ return _render_network(
+ entries, resolv_conf=self.resolve_conf_fn,
+ conf_dir=self.network_conf_dir,
+ enable_func=self._enable_interface)
def _enable_interface(self, device_name):
cmd = ['netctl', 'reenable', device_name]
@@ -173,13 +154,60 @@ class Distro(distros.Distro):
["-y"], freq=PER_INSTANCE)
+def _render_network(entries, target="/", conf_dir="etc/netctl",
+ resolv_conf="etc/resolv.conf", enable_func=None):
+ """Render the translate_network format into netctl files in target.
+ Paths will be rendered under target.
+ """
+
+ devs = []
+ nameservers = []
+ resolv_conf = util.target_path(target, resolv_conf)
+ conf_dir = util.target_path(target, conf_dir)
+
+ for (dev, info) in entries.items():
+ if dev == 'lo':
+ # no configuration should be rendered for 'lo'
+ continue
+ devs.append(dev)
+ net_fn = os.path.join(conf_dir, dev)
+ net_cfg = {
+ 'Connection': 'ethernet',
+ 'Interface': dev,
+ 'IP': info.get('bootproto'),
+ 'Address': "%s/%s" % (info.get('address'),
+ info.get('netmask')),
+ 'Gateway': info.get('gateway'),
+ 'DNS': info.get('dns-nameservers', []),
+ }
+ util.write_file(net_fn, convert_netctl(net_cfg))
+ if enable_func and info.get('auto'):
+ enable_func(dev)
+ if 'dns-nameservers' in info:
+ nameservers.extend(info['dns-nameservers'])
+
+ if nameservers:
+ util.write_file(resolv_conf,
+ convert_resolv_conf(nameservers))
+ return devs
+
+
def convert_netctl(settings):
- """Returns a settings string formatted for netctl."""
- result = ''
- if isinstance(settings, dict):
- for k, v in settings.items():
- result = result + '%s=%s\n' % (k, v)
- return result
+ """Given a dictionary, returns a string in netctl profile format.
+
+ netctl profile is described at:
+ https://git.archlinux.org/netctl.git/tree/docs/netctl.profile.5.txt
+
+ Note that the 'Special Quoting Rules' are not handled here."""
+ result = []
+ for key in sorted(settings):
+ val = settings[key]
+ if val is None:
+ val = ""
+ elif isinstance(val, (tuple, list)):
+ val = "(" + ' '.join("'%s'" % v for v in val) + ")"
+ result.append("%s=%s\n" % (key, val))
+ return ''.join(result)
def convert_resolv_conf(settings):
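The rewritten convert_netctl sorts keys and renders lists with netctl's array quoting. A self-contained copy with invented interface values:

    # Self-contained copy of the new convert_netctl with sample input; the
    # interface values below are invented for illustration.
    def convert_netctl(settings):
        result = []
        for key in sorted(settings):
            val = settings[key]
            if val is None:
                val = ""
            elif isinstance(val, (tuple, list)):
                val = "(" + ' '.join("'%s'" % v for v in val) + ")"
            result.append("%s=%s\n" % (key, val))
        return ''.join(result)

    print(convert_netctl({
        'Connection': 'ethernet', 'Interface': 'eth0', 'IP': 'static',
        'Address': '192.0.2.10/24', 'Gateway': '192.0.2.1',
        'DNS': ['192.0.2.53'],
    }))
    # Address=192.0.2.10/24
    # Connection=ethernet
    # DNS=('192.0.2.53')
    # ...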
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index abfb81f4..33cc0bf1 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -61,11 +61,49 @@ class Distro(distros.Distro):
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
self.osfamily = 'debian'
+ self.default_locale = 'en_US.UTF-8'
+ self.system_locale = None
- def apply_locale(self, locale, out_fn=None):
+ def get_locale(self):
+ """Return the default locale if set, else use default locale"""
+
+ # read system locale value
+ if not self.system_locale:
+ self.system_locale = read_system_locale()
+
+ # Return system_locale setting if valid, else use default locale
+ return (self.system_locale if self.system_locale else
+ self.default_locale)
+
+ def apply_locale(self, locale, out_fn=None, keyname='LANG'):
+ """Apply specified locale to system, regenerate if specified locale
+ differs from system default."""
if not out_fn:
out_fn = LOCALE_CONF_FN
- apply_locale(locale, out_fn)
+
+ if not locale:
+ raise ValueError('Failed to provide locale value.')
+
+ # Only call locale regeneration if needed
+ # Update system locale config with specified locale if needed
+ distro_locale = self.get_locale()
+ conf_fn_exists = os.path.exists(out_fn)
+ sys_locale_unset = False if self.system_locale else True
+ need_regen = (locale.lower() != distro_locale.lower() or
+ not conf_fn_exists or sys_locale_unset)
+ need_conf = not conf_fn_exists or need_regen or sys_locale_unset
+
+ if need_regen:
+ regenerate_locale(locale, out_fn, keyname=keyname)
+ else:
+ LOG.debug(
+ "System has '%s=%s' requested '%s', skipping regeneration.",
+ keyname, self.system_locale, locale)
+
+ if need_conf:
+ update_locale_conf(locale, out_fn, keyname=keyname)
+ # once we've updated the system config, invalidate cache
+ self.system_locale = None
def install_packages(self, pkglist):
self.update_package_sources()
@@ -218,37 +256,47 @@ def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
LOG.warning(msg)
-def apply_locale(locale, sys_path=LOCALE_CONF_FN, keyname='LANG'):
- """Apply the locale.
-
- Run locale-gen for the provided locale and set the default
- system variable `keyname` appropriately in the provided `sys_path`.
-
- If sys_path indicates that `keyname` is already set to `locale`
- then no changes will be made and locale-gen not called.
- This allows images built with a locale already generated to not re-run
- locale-gen which can be very heavy.
- """
- if not locale:
- raise ValueError('Failed to provide locale value.')
-
+def read_system_locale(sys_path=LOCALE_CONF_FN, keyname='LANG'):
+ """Read system default locale setting, if present"""
+ sys_val = ""
if not sys_path:
raise ValueError('Invalid path: %s' % sys_path)
if os.path.exists(sys_path):
locale_content = util.load_file(sys_path)
- # if LANG isn't present, regen
sys_defaults = util.load_shell_content(locale_content)
sys_val = sys_defaults.get(keyname, "")
- if sys_val.lower() == locale.lower():
- LOG.debug(
- "System has '%s=%s' requested '%s', skipping regeneration.",
- keyname, sys_val, locale)
- return
- util.subp(['locale-gen', locale], capture=False)
+ return sys_val
+
+
+def update_locale_conf(locale, sys_path, keyname='LANG'):
+ """Update system locale config"""
+ LOG.debug('Updating %s with locale setting %s=%s',
+ sys_path, keyname, locale)
util.subp(
['update-locale', '--locale-file=' + sys_path,
'%s=%s' % (keyname, locale)], capture=False)
+
+def regenerate_locale(locale, sys_path, keyname='LANG'):
+ """
+ Run locale-gen for the provided locale and set the default
+ system variable `keyname` appropriately in the provided `sys_path`.
+
+ """
+ # special case for locales which do not require regen
+ # % locale -a
+ # C
+ # C.UTF-8
+ # POSIX
+ if locale.lower() in ['c', 'c.utf-8', 'posix']:
+        LOG.debug('%s=%s does not require regeneration', keyname, locale)
+ return
+
+ # finally, trigger regeneration
+ LOG.debug('Generating locales for %s', locale)
+ util.subp(['locale-gen', locale], capture=False)
+
+
# vi: ts=4 expandtab
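The new apply_locale reduces to two booleans, need_regen and need_conf. A condensed, side-effect-free sketch of that decision logic:

    # Condensed decision logic from the new Debian apply_locale (pure
    # function, no side effects).
    def locale_actions(requested, system_locale, conf_exists):
        """Return (need_regen, need_conf) as apply_locale computes them."""
        sys_locale_unset = not system_locale
        current = system_locale or 'en_US.UTF-8'  # default_locale fallback
        need_regen = (requested.lower() != current.lower() or
                      not conf_exists or sys_locale_unset)
        need_conf = not conf_exists or need_regen or sys_locale_unset
        return need_regen, need_conf

    print(locale_actions('en_US.UTF-8', 'en_US.UTF-8', True))   # (False, False)
    print(locale_actions('de_DE.UTF-8', 'en_US.UTF-8', True))   # (True, True)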
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
new file mode 100644
index 00000000..a219e9fb
--- /dev/null
+++ b/cloudinit/distros/opensuse.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2017 SUSE LLC
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Author: Robert Schweikert <rjschwei@suse.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# Leaning very heavily on the RHEL and Debian implementation
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import distros
+
+from cloudinit.distros.parsers.hostname import HostnameConf
+
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.distros import net_util
+from cloudinit.distros import rhel_util as rhutil
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(distros.Distro):
+ clock_conf_fn = '/etc/sysconfig/clock'
+ hostname_conf_fn = '/etc/HOSTNAME'
+ init_cmd = ['service']
+ locale_conf_fn = '/etc/sysconfig/language'
+ network_conf_fn = '/etc/sysconfig/network'
+ network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
+ resolve_conf_fn = '/etc/resolv.conf'
+ route_conf_tpl = '/etc/sysconfig/network/ifroute-%s'
+ systemd_hostname_conf_fn = '/etc/hostname'
+ systemd_locale_conf_fn = '/etc/locale.conf'
+ tz_local_fn = '/etc/localtime'
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ self._runner = helpers.Runners(paths)
+ self.osfamily = 'suse'
+ cfg['ssh_svcname'] = 'sshd'
+ if self.uses_systemd():
+ self.init_cmd = ['systemctl']
+ cfg['ssh_svcname'] = 'sshd.service'
+
+ def apply_locale(self, locale, out_fn=None):
+ if self.uses_systemd():
+ if not out_fn:
+ out_fn = self.systemd_locale_conf_fn
+ locale_cfg = {'LANG': locale}
+ else:
+ if not out_fn:
+ out_fn = self.locale_conf_fn
+ locale_cfg = {'RC_LANG': locale}
+ rhutil.update_sysconfig_file(out_fn, locale_cfg)
+
+ def install_packages(self, pkglist):
+ self.package_command(
+ 'install',
+ args='--auto-agree-with-licenses',
+ pkgs=pkglist
+ )
+
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
+
+ cmd = ['zypper']
+ # No user interaction possible, enable non-interactive mode
+ cmd.append('--non-interactive')
+
+        # Command is the operation, such as install
+ if command == 'upgrade':
+ command = 'update'
+ cmd.append(command)
+
+ # args are the arguments to the command, not global options
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ pkglist = util.expand_package_list('%s-%s', pkgs)
+ cmd.extend(pkglist)
+
+ # Allow the output of this to flow outwards (ie not be captured)
+ util.subp(cmd, capture=False)
+
+ def set_timezone(self, tz):
+ tz_file = self._find_tz_file(tz)
+ if self.uses_systemd():
+ # Currently, timedatectl complains if invoked during startup
+ # so for compatibility, create the link manually.
+ util.del_file(self.tz_local_fn)
+ util.sym_link(tz_file, self.tz_local_fn)
+ else:
+ # Adjust the sysconfig clock zone setting
+ clock_cfg = {
+ 'TIMEZONE': str(tz),
+ }
+ rhutil.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
+ # This ensures that the correct tz will be used for the system
+ util.copy(tz_file, self.tz_local_fn)
+
+ def update_package_sources(self):
+ self._runner.run("update-sources", self.package_command,
+ ['refresh'], freq=PER_INSTANCE)
+
+ def _bring_up_interfaces(self, device_names):
+ if device_names and 'all' in device_names:
+ raise RuntimeError(('Distro %s can not translate '
+ 'the device name "all"') % (self.name))
+ return distros.Distro._bring_up_interfaces(self, device_names)
+
+ def _read_hostname(self, filename, default=None):
+ if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ return util.load_file(filename).strip()
+ elif self.uses_systemd():
+ (out, _err) = util.subp(['hostname'])
+ if len(out):
+ return out
+ else:
+ return default
+ else:
+ try:
+ conf = self._read_hostname_conf(filename)
+ hostname = conf.hostname
+ except IOError:
+ pass
+ if not hostname:
+ return default
+ return hostname
+
+ def _read_hostname_conf(self, filename):
+ conf = HostnameConf(util.load_file(filename))
+ conf.parse()
+ return conf
+
+ def _read_system_hostname(self):
+ if self.uses_systemd():
+ host_fn = self.systemd_hostname_conf_fn
+ else:
+ host_fn = self.hostname_conf_fn
+ return (host_fn, self._read_hostname(host_fn))
+
+ def _write_hostname(self, hostname, out_fn):
+ if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
+ util.write_file(out_fn, hostname)
+ elif self.uses_systemd():
+ util.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ else:
+ conf = None
+ try:
+ # Try to update the previous one
+ # so lets see if we can read it first.
+ conf = self._read_hostname_conf(out_fn)
+ except IOError:
+ pass
+ if not conf:
+ conf = HostnameConf('')
+ conf.set_hostname(hostname)
+ util.write_file(out_fn, str(conf), 0o644)
+
+ def _write_network(self, settings):
+ # Convert debian settings to ifcfg format
+ entries = net_util.translate_network(settings)
+ LOG.debug("Translated ubuntu style network settings %s into %s",
+ settings, entries)
+ # Make the intermediate format as the suse format...
+ nameservers = []
+ searchservers = []
+ dev_names = entries.keys()
+ for (dev, info) in entries.items():
+ net_fn = self.network_script_tpl % (dev)
+ route_fn = self.route_conf_tpl % (dev)
+ mode = None
+ if info.get('auto', None):
+ mode = 'auto'
+ else:
+ mode = 'manual'
+ bootproto = info.get('bootproto', None)
+ gateway = info.get('gateway', None)
+ net_cfg = {
+ 'BOOTPROTO': bootproto,
+ 'BROADCAST': info.get('broadcast'),
+ 'GATEWAY': gateway,
+ 'IPADDR': info.get('address'),
+ 'LLADDR': info.get('hwaddress'),
+ 'NETMASK': info.get('netmask'),
+ 'STARTMODE': mode,
+ 'USERCONTROL': 'no'
+ }
+ if dev != 'lo':
+ net_cfg['ETHTOOL_OPTIONS'] = ''
+ else:
+ net_cfg['FIREWALL'] = 'no'
+ rhutil.update_sysconfig_file(net_fn, net_cfg, True)
+ if gateway and bootproto == 'static':
+ default_route = 'default %s' % gateway
+ util.write_file(route_fn, default_route, 0o644)
+ if 'dns-nameservers' in info:
+ nameservers.extend(info['dns-nameservers'])
+ if 'dns-search' in info:
+ searchservers.extend(info['dns-search'])
+ if nameservers or searchservers:
+ rhutil.update_resolve_conf_file(self.resolve_conf_fn,
+ nameservers, searchservers)
+ return dev_names
+
+# vi: ts=4 expandtab
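The zypper invocation package_command assembles is easy to see in isolation. A sketch of the argv construction that executes nothing, with expand_package_list handling elided:

    # Sketch: argv that package_command assembles for zypper (nothing run).
    def zypper_argv(command, args=None, pkgs=()):
        cmd = ['zypper', '--non-interactive']
        if command == 'upgrade':
            command = 'update'
        cmd.append(command)
        if isinstance(args, str):
            cmd.append(args)
        elif isinstance(args, list):
            cmd.extend(args)
        cmd.extend(pkgs)  # expand_package_list handling elided
        return cmd

    print(zypper_argv('install', '--auto-agree-with-licenses', ['vim']))
    # ['zypper', '--non-interactive', 'install',
    #  '--auto-agree-with-licenses', 'vim']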
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index dbec2edf..6e336cbf 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -1,167 +1,17 @@
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2017 SUSE LLC
#
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Robert Schweikert <rjschwei@suse.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
+from cloudinit.distros import opensuse
-from cloudinit.distros.parsers.hostname import HostnameConf
-
-from cloudinit import helpers
from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.distros import net_util
-from cloudinit.distros import rhel_util
-from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-class Distro(distros.Distro):
- clock_conf_fn = '/etc/sysconfig/clock'
- locale_conf_fn = '/etc/sysconfig/language'
- network_conf_fn = '/etc/sysconfig/network'
- hostname_conf_fn = '/etc/HOSTNAME'
- network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
- resolve_conf_fn = '/etc/resolv.conf'
- tz_local_fn = '/etc/localtime'
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatly happening (when they
- # should only happen say once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'suse'
-
- def install_packages(self, pkglist):
- self.package_command('install', args='-l', pkgs=pkglist)
-
- def _write_network(self, settings):
- # Convert debian settings to ifcfg format
- entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
- # Make the intermediate format as the suse format...
- nameservers = []
- searchservers = []
- dev_names = entries.keys()
- for (dev, info) in entries.items():
- net_fn = self.network_script_tpl % (dev)
- mode = info.get('auto')
- if mode and mode.lower() == 'true':
- mode = 'auto'
- else:
- mode = 'manual'
- net_cfg = {
- 'BOOTPROTO': info.get('bootproto'),
- 'BROADCAST': info.get('broadcast'),
- 'GATEWAY': info.get('gateway'),
- 'IPADDR': info.get('address'),
- 'LLADDR': info.get('hwaddress'),
- 'NETMASK': info.get('netmask'),
- 'STARTMODE': mode,
- 'USERCONTROL': 'no'
- }
- if dev != 'lo':
- net_cfg['ETHERDEVICE'] = dev
- net_cfg['ETHTOOL_OPTIONS'] = ''
- else:
- net_cfg['FIREWALL'] = 'no'
- rhel_util.update_sysconfig_file(net_fn, net_cfg, True)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchservers.extend(info['dns-search'])
- if nameservers or searchservers:
- rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
- nameservers, searchservers)
- return dev_names
-
- def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- locale_cfg = {
- 'RC_LANG': locale,
- }
- rhel_util.update_sysconfig_file(out_fn, locale_cfg)
-
- def _write_hostname(self, hostname, out_fn):
- conf = None
- try:
- # Try to update the previous one
- # so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
- except IOError:
- pass
- if not conf:
- conf = HostnameConf('')
- conf.set_hostname(hostname)
- util.write_file(out_fn, str(conf), 0o644)
-
- def _read_system_hostname(self):
- host_fn = self.hostname_conf_fn
- return (host_fn, self._read_hostname(host_fn))
-
- def _read_hostname_conf(self, filename):
- conf = HostnameConf(util.load_file(filename))
- conf.parse()
- return conf
-
- def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- conf = self._read_hostname_conf(filename)
- hostname = conf.hostname
- except IOError:
- pass
- if not hostname:
- return default
- return hostname
-
- def _bring_up_interfaces(self, device_names):
- if device_names and 'all' in device_names:
- raise RuntimeError(('Distro %s can not translate '
- 'the device name "all"') % (self.name))
- return distros.Distro._bring_up_interfaces(self, device_names)
-
- def set_timezone(self, tz):
- tz_file = self._find_tz_file(tz)
- # Adjust the sysconfig clock zone setting
- clock_cfg = {
- 'TIMEZONE': str(tz),
- }
- rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
- # This ensures that the correct tz will be used for the system
- util.copy(tz_file, self.tz_local_fn)
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- cmd = ['zypper']
- # No user interaction possible, enable non-interactive mode
- cmd.append('--non-interactive')
-
- # Comand is the operation, such as install
- cmd.append(command)
-
- # args are the arguments to the command, not global options
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ['refresh'], freq=PER_INSTANCE)
+class Distro(opensuse.Distro):
+ pass
# vi: ts=4 expandtab
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index f01021aa..1979cd96 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -13,7 +13,7 @@ from time import time
import contextlib
import os
-import six
+from six import StringIO
from six.moves.configparser import (
NoSectionError, NoOptionError, RawConfigParser)
@@ -441,12 +441,12 @@ class DefaultingConfigParser(RawConfigParser):
def stringify(self, header=None):
contents = ''
- with six.StringIO() as outputstream:
- self.write(outputstream)
- outputstream.flush()
- contents = outputstream.getvalue()
- if header:
- contents = "\n".join([header, contents])
+ outputstream = StringIO()
+ self.write(outputstream)
+ outputstream.flush()
+ contents = outputstream.getvalue()
+ if header:
+ contents = '\n'.join([header, contents, ''])
return contents
# vi: ts=4 expandtab
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 3861709e..1d75c9ff 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -19,6 +19,8 @@ import sys
import six
from six import StringIO
+import time
+
# Logging levels for easy access
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL
@@ -32,6 +34,9 @@ NOTSET = logging.NOTSET
# Default basic format
DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'
+# Always format logging timestamps as UTC time
+logging.Formatter.converter = time.gmtime
+
def setupBasicLogging(level=DEBUG):
root = logging.getLogger()
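Assigning logging.Formatter.converter = time.gmtime flips every formatter's %(asctime)s to UTC process-wide. A tiny demonstration:

    # Demo: module-level converter override makes asctime render in UTC.
    import logging
    import time

    logging.Formatter.converter = time.gmtime  # same trick as cloudinit/log.py
    logging.basicConfig(
        format='%(asctime)s - %(filename)s[%(levelname)s]: %(message)s')
    logging.getLogger().warning('timestamps are now UTC')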
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 46cb9c85..a1b0db10 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -175,13 +175,8 @@ def is_disabled_cfg(cfg):
return cfg.get('config') == "disabled"
-def generate_fallback_config(blacklist_drivers=None, config_driver=None):
- """Determine which attached net dev is most likely to have a connection and
- generate network state to run dhcp on that interface"""
-
- if not config_driver:
- config_driver = False
-
+def find_fallback_nic(blacklist_drivers=None):
+ """Return the name of the 'fallback' network device."""
if not blacklist_drivers:
blacklist_drivers = []
@@ -233,15 +228,24 @@ def generate_fallback_config(blacklist_drivers=None, config_driver=None):
if DEFAULT_PRIMARY_INTERFACE in names:
names.remove(DEFAULT_PRIMARY_INTERFACE)
names.insert(0, DEFAULT_PRIMARY_INTERFACE)
- target_name = None
- target_mac = None
+
+ # pick the first that has a mac-address
for name in names:
- mac = read_sys_net_safe(name, 'address')
- if mac:
- target_name = name
- target_mac = mac
- break
- if target_mac and target_name:
+ if read_sys_net_safe(name, 'address'):
+ return name
+ return None
+
+
+def generate_fallback_config(blacklist_drivers=None, config_driver=None):
+ """Determine which attached net dev is most likely to have a connection and
+ generate network state to run dhcp on that interface"""
+
+ if not config_driver:
+ config_driver = False
+
+ target_name = find_fallback_nic(blacklist_drivers=blacklist_drivers)
+ if target_name:
+ target_mac = read_sys_net_safe(target_name, 'address')
nconf = {'config': [], 'version': 1}
cfg = {'type': 'physical', 'name': target_name,
'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}
@@ -511,21 +515,7 @@ def get_interfaces_by_mac():
Bridges and any devices that have a 'stolen' mac are excluded."""
ret = {}
- devs = get_devicelist()
- empty_mac = '00:00:00:00:00:00'
- for name in devs:
- if not interface_has_own_mac(name):
- continue
- if is_bridge(name):
- continue
- if is_vlan(name):
- continue
- mac = get_interface_mac(name)
- # some devices may not have a mac (tun0)
- if not mac:
- continue
- if mac == empty_mac and name != 'lo':
- continue
+ for name, mac, _driver, _devid in get_interfaces():
if mac in ret:
raise RuntimeError(
"duplicate mac found! both '%s' and '%s' have mac '%s'" %
@@ -599,6 +589,7 @@ class EphemeralIPv4Network(object):
self._bringup_router()
def __exit__(self, excp_type, excp_value, excp_traceback):
+ """Teardown anything we set up."""
for cmd in self.cleanup_cmds:
util.subp(cmd, capture=True)
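With this refactor, generate_fallback_config reduces to find_fallback_nic plus a mac lookup; for a hypothetical nic 'eth0' with mac '00:16:3e:aa:bb:cc' the returned structure is:

    {'version': 1,
     'config': [{'type': 'physical', 'name': 'eth0',
                 'mac_address': '00:16:3e:aa:bb:cc',
                 'subnets': [{'type': 'dhcp'}]}]}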
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
new file mode 100644
index 00000000..0cba7032
--- /dev/null
+++ b/cloudinit/net/dhcp.py
@@ -0,0 +1,163 @@
+# Copyright (C) 2017 Canonical Ltd.
+#
+# Author: Chad Smith <chad.smith@canonical.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import configobj
+import logging
+import os
+import re
+
+from cloudinit.net import find_fallback_nic, get_devicelist
+from cloudinit import temp_utils
+from cloudinit import util
+from six import StringIO
+
+LOG = logging.getLogger(__name__)
+
+NETWORKD_LEASES_DIR = '/run/systemd/netif/leases'
+
+
+class InvalidDHCPLeaseFileError(Exception):
+ """Raised when parsing an empty or invalid dhcp.leases file.
+
+ Current uses are DataSourceAzure and DataSourceEc2 during ephemeral
+ boot to scrape metadata.
+ """
+ pass
+
+
+def maybe_perform_dhcp_discovery(nic=None):
+ """Perform dhcp discovery if nic valid and dhclient command exists.
+
+ If the nic is invalid or undiscoverable or dhclient command is not found,
+ skip dhcp_discovery and return an empty dict.
+
+ @param nic: Name of the network interface we want to run dhclient on.
+ @return: A dict of dhcp options from the dhclient discovery if run,
+ otherwise an empty dict is returned.
+ """
+ if nic is None:
+ nic = find_fallback_nic()
+ if nic is None:
+ LOG.debug(
+ 'Skip dhcp_discovery: Unable to find fallback nic.')
+ return {}
+ elif nic not in get_devicelist():
+ LOG.debug(
+ 'Skip dhcp_discovery: nic %s not found in get_devicelist.', nic)
+ return {}
+ dhclient_path = util.which('dhclient')
+ if not dhclient_path:
+ LOG.debug('Skip dhclient configuration: No dhclient command found.')
+ return {}
+ with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir:
+ # Use /var/tmp because /run/cloud-init/tmp is mounted noexec
+ return dhcp_discovery(dhclient_path, nic, tdir)
+
+
+def parse_dhcp_lease_file(lease_file):
+ """Parse the given dhcp lease file for the most recent lease.
+
+ Return a dict of dhcp options as key value pairs for the most recent lease
+ block.
+
+ @raises: InvalidDHCPLeaseFileError on empty or unparseable lease file
+ content.
+ """
+ lease_regex = re.compile(r"lease {(?P<lease>[^}]*)}\n")
+ dhcp_leases = []
+ lease_content = util.load_file(lease_file)
+ if len(lease_content) == 0:
+ raise InvalidDHCPLeaseFileError(
+ 'Cannot parse empty dhcp lease file {0}'.format(lease_file))
+ for lease in lease_regex.findall(lease_content):
+ lease_options = []
+ for line in lease.split(';'):
+ # Strip newlines, double-quotes and option prefix
+ line = line.strip().replace('"', '').replace('option ', '')
+ if not line:
+ continue
+ lease_options.append(line.split(' ', 1))
+ dhcp_leases.append(dict(lease_options))
+ if not dhcp_leases:
+ raise InvalidDHCPLeaseFileError(
+ 'Cannot parse dhcp lease file {0}. No leases found'.format(
+ lease_file))
+ return dhcp_leases
+
+
+def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
+ """Run dhclient on the interface without scripts or filesystem artifacts.
+
+ @param dhclient_cmd_path: Full path to the dhclient used.
+ @param interface: Name of the network interface on which to run dhclient.
+ @param cleandir: The directory from which to run dhclient as well as store
+ dhcp leases.
+
+ @return: A dict of dhcp options parsed from the dhcp.leases file or empty
+ dict.
+ """
+ LOG.debug('Performing a dhcp discovery on %s', interface)
+
+ # XXX We copy dhclient out of /sbin/dhclient to avoid dealing with strict
+ # AppArmor profiles which disallow running dhclient -sf <our-script-file>.
+ # We want to avoid running /sbin/dhclient-script because of side-effects in
+ # /etc/resolv.conf and any other vendor-specific scripts in
+ # /etc/dhcp/dhclient*hooks.d.
+ sandbox_dhclient_cmd = os.path.join(cleandir, 'dhclient')
+ util.copy(dhclient_cmd_path, sandbox_dhclient_cmd)
+ pid_file = os.path.join(cleandir, 'dhclient.pid')
+ lease_file = os.path.join(cleandir, 'dhcp.leases')
+
+ # ISC dhclient needs the interface up to send initial discovery packets.
+ # Generally dhclient relies on dhclient-script PREINIT action to bring the
+ # link up before attempting discovery. Since we are using -sf /bin/true,
+ # we need to do that "link up" ourselves first.
+ util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
+ cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
+ '-pf', pid_file, interface, '-sf', '/bin/true']
+ util.subp(cmd, capture=True)
+ return parse_dhcp_lease_file(lease_file)
+
+
+def networkd_parse_lease(content):
+ """Parse a systemd lease file content as in /run/systemd/netif/leases/
+
+ Parse this (almost) ini style file even though it says:
+ # This is private data. Do not parse.
+
+ Simply return a dictionary of key/values."""
+
+ return dict(configobj.ConfigObj(StringIO(content), list_values=False))
+
+
+def networkd_load_leases(leases_d=None):
+ """Return a dictionary of dictionaries representing each lease
+ found in leases_d.
+
+ The top level key will be the filename, which is typically the ifindex."""
+
+ if leases_d is None:
+ leases_d = NETWORKD_LEASES_DIR
+
+ ret = {}
+ if not os.path.isdir(leases_d):
+ return ret
+ for lfile in os.listdir(leases_d):
+ ret[lfile] = networkd_parse_lease(
+ util.load_file(os.path.join(leases_d, lfile)))
+ return ret
+
+
+def networkd_get_option_from_leases(keyname, leases_d=None):
+ if leases_d is None:
+ leases_d = NETWORKD_LEASES_DIR
+ leases = networkd_load_leases(leases_d=leases_d)
+ for ifindex, data in sorted(leases.items()):
+ if data.get(keyname):
+ return data[keyname]
+ return None
+
+# vi: ts=4 expandtab
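A minimal usage sketch for the new module, assuming a dhclient-written lease file at an illustrative path:

    from cloudinit.net.dhcp import parse_dhcp_lease_file

    # The file is assumed to contain one or more 'lease { ... }' blocks.
    leases = parse_dhcp_lease_file('/var/tmp/dhcp.leases')
    newest = leases[-1]  # blocks appear in file order; the last is newest
    print(newest.get('fixed-address'), newest.get('routers'))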
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index bb80ec02..c6a71d16 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -95,6 +95,9 @@ def _iface_add_attrs(iface, index):
ignore_map.append('mac_address')
for key, value in iface.items():
+ # convert bool to string for eni
+ if type(value) == bool:
+ value = 'on' if iface[key] else 'off'
if not value or key in ignore_map:
continue
if key in multiline_keys:
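The coercion matters because eni stanzas spell booleans as 'on'/'off'; in isolation:

    value = True
    value = 'on' if value else 'off'  # e.g. rendered as 'bridge_stp on'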
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 9f35b72b..d3788af8 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -4,7 +4,7 @@ import copy
import os
from . import renderer
-from .network_state import subnet_is_ipv6
+from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2
from cloudinit import log as logging
from cloudinit import util
@@ -27,31 +27,6 @@ network:
"""
LOG = logging.getLogger(__name__)
-NET_CONFIG_TO_V2 = {
- 'bond': {'bond-ad-select': 'ad-select',
- 'bond-arp-interval': 'arp-interval',
- 'bond-arp-ip-target': 'arp-ip-target',
- 'bond-arp-validate': 'arp-validate',
- 'bond-downdelay': 'down-delay',
- 'bond-fail-over-mac': 'fail-over-mac-policy',
- 'bond-lacp-rate': 'lacp-rate',
- 'bond-miimon': 'mii-monitor-interval',
- 'bond-min-links': 'min-links',
- 'bond-mode': 'mode',
- 'bond-num-grat-arp': 'gratuitious-arp',
- 'bond-primary-reselect': 'primary-reselect-policy',
- 'bond-updelay': 'up-delay',
- 'bond-xmit-hash-policy': 'transmit-hash-policy'},
- 'bridge': {'bridge_ageing': 'ageing-time',
- 'bridge_bridgeprio': 'priority',
- 'bridge_fd': 'forward-delay',
- 'bridge_gcint': None,
- 'bridge_hello': 'hello-time',
- 'bridge_maxage': 'max-age',
- 'bridge_maxwait': None,
- 'bridge_pathcost': 'path-cost',
- 'bridge_portprio': None,
- 'bridge_waitport': None}}
def _get_params_dict_by_match(config, match):
@@ -247,6 +222,14 @@ class Renderer(renderer.Renderer):
util.subp(cmd, capture=True)
def _render_content(self, network_state):
+
+ # if content already in netplan format, pass it back
+ if network_state.version == 2:
+ LOG.debug('V2 to V2 passthrough')
+ return util.yaml_dumps({'network': network_state.config},
+ explicit_start=False,
+ explicit_end=False)
+
ethernets = {}
wifis = {}
bridges = {}
@@ -261,9 +244,9 @@ class Renderer(renderer.Renderer):
for config in network_state.iter_interfaces():
ifname = config.get('name')
- # filter None entries up front so we can do simple if key in dict
+ # filter None (but not False) entries up front
ifcfg = dict((key, value) for (key, value) in config.items()
- if value)
+ if value is not None)
if_type = ifcfg.get('type')
if if_type == 'physical':
@@ -335,6 +318,7 @@ class Renderer(renderer.Renderer):
(port, cost) = costval.split()
newvalue[port] = int(cost)
br_config.update({newname: newvalue})
+
if len(br_config) > 0:
bridge.update({'parameters': br_config})
_extract_addresses(ifcfg, bridge)
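The switch from `if value` to `if value is not None` is the behavioral heart of this hunk: falsy-but-meaningful values such as bridge_stp False must survive filtering. In isolation:

    config = {'name': 'br0', 'bridge_stp': False, 'mtu': None}
    old = dict((k, v) for k, v in config.items() if v)
    new = dict((k, v) for k, v in config.items() if v is not None)
    # old == {'name': 'br0'}                      (bridge_stp lost)
    # new == {'name': 'br0', 'bridge_stp': False} (mtu None still dropped)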
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 87a7222d..0e830ee8 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -23,6 +23,34 @@ NETWORK_V2_KEY_FILTER = [
'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan'
]
+NET_CONFIG_TO_V2 = {
+ 'bond': {'bond-ad-select': 'ad-select',
+ 'bond-arp-interval': 'arp-interval',
+ 'bond-arp-ip-target': 'arp-ip-target',
+ 'bond-arp-validate': 'arp-validate',
+ 'bond-downdelay': 'down-delay',
+ 'bond-fail-over-mac': 'fail-over-mac-policy',
+ 'bond-lacp-rate': 'lacp-rate',
+ 'bond-miimon': 'mii-monitor-interval',
+ 'bond-min-links': 'min-links',
+ 'bond-mode': 'mode',
+ 'bond-num-grat-arp': 'gratuitious-arp',
+ 'bond-primary': 'primary',
+ 'bond-primary-reselect': 'primary-reselect-policy',
+ 'bond-updelay': 'up-delay',
+ 'bond-xmit-hash-policy': 'transmit-hash-policy'},
+ 'bridge': {'bridge_ageing': 'ageing-time',
+ 'bridge_bridgeprio': 'priority',
+ 'bridge_fd': 'forward-delay',
+ 'bridge_gcint': None,
+ 'bridge_hello': 'hello-time',
+ 'bridge_maxage': 'max-age',
+ 'bridge_maxwait': None,
+ 'bridge_pathcost': 'path-cost',
+ 'bridge_portprio': None,
+ 'bridge_stp': 'stp',
+ 'bridge_waitport': None}}
+
def parse_net_config_data(net_config, skip_broken=True):
"""Parses the config, returns NetworkState object
@@ -120,6 +148,10 @@ class NetworkState(object):
self.use_ipv6 = network_state.get('use_ipv6', False)
@property
+ def config(self):
+ return self._network_state['config']
+
+ @property
def version(self):
return self._version
@@ -166,12 +198,14 @@ class NetworkStateInterpreter(object):
'search': [],
},
'use_ipv6': False,
+ 'config': None,
}
def __init__(self, version=NETWORK_STATE_VERSION, config=None):
self._version = version
self._config = config
self._network_state = copy.deepcopy(self.initial_network_state)
+ self._network_state['config'] = config
self._parsed = False
@property
@@ -432,6 +466,18 @@ class NetworkStateInterpreter(object):
for param, val in command.get('params', {}).items():
iface.update({param: val})
+ # convert value to boolean
+ bridge_stp = iface.get('bridge_stp')
+ if bridge_stp is not None and type(bridge_stp) != bool:
+ if bridge_stp in ['on', '1', 1]:
+ bridge_stp = True
+ elif bridge_stp in ['off', '0', 0]:
+ bridge_stp = False
+ else:
+ raise ValueError("Cannot convert bridge_stp value"
+ "(%s) to boolean", bridge_stp)
+ iface.update({'bridge_stp': bridge_stp})
+
interfaces.update({iface['name']: iface})
@ensure_command_keys(['address'])
@@ -460,12 +506,15 @@ class NetworkStateInterpreter(object):
v2_command = {
bond0: {
'interfaces': ['interface0', 'interface1'],
- 'miimon': 100,
- 'mode': '802.3ad',
- 'xmit_hash_policy': 'layer3+4'},
+ 'parameters': {
+ 'mii-monitor-interval': 100,
+ 'mode': '802.3ad',
+ 'xmit_hash_policy': 'layer3+4'}},
bond1: {
'bond-slaves': ['interface2', 'interface7'],
- 'mode': 1
+ 'parameters': {
+ 'mode': 1,
+ }
}
}
@@ -489,8 +538,8 @@ class NetworkStateInterpreter(object):
v2_command = {
br0: {
'interfaces': ['interface0', 'interface1'],
- 'fd': 0,
- 'stp': 'off',
+ 'forward-delay': 0,
+ 'stp': False,
'maxwait': 0,
}
}
@@ -554,6 +603,7 @@ class NetworkStateInterpreter(object):
if not mac_address:
LOG.debug('NetworkState Version2: missing "macaddress" info '
'in config entry: %s: %s', eth, str(cfg))
+ phy_cmd.update({'mac_address': mac_address})
for key in ['mtu', 'match', 'wakeonlan']:
if key in cfg:
@@ -598,8 +648,8 @@ class NetworkStateInterpreter(object):
self.handle_vlan(vlan_cmd)
def handle_wifis(self, command):
- raise NotImplementedError("NetworkState V2: "
- "Skipping wifi configuration")
+ LOG.warning('Wifi configuration is only available to distros with '
+ 'netplan rendering support.')
def _v2_common(self, cfg):
LOG.debug('v2_common: handling config:\n%s', cfg)
@@ -616,6 +666,11 @@ class NetworkStateInterpreter(object):
def _handle_bond_bridge(self, command, cmd_type=None):
"""Common handler for bond and bridge types"""
+
+ # inverse mapping for v2 keynames to v1 keynames
+ v2key_to_v1 = dict((v, k) for k, v in
+ NET_CONFIG_TO_V2.get(cmd_type).items())
+
for item_name, item_cfg in command.items():
item_params = dict((key, value) for (key, value) in
item_cfg.items() if key not in
@@ -624,14 +679,20 @@ class NetworkStateInterpreter(object):
'type': cmd_type,
'name': item_name,
cmd_type + '_interfaces': item_cfg.get('interfaces'),
- 'params': item_params,
+ 'params': dict((v2key_to_v1[k], v) for k, v in
+ item_params.get('parameters', {}).items())
}
subnets = self._v2_to_v1_ipcfg(item_cfg)
if len(subnets) > 0:
v1_cmd.update({'subnets': subnets})
- LOG.debug('v2(%ss) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd)
- self.handle_bridge(v1_cmd)
+ LOG.debug('v2(%s) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd)
+ if cmd_type == "bridge":
+ self.handle_bridge(v1_cmd)
+ elif cmd_type == "bond":
+ self.handle_bond(v1_cmd)
+ else:
+ raise ValueError('Unknown command type: %s', cmd_type)
def _v2_to_v1_ipcfg(self, cfg):
"""Common ipconfig extraction from v2 to v1 subnets array."""
@@ -651,12 +712,6 @@ class NetworkStateInterpreter(object):
'address': address,
}
- routes = []
- for route in cfg.get('routes', []):
- routes.append(_normalize_route(
- {'address': route.get('to'), 'gateway': route.get('via')}))
- subnet['routes'] = routes
-
if ":" in address:
if 'gateway6' in cfg and gateway6 is None:
gateway6 = cfg.get('gateway6')
@@ -667,6 +722,17 @@ class NetworkStateInterpreter(object):
subnet.update({'gateway': gateway4})
subnets.append(subnet)
+
+ routes = []
+ for route in cfg.get('routes', []):
+ routes.append(_normalize_route(
+ {'destination': route.get('to'), 'gateway': route.get('via')}))
+
+ # v2 routes are bound to the interface, in v1 we add them under
+ # the first subnet since there isn't an equivalent interface level.
+ if len(subnets) and len(routes):
+ subnets[0]['routes'] = routes
+
return subnets
@@ -721,7 +787,7 @@ def _normalize_net_keys(network, address_keys=()):
elif netmask:
prefix = mask_to_net_prefix(netmask)
elif 'prefix' in net:
- prefix = int(prefix)
+ prefix = int(net['prefix'])
else:
prefix = 64 if ipv6 else 24
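The v2-to-v1 translation in _handle_bond_bridge works by inverting NET_CONFIG_TO_V2; a small sketch with illustrative bond parameters:

    from cloudinit.net.network_state import NET_CONFIG_TO_V2

    v2_to_v1 = dict((v, k) for k, v in NET_CONFIG_TO_V2['bond'].items())
    params = {'mii-monitor-interval': 100, 'mode': '802.3ad'}
    v1_params = dict((v2_to_v1[k], v) for k, v in params.items())
    # v1_params == {'bond-miimon': 100, 'bond-mode': '802.3ad'}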
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index a550f97c..f5727969 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -484,7 +484,11 @@ class Renderer(renderer.Renderer):
content.add_nameserver(nameserver)
for searchdomain in network_state.dns_searchdomains:
content.add_search_domain(searchdomain)
- return "\n".join([_make_header(';'), str(content)])
+ header = _make_header(';')
+ content_str = str(content)
+ if not content_str.startswith(header):
+ content_str = header + '\n' + content_str
+ return content_str
@staticmethod
def _render_networkmanager_conf(network_state):
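The resolv.conf fix boils down to prepending the header only when the rendered content does not already carry it; distilled:

    def prepend_header_once(header, body):
        # Avoid the duplicate-header bug: add only if not already present.
        return body if body.startswith(header) else header + '\n' + body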
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
new file mode 100644
index 00000000..1c1f504a
--- /dev/null
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -0,0 +1,260 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import mock
+import os
+from textwrap import dedent
+
+from cloudinit.net.dhcp import (
+ InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery,
+ parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases)
+from cloudinit.util import ensure_file, write_file
+from cloudinit.tests.helpers import CiTestCase, wrap_and_call, populate_dir
+
+
+class TestParseDHCPLeasesFile(CiTestCase):
+
+ def test_parse_empty_lease_file_errors(self):
+ """parse_dhcp_lease_file errors when file content is empty."""
+ empty_file = self.tmp_path('leases')
+ ensure_file(empty_file)
+ with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager:
+ parse_dhcp_lease_file(empty_file)
+ error = context_manager.exception
+ self.assertIn('Cannot parse empty dhcp lease file', str(error))
+
+ def test_parse_malformed_lease_file_content_errors(self):
+ """parse_dhcp_lease_file errors when file content isn't dhcp leases."""
+ non_lease_file = self.tmp_path('leases')
+ write_file(non_lease_file, 'hi mom.')
+ with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager:
+ parse_dhcp_lease_file(non_lease_file)
+ error = context_manager.exception
+ self.assertIn('Cannot parse dhcp lease file', str(error))
+
+ def test_parse_multiple_leases(self):
+ """parse_dhcp_lease_file returns a list of all leases within."""
+ lease_file = self.tmp_path('leases')
+ content = dedent("""
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ renew 4 2017/07/27 18:02:30;
+ expire 5 2017/07/28 07:08:15;
+ }
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """)
+ expected = [
+ {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
+ 'renew': '4 2017/07/27 18:02:30',
+ 'expire': '5 2017/07/28 07:08:15'},
+ {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}]
+ write_file(lease_file, content)
+ self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+
+
+class TestDHCPDiscoveryClean(CiTestCase):
+ with_logs = True
+
+ @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
+ def test_no_fallback_nic_found(self, m_fallback_nic):
+ """Log and do nothing when nic is absent and no fallback is found."""
+ m_fallback_nic.return_value = None # No fallback nic found
+ self.assertEqual({}, maybe_perform_dhcp_discovery())
+ self.assertIn(
+ 'Skip dhcp_discovery: Unable to find fallback nic.',
+ self.logs.getvalue())
+
+ def test_provided_nic_does_not_exist(self):
+ """When the provided nic doesn't exist, log a message and no-op."""
+ self.assertEqual({}, maybe_perform_dhcp_discovery('idontexist'))
+ self.assertIn(
+ 'Skip dhcp_discovery: nic idontexist not found in get_devicelist.',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.net.dhcp.util.which')
+ @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
+ def test_absent_dhclient_command(self, m_fallback, m_which):
+ """When dhclient doesn't exist in the OS, log the issue and no-op."""
+ m_fallback.return_value = 'eth9'
+ m_which.return_value = None # dhclient isn't found
+ self.assertEqual({}, maybe_perform_dhcp_discovery())
+ self.assertIn(
+ 'Skip dhclient configuration: No dhclient command found.',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.temp_utils.os.getuid')
+ @mock.patch('cloudinit.net.dhcp.dhcp_discovery')
+ @mock.patch('cloudinit.net.dhcp.util.which')
+ @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
+ def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid):
+ """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery."""
+ m_uid.return_value = 0 # Fake root user for tmpdir
+ m_fback.return_value = 'eth9'
+ m_which.return_value = '/sbin/dhclient'
+ m_dhcp.return_value = {'address': '192.168.2.2'}
+ retval = wrap_and_call(
+ 'cloudinit.temp_utils',
+ {'_TMPDIR': {'new': None},
+ 'os.getuid': 0},
+ maybe_perform_dhcp_discovery)
+ self.assertEqual({'address': '192.168.2.2'}, retval)
+ self.assertEqual(
+ 1, m_dhcp.call_count, 'dhcp_discovery not called once')
+ call = m_dhcp.call_args_list[0]
+ self.assertEqual('/sbin/dhclient', call[0][0])
+ self.assertEqual('eth9', call[0][1])
+ self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2])
+
+ @mock.patch('cloudinit.net.dhcp.util.subp')
+ def test_dhcp_discovery_run_in_sandbox(self, m_subp):
+ """dhcp_discovery brings up the interface and runs dhclient.
+
+ It also returns the parsed dhcp.leases file generated in the sandbox.
+ """
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
+ script_content = '#!/bin/bash\necho fake-dhclient'
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent("""
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """)
+ lease_file = os.path.join(tmpdir, 'dhcp.leases')
+ write_file(lease_file, lease_content)
+ self.assertItemsEqual(
+ [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
+ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
+ dhcp_discovery(dhclient_script, 'eth9', tmpdir))
+ # dhclient script got copied
+ with open(os.path.join(tmpdir, 'dhclient')) as stream:
+ self.assertEqual(script_content, stream.read())
+ # Interface was brought up before dhclient called from sandbox
+ m_subp.assert_has_calls([
+ mock.call(
+ ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True),
+ mock.call(
+ [os.path.join(tmpdir, 'dhclient'), '-1', '-v', '-lf',
+ lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'),
+ 'eth9', '-sf', '/bin/true'], capture=True)])
+
+
+class TestSystemdParseLeases(CiTestCase):
+
+ lxd_lease = dedent("""\
+ # This is private data. Do not parse.
+ ADDRESS=10.75.205.242
+ NETMASK=255.255.255.0
+ ROUTER=10.75.205.1
+ SERVER_ADDRESS=10.75.205.1
+ NEXT_SERVER=10.75.205.1
+ BROADCAST=10.75.205.255
+ T1=1580
+ T2=2930
+ LIFETIME=3600
+ DNS=10.75.205.1
+ DOMAINNAME=lxd
+ HOSTNAME=a1
+ CLIENTID=ffe617693400020000ab110c65a6a0866931c2
+ """)
+
+ lxd_parsed = {
+ 'ADDRESS': '10.75.205.242',
+ 'NETMASK': '255.255.255.0',
+ 'ROUTER': '10.75.205.1',
+ 'SERVER_ADDRESS': '10.75.205.1',
+ 'NEXT_SERVER': '10.75.205.1',
+ 'BROADCAST': '10.75.205.255',
+ 'T1': '1580',
+ 'T2': '2930',
+ 'LIFETIME': '3600',
+ 'DNS': '10.75.205.1',
+ 'DOMAINNAME': 'lxd',
+ 'HOSTNAME': 'a1',
+ 'CLIENTID': 'ffe617693400020000ab110c65a6a0866931c2',
+ }
+
+ azure_lease = dedent("""\
+ # This is private data. Do not parse.
+ ADDRESS=10.132.0.5
+ NETMASK=255.255.255.255
+ ROUTER=10.132.0.1
+ SERVER_ADDRESS=169.254.169.254
+ NEXT_SERVER=10.132.0.1
+ MTU=1460
+ T1=43200
+ T2=75600
+ LIFETIME=86400
+ DNS=169.254.169.254
+ NTP=169.254.169.254
+ DOMAINNAME=c.ubuntu-foundations.internal
+ DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal
+ HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal
+ ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
+ CLIENTID=ff405663a200020000ab11332859494d7a8b4c
+ OPTION_245=624c3620
+ """)
+
+ azure_parsed = {
+ 'ADDRESS': '10.132.0.5',
+ 'NETMASK': '255.255.255.255',
+ 'ROUTER': '10.132.0.1',
+ 'SERVER_ADDRESS': '169.254.169.254',
+ 'NEXT_SERVER': '10.132.0.1',
+ 'MTU': '1460',
+ 'T1': '43200',
+ 'T2': '75600',
+ 'LIFETIME': '86400',
+ 'DNS': '169.254.169.254',
+ 'NTP': '169.254.169.254',
+ 'DOMAINNAME': 'c.ubuntu-foundations.internal',
+ 'DOMAIN_SEARCH_LIST': 'c.ubuntu-foundations.internal google.internal',
+ 'HOSTNAME': 'tribaal-test-171002-1349.c.ubuntu-foundations.internal',
+ 'ROUTES': '10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1',
+ 'CLIENTID': 'ff405663a200020000ab11332859494d7a8b4c',
+ 'OPTION_245': '624c3620'}
+
+ def setUp(self):
+ super(TestSystemdParseLeases, self).setUp()
+ self.lease_d = self.tmp_dir()
+
+ def test_no_leases_returns_empty_dict(self):
+ """A leases dir with no lease files should return empty dictionary."""
+ self.assertEqual({}, networkd_load_leases(self.lease_d))
+
+ def test_no_leases_dir_returns_empty_dict(self):
+ """A non-existing leases dir should return empty dict."""
+ enodir = os.path.join(self.lease_d, 'does-not-exist')
+ self.assertEqual({}, networkd_load_leases(enodir))
+
+ def test_single_leases_file(self):
+ """A leases dir with one leases file."""
+ populate_dir(self.lease_d, {'2': self.lxd_lease})
+ self.assertEqual(
+ {'2': self.lxd_parsed}, networkd_load_leases(self.lease_d))
+
+ def test_single_azure_leases_file(self):
+ """On Azure, option 245 should be present, verify it specifically."""
+ populate_dir(self.lease_d, {'1': self.azure_lease})
+ self.assertEqual(
+ {'1': self.azure_parsed}, networkd_load_leases(self.lease_d))
+
+ def test_multiple_files(self):
+ """Multiple leases files on azure with one found return that value."""
+ self.maxDiff = None
+ populate_dir(self.lease_d, {'1': self.azure_lease,
+ '9': self.lxd_lease})
+ self.assertEqual({'1': self.azure_parsed, '9': self.lxd_parsed},
+ networkd_load_leases(self.lease_d))
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 272a6ebd..8cb4114e 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -7,7 +7,7 @@ import os
import cloudinit.net as net
from cloudinit.util import ensure_file, write_file, ProcessExecutionError
-from tests.unittests.helpers import CiTestCase
+from cloudinit.tests.helpers import CiTestCase
class TestSysDevPath(CiTestCase):
@@ -414,7 +414,7 @@ class TestEphemeralIPV4Network(CiTestCase):
self.assertIn('Cannot init network on', str(error))
self.assertEqual(0, m_subp.call_count)
- def test_ephemeral_ipv4_network_errors_invalid_mask(self, m_subp):
+ def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp):
"""Raise an error when prefix_or_mask is not a netmask or prefix."""
params = {
'interface': 'eth0', 'ip': '192.168.2.2',
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 39c79dee..8f99d99c 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -13,7 +13,7 @@ import re
from cloudinit import log as logging
from cloudinit import util
-from prettytable import PrettyTable
+from cloudinit.simpletable import SimpleTable
LOG = logging.getLogger()
@@ -170,7 +170,7 @@ def netdev_pformat():
lines.append(util.center("Net device info failed", '!', 80))
else:
fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
- tbl = PrettyTable(fields)
+ tbl = SimpleTable(fields)
for (dev, d) in netdev.items():
tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
if d.get('addr6'):
@@ -194,7 +194,7 @@ def route_pformat():
if routes.get('ipv4'):
fields_v4 = ['Route', 'Destination', 'Gateway',
'Genmask', 'Interface', 'Flags']
- tbl_v4 = PrettyTable(fields_v4)
+ tbl_v4 = SimpleTable(fields_v4)
for (n, r) in enumerate(routes.get('ipv4')):
route_id = str(n)
tbl_v4.add_row([route_id, r['destination'],
@@ -207,7 +207,7 @@ def route_pformat():
if routes.get('ipv6'):
fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q',
'Local Address', 'Foreign Address', 'State']
- tbl_v6 = PrettyTable(fields_v6)
+ tbl_v6 = SimpleTable(fields_v6)
for (n, r) in enumerate(routes.get('ipv6')):
route_id = str(n)
tbl_v6.add_row([route_id, r['proto'],
diff --git a/cloudinit/simpletable.py b/cloudinit/simpletable.py
new file mode 100644
index 00000000..90603228
--- /dev/null
+++ b/cloudinit/simpletable.py
@@ -0,0 +1,62 @@
+# Copyright (C) 2017 Amazon.com, Inc. or its affiliates
+#
+# Author: Ethan Faust <efaust@amazon.com>
+# Author: Andrew Jorgensen <ajorgens@amazon.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+class SimpleTable(object):
+ """A minimal implementation of PrettyTable
+ for distribution with cloud-init.
+ """
+
+ def __init__(self, fields):
+ self.fields = fields
+ self.rows = []
+
+ # initialize list of 0s the same length
+ # as the number of fields
+ self.column_widths = [0] * len(self.fields)
+ self.update_column_widths(fields)
+
+ def update_column_widths(self, values):
+ for i, value in enumerate(values):
+ self.column_widths[i] = max(
+ len(value),
+ self.column_widths[i])
+
+ def add_row(self, values):
+ if len(values) > len(self.fields):
+ raise TypeError('too many values')
+ values = [str(value) for value in values]
+ self.rows.append(values)
+ self.update_column_widths(values)
+
+ def _hdiv(self):
+ """Returns a horizontal divider for the table."""
+ return '+' + '+'.join(
+ ['-' * (w + 2) for w in self.column_widths]) + '+'
+
+ def _row(self, row):
+ """Returns a formatted row."""
+ return '|' + '|'.join(
+ [col.center(self.column_widths[i] + 2)
+ for i, col in enumerate(row)]) + '|'
+
+ def __str__(self):
+ """Returns a string representation of the table with lines around.
+
+ +-----+-----+
+ | one | two |
+ +-----+-----+
+ | 1 | 2 |
+ | 01 | 10 |
+ +-----+-----+
+ """
+ lines = [self._hdiv(), self._row(self.fields), self._hdiv()]
+ lines += [self._row(r) for r in self.rows] + [self._hdiv()]
+ return '\n'.join(lines)
+
+ def get_string(self):
+ return str(self)
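Usage mirrors the small PrettyTable subset it replaces; a quick sketch:

    from cloudinit.simpletable import SimpleTable

    tbl = SimpleTable(['one', 'two'])
    tbl.add_row([1, 2])
    tbl.add_row(['01', '10'])
    print(tbl)  # renders the boxed layout shown in the __str__ docstring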
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 380e27cb..43a7e42c 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -6,17 +6,20 @@ from cloudinit import sources
from cloudinit.sources import DataSourceEc2 as EC2
from cloudinit import util
-DEF_MD_VERSION = "2016-01-01"
ALIYUN_PRODUCT = "Alibaba Cloud ECS"
class DataSourceAliYun(EC2.DataSourceEc2):
- metadata_urls = ["http://100.100.100.200"]
+
+ metadata_urls = ['http://100.100.100.200']
+
+ # The minimum supported metadata_version from the ec2 metadata apis
+ min_metadata_version = '2016-01-01'
+ extended_metadata_versions = []
def __init__(self, sys_cfg, distro, paths):
super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, "AliYun")
- self.api_ver = DEF_MD_VERSION
def get_hostname(self, fqdn=False, _resolve_ip=False):
return self.metadata.get('hostname', 'localhost.localdomain')
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index ed1d691a..c78ad9eb 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -28,8 +28,8 @@ LOG = logging.getLogger(__name__)
CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
# Shell command lists
-CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
-CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--timeout=5']
+CMD_PROBE_FLOPPY = ['modprobe', 'floppy']
+CMD_UDEVADM_SETTLE = ['udevadm', 'settle', '--timeout=5']
META_DATA_NOT_SUPPORTED = {
'block-device-mapping': {},
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index b5a95a1f..80c2bd12 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -317,9 +317,13 @@ class DataSourceAzure(sources.DataSource):
LOG.debug("ssh authentication: "
"using fingerprint from fabirc")
- missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+ # wait very long for public SSH keys to arrive
+ # https://bugs.launchpad.net/cloud-init/+bug/1717611
+ missing = util.log_time(logfunc=LOG.debug,
+ msg="waiting for SSH public key files",
func=wait_for_files,
- args=(fp_files,))
+ args=(fp_files, 900))
+
if len(missing):
LOG.warning("Did not find files, but going on: %s", missing)
@@ -656,7 +660,7 @@ def pubkeys_from_crt_files(flist):
return pubkeys
-def wait_for_files(flist, maxwait=60, naplen=.5, log_pre=""):
+def wait_for_files(flist, maxwait, naplen=.5, log_pre=""):
need = set(flist)
waited = 0
while True:
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 0188d894..9dc473fc 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -19,6 +19,7 @@ import time
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
+from cloudinit.net import dhcp
from cloudinit import sources
from cloudinit import url_helper as uhelp
from cloudinit import util
@@ -187,22 +188,36 @@ def get_dhclient_d():
return None
-def get_latest_lease():
+def get_latest_lease(lease_d=None):
# find latest lease file
- lease_d = get_dhclient_d()
+ if lease_d is None:
+ lease_d = get_dhclient_d()
if not lease_d:
return None
lease_files = os.listdir(lease_d)
latest_mtime = -1
latest_file = None
- for file_name in lease_files:
- if file_name.startswith("dhclient.") and \
- (file_name.endswith(".lease") or file_name.endswith(".leases")):
- abs_path = os.path.join(lease_d, file_name)
- mtime = os.path.getmtime(abs_path)
- if mtime > latest_mtime:
- latest_mtime = mtime
- latest_file = abs_path
+
+ # lease files are named inconsistently across distros.
+ # We assume that 'dhclient6' indicates ipv6 and ignore it.
+ # ubuntu:
+ # dhclient.<iface>.leases, dhclient.leases, dhclient6.leases
+ # centos6:
+ # dhclient-<iface>.leases, dhclient6.leases
+ # centos7: ('--' is not a typo)
+ # dhclient--<iface>.lease, dhclient6.leases
+ for fname in lease_files:
+ if fname.startswith("dhclient6"):
+ # avoid files that start with dhclient6 assuming dhcpv6.
+ continue
+ if not (fname.endswith(".lease") or fname.endswith(".leases")):
+ continue
+
+ abs_path = os.path.join(lease_d, fname)
+ mtime = os.path.getmtime(abs_path)
+ if mtime > latest_mtime:
+ latest_mtime = mtime
+ latest_file = abs_path
return latest_file
@@ -210,20 +225,28 @@ def get_vr_address():
# Get the address of the virtual router via dhcp leases
# If no virtual router is detected, fallback on default gateway.
# See http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/4.8/virtual_machines/user-data.html # noqa
+
+ # Try networkd first...
+ latest_address = dhcp.networkd_get_option_from_leases('SERVER_ADDRESS')
+ if latest_address:
+ LOG.debug("Found SERVER_ADDRESS '%s' via networkd_leases",
+ latest_address)
+ return latest_address
+
+ # Try dhcp lease files next...
lease_file = get_latest_lease()
if not lease_file:
LOG.debug("No lease file found, using default gateway")
return get_default_gateway()
- latest_address = None
with open(lease_file, "r") as fd:
for line in fd:
if "dhcp-server-identifier" in line:
words = line.strip(" ;\r\n").split(" ")
if len(words) > 2:
- dhcp = words[2]
- LOG.debug("Found DHCP identifier %s", dhcp)
- latest_address = dhcp
+ dhcptok = words[2]
+ LOG.debug("Found DHCP identifier %s", dhcptok)
+ latest_address = dhcptok
if not latest_address:
# No virtual router found, fallback on default gateway
LOG.debug("No DHCP found, using default gateway")
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 4ec9592f..41367a8b 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -13,6 +13,8 @@ import time
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
+from cloudinit import net
+from cloudinit.net import dhcp
from cloudinit import sources
from cloudinit import url_helper as uhelp
from cloudinit import util
@@ -20,12 +22,13 @@ from cloudinit import warnings
LOG = logging.getLogger(__name__)
-# Which version we are requesting of the ec2 metadata apis
-DEF_MD_VERSION = '2009-04-04'
+SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
+_unset = "_unset"
+
class Platforms(object):
ALIYUN = "AliYun"
@@ -41,17 +44,30 @@ class Platforms(object):
class DataSourceEc2(sources.DataSource):
+
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
+
+ # The minimum supported metadata_version from the ec2 metadata apis
+ min_metadata_version = '2009-04-04'
+
+ # Priority-ordered list of additional metadata versions which will be tried
+ # for extended metadata content. IPv6 support comes in 2016-09-02
+ extended_metadata_versions = ['2016-09-02']
+
_cloud_platform = None
+ _network_config = _unset # Used for caching calculated network config v1
+
+ # Whether we want to get network configuration from the metadata service.
+ get_network_metadata = False
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.metadata_address = None
self.seed_dir = os.path.join(paths.seed_dir, "ec2")
- self.api_ver = DEF_MD_VERSION
def get_data(self):
seed_ret = {}
@@ -73,21 +89,27 @@ class DataSourceEc2(sources.DataSource):
elif self.cloud_platform == Platforms.NO_EC2_METADATA:
return False
- try:
- if not self.wait_for_metadata_service():
+ if self.get_network_metadata: # Setup networking in init-local stage.
+ if util.is_FreeBSD():
+ LOG.debug("FreeBSD doesn't support running dhclient with -sf")
return False
- start_time = time.time()
- self.userdata_raw = \
- ec2.get_instance_userdata(self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %.3f seconds",
- time.time() - start_time)
- return True
- except Exception:
- util.logexc(LOG, "Failed reading from metadata address %s",
- self.metadata_address)
- return False
+ dhcp_leases = dhcp.maybe_perform_dhcp_discovery()
+ if not dhcp_leases:
+ # DataSourceEc2Local failed in init-local stage. DataSourceEc2
+ # will still run in init-network stage.
+ return False
+ dhcp_opts = dhcp_leases[-1]
+ net_params = {'interface': dhcp_opts.get('interface'),
+ 'ip': dhcp_opts.get('fixed-address'),
+ 'prefix_or_mask': dhcp_opts.get('subnet-mask'),
+ 'broadcast': dhcp_opts.get('broadcast-address'),
+ 'router': dhcp_opts.get('routers')}
+ with net.EphemeralIPv4Network(**net_params):
+ return util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self._crawl_metadata)
+ else:
+ return self._crawl_metadata()
@property
def launch_index(self):
@@ -95,6 +117,32 @@ class DataSourceEc2(sources.DataSource):
return None
return self.metadata.get('ami-launch-index')
+ def get_metadata_api_version(self):
+ """Get the best supported api version from the metadata service.
+
+ Loop through all extended-support metadata versions in order and
+ return the most fully-featured metadata api version discovered.
+
+ If extended_metadata_versions aren't present, return the datasource's
+ min_metadata_version.
+ """
+ # Assumes metadata service is already up
+ for api_ver in self.extended_metadata_versions:
+ url = '{0}/{1}/meta-data/instance-id'.format(
+ self.metadata_address, api_ver)
+ try:
+ resp = uhelp.readurl(url=url)
+ except uhelp.UrlError as e:
+ LOG.debug('url %s raised exception %s', url, e)
+ else:
+ if resp.code == 200:
+ LOG.debug('Found preferred metadata version %s', api_ver)
+ return api_ver
+ elif resp.code == 404:
+ msg = 'Metadata api version %s not present. Headers: %s'
+ LOG.debug(msg, api_ver, resp.headers)
+ return self.min_metadata_version
+
def get_instance_id(self):
return self.metadata['instance-id']
@@ -138,21 +186,22 @@ class DataSourceEc2(sources.DataSource):
urls = []
url2base = {}
for url in mdurls:
- cur = "%s/%s/meta-data/instance-id" % (url, self.api_ver)
+ cur = '{0}/{1}/meta-data/instance-id'.format(
+ url, self.min_metadata_version)
urls.append(cur)
url2base[cur] = url
start_time = time.time()
- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
+ url = uhelp.wait_for_url(
+ urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn)
if url:
- LOG.debug("Using metadata source: '%s'", url2base[url])
+ self.metadata_address = url2base[url]
+ LOG.debug("Using metadata source: '%s'", self.metadata_address)
else:
LOG.critical("Giving up on md from %s after %s seconds",
urls, int(time.time() - start_time))
- self.metadata_address = url2base.get(url)
return bool(url)
def device_name_to_device(self, name):
@@ -234,6 +283,68 @@ class DataSourceEc2(sources.DataSource):
util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT),
cfg)
+ @property
+ def network_config(self):
+ """Return a network config dict for rendering ENI or netplan files."""
+ if self._network_config != _unset:
+ return self._network_config
+
+ if self.metadata is None:
+ # this would happen if get_data hadn't been called. leave as _unset
+ LOG.warning(
+ "Unexpected call to network_config when metadata is None.")
+ return None
+
+ result = None
+ net_md = self.metadata.get('network')
+ if isinstance(net_md, dict):
+ result = convert_ec2_metadata_network_config(net_md)
+ else:
+ LOG.warning("unexpected metadata 'network' key not valid: %s",
+ net_md)
+ self._network_config = result
+
+ return self._network_config
+
+ def _crawl_metadata(self):
+ """Crawl metadata service when available.
+
+ @returns: True on success, False otherwise.
+ """
+ if not self.wait_for_metadata_service():
+ return False
+ api_version = self.get_metadata_api_version()
+ try:
+ self.userdata_raw = ec2.get_instance_userdata(
+ api_version, self.metadata_address)
+ self.metadata = ec2.get_instance_metadata(
+ api_version, self.metadata_address)
+ except Exception:
+ util.logexc(
+ LOG, "Failed reading from metadata address %s",
+ self.metadata_address)
+ return False
+ return True
+
+
+class DataSourceEc2Local(DataSourceEc2):
+ """Datasource run at init-local which sets up network to query metadata.
+
+ In init-local, no network is available. This subclass sets up minimal
+ networking with dhclient on a viable nic so that it can talk to the
+ metadata service. If the metadata service provides network configuration
+ then render the network configuration for that instance based on metadata.
+ """
+ get_network_metadata = True # Get metadata network config if present
+
+ def get_data(self):
+ supported_platforms = (Platforms.AWS,)
+ if self.cloud_platform not in supported_platforms:
+ LOG.debug("Local Ec2 mode only supported on %s, not %s",
+ supported_platforms, self.cloud_platform)
+ return False
+ return super(DataSourceEc2Local, self).get_data()
+
def read_strict_mode(cfgval, default):
try:
@@ -347,8 +458,39 @@ def _collect_platform_data():
return data
+def convert_ec2_metadata_network_config(network_md, macs_to_nics=None):
+ """Convert ec2 metadata to network config version 1 data dict.
+
+ @param: network_md: 'network' portion of EC2 metadata.
+ generally formed as {"interfaces": {"macs": {}} where
+ 'macs' is a dictionary with mac address as key and contents like:
+ {"device-number": "0", "interface-id": "...", "local-ipv4s": ...}
+ @param: macs_to_nics: Optional dict of mac addresses to nic names. If
+ not provided, get_interfaces_by_mac is called to get it from the OS.
+
+ @return A dict of network config version 1 based on the metadata and macs.
+ """
+ netcfg = {'version': 1, 'config': []}
+ if not macs_to_nics:
+ macs_to_nics = net.get_interfaces_by_mac()
+ macs_metadata = network_md['interfaces']['macs']
+ for mac, nic_name in macs_to_nics.items():
+ nic_metadata = macs_metadata.get(mac)
+ if not nic_metadata:
+ continue # Not a physical nic represented in metadata
+ nic_cfg = {'type': 'physical', 'name': nic_name, 'subnets': []}
+ nic_cfg['mac_address'] = mac
+ if nic_metadata.get('public-ipv4s'):
+ nic_cfg['subnets'].append({'type': 'dhcp4'})
+ if nic_metadata.get('ipv6s'):
+ nic_cfg['subnets'].append({'type': 'dhcp6'})
+ netcfg['config'].append(nic_cfg)
+ return netcfg
+
+
# Used to match classes to dependencies
datasources = [
+ (DataSourceEc2Local, (sources.DEP_FILESYSTEM,)), # Run at init-local
(DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
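A usage sketch for convert_ec2_metadata_network_config with a hand-built metadata fragment; the mac and nic name are illustrative:

    from cloudinit.sources.DataSourceEc2 import (
        convert_ec2_metadata_network_config)

    md = {'interfaces': {'macs': {
        '06:17:04:d7:26:09': {'device-number': '0',
                              'public-ipv4s': '54.0.0.1'}}}}
    macs_to_nics = {'06:17:04:d7:26:09': 'eth0'}
    netcfg = convert_ec2_metadata_network_config(md, macs_to_nics)
    # netcfg == {'version': 1, 'config': [
    #     {'type': 'physical', 'name': 'eth0',
    #      'mac_address': '06:17:04:d7:26:09',
    #      'subnets': [{'type': 'dhcp4'}]}]}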
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 684eac86..ccae4200 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -11,9 +11,8 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
-BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://metadata.google.internal/computeMetadata/v1/'
-}
+MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/'
+BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL}
REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
@@ -51,75 +50,20 @@ class DataSourceGCE(sources.DataSource):
BUILTIN_DS_CONFIG])
self.metadata_address = self.ds_cfg['metadata_url']
- # GCE takes sshKeys attribute in the format of '<user>:<public_key>'
- # so we have to trim each key to remove the username part
- def _trim_key(self, public_key):
- try:
- index = public_key.index(':')
- if index > 0:
- return public_key[(index + 1):]
- except Exception:
- return public_key
-
def get_data(self):
- if not platform_reports_gce():
- return False
-
- # url_map: (our-key, path, required, is_text)
- url_map = [
- ('instance-id', ('instance/id',), True, True),
- ('availability-zone', ('instance/zone',), True, True),
- ('local-hostname', ('instance/hostname',), True, True),
- ('public-keys', ('project/attributes/sshKeys',
- 'instance/attributes/ssh-keys'), False, True),
- ('user-data', ('instance/attributes/user-data',), False, False),
- ('user-data-encoding', ('instance/attributes/user-data-encoding',),
- False, True),
- ]
-
- # if we cannot resolve the metadata server, then no point in trying
- if not util.is_resolvable_url(self.metadata_address):
- LOG.debug("%s is not resolvable", self.metadata_address)
- return False
+ ret = util.log_time(
+ LOG.debug, 'Crawl of GCE metadata service',
+ read_md, kwargs={'address': self.metadata_address})
- metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
- # iterate over url_map keys to get metadata items
- running_on_gce = False
- for (mkey, paths, required, is_text) in url_map:
- value = None
- for path in paths:
- new_value = metadata_fetcher.get_value(path, is_text)
- if new_value is not None:
- value = new_value
- if value:
- running_on_gce = True
- if required and value is None:
- msg = "required key %s returned nothing. not GCE"
- if not running_on_gce:
- LOG.debug(msg, mkey)
- else:
- LOG.warning(msg, mkey)
- return False
- self.metadata[mkey] = value
-
- if self.metadata['public-keys']:
- lines = self.metadata['public-keys'].splitlines()
- self.metadata['public-keys'] = [self._trim_key(k) for k in lines]
-
- if self.metadata['availability-zone']:
- self.metadata['availability-zone'] = self.metadata[
- 'availability-zone'].split('/')[-1]
-
- encoding = self.metadata.get('user-data-encoding')
- if encoding:
- if encoding == 'base64':
- self.metadata['user-data'] = b64decode(
- self.metadata['user-data'])
+ if not ret['success']:
+ if ret['platform_reports_gce']:
+ LOG.warning(ret['reason'])
else:
- LOG.warning('unknown user-data-encoding: %s, ignoring',
- encoding)
-
- return running_on_gce
+ LOG.debug(ret['reason'])
+ return False
+ self.metadata = ret['meta-data']
+ self.userdata_raw = ret['user-data']
+ return True
@property
def launch_index(self):
@@ -136,9 +80,6 @@ class DataSourceGCE(sources.DataSource):
# GCE has long FQDNs and has asked for short hostnames
return self.metadata['local-hostname'].split('.')[0]
- def get_userdata_raw(self):
- return self.metadata['user-data']
-
@property
def availability_zone(self):
return self.metadata['availability-zone']
@@ -148,6 +89,87 @@ class DataSourceGCE(sources.DataSource):
return self.availability_zone.rsplit('-', 1)[0]
+def _trim_key(public_key):
+ # GCE takes sshKeys attribute in the format of '<user>:<public_key>'
+ # so we have to trim each key to remove the username part
+ try:
+ index = public_key.index(':')
+ if index > 0:
+ return public_key[(index + 1):]
+ except Exception:
+ return public_key
+
+
+def read_md(address=None, platform_check=True):
+
+ if address is None:
+ address = MD_V1_URL
+
+ ret = {'meta-data': None, 'user-data': None,
+ 'success': False, 'reason': None}
+ ret['platform_reports_gce'] = platform_reports_gce()
+
+ if platform_check and not ret['platform_reports_gce']:
+ ret['reason'] = "Not running on GCE."
+ return ret
+
+ # if we cannot resolve the metadata server, then no point in trying
+ if not util.is_resolvable_url(address):
+ LOG.debug("%s is not resolvable", address)
+ ret['reason'] = 'address "%s" is not resolvable' % address
+ return ret
+
+ # url_map: (our-key, path, required, is_text)
+ url_map = [
+ ('instance-id', ('instance/id',), True, True),
+ ('availability-zone', ('instance/zone',), True, True),
+ ('local-hostname', ('instance/hostname',), True, True),
+ ('public-keys', ('project/attributes/sshKeys',
+ 'instance/attributes/ssh-keys'), False, True),
+ ('user-data', ('instance/attributes/user-data',), False, False),
+ ('user-data-encoding', ('instance/attributes/user-data-encoding',),
+ False, True),
+ ]
+
+ metadata_fetcher = GoogleMetadataFetcher(address)
+ md = {}
+ # iterate over url_map keys to get metadata items
+ for (mkey, paths, required, is_text) in url_map:
+ value = None
+ for path in paths:
+ new_value = metadata_fetcher.get_value(path, is_text)
+ if new_value is not None:
+ value = new_value
+ if required and value is None:
+ msg = "required key %s returned nothing. not GCE"
+ ret['reason'] = msg % mkey
+ return ret
+ md[mkey] = value
+
+ if md['public-keys']:
+ lines = md['public-keys'].splitlines()
+ md['public-keys'] = [_trim_key(k) for k in lines]
+
+ if md['availability-zone']:
+ md['availability-zone'] = md['availability-zone'].split('/')[-1]
+
+ encoding = md.get('user-data-encoding')
+ if encoding:
+ if encoding == 'base64':
+ md['user-data'] = b64decode(md['user-data'])
+ else:
+ LOG.warning('unknown user-data-encoding: %s, ignoring', encoding)
+
+ if 'user-data' in md:
+ ret['user-data'] = md['user-data']
+ del md['user-data']
+
+ ret['meta-data'] = md
+ ret['success'] = True
+
+ return ret
+
+
def platform_reports_gce():
pname = util.read_dmi_data('system-product-name') or "N/A"
if pname == "Google Compute Engine":
@@ -173,4 +195,36 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
+if __name__ == "__main__":
+ import argparse
+ import json
+ import sys
+
+ from base64 import b64encode
+
+ parser = argparse.ArgumentParser(description='Query GCE Metadata Service')
+ parser.add_argument("--endpoint", metavar="URL",
+ help="The url of the metadata service.",
+ default=MD_V1_URL)
+ parser.add_argument("--no-platform-check", dest="platform_check",
+ help="Ignore smbios platform check",
+ action='store_false', default=True)
+ args = parser.parse_args()
+ data = read_md(address=args.endpoint, platform_check=args.platform_check)
+ if 'user-data' in data:
+ # user-data is bytes, not a string like other values. Handle it specially.
+ # if it can be represented as utf-8 then do so. Otherwise print base64
+ # encoded value in the key user-data-b64.
+ try:
+ data['user-data'] = data['user-data'].decode()
+ except UnicodeDecodeError:
+ sys.stderr.write("User-data cannot be decoded. "
+ "Writing as base64\n")
+ del data['user-data']
+ # b64encode returns a bytes value. decode to get the string.
+ data['user-data-b64'] = b64encode(data['user-data']).decode()
+
+ print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')))
+
# vi: ts=4 expandtab
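Beyond the __main__ entry point above, read_md can be exercised directly; the failure shape of the return dict is worth noting:

    from cloudinit.sources.DataSourceGCE import read_md

    ret = read_md(platform_check=False)  # skip the smbios check
    if ret['success']:
        print(ret['meta-data']['instance-id'])
    else:
        print('GCE crawl failed:', ret['reason'])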
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index f20c9a65..ccebf11a 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -25,6 +25,8 @@ from cloudinit.sources.helpers.vmware.imc.config_file \
import ConfigFile
from cloudinit.sources.helpers.vmware.imc.config_nic \
import NicConfigurator
+from cloudinit.sources.helpers.vmware.imc.config_passwd \
+ import PasswordConfigurator
from cloudinit.sources.helpers.vmware.imc.guestcust_error \
import GuestCustErrorEnum
from cloudinit.sources.helpers.vmware.imc.guestcust_event \
@@ -49,6 +51,10 @@ class DataSourceOVF(sources.DataSource):
self.cfg = {}
self.supported_seed_starts = ("/", "file://")
self.vmware_customization_supported = True
+ self._network_config = None
+ self._vmware_nics_to_enable = None
+ self._vmware_cust_conf = None
+ self._vmware_cust_found = False
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -58,8 +64,8 @@ class DataSourceOVF(sources.DataSource):
found = []
md = {}
ud = ""
- vmwarePlatformFound = False
- vmwareImcConfigFilePath = ''
+ vmwareImcConfigFilePath = None
+ nicspath = None
defaults = {
"instance-id": "iid-dsovf",
@@ -99,53 +105,88 @@ class DataSourceOVF(sources.DataSource):
logfunc=LOG.debug,
msg="waiting for configuration file",
func=wait_for_imc_cfg_file,
- args=("/var/run/vmware-imc", "cust.cfg", max_wait))
+ args=("cust.cfg", max_wait))
if vmwareImcConfigFilePath:
LOG.debug("Found VMware Customization Config File at %s",
vmwareImcConfigFilePath)
+ nicspath = wait_for_imc_cfg_file(
+ filename="nics.txt", maxwait=10, naplen=5)
else:
LOG.debug("Did not find VMware Customization Config File")
else:
LOG.debug("Customization for VMware platform is disabled.")
if vmwareImcConfigFilePath:
- nics = ""
+ self._vmware_nics_to_enable = ""
try:
cf = ConfigFile(vmwareImcConfigFilePath)
- conf = Config(cf)
- (md, ud, cfg) = read_vmware_imc(conf)
- dirpath = os.path.dirname(vmwareImcConfigFilePath)
- nics = get_nics_to_enable(dirpath)
+ self._vmware_cust_conf = Config(cf)
+ (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
+ self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
+ markerid = self._vmware_cust_conf.marker_id
+ markerexists = check_marker_exists(markerid)
except Exception as e:
LOG.debug("Error parsing the customization Config File")
LOG.exception(e)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
- enable_nics(nics)
- return False
+ raise e
finally:
util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
-
try:
- LOG.debug("Applying the Network customization")
- nicConfigurator = NicConfigurator(conf.nics)
- nicConfigurator.configure()
+ LOG.debug("Preparing the Network configuration")
+ self._network_config = get_network_config_from_conf(
+ self._vmware_cust_conf,
+ True,
+ True,
+ self.distro.osfamily)
except Exception as e:
- LOG.debug("Error applying the Network Configuration")
LOG.exception(e)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED)
- enable_nics(nics)
- return False
-
- vmwarePlatformFound = True
+ raise e
+
+ if markerid and not markerexists:
+ LOG.debug("Applying password customization")
+ pwdConfigurator = PasswordConfigurator()
+ adminpwd = self._vmware_cust_conf.admin_password
+ try:
+ resetpwd = self._vmware_cust_conf.reset_password
+ if adminpwd or resetpwd:
+ pwdConfigurator.configure(adminpwd, resetpwd,
+ self.distro)
+ else:
+ LOG.debug("Changing password is not needed")
+ except Exception as e:
+ LOG.debug("Error applying Password Configuration: %s", e)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
+ return False
+ if markerid:
+ LOG.debug("Handle marker creation")
+ try:
+ setup_marker_files(markerid)
+ except Exception as e:
+ LOG.debug("Error creating marker files: %s", e)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
+ return False
+
+ self._vmware_cust_found = True
+ found.append('vmware-tools')
+
+ # TODO: Need to set the status to DONE only when the
+ # customization is done successfully.
+ enable_nics(self._vmware_nics_to_enable)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
- enable_nics(nics)
+
else:
np = {'iso': transport_iso9660,
'vmware-guestd': transport_vmware_guestd, }
@@ -160,7 +201,7 @@ class DataSourceOVF(sources.DataSource):
found.append(name)
# There was no OVF transports found
- if len(found) == 0 and not vmwarePlatformFound:
+ if len(found) == 0:
return False
if 'seedfrom' in md and md['seedfrom']:
@@ -205,6 +246,10 @@ class DataSourceOVF(sources.DataSource):
def get_config_obj(self):
return self.cfg
+ @property
+ def network_config(self):
+ return self._network_config
+
class DataSourceOVFNet(DataSourceOVF):
def __init__(self, sys_cfg, distro, paths):
@@ -236,12 +281,13 @@ def get_max_wait_from_cfg(cfg):
return max_wait
-def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
+def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
+ dirpath="/var/run/vmware-imc"):
waited = 0
while waited < maxwait:
- fileFullPath = search_file(dirpath, filename)
- if fileFullPath:
+ fileFullPath = os.path.join(dirpath, filename)
+ if os.path.isfile(fileFullPath):
return fileFullPath
LOG.debug("Waiting for VMware Customization Config File")
time.sleep(naplen)
@@ -249,6 +295,26 @@ def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
return None
+def get_network_config_from_conf(config, use_system_devices=True,
+ configure=False, osfamily=None):
+ nicConfigurator = NicConfigurator(config.nics, use_system_devices)
+ nics_cfg_list = nicConfigurator.generate(configure, osfamily)
+
+ return get_network_config(nics_cfg_list,
+ config.name_servers,
+ config.dns_suffixes)
+
+
+def get_network_config(nics=None, nameservers=None, search=None):
+ config_list = nics
+
+ if nameservers or search:
+ config_list.append({'type': 'nameserver', 'address': nameservers,
+ 'search': search})
+
+ return {'version': 1, 'config': config_list}
+
+
# This will return a dict with some content
# meta-data, user-data, some config
def read_vmware_imc(config):
@@ -264,6 +330,9 @@ def read_vmware_imc(config):
if config.timezone:
cfg['timezone'] = config.timezone
+ # Generate a unique instance-id so that re-customization will
+ # happen in cloud-init
+ md['instance-id'] = "iid-vmware-" + util.rand_str(strlen=8)
return (md, ud, cfg)
@@ -306,26 +375,56 @@ def get_ovf_env(dirname):
return (None, False)
-# Transport functions take no input and return
-# a 3 tuple of content, path, filename
-def transport_iso9660(require_iso=True):
+def maybe_cdrom_device(devname):
+ """Test if devname matches known list of devices which may contain iso9660
+ filesystems.
- # default_regex matches values in
- # /lib/udev/rules.d/60-cdrom_id.rules
- # KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
- envname = "CLOUD_INIT_CDROM_DEV_REGEX"
- default_regex = "^(sr[0-9]+|hd[a-z]|xvd.*)"
+ Be helpful in accepting either knames (with no leading /dev/) or full path
+ names, but do not allow paths outside of /dev/, like /dev/foo/bar/xxx.
+ """
+ if not devname:
+ return False
+ elif not isinstance(devname, util.string_types):
+ raise ValueError("Unexpected input for devname: %s" % devname)
+
+ # resolve '..' and multi '/' elements
+ devname = os.path.normpath(devname)
+
+ # drop leading '/dev/'
+ if devname.startswith("/dev/"):
+ # partition returns tuple (before, partition, after)
+ devname = devname.partition("/dev/")[-1]
- devname_regex = os.environ.get(envname, default_regex)
+ # ignore leading slash (/sr0), else fail on / in name (foo/bar/xvdc)
+ if devname.startswith("/"):
+ devname = devname.split("/")[-1]
+ elif devname.count("/") > 0:
+ return False
+
+ # if empty string
+ if not devname:
+ return False
+
+ # default_regex matches values in /lib/udev/rules.d/60-cdrom_id.rules
+ # KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
+ default_regex = r"^(sr[0-9]+|hd[a-z]|xvd.*)"
+ devname_regex = os.environ.get("CLOUD_INIT_CDROM_DEV_REGEX", default_regex)
cdmatch = re.compile(devname_regex)
+ return cdmatch.match(devname) is not None
+
+
+# Transport functions take no input and return
+# a 3 tuple of content, path, filename
+def transport_iso9660(require_iso=True):
+
# Go through mounts to see if it was already mounted
mounts = util.mounts()
for (dev, info) in mounts.items():
fstype = info['fstype']
if fstype != "iso9660" and require_iso:
continue
- if cdmatch.match(dev[5:]) is None: # take off '/dev/'
+ if not maybe_cdrom_device(dev):
continue
mp = info['mountpoint']
(fname, contents) = get_ovf_env(mp)
@@ -337,29 +436,19 @@ def transport_iso9660(require_iso=True):
else:
mtype = None
- devs = os.listdir("/dev/")
- devs.sort()
+ # generate a list of devices with mtype filesystem, filter by regex
+ devs = [dev for dev in
+ util.find_devs_with("TYPE=%s" % mtype if mtype else None)
+ if maybe_cdrom_device(dev)]
for dev in devs:
- fullp = os.path.join("/dev/", dev)
-
- if (fullp in mounts or
- not cdmatch.match(dev) or os.path.isdir(fullp)):
- continue
-
- try:
- # See if we can read anything at all...??
- util.peek_file(fullp, 512)
- except IOError:
- continue
-
try:
- (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype)
+ (fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype)
except util.MountFailedError:
- LOG.debug("%s not mountable as iso9660", fullp)
+ LOG.debug("%s not mountable as iso9660", dev)
continue
if contents is not False:
- return (contents, fullp, fname)
+ return (contents, dev, fname)
return (False, None, None)
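
For illustration, here is a minimal sketch of how the new maybe_cdrom_device() helper classifies device names, assuming CLOUD_INIT_CDROM_DEV_REGEX is unset so the default regex applies:

    # Illustrative checks only; they mirror the docstring above.
    from cloudinit.sources.DataSourceOVF import maybe_cdrom_device

    assert maybe_cdrom_device('/dev/sr0')              # full path accepted
    assert maybe_cdrom_device('sr0')                   # bare kname accepted
    assert maybe_cdrom_device('xvdc')                  # xen disks match xvd.*
    assert not maybe_cdrom_device('vda')               # virtio disks never match
    assert not maybe_cdrom_device('/dev/foo/bar/sr0')  # outside /dev/ rejected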
@@ -445,4 +534,33 @@ datasources = (
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
+# To check if marker file exists
+def check_marker_exists(markerid):
+ """
+ Check the existence of a marker file.
+ Presence of marker file determines whether a certain code path is to be
+ executed. It is needed for partial guest customization in VMware.
+ """
+ if not markerid:
+ return False
+ markerfile = "/.markerfile-" + markerid
+ if os.path.exists(markerfile):
+ return True
+ return False
+
+
+# Create a marker file
+def setup_marker_files(markerid):
+ """
+ Create a new marker file.
+    Marker files are unique to a full customization workflow in a VMware
+    environment.
+ """
+ if not markerid:
+ return
+ markerfile = "/.markerfile-" + markerid
+ util.del_file("/.markerfile-*.txt")
+ open(markerfile, 'w').close()
+
# vi: ts=4 expandtab
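
To make the new network_config property concrete: a hypothetical example of the version-1 dict that get_network_config() assembles (all names and addresses are illustrative):

    # Hypothetical values only; the shape follows get_network_config() above.
    nics = [{'type': 'physical', 'name': 'ens192',
             'mac_address': '00:50:56:aa:bb:cc',
             'subnets': [{'control': 'auto', 'type': 'static',
                          'address': '10.20.87.154',
                          'netmask': '255.255.252.0'}]}]
    net_cfg = get_network_config(nics, nameservers=['10.20.145.1'],
                                 search=['eng.example.com'])
    # => {'version': 1, 'config': [<the physical nic above>,
    #        {'type': 'nameserver', 'address': ['10.20.145.1'],
    #         'search': ['eng.example.com']}]}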
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 952caf35..9a43fbee 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -44,6 +44,7 @@ class DataSourceNotFoundException(Exception):
class DataSource(object):
dsmode = DSMODE_NETWORK
+ default_locale = 'en_US.UTF-8'
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
@@ -150,7 +151,13 @@ class DataSource(object):
return None
def get_locale(self):
- return 'en_US.UTF-8'
+ """Default locale is en_US.UTF-8, but allow distros to override"""
+ locale = self.default_locale
+ try:
+ locale = self.distro.get_locale()
+ except NotImplementedError:
+ pass
+ return locale
@property
def availability_zone(self):
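
A minimal sketch of the new get_locale() fallback, using a hypothetical distro class that is not part of this diff:

    # A distro that raises NotImplementedError from get_locale() leaves the
    # datasource on its default_locale ('en_US.UTF-8'); one that returns a
    # value wins.
    class ExampleDistro(object):
        def get_locale(self):
            return 'C.UTF-8'  # ds.get_locale() would now return this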
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index e22409d1..959b1bda 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -6,16 +6,16 @@ import os
import re
import socket
import struct
-import tempfile
import time
+from cloudinit.net import dhcp
from cloudinit import stages
+from cloudinit import temp_utils
from contextlib import contextmanager
from xml.etree import ElementTree
from cloudinit import util
-
LOG = logging.getLogger(__name__)
@@ -111,7 +111,7 @@ class OpenSSLManager(object):
}
def __init__(self):
- self.tmpdir = tempfile.mkdtemp()
+ self.tmpdir = temp_utils.mkdtemp()
self.certificate = None
self.generate_certificate()
@@ -239,6 +239,11 @@ class WALinuxAgentShim(object):
return socket.inet_ntoa(packed_bytes)
@staticmethod
+ def _networkd_get_value_from_leases(leases_d=None):
+ return dhcp.networkd_get_option_from_leases(
+ 'OPTION_245', leases_d=leases_d)
+
+ @staticmethod
def _get_value_from_leases_file(fallback_lease_file):
leases = []
content = util.load_file(fallback_lease_file)
@@ -287,12 +292,15 @@ class WALinuxAgentShim(object):
@staticmethod
def find_endpoint(fallback_lease_file=None):
- LOG.debug('Finding Azure endpoint...')
value = None
- # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
- # a dhclient exit hook that calls cloud-init-dhclient-hook
- dhcp_options = WALinuxAgentShim._load_dhclient_json()
- value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
+ LOG.debug('Finding Azure endpoint from networkd...')
+ value = WALinuxAgentShim._networkd_get_value_from_leases()
+ if value is None:
+ # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
+ # a dhclient exit hook that calls cloud-init-dhclient-hook
+ LOG.debug('Finding Azure endpoint from hook json...')
+ dhcp_options = WALinuxAgentShim._load_dhclient_json()
+ value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
if value is None:
# Fallback and check the leases file if unsuccessful
LOG.debug("Unable to find endpoint in dhclient logs. "
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 9a5e3a8a..49d441db 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -5,6 +5,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+
from .nic import Nic
@@ -14,13 +15,16 @@ class Config(object):
Specification file.
"""
+ CUSTOM_SCRIPT = 'CUSTOM-SCRIPT|SCRIPT-NAME'
DNS = 'DNS|NAMESERVER|'
- SUFFIX = 'DNS|SUFFIX|'
+ DOMAINNAME = 'NETWORK|DOMAINNAME'
+ HOSTNAME = 'NETWORK|HOSTNAME'
+ MARKERID = 'MISC|MARKER-ID'
PASS = 'PASSWORD|-PASS'
+ RESETPASS = 'PASSWORD|RESET'
+ SUFFIX = 'DNS|SUFFIX|'
TIMEZONE = 'DATETIME|TIMEZONE'
UTC = 'DATETIME|UTC'
- HOSTNAME = 'NETWORK|HOSTNAME'
- DOMAINNAME = 'NETWORK|DOMAINNAME'
def __init__(self, configFile):
self._configFile = configFile
@@ -82,4 +86,18 @@ class Config(object):
return res
+ @property
+ def reset_password(self):
+ """Retreives if the root password needs to be reset."""
+ resetPass = self._configFile.get(Config.RESETPASS, 'no')
+ resetPass = resetPass.lower()
+ if resetPass not in ('yes', 'no'):
+ raise ValueError('ResetPassword value should be yes/no')
+ return resetPass == 'yes'
+
+ @property
+ def marker_id(self):
+ """Returns marker id."""
+ return self._configFile.get(Config.MARKERID, None)
+
# vi: ts=4 expandtab
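
Assuming the 'SECTION|KEY' constants map to INI sections and keys, as the ConfigFile parser suggests, a hypothetical cust.cfg fragment exercising the new properties would look like this (values are illustrative only):

    [MISC]
    MARKER-ID = 12345

    [PASSWORD]
    -PASS = Y2xvdWRpbml0
    RESET = yes

With this input, marker_id returns '12345' and reset_password returns True; any RESET value other than yes/no raises ValueError.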
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 67ac21db..2fb07c59 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -9,22 +9,48 @@ import logging
import os
import re
+from cloudinit.net.network_state import mask_to_net_prefix
from cloudinit import util
logger = logging.getLogger(__name__)
+def gen_subnet(ip, netmask):
+ """
+ Return the subnet for a given ip address and a netmask
+ @return (str): the subnet
+ @param ip: ip address
+ @param netmask: netmask
+ """
+ ip_array = ip.split(".")
+ mask_array = netmask.split(".")
+ result = []
+ for index in list(range(4)):
+ result.append(int(ip_array[index]) & int(mask_array[index]))
+
+ return ".".join([str(x) for x in result])
+
+
class NicConfigurator(object):
- def __init__(self, nics):
+ def __init__(self, nics, use_system_devices=True):
"""
Initialize the Nic Configurator
@param nics (list) an array of nics to configure
+ @param use_system_devices (Bool) Get the MAC names from the system
+ if this is True. If False, then mac names will be retrieved from
+ the specified nics.
"""
self.nics = nics
self.mac2Name = {}
self.ipv4PrimaryGateway = None
self.ipv6PrimaryGateway = None
- self.find_devices()
+
+ if use_system_devices:
+ self.find_devices()
+ else:
+ for nic in self.nics:
+ self.mac2Name[nic.mac.lower()] = nic.name
+
self._primaryNic = self.get_primary_nic()
def get_primary_nic(self):
@@ -61,138 +87,163 @@ class NicConfigurator(object):
def gen_one_nic(self, nic):
"""
- Return the lines needed to configure a nic
- @return (str list): the string list to configure the nic
+ Return the config list needed to configure a nic
+ @return (list): the subnets and routes list to configure the nic
@param nic (NicBase): the nic to configure
"""
- lines = []
- name = self.mac2Name.get(nic.mac.lower())
+ mac = nic.mac.lower()
+ name = self.mac2Name.get(mac)
if not name:
raise ValueError('No known device has MACADDR: %s' % nic.mac)
- if nic.onboot:
- lines.append('auto %s' % name)
+ nics_cfg_list = []
+
+ cfg = {'type': 'physical', 'name': name, 'mac_address': mac}
+
+ subnet_list = []
+ route_list = []
# Customize IPv4
- lines.extend(self.gen_ipv4(name, nic))
+ (subnets, routes) = self.gen_ipv4(name, nic)
+ subnet_list.extend(subnets)
+ route_list.extend(routes)
# Customize IPv6
- lines.extend(self.gen_ipv6(name, nic))
+ (subnets, routes) = self.gen_ipv6(name, nic)
+ subnet_list.extend(subnets)
+ route_list.extend(routes)
+
+ cfg.update({'subnets': subnet_list})
- lines.append('')
+ nics_cfg_list.append(cfg)
+ if route_list:
+ nics_cfg_list.extend(route_list)
- return lines
+ return nics_cfg_list
def gen_ipv4(self, name, nic):
"""
- Return the lines needed to configure the IPv4 setting of a nic
- @return (str list): the string list to configure the gateways
- @param name (str): name of the nic
+ Return the set of subnets and routes needed to configure the
+ IPv4 settings of a nic
+        @return (tuple): the subnet list and route list to configure the nic
+        @param name (str): name of the nic
@param nic (NicBase): the nic to configure
"""
- lines = []
+
+ subnet = {}
+ route_list = []
+
+ if nic.onboot:
+ subnet.update({'control': 'auto'})
bootproto = nic.bootProto.lower()
if nic.ipv4_mode.lower() == 'disabled':
bootproto = 'manual'
- lines.append('iface %s inet %s' % (name, bootproto))
if bootproto != 'static':
- return lines
+ subnet.update({'type': 'dhcp'})
+ return ([subnet], route_list)
+ else:
+ subnet.update({'type': 'static'})
# Static Ipv4
addrs = nic.staticIpv4
if not addrs:
- return lines
+ return ([subnet], route_list)
v4 = addrs[0]
if v4.ip:
- lines.append(' address %s' % v4.ip)
+ subnet.update({'address': v4.ip})
if v4.netmask:
- lines.append(' netmask %s' % v4.netmask)
+ subnet.update({'netmask': v4.netmask})
# Add the primary gateway
if nic.primary and v4.gateways:
self.ipv4PrimaryGateway = v4.gateways[0]
- lines.append(' gateway %s metric 0' % self.ipv4PrimaryGateway)
- return lines
+ subnet.update({'gateway': self.ipv4PrimaryGateway})
+ return [subnet]
# Add routes if there is no primary nic
if not self._primaryNic:
- lines.extend(self.gen_ipv4_route(nic, v4.gateways))
+ route_list.extend(self.gen_ipv4_route(nic,
+ v4.gateways,
+ v4.netmask))
- return lines
+ return ([subnet], route_list)
- def gen_ipv4_route(self, nic, gateways):
+ def gen_ipv4_route(self, nic, gateways, netmask):
"""
- Return the lines needed to configure additional Ipv4 route
- @return (str list): the string list to configure the gateways
+ Return the routes list needed to configure additional Ipv4 route
+ @return (list): the route list to configure the gateways
@param nic (NicBase): the nic to configure
@param gateways (str list): the list of gateways
"""
- lines = []
+ route_list = []
+
+ cidr = mask_to_net_prefix(netmask)
for gateway in gateways:
- lines.append(' up route add default gw %s metric 10000' %
- gateway)
+ destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr)
+ route_list.append({'destination': destination,
+ 'type': 'route',
+ 'gateway': gateway,
+ 'metric': 10000})
- return lines
+ return route_list
def gen_ipv6(self, name, nic):
"""
- Return the lines needed to configure the gateways for a nic
- @return (str list): the string list to configure the gateways
+ Return the set of subnets and routes needed to configure the
+ gateways for a nic
+        @return (tuple): the subnet list and route list to configure the
+        gateways
@param name (str): name of the nic
@param nic (NicBase): the nic to configure
"""
- lines = []
if not nic.staticIpv6:
- return lines
+ return ([], [])
+ subnet_list = []
# Static Ipv6
addrs = nic.staticIpv6
- lines.append('iface %s inet6 static' % name)
- lines.append(' address %s' % addrs[0].ip)
- lines.append(' netmask %s' % addrs[0].netmask)
- for addr in addrs[1:]:
- lines.append(' up ifconfig %s inet6 add %s/%s' % (name, addr.ip,
- addr.netmask))
- # Add the primary gateway
- if nic.primary:
- for addr in addrs:
- if addr.gateway:
- self.ipv6PrimaryGateway = addr.gateway
- lines.append(' gateway %s' % self.ipv6PrimaryGateway)
- return lines
+ for addr in addrs:
+ subnet = {'type': 'static6',
+ 'address': addr.ip,
+ 'netmask': addr.netmask}
+ subnet_list.append(subnet)
- # Add routes if there is no primary nic
- if not self._primaryNic:
- lines.extend(self._genIpv6Route(name, nic, addrs))
+ # TODO: Add the primary gateway
+
+ route_list = []
+ # TODO: Add routes if there is no primary nic
+ # if not self._primaryNic:
+ # route_list.extend(self._genIpv6Route(name, nic, addrs))
- return lines
+ return (subnet_list, route_list)
def _genIpv6Route(self, name, nic, addrs):
- lines = []
+ route_list = []
for addr in addrs:
- lines.append(' up route -A inet6 add default gw '
- '%s metric 10000' % addr.gateway)
+ route_list.append({'type': 'route',
+ 'gateway': addr.gateway,
+ 'metric': 10000})
+
+ return route_list
- return lines
+ def generate(self, configure=False, osfamily=None):
+ """Return the config elements that are needed to configure the nics"""
+ if configure:
+ logger.info("Configuring the interfaces file")
+ self.configure(osfamily)
- def generate(self):
- """Return the lines that is needed to configure the nics"""
- lines = []
- lines.append('iface lo inet loopback')
- lines.append('auto lo')
- lines.append('')
+ nics_cfg_list = []
for nic in self.nics:
- lines.extend(self.gen_one_nic(nic))
+ nics_cfg_list.extend(self.gen_one_nic(nic))
- return lines
+ return nics_cfg_list
def clear_dhcp(self):
logger.info('Clearing DHCP leases')
@@ -201,11 +252,16 @@ class NicConfigurator(object):
util.subp(["pkill", "dhclient"], rcs=[0, 1])
util.subp(["rm", "-f", "/var/lib/dhcp/*"])
- def configure(self):
+ def configure(self, osfamily=None):
"""
- Configure the /etc/network/intefaces
+ Configure the /etc/network/interfaces
Make a back up of the original
"""
+
+ if not osfamily or osfamily != "debian":
+ logger.info("Debian OS not detected. Skipping the configure step")
+ return
+
containingDir = '/etc/network'
interfaceFile = os.path.join(containingDir, 'interfaces')
@@ -215,10 +271,13 @@ class NicConfigurator(object):
if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
os.rename(interfaceFile, originalFile)
- lines = self.generate()
- with open(interfaceFile, 'w') as fp:
- for line in lines:
- fp.write('%s\n' % line)
+ lines = [
+ "# DO NOT EDIT THIS FILE BY HAND --"
+ " AUTOMATICALLY GENERATED BY cloud-init",
+ "source /etc/network/interfaces.d/*.cfg",
+ ]
+
+ util.write_file(interfaceFile, content='\n'.join(lines))
self.clear_dhcp()
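
A worked example of the destination math in gen_ipv4_route(), with hypothetical addresses:

    # gen_subnet() ANDs the octets; mask_to_net_prefix() turns the dotted
    # mask into a prefix length.
    from cloudinit.net.network_state import mask_to_net_prefix

    gateway, netmask = '10.20.87.253', '255.255.252.0'
    cidr = mask_to_net_prefix(netmask)                        # 22
    destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr)
    assert destination == '10.20.84.0/22'                     # 87 & 252 == 84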
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
new file mode 100644
index 00000000..75cfbaaf
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware INC.
+#
+# Author: Maitreyee Saikia <msaikia@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+import logging
+import os
+
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class PasswordConfigurator(object):
+ """
+ Class for changing configurations related to passwords in a VM. Includes
+ setting and expiring passwords.
+ """
+ def configure(self, passwd, resetPasswd, distro):
+ """
+        Main method to perform all password-related configuration based on
+        the configuration file inputs.
+        @param passwd: base64-encoded admin password.
+        @param resetPasswd: boolean to determine if password needs to be reset.
+        @param distro: distro object used to set the admin password.
+ """
+ LOG.info('Starting password configuration')
+ if passwd:
+ passwd = util.b64d(passwd)
+ allRootUsers = []
+ for line in open('/etc/passwd', 'r'):
+ if line.split(':')[2] == '0':
+ allRootUsers.append(line.split(':')[0])
+        # Read the shadow file and keep each user that is in the uid-0 list.
+ uidUsersList = []
+ for line in open('/etc/shadow', 'r'):
+ user = line.split(':')[0]
+ if user in allRootUsers:
+ uidUsersList.append(user)
+ if passwd:
+ LOG.info('Setting admin password')
+ distro.set_passwd('root', passwd)
+ if resetPasswd:
+ self.reset_password(uidUsersList)
+ LOG.info('Configure Password completed!')
+
+ def reset_password(self, uidUserList):
+ """
+        Method to reset password. Uses the 'passwd --expire' command, falling
+        back to 'chage' if that does not succeed, and logs a failure otherwise.
+ @param: list of users for which to expire password.
+ """
+ LOG.info('Expiring password.')
+ for user in uidUserList:
+ try:
+ out, err = util.subp(['passwd', '--expire', user])
+ except util.ProcessExecutionError as e:
+ if os.path.exists('/usr/bin/chage'):
+ out, e = util.subp(['chage', '-d', '0', user])
+ else:
+ LOG.warning('Failed to expire password for %s with error: '
+ '%s', user, e)
+
+# vi: ts=4 expandtab
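
A short hypothetical driver for the new class, mirroring the DataSourceOVF call site ('distro' stands in for a real distro object):

    # Hypothetical usage; 'cGFzc3dvcmQ=' is base64 for 'password'.
    pwd_cfg = PasswordConfigurator()
    pwd_cfg.configure('cGFzc3dvcmQ=', resetPasswd=True, distro=distro)
    # Sets root's password via distro.set_passwd(), then expires every
    # uid-0 account with 'passwd --expire', falling back to 'chage -d 0'.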
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 1ab6bd41..44075255 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -59,14 +59,16 @@ def set_customization_status(custstate, custerror, errormessage=None):
return (out, err)
-# This will read the file nics.txt in the specified directory
-# and return the content
-def get_nics_to_enable(dirpath):
- if not dirpath:
+def get_nics_to_enable(nicsfilepath):
+ """Reads the NICS from the specified file path and returns the content
+
+ @param nicsfilepath: Absolute file path to the NICS.txt file.
+ """
+
+ if not nicsfilepath:
return None
NICS_SIZE = 1024
- nicsfilepath = os.path.join(dirpath, "nics.txt")
if not os.path.exists(nicsfilepath):
return None
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index a1c4a517..d0452688 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -821,28 +821,35 @@ class Modules(object):
skipped = []
forced = []
overridden = self.cfg.get('unverified_modules', [])
+ active_mods = []
+ all_distros = set([distros.ALL_DISTROS])
for (mod, name, _freq, _args) in mostly_mods:
- worked_distros = set(mod.distros)
+ worked_distros = set(mod.distros) # Minimally [] per fixup_modules
worked_distros.update(
distros.Distro.expand_osfamily(mod.osfamilies))
- # module does not declare 'distros' or lists this distro
- if not worked_distros or d_name in worked_distros:
- continue
-
- if name in overridden:
- forced.append(name)
- else:
- skipped.append(name)
+            # Skip only when the following conditions are all met:
+            #  - the module declares specific distros (not ALL_DISTROS)
+            #  - the current d_name is not among those distros
+            #  - the module is not in the 'unverified_modules' override
+            #    list
+ if worked_distros and worked_distros != all_distros:
+ if d_name not in worked_distros:
+ if name not in overridden:
+ skipped.append(name)
+ continue
+ forced.append(name)
+ active_mods.append([mod, name, _freq, _args])
if skipped:
- LOG.info("Skipping modules %s because they are not verified "
+ LOG.info("Skipping modules '%s' because they are not verified "
"on distro '%s'. To run anyway, add them to "
- "'unverified_modules' in config.", skipped, d_name)
+ "'unverified_modules' in config.",
+ ','.join(skipped), d_name)
if forced:
- LOG.info("running unverified_modules: %s", forced)
+ LOG.info("running unverified_modules: '%s'", ', '.join(forced))
- return self._run_modules(mostly_mods)
+ return self._run_modules(active_mods)
def read_runtime_config():
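
Restated as a single predicate, the new skip rule reads as follows (a sketch, not the code above):

    # Sketch only: worked_distros and all_distros are sets, as in the hunk.
    def should_skip(name, worked_distros, d_name, overridden, all_distros):
        if not worked_distros or worked_distros == all_distros:
            return False                  # module declares no specific distros
        if d_name in worked_distros:
            return False                  # verified on the current distro
        return name not in overridden     # unverified and not forced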
diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py
new file mode 100644
index 00000000..5d7adf70
--- /dev/null
+++ b/cloudinit/temp_utils.py
@@ -0,0 +1,101 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import contextlib
+import errno
+import os
+import shutil
+import tempfile
+
+_TMPDIR = None
+_ROOT_TMPDIR = "/run/cloud-init/tmp"
+_EXE_ROOT_TMPDIR = "/var/tmp/cloud-init"
+
+
+def _tempfile_dir_arg(odir=None, needs_exe=False):
+ """Return the proper 'dir' argument for tempfile functions.
+
+ When root, cloud-init will use /run/cloud-init/tmp to avoid
+ any cleaning that a distro boot might do on /tmp (such as
+ systemd-tmpfiles-clean).
+
+ If the caller of this function (mkdtemp or mkstemp) was provided
+ with a 'dir' argument, then that is respected.
+
+ @param odir: original 'dir' arg to 'mkdtemp' or other.
+ @param needs_exe: Boolean specifying whether or not exe permissions are
+ needed for tempdir. This is needed because /run is mounted noexec.
+ """
+ if odir is not None:
+ return odir
+
+ global _TMPDIR
+ if _TMPDIR:
+ return _TMPDIR
+
+ if needs_exe:
+ tdir = _EXE_ROOT_TMPDIR
+ elif os.getuid() == 0:
+ tdir = _ROOT_TMPDIR
+ else:
+ tdir = os.environ.get('TMPDIR', '/tmp')
+ if not os.path.isdir(tdir):
+ os.makedirs(tdir)
+ os.chmod(tdir, 0o1777)
+
+ _TMPDIR = tdir
+ return tdir
+
+
+def ExtendedTemporaryFile(**kwargs):
+ kwargs['dir'] = _tempfile_dir_arg(
+ kwargs.pop('dir', None), kwargs.pop('needs_exe', False))
+ fh = tempfile.NamedTemporaryFile(**kwargs)
+ # Replace its unlink with a quiet version
+ # that does not raise errors when the
+ # file to unlink has been unlinked elsewhere..
+
+ def _unlink_if_exists(path):
+ try:
+ os.unlink(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise e
+
+ fh.unlink = _unlink_if_exists
+
+ # Add a new method that will unlink
+ # right 'now' but still lets the exit
+ # method attempt to remove it (which will
+ # not throw due to our del file being quiet
+ # about files that are not there)
+ def unlink_now():
+ fh.unlink(fh.name)
+
+ setattr(fh, 'unlink_now', unlink_now)
+ return fh
+
+
+@contextlib.contextmanager
+def tempdir(**kwargs):
+ # This seems like it was only added in python 3.2
+    # Make it since it's useful...
+ # See: http://bugs.python.org/file12970/tempdir.patch
+ tdir = mkdtemp(**kwargs)
+ try:
+ yield tdir
+ finally:
+ shutil.rmtree(tdir)
+
+
+def mkdtemp(**kwargs):
+ kwargs['dir'] = _tempfile_dir_arg(
+ kwargs.pop('dir', None), kwargs.pop('needs_exe', False))
+ return tempfile.mkdtemp(**kwargs)
+
+
+def mkstemp(**kwargs):
+ kwargs['dir'] = _tempfile_dir_arg(
+ kwargs.pop('dir', None), kwargs.pop('needs_exe', False))
+ return tempfile.mkstemp(**kwargs)
+
+# vi: ts=4 expandtab
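
Typical use of the new helpers, as a hedged sketch; the only caller-visible change from the tempfile equivalents is the optional needs_exe flag:

    import os
    from cloudinit import temp_utils

    # As root this would land under /run/cloud-init/tmp; with needs_exe=True
    # it lands under /var/tmp/cloud-init because /run is mounted noexec.
    with temp_utils.tempdir(needs_exe=True) as tdir:
        script = os.path.join(tdir, 'probe.sh')  # hypothetical helper script
        # ... write the script with exec permissions and run it here ...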
diff --git a/cloudinit/tests/__init__.py b/cloudinit/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/tests/__init__.py
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
new file mode 100644
index 00000000..6f88a5b7
--- /dev/null
+++ b/cloudinit/tests/helpers.py
@@ -0,0 +1,405 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from __future__ import print_function
+
+import functools
+import json
+import logging
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+
+import mock
+import six
+import unittest2
+
+try:
+ from contextlib import ExitStack
+except ImportError:
+ from contextlib2 import ExitStack
+
+from cloudinit import helpers as ch
+from cloudinit import util
+
+# Used for skipping tests
+SkipTest = unittest2.SkipTest
+
+# Used for detecting different python versions
+PY2 = False
+PY26 = False
+PY27 = False
+PY3 = False
+
+_PY_VER = sys.version_info
+_PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3]
+if (_PY_MAJOR, _PY_MINOR) <= (2, 6):
+ if (_PY_MAJOR, _PY_MINOR) == (2, 6):
+ PY26 = True
+ if (_PY_MAJOR, _PY_MINOR) >= (2, 0):
+ PY2 = True
+else:
+ if (_PY_MAJOR, _PY_MINOR) == (2, 7):
+ PY27 = True
+ PY2 = True
+ if (_PY_MAJOR, _PY_MINOR) >= (3, 0):
+ PY3 = True
+
+
+# Makes the old path start
+# with new base instead of whatever
+# it previously had
+def rebase_path(old_path, new_base):
+ if old_path.startswith(new_base):
+ # Already handled...
+ return old_path
+ # Retarget the base of that path
+ # to the new base instead of the
+ # old one...
+ path = os.path.join(new_base, old_path.lstrip("/"))
+ path = os.path.abspath(path)
+ return path
+
+
+# Can work on anything that takes a path as arguments
+def retarget_many_wrapper(new_base, am, old_func):
+ def wrapper(*args, **kwds):
+ n_args = list(args)
+ nam = am
+ if am == -1:
+ nam = len(n_args)
+ for i in range(0, nam):
+ path = args[i]
+ # patchOS() wraps various os and os.path functions, however in
+ # Python 3 some of these now accept file-descriptors (integers).
+ # That breaks rebase_path() so in lieu of a better solution, just
+ # don't rebase if we get a fd.
+ if isinstance(path, six.string_types):
+ n_args[i] = rebase_path(path, new_base)
+ return old_func(*n_args, **kwds)
+ return wrapper
+
+
+class TestCase(unittest2.TestCase):
+
+ def reset_global_state(self):
+ """Reset any global state to its original settings.
+
+ cloudinit caches some values in cloudinit.util. Unit tests that
+ involved those cached paths were then subject to failure if the order
+ of invocation changed (LP: #1703697).
+
+ This function resets any of these global state variables to their
+ initial state.
+
+ In the future this should really be done with some registry that
+ can then be cleaned in a more obvious way.
+ """
+ util.PROC_CMDLINE = None
+ util._DNS_REDIRECT_IP = None
+ util._LSB_RELEASE = {}
+
+ def setUp(self):
+ super(TestCase, self).setUp()
+ self.reset_global_state()
+
+ def add_patch(self, target, attr, **kwargs):
+ """Patches specified target object and sets it as attr on test
+ instance also schedules cleanup"""
+ if 'autospec' not in kwargs:
+ kwargs['autospec'] = True
+ m = mock.patch(target, **kwargs)
+ p = m.start()
+ self.addCleanup(m.stop)
+ setattr(self, attr, p)
+
+
+class CiTestCase(TestCase):
+ """This is the preferred test case base class unless user
+ needs other test case classes below."""
+
+ # Subclass overrides for specific test behavior
+ # Whether or not a unit test needs logfile setup
+ with_logs = False
+
+ def setUp(self):
+ super(CiTestCase, self).setUp()
+ if self.with_logs:
+ # Create a log handler so unit tests can search expected logs.
+ self.logger = logging.getLogger()
+ self.logs = six.StringIO()
+ formatter = logging.Formatter('%(levelname)s: %(message)s')
+ handler = logging.StreamHandler(self.logs)
+ handler.setFormatter(formatter)
+ self.old_handlers = self.logger.handlers
+ self.logger.handlers = [handler]
+
+ def tearDown(self):
+ if self.with_logs:
+ # Remove the handler we setup
+ logging.getLogger().handlers = self.old_handlers
+ super(CiTestCase, self).tearDown()
+
+ def tmp_dir(self, dir=None, cleanup=True):
+ # return a full path to a temporary directory that will be cleaned up.
+ if dir is None:
+ tmpd = tempfile.mkdtemp(
+ prefix="ci-%s." % self.__class__.__name__)
+ else:
+ tmpd = tempfile.mkdtemp(dir=dir)
+ self.addCleanup(functools.partial(shutil.rmtree, tmpd))
+ return tmpd
+
+ def tmp_path(self, path, dir=None):
+ # return an absolute path to 'path' under dir.
+ # if dir is None, one will be created with tmp_dir()
+ # the file is not created or modified.
+ if dir is None:
+ dir = self.tmp_dir()
+ return os.path.normpath(os.path.abspath(os.path.join(dir, path)))
+
+
+class ResourceUsingTestCase(CiTestCase):
+
+ def setUp(self):
+ super(ResourceUsingTestCase, self).setUp()
+ self.resource_path = None
+
+ def resourceLocation(self, subname=None):
+ if self.resource_path is None:
+ paths = [
+ os.path.join('tests', 'data'),
+ os.path.join('data'),
+ os.path.join(os.pardir, 'tests', 'data'),
+ os.path.join(os.pardir, 'data'),
+ ]
+ for p in paths:
+ if os.path.isdir(p):
+ self.resource_path = p
+ break
+ self.assertTrue((self.resource_path and
+ os.path.isdir(self.resource_path)),
+ msg="Unable to locate test resource data path!")
+ if not subname:
+ return self.resource_path
+ return os.path.join(self.resource_path, subname)
+
+ def readResource(self, name):
+ where = self.resourceLocation(name)
+ with open(where, 'r') as fh:
+ return fh.read()
+
+ def getCloudPaths(self, ds=None):
+ tmpdir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, tmpdir)
+ cp = ch.Paths({'cloud_dir': tmpdir,
+ 'templates_dir': self.resourceLocation()},
+ ds=ds)
+ return cp
+
+
+class FilesystemMockingTestCase(ResourceUsingTestCase):
+
+ def setUp(self):
+ super(FilesystemMockingTestCase, self).setUp()
+ self.patched_funcs = ExitStack()
+
+ def tearDown(self):
+ self.patched_funcs.close()
+ ResourceUsingTestCase.tearDown(self)
+
+ def replicateTestRoot(self, example_root, target_root):
+ real_root = self.resourceLocation()
+ real_root = os.path.join(real_root, 'roots', example_root)
+ for (dir_path, _dirnames, filenames) in os.walk(real_root):
+ real_path = dir_path
+ make_path = rebase_path(real_path[len(real_root):], target_root)
+ util.ensure_dir(make_path)
+ for f in filenames:
+ real_path = util.abs_join(real_path, f)
+ make_path = util.abs_join(make_path, f)
+ shutil.copy(real_path, make_path)
+
+ def patchUtils(self, new_root):
+ patch_funcs = {
+ util: [('write_file', 1),
+ ('append_file', 1),
+ ('load_file', 1),
+ ('ensure_dir', 1),
+ ('chmod', 1),
+ ('delete_dir_contents', 1),
+ ('del_file', 1),
+ ('sym_link', -1),
+ ('copy', -1)],
+ }
+ for (mod, funcs) in patch_funcs.items():
+ for (f, am) in funcs:
+ func = getattr(mod, f)
+ trap_func = retarget_many_wrapper(new_root, am, func)
+ self.patched_funcs.enter_context(
+ mock.patch.object(mod, f, trap_func))
+
+ # Handle subprocess calls
+ func = getattr(util, 'subp')
+
+ def nsubp(*_args, **_kwargs):
+ return ('', '')
+
+ self.patched_funcs.enter_context(
+ mock.patch.object(util, 'subp', nsubp))
+
+ def null_func(*_args, **_kwargs):
+ return None
+
+ for f in ['chownbyid', 'chownbyname']:
+ self.patched_funcs.enter_context(
+ mock.patch.object(util, f, null_func))
+
+ def patchOS(self, new_root):
+ patch_funcs = {
+ os.path: [('isfile', 1), ('exists', 1),
+ ('islink', 1), ('isdir', 1)],
+ os: [('listdir', 1), ('mkdir', 1),
+ ('lstat', 1), ('symlink', 2)],
+ }
+ for (mod, funcs) in patch_funcs.items():
+ for f, nargs in funcs:
+ func = getattr(mod, f)
+ trap_func = retarget_many_wrapper(new_root, nargs, func)
+ self.patched_funcs.enter_context(
+ mock.patch.object(mod, f, trap_func))
+
+ def patchOpen(self, new_root):
+ trap_func = retarget_many_wrapper(new_root, 1, open)
+ name = 'builtins.open' if PY3 else '__builtin__.open'
+ self.patched_funcs.enter_context(mock.patch(name, trap_func))
+
+ def patchStdoutAndStderr(self, stdout=None, stderr=None):
+ if stdout is not None:
+ self.patched_funcs.enter_context(
+ mock.patch.object(sys, 'stdout', stdout))
+ if stderr is not None:
+ self.patched_funcs.enter_context(
+ mock.patch.object(sys, 'stderr', stderr))
+
+ def reRoot(self, root=None):
+ if root is None:
+ root = self.tmp_dir()
+ self.patchUtils(root)
+ self.patchOS(root)
+ return root
+
+
+class HttprettyTestCase(CiTestCase):
+ # necessary as http_proxy gets in the way of httpretty
+ # https://github.com/gabrielfalcao/HTTPretty/issues/122
+
+ def setUp(self):
+ self.restore_proxy = os.environ.get('http_proxy')
+ if self.restore_proxy is not None:
+ del os.environ['http_proxy']
+ super(HttprettyTestCase, self).setUp()
+
+ def tearDown(self):
+ if self.restore_proxy:
+ os.environ['http_proxy'] = self.restore_proxy
+ super(HttprettyTestCase, self).tearDown()
+
+
+def populate_dir(path, files):
+ if not os.path.exists(path):
+ os.makedirs(path)
+ ret = []
+ for (name, content) in files.items():
+ p = os.path.sep.join([path, name])
+ util.ensure_dir(os.path.dirname(p))
+ with open(p, "wb") as fp:
+ if isinstance(content, six.binary_type):
+ fp.write(content)
+ else:
+ fp.write(content.encode('utf-8'))
+ fp.close()
+ ret.append(p)
+
+ return ret
+
+
+def dir2dict(startdir, prefix=None):
+ flist = {}
+ if prefix is None:
+ prefix = startdir
+ for root, dirs, files in os.walk(startdir):
+ for fname in files:
+ fpath = os.path.join(root, fname)
+ key = fpath[len(prefix):]
+ flist[key] = util.load_file(fpath)
+ return flist
+
+
+def json_dumps(data):
+ # print data in nicely formatted json.
+ return json.dumps(data, indent=1, sort_keys=True,
+ separators=(',', ': '))
+
+
+def wrap_and_call(prefix, mocks, func, *args, **kwargs):
+ """
+    call func(*args, **kwargs) with mocks applied, then unapply the mocks;
+    nicer to read than repeating decorators on each function
+
+ prefix: prefix for mock names (e.g. 'cloudinit.stages.util') or None
+ mocks: dictionary of names (under 'prefix') to mock and either
+ a return value or a dictionary to pass to the mock.patch call
+ func: function to call with mocks applied
+ *args,**kwargs: arguments for 'func'
+
+ return_value: return from 'func'
+ """
+ delim = '.'
+ if prefix is None:
+ prefix = ''
+ prefix = prefix.rstrip(delim)
+ unwraps = []
+ for fname, kw in mocks.items():
+ if prefix:
+ fname = delim.join((prefix, fname))
+ if not isinstance(kw, dict):
+ kw = {'return_value': kw}
+ p = mock.patch(fname, **kw)
+ p.start()
+ unwraps.append(p)
+ try:
+ return func(*args, **kwargs)
+ finally:
+ for p in unwraps:
+ p.stop()
+
+
+try:
+ skipIf = unittest.skipIf
+except AttributeError:
+ # Python 2.6. Doesn't have to be high fidelity.
+ def skipIf(condition, reason):
+ def decorator(func):
+ def wrapper(*args, **kws):
+                if condition:
+                    print(reason, file=sys.stderr)
+                else:
+                    return func(*args, **kws)
+ return wrapper
+ return decorator
+
+
+# older versions of mock do not have the useful 'assert_not_called'
+if not hasattr(mock.Mock, 'assert_not_called'):
+ def __mock_assert_not_called(mmock):
+ if mmock.call_count != 0:
+ msg = ("[citest] Expected '%s' to not have been called. "
+ "Called %s times." %
+ (mmock._mock_name or 'mock', mmock.call_count))
+ raise AssertionError(msg)
+ mock.Mock.assert_not_called = __mock_assert_not_called
+
+
+# vi: ts=4 expandtab
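
wrap_and_call() is exercised heavily by the new temp_utils tests below; a condensed usage sketch:

    # Patch several names under one prefix, call the function, then unwind.
    # A bare value becomes return_value; a dict is passed to mock.patch.
    from cloudinit.temp_utils import mkdtemp

    retval = wrap_and_call(
        'cloudinit.temp_utils',
        {'os.getuid': 0,
         'tempfile.mkdtemp': '/fake/return/path',
         '_TMPDIR': {'new': None},
         'os.path.isdir': True},
        mkdtemp)
    # retval == '/fake/return/path'; all four patches are stopped afterwards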
diff --git a/cloudinit/tests/test_simpletable.py b/cloudinit/tests/test_simpletable.py
new file mode 100644
index 00000000..96bc24cf
--- /dev/null
+++ b/cloudinit/tests/test_simpletable.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2017 Amazon.com, Inc. or its affiliates
+#
+# Author: Andrew Jorgensen <ajorgens@amazon.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests that SimpleTable works just like PrettyTable for cloud-init.
+
+Not all possible PrettyTable cases are tested because we're not trying to
+reimplement the entire library, only the minimal parts we actually use.
+"""
+
+from cloudinit.simpletable import SimpleTable
+from cloudinit.tests.helpers import CiTestCase
+
+# Examples rendered by cloud-init using PrettyTable
+NET_DEVICE_FIELDS = (
+ 'Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address')
+NET_DEVICE_ROWS = (
+ ('ens3', True, '172.31.4.203', '255.255.240.0', '.', '0a:1f:07:15:98:70'),
+ ('ens3', True, 'fe80::81f:7ff:fe15:9870/64', '.', 'link',
+ '0a:1f:07:15:98:70'),
+ ('lo', True, '127.0.0.1', '255.0.0.0', '.', '.'),
+ ('lo', True, '::1/128', '.', 'host', '.'),
+)
+NET_DEVICE_TABLE = """\
++--------+------+----------------------------+---------------+-------+-------------------+
+| Device | Up | Address | Mask | Scope | Hw-Address |
++--------+------+----------------------------+---------------+-------+-------------------+
+| ens3 | True | 172.31.4.203 | 255.255.240.0 | . | 0a:1f:07:15:98:70 |
+| ens3 | True | fe80::81f:7ff:fe15:9870/64 | . | link | 0a:1f:07:15:98:70 |
+| lo | True | 127.0.0.1 | 255.0.0.0 | . | . |
+| lo | True | ::1/128 | . | host | . |
++--------+------+----------------------------+---------------+-------+-------------------+""" # noqa: E501
+ROUTE_IPV4_FIELDS = (
+ 'Route', 'Destination', 'Gateway', 'Genmask', 'Interface', 'Flags')
+ROUTE_IPV4_ROWS = (
+ ('0', '0.0.0.0', '172.31.0.1', '0.0.0.0', 'ens3', 'UG'),
+ ('1', '169.254.0.0', '0.0.0.0', '255.255.0.0', 'ens3', 'U'),
+ ('2', '172.31.0.0', '0.0.0.0', '255.255.240.0', 'ens3', 'U'),
+)
+ROUTE_IPV4_TABLE = """\
++-------+-------------+------------+---------------+-----------+-------+
+| Route | Destination | Gateway | Genmask | Interface | Flags |
++-------+-------------+------------+---------------+-----------+-------+
+| 0 | 0.0.0.0 | 172.31.0.1 | 0.0.0.0 | ens3 | UG |
+| 1 | 169.254.0.0 | 0.0.0.0 | 255.255.0.0 | ens3 | U |
+| 2 | 172.31.0.0 | 0.0.0.0 | 255.255.240.0 | ens3 | U |
++-------+-------------+------------+---------------+-----------+-------+"""
+
+AUTHORIZED_KEYS_FIELDS = (
+ 'Keytype', 'Fingerprint (md5)', 'Options', 'Comment')
+AUTHORIZED_KEYS_ROWS = (
+ ('ssh-rsa', '24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36', '-',
+ 'ajorgens'),
+)
+AUTHORIZED_KEYS_TABLE = """\
++---------+-------------------------------------------------+---------+----------+
+| Keytype | Fingerprint (md5) | Options | Comment |
++---------+-------------------------------------------------+---------+----------+
+| ssh-rsa | 24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36 | - | ajorgens |
++---------+-------------------------------------------------+---------+----------+""" # noqa: E501
+
+# from prettytable import PrettyTable
+# pt = PrettyTable(('HEADER',))
+# print(pt)
+NO_ROWS_FIELDS = ('HEADER',)
+NO_ROWS_TABLE = """\
++--------+
+| HEADER |
++--------+
++--------+"""
+
+
+class TestSimpleTable(CiTestCase):
+
+ def test_no_rows(self):
+ """An empty table is rendered as PrettyTable would have done it."""
+ table = SimpleTable(NO_ROWS_FIELDS)
+ self.assertEqual(str(table), NO_ROWS_TABLE)
+
+ def test_net_dev(self):
+ """Net device info is rendered as it was with PrettyTable."""
+ table = SimpleTable(NET_DEVICE_FIELDS)
+ for row in NET_DEVICE_ROWS:
+ table.add_row(row)
+ self.assertEqual(str(table), NET_DEVICE_TABLE)
+
+ def test_route_ipv4(self):
+ """Route IPv4 info is rendered as it was with PrettyTable."""
+ table = SimpleTable(ROUTE_IPV4_FIELDS)
+ for row in ROUTE_IPV4_ROWS:
+ table.add_row(row)
+ self.assertEqual(str(table), ROUTE_IPV4_TABLE)
+
+ def test_authorized_keys(self):
+ """SSH authorized keys are rendered as they were with PrettyTable."""
+ table = SimpleTable(AUTHORIZED_KEYS_FIELDS)
+ for row in AUTHORIZED_KEYS_ROWS:
+ table.add_row(row)
+ self.assertEqual(str(table), AUTHORIZED_KEYS_TABLE)
diff --git a/cloudinit/tests/test_temp_utils.py b/cloudinit/tests/test_temp_utils.py
new file mode 100644
index 00000000..ffbb92cd
--- /dev/null
+++ b/cloudinit/tests/test_temp_utils.py
@@ -0,0 +1,101 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for cloudinit.temp_utils"""
+
+from cloudinit.temp_utils import mkdtemp, mkstemp
+from cloudinit.tests.helpers import CiTestCase, wrap_and_call
+
+
+class TestTempUtils(CiTestCase):
+
+ def test_mkdtemp_default_non_root(self):
+ """mkdtemp creates a dir under /tmp for the unprivileged."""
+ calls = []
+
+ def fake_mkdtemp(*args, **kwargs):
+ calls.append(kwargs)
+ return '/fake/return/path'
+
+ retval = wrap_and_call(
+ 'cloudinit.temp_utils',
+ {'os.getuid': 1000,
+ 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp},
+ '_TMPDIR': {'new': None},
+ 'os.path.isdir': True},
+ mkdtemp)
+ self.assertEqual('/fake/return/path', retval)
+ self.assertEqual([{'dir': '/tmp'}], calls)
+
+ def test_mkdtemp_default_non_root_needs_exe(self):
+ """mkdtemp creates a dir under /var/tmp/cloud-init when needs_exe."""
+ calls = []
+
+ def fake_mkdtemp(*args, **kwargs):
+ calls.append(kwargs)
+ return '/fake/return/path'
+
+ retval = wrap_and_call(
+ 'cloudinit.temp_utils',
+ {'os.getuid': 1000,
+ 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp},
+ '_TMPDIR': {'new': None},
+ 'os.path.isdir': True},
+ mkdtemp, needs_exe=True)
+ self.assertEqual('/fake/return/path', retval)
+ self.assertEqual([{'dir': '/var/tmp/cloud-init'}], calls)
+
+ def test_mkdtemp_default_root(self):
+ """mkdtemp creates a dir under /run/cloud-init for the privileged."""
+ calls = []
+
+ def fake_mkdtemp(*args, **kwargs):
+ calls.append(kwargs)
+ return '/fake/return/path'
+
+ retval = wrap_and_call(
+ 'cloudinit.temp_utils',
+ {'os.getuid': 0,
+ 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp},
+ '_TMPDIR': {'new': None},
+ 'os.path.isdir': True},
+ mkdtemp)
+ self.assertEqual('/fake/return/path', retval)
+ self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls)
+
+ def test_mkstemp_default_non_root(self):
+ """mkstemp creates secure tempfile under /tmp for the unprivileged."""
+ calls = []
+
+ def fake_mkstemp(*args, **kwargs):
+ calls.append(kwargs)
+ return '/fake/return/path'
+
+ retval = wrap_and_call(
+ 'cloudinit.temp_utils',
+ {'os.getuid': 1000,
+ 'tempfile.mkstemp': {'side_effect': fake_mkstemp},
+ '_TMPDIR': {'new': None},
+ 'os.path.isdir': True},
+ mkstemp)
+ self.assertEqual('/fake/return/path', retval)
+ self.assertEqual([{'dir': '/tmp'}], calls)
+
+ def test_mkstemp_default_root(self):
+ """mkstemp creates a secure tempfile in /run/cloud-init for root."""
+ calls = []
+
+ def fake_mkstemp(*args, **kwargs):
+ calls.append(kwargs)
+ return '/fake/return/path'
+
+ retval = wrap_and_call(
+ 'cloudinit.temp_utils',
+ {'os.getuid': 0,
+ 'tempfile.mkstemp': {'side_effect': fake_mkstemp},
+ '_TMPDIR': {'new': None},
+ 'os.path.isdir': True},
+ mkstemp)
+ self.assertEqual('/fake/return/path', retval)
+ self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls)
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py
new file mode 100644
index 00000000..b778a3a7
--- /dev/null
+++ b/cloudinit/tests/test_url_helper.py
@@ -0,0 +1,40 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.url_helper import oauth_headers
+from cloudinit.tests.helpers import CiTestCase, mock, skipIf
+
+
+try:
+ import oauthlib
+ assert oauthlib # avoid pyflakes error F401: import unused
+ _missing_oauthlib_dep = False
+except ImportError:
+ _missing_oauthlib_dep = True
+
+
+class TestOAuthHeaders(CiTestCase):
+
+    def test_oauth_headers_raises_not_implemented_when_oauthlib_missing(self):
+        """oauth_headers raises NotImplementedError when oauthlib is absent."""
+ with mock.patch.dict('sys.modules', {'oauthlib': None}):
+ with self.assertRaises(NotImplementedError) as context_manager:
+ oauth_headers(1, 2, 3, 4, 5)
+ self.assertEqual(
+ 'oauth support is not available',
+ str(context_manager.exception))
+
+ @skipIf(_missing_oauthlib_dep, "No python-oauthlib dependency")
+ @mock.patch('oauthlib.oauth1.Client')
+ def test_oauth_headers_calls_oathlibclient_when_available(self, m_client):
+ """oauth_headers calls oaut1.hClient.sign with the provided url."""
+ class fakeclient(object):
+ def sign(self, url):
+                # The first and third items of the client.sign tuple are
+                # ignored
+ return ('junk', url, 'junk2')
+
+ m_client.return_value = fakeclient()
+
+ return_value = oauth_headers(
+ 'url', 'consumer_key', 'token_key', 'token_secret',
+ 'consumer_secret')
+ self.assertEqual('url', return_value)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 7cf76aae..0e0f5b4c 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -17,7 +17,6 @@ import time
from email.utils import parsedate
from functools import partial
-import oauthlib.oauth1 as oauth1
from requests import exceptions
from six.moves.urllib.parse import (
@@ -488,6 +487,11 @@ class OauthUrlHelper(object):
def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
timestamp=None):
+ try:
+ import oauthlib.oauth1 as oauth1
+ except ImportError:
+ raise NotImplementedError('oauth support is not available')
+
if timestamp:
timestamp = str(timestamp)
else:
diff --git a/cloudinit/util.py b/cloudinit/util.py
index ce2c6034..e1290aa8 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -12,7 +12,6 @@ import contextlib
import copy as obj_copy
import ctypes
import email
-import errno
import glob
import grp
import gzip
@@ -31,9 +30,10 @@ import stat
import string
import subprocess
import sys
-import tempfile
import time
+from errno import ENOENT, ENOEXEC
+
from base64 import b64decode, b64encode
from six.moves.urllib import parse as urlparse
@@ -44,6 +44,7 @@ from cloudinit import importer
from cloudinit import log as logging
from cloudinit import mergers
from cloudinit import safeyaml
+from cloudinit import temp_utils
from cloudinit import type_utils
from cloudinit import url_helper
from cloudinit import version
@@ -239,7 +240,10 @@ class ProcessExecutionError(IOError):
self.cmd = cmd
if not description:
- self.description = 'Unexpected error while running command.'
+ if not exit_code and errno == ENOEXEC:
+ self.description = 'Exec format error. Missing #! in script?'
+ else:
+ self.description = 'Unexpected error while running command.'
else:
self.description = description
@@ -345,26 +349,6 @@ class DecompressionError(Exception):
pass
-def ExtendedTemporaryFile(**kwargs):
- fh = tempfile.NamedTemporaryFile(**kwargs)
- # Replace its unlink with a quiet version
- # that does not raise errors when the
- # file to unlink has been unlinked elsewhere..
- LOG.debug("Created temporary file %s", fh.name)
- fh.unlink = del_file
-
- # Add a new method that will unlink
- # right 'now' but still lets the exit
- # method attempt to remove it (which will
- # not throw due to our del file being quiet
- # about files that are not there)
- def unlink_now():
- fh.unlink(fh.name)
-
- setattr(fh, 'unlink_now', unlink_now)
- return fh
-
-
def fork_cb(child_cb, *args, **kwargs):
fid = os.fork()
if fid == 0:
@@ -433,7 +417,7 @@ def read_conf(fname):
try:
return load_yaml(load_file(fname), default={})
except IOError as e:
- if e.errno == errno.ENOENT:
+ if e.errno == ENOENT:
return {}
else:
raise
@@ -614,6 +598,8 @@ def system_info():
var = 'ubuntu'
elif linux_dist == 'redhat':
var = 'rhel'
+ elif linux_dist == 'suse':
+ var = 'suse'
else:
var = 'linux'
elif system in ('windows', 'darwin', "freebsd"):
@@ -786,18 +772,6 @@ def umask(n_msk):
os.umask(old)
-@contextlib.contextmanager
-def tempdir(**kwargs):
- # This seems like it was only added in python 3.2
- # Make it since its useful...
- # See: http://bugs.python.org/file12970/tempdir.patch
- tdir = tempfile.mkdtemp(**kwargs)
- try:
- yield tdir
- finally:
- del_dir(tdir)
-
-
def center(text, fill, max_len):
return '{0:{fill}{align}{size}}'.format(text, fill=fill,
align="^", size=max_len)
@@ -901,7 +875,7 @@ def read_file_or_url(url, timeout=5, retries=10,
contents = load_file(file_path, decode=False)
except IOError as e:
code = e.errno
- if e.errno == errno.ENOENT:
+ if e.errno == ENOENT:
code = url_helper.NOT_FOUND
raise url_helper.UrlError(cause=e, code=code, headers=None,
url=url)
@@ -1247,7 +1221,7 @@ def find_devs_with(criteria=None, oformat='device',
try:
(out, _err) = subp(cmd, rcs=[0, 2])
except ProcessExecutionError as e:
- if e.errno == errno.ENOENT:
+ if e.errno == ENOENT:
# blkid not found...
out = ""
else:
@@ -1285,7 +1259,7 @@ def load_file(fname, read_cb=None, quiet=False, decode=True):
except IOError as e:
if not quiet:
raise
- if e.errno != errno.ENOENT:
+ if e.errno != ENOENT:
raise
contents = ofh.getvalue()
LOG.debug("Read %s bytes from %s", len(contents), fname)
@@ -1583,7 +1557,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
mtypes = ['']
mounted = mounts()
- with tempdir() as tmpd:
+ with temp_utils.tempdir() as tmpd:
umount = False
if os.path.realpath(device) in mounted:
mountpoint = mounted[os.path.realpath(device)]['mountpoint']
@@ -1653,7 +1627,7 @@ def del_file(path):
try:
os.unlink(path)
except OSError as e:
- if e.errno != errno.ENOENT:
+ if e.errno != ENOENT:
raise e
@@ -1770,6 +1744,31 @@ def delete_dir_contents(dirname):
del_file(node_fullpath)
+def subp_blob_in_tempfile(blob, *args, **kwargs):
+ """Write blob to a tempfile, and call subp with args, kwargs. Then cleanup.
+
+ 'basename' as a kwarg allows providing the basename for the file.
+ The 'args' argument to subp will be updated with the full path to the
+ filename as the first argument.
+ """
+ basename = kwargs.pop('basename', "subp_blob")
+
+ if len(args) == 0 and 'args' not in kwargs:
+ args = [tuple()]
+
+ # Use tmpdir over tmpfile to avoid 'text file busy' on execute
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ tmpf = os.path.join(tmpd, basename)
+ if 'args' in kwargs:
+ kwargs['args'] = [tmpf] + list(kwargs['args'])
+ else:
+ args = list(args)
+ args[0] = [tmpf] + args[0]
+
+ write_file(tmpf, blob, mode=0o700)
+ return subp(*args, **kwargs)
+
+
def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
logstring=False, decode="replace", target=None, update_env=None):
@@ -2281,7 +2280,7 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
try:
ret[f] = load_file(base + delim + f, quiet=False, decode=False)
except IOError as e:
- if e.errno != errno.ENOENT:
+ if e.errno != ENOENT:
raise
if f in required:
missing.append(f)
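
A hedged usage sketch of subp_blob_in_tempfile(); the temp file is written with mode 0700 and becomes the first argument to subp:

    # Hypothetical usage; relies on subp's default capture=True.
    script = b'#!/bin/sh\necho hello\n'
    (out, _err) = util.subp_blob_in_tempfile(script, basename='hello.sh')
    assert out == 'hello\n'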
diff --git a/cloudinit/version.py b/cloudinit/version.py
index dff4af04..3255f399 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "0.7.9"
+__VERSION__ = "17.1"
FEATURES = [
# supports network config version 1