author    Chad Smith <chad.smith@canonical.com>  2018-10-03 12:10:23 -0600
committer Chad Smith <chad.smith@canonical.com>  2018-10-03 12:10:23 -0600
commit    d6347e1c439eda7f43d9620dac2b461e980e1ae9 (patch)
tree      08410263488d11a2a29edcc620575ed1b028100e /cloudinit
parent    564793a76b9c9add1ee81bab4919c8dccd45a33d (diff)
parent    e28000457591bde9f22d6b7a538b1fc33349d780 (diff)
merge from master at 18.4
Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/analyze/tests/test_dump.py | 86
-rw-r--r--  cloudinit/apport.py | 1
-rw-r--r--  cloudinit/cloud.py | 4
-rw-r--r--  cloudinit/cmd/devel/__init__.py | 25
-rwxr-xr-x  cloudinit/cmd/devel/net_convert.py | 132
-rw-r--r--  cloudinit/cmd/devel/parser.py | 23
-rwxr-xr-x  cloudinit/cmd/devel/render.py | 85
-rw-r--r--  cloudinit/cmd/devel/tests/test_render.py | 101
-rw-r--r--  cloudinit/cmd/main.py | 34
-rw-r--r--  cloudinit/cmd/query.py | 155
-rw-r--r--  cloudinit/cmd/tests/test_main.py | 4
-rw-r--r--  cloudinit/cmd/tests/test_query.py | 193
-rw-r--r--  cloudinit/cmd/tests/test_status.py | 6
-rw-r--r--  cloudinit/config/cc_lxd.py | 21
-rw-r--r--  cloudinit/config/cc_rh_subscription.py | 43
-rwxr-xr-x  cloudinit/config/cc_ssh.py | 7
-rw-r--r--  cloudinit/config/cc_users_groups.py | 41
-rw-r--r--  cloudinit/config/tests/test_snap.py | 7
-rw-r--r--  cloudinit/config/tests/test_ssh.py | 151
-rw-r--r--  cloudinit/config/tests/test_ubuntu_advantage.py | 7
-rw-r--r--  cloudinit/config/tests/test_users_groups.py | 144
-rw-r--r-- [-rwxr-xr-x]  cloudinit/distros/__init__.py | 36
-rw-r--r--  cloudinit/distros/debian.py | 5
-rw-r--r--  cloudinit/distros/net_util.py | 19
-rw-r--r--  cloudinit/distros/opensuse.py | 60
-rw-r--r--  cloudinit/distros/rhel.py | 59
-rw-r--r--  cloudinit/handlers/__init__.py | 11
-rw-r--r--  cloudinit/handlers/boot_hook.py | 12
-rw-r--r--  cloudinit/handlers/cloud_config.py | 15
-rw-r--r--  cloudinit/handlers/jinja_template.py | 137
-rw-r--r--  cloudinit/handlers/shell_script.py | 9
-rw-r--r--  cloudinit/handlers/upstart_job.py | 9
-rw-r--r--  cloudinit/helpers.py | 8
-rw-r--r--  cloudinit/log.py | 12
-rw-r--r--  cloudinit/net/__init__.py | 45
-rw-r--r--  cloudinit/net/eni.py | 13
-rw-r--r--  cloudinit/net/netplan.py | 6
-rw-r--r--  cloudinit/net/network_state.py | 4
-rw-r--r--  cloudinit/net/renderer.py | 9
-rw-r--r--  cloudinit/net/sysconfig.py | 92
-rw-r--r--  cloudinit/net/tests/test_init.py | 13
-rw-r--r--  cloudinit/reporting/__init__.py | 8
-rw-r--r--  cloudinit/reporting/handlers.py | 246
-rw-r--r--  cloudinit/settings.py | 3
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 24
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 256
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 2
-rw-r--r--  cloudinit/sources/DataSourceIBMCloud.py | 13
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 2
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 9
-rw-r--r--  cloudinit/sources/DataSourceOracle.py | 233
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py | 54
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 31
-rw-r--r--  cloudinit/sources/__init__.py | 134
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 38
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py | 2
-rw-r--r--  cloudinit/sources/tests/test_init.py | 192
-rw-r--r--  cloudinit/sources/tests/test_oracle.py | 331
-rw-r--r--  cloudinit/ssh_util.py | 6
-rw-r--r--  cloudinit/stages.py | 26
-rw-r--r--  cloudinit/templater.py | 28
-rw-r--r--  cloudinit/tests/helpers.py | 110
-rw-r--r--  cloudinit/tests/test_util.py | 78
-rw-r--r--  cloudinit/util.py | 34
-rw-r--r--  cloudinit/version.py | 2
-rw-r--r--  cloudinit/warnings.py | 2
66 files changed, 3242 insertions(+), 466 deletions(-)
diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
index f4c42841..db2a667b 100644
--- a/cloudinit/analyze/tests/test_dump.py
+++ b/cloudinit/analyze/tests/test_dump.py
@@ -5,8 +5,8 @@ from textwrap import dedent
from cloudinit.analyze.dump import (
dump_events, parse_ci_logline, parse_timestamp)
-from cloudinit.util import subp, write_file
-from cloudinit.tests.helpers import CiTestCase
+from cloudinit.util import which, write_file
+from cloudinit.tests.helpers import CiTestCase, mock, skipIf
class TestParseTimestamp(CiTestCase):
@@ -15,21 +15,9 @@ class TestParseTimestamp(CiTestCase):
"""Logs with cloud-init detailed formats will be properly parsed."""
trusty_fmt = '%Y-%m-%d %H:%M:%S,%f'
trusty_stamp = '2016-09-12 14:39:20,839'
-
- parsed = parse_timestamp(trusty_stamp)
-
- # convert ourselves
dt = datetime.strptime(trusty_stamp, trusty_fmt)
- expected = float(dt.strftime('%s.%f'))
-
- # use date(1)
- out, _err = subp(['date', '+%s.%3N', '-d', trusty_stamp])
- timestamp = out.strip()
- date_ts = float(timestamp)
-
- self.assertEqual(expected, parsed)
- self.assertEqual(expected, date_ts)
- self.assertEqual(date_ts, parsed)
+ self.assertEqual(
+ float(dt.strftime('%s.%f')), parse_timestamp(trusty_stamp))
def test_parse_timestamp_handles_syslog_adding_year(self):
"""Syslog timestamps lack a year. Add year and properly parse."""
@@ -39,17 +27,9 @@ class TestParseTimestamp(CiTestCase):
# convert stamp ourselves by adding the missing year value
year = datetime.now().year
dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt)
- expected = float(dt.strftime('%s.%f'))
- parsed = parse_timestamp(syslog_stamp)
-
- # use date(1)
- out, _ = subp(['date', '+%s.%3N', '-d', syslog_stamp])
- timestamp = out.strip()
- date_ts = float(timestamp)
-
- self.assertEqual(expected, parsed)
- self.assertEqual(expected, date_ts)
- self.assertEqual(date_ts, parsed)
+ self.assertEqual(
+ float(dt.strftime('%s.%f')),
+ parse_timestamp(syslog_stamp))
def test_parse_timestamp_handles_journalctl_format_adding_year(self):
"""Journalctl precise timestamps lack a year. Add year and parse."""
@@ -59,37 +39,22 @@ class TestParseTimestamp(CiTestCase):
# convert stamp ourselves by adding the missing year value
year = datetime.now().year
dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt)
- expected = float(dt.strftime('%s.%f'))
- parsed = parse_timestamp(journal_stamp)
-
- # use date(1)
- out, _ = subp(['date', '+%s.%6N', '-d', journal_stamp])
- timestamp = out.strip()
- date_ts = float(timestamp)
-
- self.assertEqual(expected, parsed)
- self.assertEqual(expected, date_ts)
- self.assertEqual(date_ts, parsed)
+ self.assertEqual(
+ float(dt.strftime('%s.%f')), parse_timestamp(journal_stamp))
+ @skipIf(not which("date"), "'date' command not available.")
def test_parse_unexpected_timestamp_format_with_date_command(self):
- """Dump sends unexpected timestamp formats to data for processing."""
+ """Dump sends unexpected timestamp formats to date for processing."""
new_fmt = '%H:%M %m/%d %Y'
new_stamp = '17:15 08/08'
-
# convert stamp ourselves by adding the missing year value
year = datetime.now().year
dt = datetime.strptime(new_stamp + " " + str(year), new_fmt)
- expected = float(dt.strftime('%s.%f'))
- parsed = parse_timestamp(new_stamp)
# use date(1)
- out, _ = subp(['date', '+%s.%6N', '-d', new_stamp])
- timestamp = out.strip()
- date_ts = float(timestamp)
-
- self.assertEqual(expected, parsed)
- self.assertEqual(expected, date_ts)
- self.assertEqual(date_ts, parsed)
+ with self.allow_subp(["date"]):
+ self.assertEqual(
+ float(dt.strftime('%s.%f')), parse_timestamp(new_stamp))
class TestParseCILogLine(CiTestCase):
@@ -135,7 +100,9 @@ class TestParseCILogLine(CiTestCase):
'timestamp': timestamp}
self.assertEqual(expected, parse_ci_logline(line))
- def test_parse_logline_returns_event_for_finish_events(self):
+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
+ def test_parse_logline_returns_event_for_finish_events(self,
+ m_parse_from_date):
"""parse_ci_logline returns a finish event for a parsed log line."""
line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]'
' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running'
@@ -147,7 +114,10 @@ class TestParseCILogLine(CiTestCase):
'origin': 'cloudinit',
'result': 'SUCCESS',
'timestamp': 1472594005.972}
+ m_parse_from_date.return_value = "1472594005.972"
self.assertEqual(expected, parse_ci_logline(line))
+ m_parse_from_date.assert_has_calls(
+ [mock.call("2016-08-30 21:53:25.972325+00:00")])
SAMPLE_LOGS = dedent("""\
@@ -162,10 +132,16 @@ Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
class TestDumpEvents(CiTestCase):
maxDiff = None
- def test_dump_events_with_rawdata(self):
+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
+ def test_dump_events_with_rawdata(self, m_parse_from_date):
"""Rawdata is split and parsed into a tuple of events and data"""
+ m_parse_from_date.return_value = "1472594005.972"
events, data = dump_events(rawdata=SAMPLE_LOGS)
expected_data = SAMPLE_LOGS.splitlines()
+ self.assertEqual(
+ [mock.call("2016-08-30 21:53:25.972325+00:00")],
+ m_parse_from_date.call_args_list)
+ self.assertEqual(expected_data, data)
year = datetime.now().year
dt1 = datetime.strptime(
'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
@@ -183,12 +159,14 @@ class TestDumpEvents(CiTestCase):
'result': 'SUCCESS',
'timestamp': 1472594005.972}]
self.assertEqual(expected_events, events)
- self.assertEqual(expected_data, data)
- def test_dump_events_with_cisource(self):
+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
+ def test_dump_events_with_cisource(self, m_parse_from_date):
"""Cisource file is read and parsed into a tuple of events and data."""
tmpfile = self.tmp_path('logfile')
write_file(tmpfile, SAMPLE_LOGS)
+ m_parse_from_date.return_value = 1472594005.972
+
events, data = dump_events(cisource=open(tmpfile))
year = datetime.now().year
dt1 = datetime.strptime(
@@ -208,3 +186,5 @@ class TestDumpEvents(CiTestCase):
'timestamp': 1472594005.972}]
self.assertEqual(expected_events, events)
self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data])
+ m_parse_from_date.assert_has_calls(
+ [mock.call("2016-08-30 21:53:25.972325+00:00")])
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 130ff269..22cb7fde 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -30,6 +30,7 @@ KNOWN_CLOUD_NAMES = [
'NoCloud',
'OpenNebula',
'OpenStack',
+ 'Oracle',
'OVF',
'OpenTelekomCloud',
'Scaleway',
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 6d12c437..7ae98e1c 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -47,7 +47,7 @@ class Cloud(object):
@property
def cfg(self):
- # Ensure that not indirectly modified
+ # Ensure that cfg is not indirectly modified
return copy.deepcopy(self._cfg)
def run(self, name, functor, args, freq=None, clear_on_fail=False):
@@ -61,7 +61,7 @@ class Cloud(object):
return None
return fn
- # The rest of thes are just useful proxies
+ # The rest of these are just useful proxies
def get_userdata(self, apply_filter=True):
return self.datasource.get_userdata(apply_filter)
diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py
index e69de29b..3ae28b69 100644
--- a/cloudinit/cmd/devel/__init__.py
+++ b/cloudinit/cmd/devel/__init__.py
@@ -0,0 +1,25 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Common cloud-init devel commandline utility functions."""
+
+
+import logging
+
+from cloudinit import log
+from cloudinit.stages import Init
+
+
+def addLogHandlerCLI(logger, log_level):
+ """Add a commandline logging handler to emit messages to stderr."""
+ formatter = logging.Formatter('%(levelname)s: %(message)s')
+ log.setupBasicLogging(log_level, formatter=formatter)
+ return logger
+
+
+def read_cfg_paths():
+ """Return a Paths object based on the system configuration on disk."""
+ init = Init(ds_deps=[])
+ init.read_cfg()
+ return init.paths
+
+# vi: ts=4 expandtab
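
These two helpers are shared plumbing for the devel subcommands added below. A minimal sketch of how a subcommand is expected to use them (assuming cloud-init is importable; the subcommand itself is hypothetical):

    from cloudinit import log
    from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths

    LOG = log.getLogger('example')

    def handle_args(name, args):
        # Route WARNING-and-above messages to stderr for interactive use.
        addLogHandlerCLI(LOG, log.WARNING)
        # Resolve run_dir and related paths from on-disk system config.
        paths = read_cfg_paths()
        LOG.warning('run_dir resolved to %s', paths.run_dir)
        return 0
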
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
new file mode 100755
index 00000000..a0f58a0a
--- /dev/null
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -0,0 +1,132 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Debug network config format conversions."""
+import argparse
+import json
+import os
+import sys
+import yaml
+
+from cloudinit.sources.helpers import openstack
+from cloudinit.sources import DataSourceAzure as azure
+
+from cloudinit import distros
+from cloudinit.net import eni, netplan, network_state, sysconfig
+from cloudinit import log
+
+NAME = 'net-convert'
+
+
+def get_parser(parser=None):
+ """Build or extend and arg parser for net-convert utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ subcommand which will be extended to support the args of this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
+ parser.add_argument("-p", "--network-data", type=open,
+ metavar="PATH", required=True)
+ parser.add_argument("-k", "--kind",
+ choices=['eni', 'network_data.json', 'yaml',
+ 'azure-imds'],
+ required=True)
+ parser.add_argument("-d", "--directory",
+ metavar="PATH",
+ help="directory to place output in",
+ required=True)
+ parser.add_argument("-D", "--distro",
+ choices=[item for sublist in
+ distros.OSFAMILIES.values()
+ for item in sublist],
+ required=True)
+ parser.add_argument("-m", "--mac",
+ metavar="name,mac",
+ action='append',
+ help="interface name to mac mapping")
+ parser.add_argument("--debug", action='store_true',
+ help='enable debug logging to stderr.')
+ parser.add_argument("-O", "--output-kind",
+ choices=['eni', 'netplan', 'sysconfig'],
+ required=True)
+ return parser
+
+
+def handle_args(name, args):
+ if not args.directory.endswith("/"):
+ args.directory += "/"
+
+ if not os.path.isdir(args.directory):
+ os.makedirs(args.directory)
+
+ if args.debug:
+ log.setupBasicLogging(level=log.DEBUG)
+ else:
+ log.setupBasicLogging(level=log.WARN)
+ if args.mac:
+ known_macs = {}
+ for item in args.mac:
+ iface_name, iface_mac = item.split(",", 1)
+ known_macs[iface_mac] = iface_name
+ else:
+ known_macs = None
+
+ net_data = args.network_data.read()
+ if args.kind == "eni":
+ pre_ns = eni.convert_eni_data(net_data)
+ ns = network_state.parse_net_config_data(pre_ns)
+ elif args.kind == "yaml":
+ pre_ns = yaml.load(net_data)
+ if 'network' in pre_ns:
+ pre_ns = pre_ns.get('network')
+ if args.debug:
+ sys.stderr.write('\n'.join(
+ ["Input YAML",
+ yaml.dump(pre_ns, default_flow_style=False, indent=4), ""]))
+ ns = network_state.parse_net_config_data(pre_ns)
+ elif args.kind == 'network_data.json':
+ pre_ns = openstack.convert_net_json(
+ json.loads(net_data), known_macs=known_macs)
+ ns = network_state.parse_net_config_data(pre_ns)
+ elif args.kind == 'azure-imds':
+ pre_ns = azure.parse_network_config(json.loads(net_data))
+ ns = network_state.parse_net_config_data(pre_ns)
+
+ if not ns:
+ raise RuntimeError("No valid network_state object created from"
+ "input data")
+
+ if args.debug:
+ sys.stderr.write('\n'.join([
+ "", "Internal State",
+ yaml.dump(ns, default_flow_style=False, indent=4), ""]))
+ distro_cls = distros.fetch(args.distro)
+ distro = distro_cls(args.distro, {}, None)
+ config = {}
+ if args.output_kind == "eni":
+ r_cls = eni.Renderer
+ config = distro.renderer_configs.get('eni')
+ elif args.output_kind == "netplan":
+ r_cls = netplan.Renderer
+ config = distro.renderer_configs.get('netplan')
+ else:
+ r_cls = sysconfig.Renderer
+ config = distro.renderer_configs.get('sysconfig')
+
+ r = r_cls(config=config)
+ sys.stderr.write(''.join([
+ "Read input format '%s' from '%s'.\n" % (
+ args.kind, args.network_data.name),
+ "Wrote output format '%s' to '%s'\n" % (
+ args.output_kind, args.directory)]) + "\n")
+ r.render_network_state(network_state=ns, target=args.directory)
+
+
+if __name__ == '__main__':
+ args = get_parser().parse_args()
+ handle_args(NAME, args)
+
+
+# vi: ts=4 expandtab
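
The tool normalizes any supported input format into a network_state object, then renders it with the chosen distro's renderer config. A sketch of driving it from Python rather than the CLI (the input file name is hypothetical and must exist, since --network-data uses type=open; output files land in out/):

    from cloudinit.cmd.devel import net_convert

    # Equivalent to:
    #   cloud-init devel net-convert -p ens3.yaml -k yaml \
    #       -d out/ -D ubuntu -O netplan
    parser = net_convert.get_parser()
    args = parser.parse_args([
        '--network-data', 'ens3.yaml', '--kind', 'yaml',
        '--directory', 'out/', '--distro', 'ubuntu',
        '--output-kind', 'netplan'])
    net_convert.handle_args(net_convert.NAME, args)
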
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index acacc4ed..99a234ce 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -5,8 +5,10 @@
"""Define 'devel' subcommand argument parsers to include in cloud-init cmd."""
import argparse
-from cloudinit.config.schema import (
- get_parser as schema_parser, handle_schema_args)
+from cloudinit.config import schema
+
+from . import net_convert
+from . import render
def get_parser(parser=None):
@@ -17,10 +19,17 @@ def get_parser(parser=None):
subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
subparsers.required = True
- parser_schema = subparsers.add_parser(
- 'schema', help='Validate cloud-config files or document schema')
- # Construct schema subcommand parser
- schema_parser(parser_schema)
- parser_schema.set_defaults(action=('schema', handle_schema_args))
+ subcmds = [
+ ('schema', 'Validate cloud-config files for document schema',
+ schema.get_parser, schema.handle_schema_args),
+ (net_convert.NAME, net_convert.__doc__,
+ net_convert.get_parser, net_convert.handle_args),
+ (render.NAME, render.__doc__,
+ render.get_parser, render.handle_args)
+ ]
+ for (subcmd, helpmsg, get_parser, handler) in subcmds:
+ parser = subparsers.add_parser(subcmd, help=helpmsg)
+ get_parser(parser)
+ parser.set_defaults(action=(subcmd, handler))
return parser
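
The refactor turns a single hand-built subparser into a data-driven loop over (name, help, get_parser, handler) tuples, so a new devel subcommand only needs one more list entry. The same pattern in self-contained form (the parser and handler bodies are hypothetical):

    import argparse

    def example_parser(parser):
        parser.add_argument('--debug', action='store_true')
        return parser

    def example_handler(name, args):
        print('ran %s (debug=%s)' % (name, args.debug))
        return 0

    parser = argparse.ArgumentParser(prog='devel')
    subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
    subparsers.required = True
    subcmds = [('render', 'Render a jinja template',
                example_parser, example_handler)]
    for (subcmd, helpmsg, get_parser, handler) in subcmds:
        subparser = subparsers.add_parser(subcmd, help=helpmsg)
        get_parser(subparser)  # each subcommand attaches its own args
        subparser.set_defaults(action=(subcmd, handler))

    args = parser.parse_args(['render', '--debug'])
    name, handler = args.action
    handler(name, args)
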
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
new file mode 100755
index 00000000..2ba6b681
--- /dev/null
+++ b/cloudinit/cmd/devel/render.py
@@ -0,0 +1,85 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Debug jinja template rendering of user-data."""
+
+import argparse
+import os
+import sys
+
+from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
+from cloudinit import log
+from cloudinit.sources import INSTANCE_JSON_FILE
+from . import addLogHandlerCLI, read_cfg_paths
+
+NAME = 'render'
+DEFAULT_INSTANCE_DATA = '/run/cloud-init/instance-data.json'
+
+LOG = log.getLogger(NAME)
+
+
+def get_parser(parser=None):
+ """Build or extend and arg parser for jinja render utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ subcommand which will be extended to support the args of this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
+ parser.add_argument(
+ 'user_data', type=str, help='Path to the user-data file to render')
+ parser.add_argument(
+ '-i', '--instance-data', type=str,
+ help=('Optional path to instance-data.json file. Defaults to'
+ ' /run/cloud-init/instance-data.json'))
+ parser.add_argument('-d', '--debug', action='store_true', default=False,
+ help='Add verbose messages during template render')
+ return parser
+
+
+def handle_args(name, args):
+ """Render the provided user-data template file using instance-data values.
+
+ Also set up CLI log handlers to report to stderr, since this is a
+ development utility which should be run by a human on the CLI.
+
+ @return 0 on success, 1 on failure.
+ """
+ addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
+ if not args.instance_data:
+ paths = read_cfg_paths()
+ instance_data_fn = os.path.join(
+ paths.run_dir, INSTANCE_JSON_FILE)
+ else:
+ instance_data_fn = args.instance_data
+ if not os.path.exists(instance_data_fn):
+ LOG.error('Missing instance-data.json file: %s', instance_data_fn)
+ return 1
+ try:
+ with open(args.user_data) as stream:
+ user_data = stream.read()
+ except IOError:
+ LOG.error('Missing user-data file: %s', args.user_data)
+ return 1
+ rendered_payload = render_jinja_payload_from_file(
+ payload=user_data, payload_fn=args.user_data,
+ instance_data_file=instance_data_fn,
+ debug=True if args.debug else False)
+ if not rendered_payload:
+ LOG.error('Unable to render user-data file: %s', args.user_data)
+ return 1
+ sys.stdout.write(rendered_payload)
+ return 0
+
+
+def main():
+ args = get_parser().parse_args()
+ return(handle_args(NAME, args))
+
+
+if __name__ == '__main__':
+ sys.exit(main())
+
+
+# vi: ts=4 expandtab
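
A short sketch of exercising handle_args directly, mirroring the new unit tests below (file names are hypothetical; jinja2 must be installed, and the user-data file must begin with the '##template: jinja' header for rendering to kick in):

    from collections import namedtuple
    from cloudinit.cmd.devel import render

    Args = namedtuple('Args', 'user_data instance_data debug')
    rc = render.handle_args('render', Args(
        user_data='user-data.tmpl',
        instance_data='instance-data.json',
        debug=False))
    print('exit status: %d' % rc)  # rendered payload went to stdout
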
diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py
new file mode 100644
index 00000000..fc5d2c0d
--- /dev/null
+++ b/cloudinit/cmd/devel/tests/test_render.py
@@ -0,0 +1,101 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from six import StringIO
+import os
+
+from collections import namedtuple
+from cloudinit.cmd.devel import render
+from cloudinit.helpers import Paths
+from cloudinit.sources import INSTANCE_JSON_FILE
+from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja
+from cloudinit.util import ensure_dir, write_file
+
+
+class TestRender(CiTestCase):
+
+ with_logs = True
+
+ args = namedtuple('renderargs', 'user_data instance_data debug')
+
+ def setUp(self):
+ super(TestRender, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_handle_args_error_on_missing_user_data(self):
+ """When user_data file path does not exist, log an error."""
+ absent_file = self.tmp_path('user-data', dir=self.tmp)
+ instance_data = self.tmp_path('instance-data', dir=self.tmp)
+ write_file(instance_data, '{}')
+ args = self.args(
+ user_data=absent_file, instance_data=instance_data, debug=False)
+ with mock.patch('sys.stderr', new_callable=StringIO):
+ self.assertEqual(1, render.handle_args('anyname', args))
+ self.assertIn(
+ 'Missing user-data file: %s' % absent_file,
+ self.logs.getvalue())
+
+ def test_handle_args_error_on_missing_instance_data(self):
+ """When instance_data file path does not exist, log an error."""
+ user_data = self.tmp_path('user-data', dir=self.tmp)
+ absent_file = self.tmp_path('instance-data', dir=self.tmp)
+ args = self.args(
+ user_data=user_data, instance_data=absent_file, debug=False)
+ with mock.patch('sys.stderr', new_callable=StringIO):
+ self.assertEqual(1, render.handle_args('anyname', args))
+ self.assertIn(
+ 'Missing instance-data.json file: %s' % absent_file,
+ self.logs.getvalue())
+
+ def test_handle_args_defaults_instance_data(self):
+ """When no instance_data argument, default to configured run_dir."""
+ user_data = self.tmp_path('user-data', dir=self.tmp)
+ run_dir = self.tmp_path('run_dir', dir=self.tmp)
+ ensure_dir(run_dir)
+ paths = Paths({'run_dir': run_dir})
+ self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
+ self.m_paths.return_value = paths
+ args = self.args(
+ user_data=user_data, instance_data=None, debug=False)
+ with mock.patch('sys.stderr', new_callable=StringIO):
+ self.assertEqual(1, render.handle_args('anyname', args))
+ json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
+ self.assertIn(
+ 'Missing instance-data.json file: %s' % json_file,
+ self.logs.getvalue())
+
+ @skipUnlessJinja()
+ def test_handle_args_renders_instance_data_vars_in_template(self):
+ """If user_data file is a jinja template render instance-data vars."""
+ user_data = self.tmp_path('user-data', dir=self.tmp)
+ write_file(user_data, '##template: jinja\nrendering: {{ my_var }}')
+ instance_data = self.tmp_path('instance-data', dir=self.tmp)
+ write_file(instance_data, '{"my-var": "jinja worked"}')
+ args = self.args(
+ user_data=user_data, instance_data=instance_data, debug=True)
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_console_err:
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, render.handle_args('anyname', args))
+ self.assertIn(
+ 'DEBUG: Converted jinja variables\n{', self.logs.getvalue())
+ self.assertIn(
+ 'DEBUG: Converted jinja variables\n{', m_console_err.getvalue())
+ self.assertEqual('rendering: jinja worked', m_stdout.getvalue())
+
+ @skipUnlessJinja()
+ def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self):
+ """If user_data file has invalid jinja operations log warnings."""
+ user_data = self.tmp_path('user-data', dir=self.tmp)
+ write_file(user_data, '##template: jinja\nrendering: {{ my-var }}')
+ instance_data = self.tmp_path('instance-data', dir=self.tmp)
+ write_file(instance_data, '{"my-var": "jinja worked"}')
+ args = self.args(
+ user_data=user_data, instance_data=instance_data, debug=True)
+ with mock.patch('sys.stderr', new_callable=StringIO):
+ self.assertEqual(1, render.handle_args('anyname', args))
+ self.assertIn(
+ 'WARNING: Ignoring jinja template for %s: Undefined jinja'
+ ' variable: "my-var". Jinja tried subtraction. Perhaps you meant'
+ ' "my_var"?' % user_data,
+ self.logs.getvalue())
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index d6ba90f4..5a437020 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -315,7 +315,7 @@ def main_init(name, args):
existing = "trust"
init.purge_cache()
- # Delete the non-net file as well
+ # Delete the no-net file as well
util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
# Stage 5
@@ -339,7 +339,7 @@ def main_init(name, args):
" Likely bad things to come!"))
if not args.force:
init.apply_network_config(bring_up=not args.local)
- LOG.debug("[%s] Exiting without datasource in local mode", mode)
+ LOG.debug("[%s] Exiting without datasource", mode)
if mode == sources.DSMODE_LOCAL:
return (None, [])
else:
@@ -348,6 +348,7 @@ def main_init(name, args):
LOG.debug("[%s] barreling on in force mode without datasource",
mode)
+ _maybe_persist_instance_data(init)
# Stage 6
iid = init.instancify()
LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
@@ -490,6 +491,7 @@ def main_modules(action_name, args):
print_exc(msg)
if not args.force:
return [(msg)]
+ _maybe_persist_instance_data(init)
# Stage 3
mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
# Stage 4
@@ -541,6 +543,7 @@ def main_single(name, args):
" likely bad things to come!"))
if not args.force:
return 1
+ _maybe_persist_instance_data(init)
# Stage 3
mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
mod_args = args.module_args
@@ -688,6 +691,15 @@ def status_wrapper(name, args, data_d=None, link_d=None):
return len(v1[mode]['errors'])
+def _maybe_persist_instance_data(init):
+ """Write instance-data.json file if absent and datasource is restored."""
+ if init.ds_restored:
+ instance_data_file = os.path.join(
+ init.paths.run_dir, sources.INSTANCE_JSON_FILE)
+ if not os.path.exists(instance_data_file):
+ init.datasource.persist_instance_data()
+
+
def _maybe_set_hostname(init, stage, retry_stage):
"""Call set-hostname if metadata, vendordata or userdata provides it.
@@ -779,6 +791,10 @@ def main(sysv_args=None):
' pass to this module'))
parser_single.set_defaults(action=('single', main_single))
+ parser_query = subparsers.add_parser(
+ 'query',
+ help='Query standardized instance metadata from the command line.')
+
parser_dhclient = subparsers.add_parser('dhclient-hook',
help=('run the dhclient hook '
'to record network info'))
@@ -830,6 +846,12 @@ def main(sysv_args=None):
clean_parser(parser_clean)
parser_clean.set_defaults(
action=('clean', handle_clean_args))
+ elif sysv_args[0] == 'query':
+ from cloudinit.cmd.query import (
+ get_parser as query_parser, handle_args as handle_query_args)
+ query_parser(parser_query)
+ parser_query.set_defaults(
+ action=('query', handle_query_args))
elif sysv_args[0] == 'status':
from cloudinit.cmd.status import (
get_parser as status_parser, handle_status_args)
@@ -877,14 +899,18 @@ def main(sysv_args=None):
rname, rdesc, reporting_enabled=report_on)
with args.reporter:
- return util.log_time(
+ retval = util.log_time(
logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
get_uptime=True, func=functor, args=(name, args))
+ reporting.flush_events()
+ return retval
if __name__ == '__main__':
if 'TZ' not in os.environ:
os.environ['TZ'] = ":/etc/localtime"
- main(sys.argv)
+ return_value = main(sys.argv)
+ if return_value:
+ sys.exit(return_value)
# vi: ts=4 expandtab
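
Two behavioral changes ride along with the query wiring: subcommand implementations are imported only when named on the command line, and main's return value now propagates to the process exit code. A self-contained sketch of that lazy-dispatch shape (the handler is hypothetical):

    import sys

    def handle_query(argv):
        # In the real code this lives in cloudinit.cmd.query and is only
        # imported on demand, keeping boot-time invocations cheap.
        print('query', argv)
        return 0

    def main(sysv_args=None):
        if sysv_args is None:
            sysv_args = sys.argv[1:]
        if sysv_args and sysv_args[0] == 'query':
            return handle_query(sysv_args[1:])
        return 0

    if __name__ == '__main__':
        return_value = main()
        if return_value:
            sys.exit(return_value)
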
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
new file mode 100644
index 00000000..7d2d4fe4
--- /dev/null
+++ b/cloudinit/cmd/query.py
@@ -0,0 +1,155 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Query standardized instance metadata from the command line."""
+
+import argparse
+import os
+import six
+import sys
+
+from cloudinit.handlers.jinja_template import (
+ convert_jinja_instance_data, render_jinja_payload)
+from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
+from cloudinit import log
+from cloudinit.sources import (
+ INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE)
+from cloudinit import util
+
+NAME = 'query'
+LOG = log.getLogger(NAME)
+
+
+def get_parser(parser=None):
+ """Build or extend an arg parser for query utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ query subcommand which will be extended to support the args of
+ this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(
+ prog=NAME, description='Query cloud-init instance data')
+ parser.add_argument(
+ '-d', '--debug', action='store_true', default=False,
+ help='Add verbose messages during template render')
+ parser.add_argument(
+ '-i', '--instance-data', type=str,
+ help=('Path to instance-data.json file. Default is /run/cloud-init/%s'
+ % INSTANCE_JSON_FILE))
+ parser.add_argument(
+ '-l', '--list-keys', action='store_true', default=False,
+ help=('List query keys available at the provided instance-data'
+ ' <varname>.'))
+ parser.add_argument(
+ '-u', '--user-data', type=str,
+ help=('Path to user-data file. Default is'
+ ' /var/lib/cloud/instance/user-data.txt'))
+ parser.add_argument(
+ '-v', '--vendor-data', type=str,
+ help=('Path to vendor-data file. Default is'
+ ' /var/lib/cloud/instance/vendor-data.txt'))
+ parser.add_argument(
+ 'varname', type=str, nargs='?',
+ help=('A dot-delimited instance data variable to query from'
+ ' instance-data query. For example: v2.local_hostname'))
+ parser.add_argument(
+ '-a', '--all', action='store_true', default=False, dest='dump_all',
+ help='Dump all available instance-data')
+ parser.add_argument(
+ '-f', '--format', type=str, dest='format',
+ help=('Optionally specify a custom output format string. Any'
+ ' instance-data variable can be specified between double-curly'
+ ' braces. For example -f "{{ v2.cloud_name }}"'))
+ return parser
+
+
+def handle_args(name, args):
+ """Handle calls to 'cloud-init query' as a subcommand."""
+ paths = None
+ addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
+ if not any([args.list_keys, args.varname, args.format, args.dump_all]):
+ LOG.error(
+ 'Expected one of the options: --all, --format,'
+ ' --list-keys or varname')
+ get_parser().print_help()
+ return 1
+
+ uid = os.getuid()
+ if not all([args.instance_data, args.user_data, args.vendor_data]):
+ paths = read_cfg_paths()
+ if not args.instance_data:
+ if uid == 0:
+ default_json_fn = INSTANCE_JSON_SENSITIVE_FILE
+ else:
+ default_json_fn = INSTANCE_JSON_FILE # World readable
+ instance_data_fn = os.path.join(paths.run_dir, default_json_fn)
+ else:
+ instance_data_fn = args.instance_data
+ if not args.user_data:
+ user_data_fn = os.path.join(paths.instance_link, 'user-data.txt')
+ else:
+ user_data_fn = args.user_data
+ if not args.vendor_data:
+ vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt')
+ else:
+ vendor_data_fn = args.vendor_data
+
+ try:
+ instance_json = util.load_file(instance_data_fn)
+ except IOError:
+ LOG.error('Missing instance-data.json file: %s', instance_data_fn)
+ return 1
+
+ instance_data = util.load_json(instance_json)
+ if uid != 0:
+ instance_data['userdata'] = (
+ '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn))
+ instance_data['vendordata'] = (
+ '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn))
+ else:
+ instance_data['userdata'] = util.load_file(user_data_fn)
+ instance_data['vendordata'] = util.load_file(vendor_data_fn)
+ if args.format:
+ payload = '## template: jinja\n{fmt}'.format(fmt=args.format)
+ rendered_payload = render_jinja_payload(
+ payload=payload, payload_fn='query commandline',
+ instance_data=instance_data,
+ debug=True if args.debug else False)
+ if rendered_payload:
+ print(rendered_payload)
+ return 0
+ return 1
+
+ response = convert_jinja_instance_data(instance_data)
+ if args.varname:
+ try:
+ for var in args.varname.split('.'):
+ response = response[var]
+ except KeyError:
+ LOG.error('Undefined instance-data key %s', args.varname)
+ return 1
+ if args.list_keys:
+ if not isinstance(response, dict):
+ LOG.error("--list-keys provided but '%s' is not a dict", var)
+ return 1
+ response = '\n'.join(sorted(response.keys()))
+ elif args.list_keys:
+ response = '\n'.join(sorted(response.keys()))
+ if not isinstance(response, six.string_types):
+ response = util.json_dumps(response)
+ print(response)
+ return 0
+
+
+def main():
+ """Tool to query specific instance-data values."""
+ parser = get_parser()
+ sys.exit(handle_args(NAME, parser.parse_args()))
+
+
+if __name__ == '__main__':
+ main()
+
+# vi: ts=4 expandtab
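
The varname lookup is a plain dictionary walk over dot-delimited keys, and non-root callers get a redaction placeholder instead of user/vendor data contents. A standalone sketch of both behaviors (the sample data is hypothetical; REDACT mirrors the REDACT_SENSITIVE_VALUE placeholder seen in the tests below):

    instance_data = {'v1': {'cloud_name': 'aws', 'region': 'us-east-2'}}

    def query(data, varname):
        # Mirrors the response[var] loop above: 'v1.region' -> 'us-east-2'.
        for var in varname.split('.'):
            data = data[var]
        return data

    print(query(instance_data, 'v1.region'))

    # Non-root callers see a placeholder rather than file contents.
    REDACT = 'redacted for non-root user'
    instance_data['userdata'] = '<%s> file:%s' % (
        REDACT, '/var/lib/cloud/instance/user-data.txt')
    print(instance_data['userdata'])
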
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index e2c54ae8..a1e534fb 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -125,7 +125,9 @@ class TestMain(FilesystemMockingTestCase):
updated_cfg.update(
{'def_log_file': '/var/log/cloud-init.log',
'log_cfgs': [],
- 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'],
+ 'syslog_fix_perms': [
+ 'syslog:adm', 'root:adm', 'root:wheel', 'root:root'
+ ],
'vendor_data': {'enabled': True, 'prefix': []}})
updated_cfg.pop('system_info')
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
new file mode 100644
index 00000000..fb87c6ab
--- /dev/null
+++ b/cloudinit/cmd/tests/test_query.py
@@ -0,0 +1,193 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from six import StringIO
+from textwrap import dedent
+import os
+
+from collections import namedtuple
+from cloudinit.cmd import query
+from cloudinit.helpers import Paths
+from cloudinit.sources import REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE
+from cloudinit.tests.helpers import CiTestCase, mock
+from cloudinit.util import ensure_dir, write_file
+
+
+class TestQuery(CiTestCase):
+
+ with_logs = True
+
+ args = namedtuple(
+ 'queryargs',
+ ('debug dump_all format instance_data list_keys user_data vendor_data'
+ ' varname'))
+
+ def setUp(self):
+ super(TestQuery, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.instance_data = self.tmp_path('instance-data', dir=self.tmp)
+
+ def test_handle_args_error_on_missing_param(self):
+ """Error when missing required parameters and print usage."""
+ args = self.args(
+ debug=False, dump_all=False, format=None, instance_data=None,
+ list_keys=False, user_data=None, vendor_data=None, varname=None)
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(1, query.handle_args('anyname', args))
+ expected_error = (
+ 'ERROR: Expected one of the options: --all, --format, --list-keys'
+ ' or varname\n')
+ self.assertIn(expected_error, self.logs.getvalue())
+ self.assertIn('usage: query', m_stdout.getvalue())
+ self.assertIn(expected_error, m_stderr.getvalue())
+
+ def test_handle_args_error_on_missing_instance_data(self):
+ """When instance_data file path does not exist, log an error."""
+ absent_fn = self.tmp_path('absent', dir=self.tmp)
+ args = self.args(
+ debug=False, dump_all=True, format=None, instance_data=absent_fn,
+ list_keys=False, user_data='ud', vendor_data='vd', varname=None)
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ self.assertEqual(1, query.handle_args('anyname', args))
+ self.assertIn(
+ 'ERROR: Missing instance-data.json file: %s' % absent_fn,
+ self.logs.getvalue())
+ self.assertIn(
+ 'ERROR: Missing instance-data.json file: %s' % absent_fn,
+ m_stderr.getvalue())
+
+ def test_handle_args_defaults_instance_data(self):
+ """When no instance_data argument, default to configured run_dir."""
+ args = self.args(
+ debug=False, dump_all=True, format=None, instance_data=None,
+ list_keys=False, user_data=None, vendor_data=None, varname=None)
+ run_dir = self.tmp_path('run_dir', dir=self.tmp)
+ ensure_dir(run_dir)
+ paths = Paths({'run_dir': run_dir})
+ self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
+ self.m_paths.return_value = paths
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ self.assertEqual(1, query.handle_args('anyname', args))
+ json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
+ self.assertIn(
+ 'ERROR: Missing instance-data.json file: %s' % json_file,
+ self.logs.getvalue())
+ self.assertIn(
+ 'ERROR: Missing instance-data.json file: %s' % json_file,
+ m_stderr.getvalue())
+
+ def test_handle_args_dumps_all_instance_data(self):
+ """When --all is specified query will dump all instance data vars."""
+ write_file(self.instance_data, '{"my-var": "it worked"}')
+ args = self.args(
+ debug=False, dump_all=True, format=None,
+ instance_data=self.instance_data, list_keys=False,
+ user_data='ud', vendor_data='vd', varname=None)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual(
+ '{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n'
+ ' "vendordata": "<%s> file:vd"\n}\n' % (
+ REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE),
+ m_stdout.getvalue())
+
+ def test_handle_args_returns_top_level_varname(self):
+ """When the argument varname is passed, report its value."""
+ write_file(self.instance_data, '{"my-var": "it worked"}')
+ args = self.args(
+ debug=False, dump_all=True, format=None,
+ instance_data=self.instance_data, list_keys=False,
+ user_data='ud', vendor_data='vd', varname='my_var')
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual('it worked\n', m_stdout.getvalue())
+
+ def test_handle_args_returns_nested_varname(self):
+ """If user_data file is a jinja template render instance-data vars."""
+ write_file(self.instance_data,
+ '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}')
+ args = self.args(
+ debug=False, dump_all=False, format=None,
+ instance_data=self.instance_data, user_data='ud', vendor_data='vd',
+ list_keys=False, varname='v1.key_2')
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual('value-2\n', m_stdout.getvalue())
+
+ def test_handle_args_returns_standardized_vars_to_top_level_aliases(self):
+ """Any standardized vars under v# are promoted as top-level aliases."""
+ write_file(
+ self.instance_data,
+ '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
+ ' "top": "gun"}')
+ expected = dedent("""\
+ {
+ "top": "gun",
+ "userdata": "<redacted for non-root user> file:ud",
+ "v1": {
+ "v1_1": "val1.1"
+ },
+ "v1_1": "val1.1",
+ "v2": {
+ "v2_2": "val2.2"
+ },
+ "v2_2": "val2.2",
+ "vendordata": "<redacted for non-root user> file:vd"
+ }
+ """)
+ args = self.args(
+ debug=False, dump_all=True, format=None,
+ instance_data=self.instance_data, user_data='ud', vendor_data='vd',
+ list_keys=False, varname=None)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(self):
+ """Sort all top-level keys when only --list-keys provided."""
+ write_file(
+ self.instance_data,
+ '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
+ ' "top": "gun"}')
+ expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n'
+ args = self.args(
+ debug=False, dump_all=False, format=None,
+ instance_data=self.instance_data, list_keys=True, user_data='ud',
+ vendor_data='vd', varname=None)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_handle_args_list_keys_sorts_nested_keys_when_varname(self):
+ """Sort all nested keys of varname object when --list-keys provided."""
+ write_file(
+ self.instance_data,
+ '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' +
+ ' {"v2_2": "val2.2"}, "top": "gun"}')
+ expected = 'v1_1\nv1_2\n'
+ args = self.args(
+ debug=False, dump_all=False, format=None,
+ instance_data=self.instance_data, list_keys=True,
+ user_data='ud', vendor_data='vd', varname='v1')
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(self):
+ """Raise an error when --list-keys and varname specify a non-list."""
+ write_file(
+ self.instance_data,
+ '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' +
+ '{"v2_2": "val2.2"}, "top": "gun"}')
+ expected_error = "ERROR: --list-keys provided but 'top' is not a dict"
+ args = self.args(
+ debug=False, dump_all=False, format=None,
+ instance_data=self.instance_data, list_keys=True, user_data='ud',
+ vendor_data='vd', varname='top')
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(1, query.handle_args('anyname', args))
+ self.assertEqual('', m_stdout.getvalue())
+ self.assertIn(expected_error, m_stderr.getvalue())
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
index 37a89936..aded8580 100644
--- a/cloudinit/cmd/tests/test_status.py
+++ b/cloudinit/cmd/tests/test_status.py
@@ -39,7 +39,8 @@ class TestStatus(CiTestCase):
ensure_file(self.disable_file) # Create the ignored disable file
(is_disabled, reason) = wrap_and_call(
'cloudinit.cmd.status',
- {'uses_systemd': False},
+ {'uses_systemd': False,
+ 'get_cmdline': "root=/dev/my-root not-important"},
status._is_cloudinit_disabled, self.disable_file, self.paths)
self.assertFalse(
is_disabled, 'expected enabled cloud-init on sysvinit')
@@ -50,7 +51,8 @@ class TestStatus(CiTestCase):
ensure_file(self.disable_file) # Create observed disable file
(is_disabled, reason) = wrap_and_call(
'cloudinit.cmd.status',
- {'uses_systemd': True},
+ {'uses_systemd': True,
+ 'get_cmdline': "root=/dev/my-root not-important"},
status._is_cloudinit_disabled, self.disable_file, self.paths)
self.assertTrue(is_disabled, 'expected disabled cloud-init')
self.assertEqual(
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index ac72ac4a..24a8ebea 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -104,6 +104,7 @@ def handle(name, cfg, cloud, log, args):
'network_address', 'network_port', 'storage_backend',
'storage_create_device', 'storage_create_loop',
'storage_pool', 'trust_password')
+ util.subp(['lxd', 'waitready', '--timeout=300'])
cmd = ['lxd', 'init', '--auto']
for k in init_keys:
if init_cfg.get(k):
@@ -260,7 +261,9 @@ def bridge_to_cmd(bridge_cfg):
def _lxc(cmd):
- env = {'LC_ALL': 'C'}
+ env = {'LC_ALL': 'C',
+ 'HOME': os.environ.get('HOME', '/root'),
+ 'USER': os.environ.get('USER', 'root')}
util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
@@ -276,27 +279,27 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
if net_name != _DEFAULT_NETWORK_NAME or not did_init:
return
- fail_assume_enoent = " failed. Assuming it did not exist."
- succeeded = " succeeded."
+ fail_assume_enoent = "failed. Assuming it did not exist."
+ succeeded = "succeeded."
if create:
- msg = "Deletion of lxd network '%s'" % net_name
+ msg = "Deletion of lxd network '%s' %s"
try:
_lxc(["network", "delete", net_name])
- LOG.debug(msg + succeeded)
+ LOG.debug(msg, net_name, succeeded)
except util.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
- LOG.debug(msg + fail_assume_enoent)
+ LOG.debug(msg, net_name, fail_assume_enoent)
if attach:
- msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile)
+ msg = "Removal of device '%s' from profile '%s' %s"
try:
_lxc(["profile", "device", "remove", profile, nic_name])
- LOG.debug(msg + succeeded)
+ LOG.debug(msg, nic_name, profile, succeeded)
except util.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
- LOG.debug(msg + fail_assume_enoent)
+ LOG.debug(msg, nic_name, profile, fail_assume_enoent)
# vi: ts=4 expandtab
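
The logging rework swaps eager string concatenation for %-style logger arguments, so the message is interpolated only when a DEBUG record is actually emitted. A minimal before/after sketch:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)
    net_name = 'lxdbr0'

    # Before: the full message is built even if DEBUG is disabled.
    LOG.debug("Deletion of lxd network '%s'" % net_name + " succeeded.")

    # After: interpolation is deferred to the logging framework.
    LOG.debug("Deletion of lxd network '%s' %s", net_name, "succeeded.")
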
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 1c679430..edee01e5 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -126,7 +126,6 @@ class SubscriptionManager(object):
self.enable_repo = self.rhel_cfg.get('enable-repo')
self.disable_repo = self.rhel_cfg.get('disable-repo')
self.servicelevel = self.rhel_cfg.get('service-level')
- self.subman = ['subscription-manager']
def log_success(self, msg):
'''Simple wrapper for logging info messages. Useful for unittests'''
@@ -173,21 +172,12 @@ class SubscriptionManager(object):
cmd = ['identity']
try:
- self._sub_man_cli(cmd)
+ _sub_man_cli(cmd)
except util.ProcessExecutionError:
return False
return True
- def _sub_man_cli(self, cmd, logstring_val=False):
- '''
- Uses the prefered cloud-init subprocess def of util.subp
- and runs subscription-manager. Breaking this to a
- separate function for later use in mocking and unittests
- '''
- cmd = self.subman + cmd
- return util.subp(cmd, logstring=logstring_val)
-
def rhn_register(self):
'''
Registers the system by userid and password or activation key
@@ -209,7 +199,7 @@ class SubscriptionManager(object):
cmd.append("--serverurl={0}".format(self.server_hostname))
try:
- return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
+ return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except util.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
@@ -232,7 +222,7 @@ class SubscriptionManager(object):
# Attempting to register the system only
try:
- return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
+ return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except util.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
@@ -255,7 +245,7 @@ class SubscriptionManager(object):
.format(self.servicelevel)]
try:
- return_out = self._sub_man_cli(cmd)[0]
+ return_out = _sub_man_cli(cmd)[0]
except util.ProcessExecutionError as e:
if e.stdout.rstrip() != '':
for line in e.stdout.split("\n"):
@@ -273,7 +263,7 @@ class SubscriptionManager(object):
def _set_auto_attach(self):
cmd = ['attach', '--auto']
try:
- return_out = self._sub_man_cli(cmd)[0]
+ return_out = _sub_man_cli(cmd)[0]
except util.ProcessExecutionError as e:
self.log_warn("Auto-attach failed with: {0}".format(e))
return False
@@ -292,12 +282,12 @@ class SubscriptionManager(object):
# Get all available pools
cmd = ['list', '--available', '--pool-only']
- results = self._sub_man_cli(cmd)[0]
+ results = _sub_man_cli(cmd)[0]
available = (results.rstrip()).split("\n")
# Get all consumed pools
cmd = ['list', '--consumed', '--pool-only']
- results = self._sub_man_cli(cmd)[0]
+ results = _sub_man_cli(cmd)[0]
consumed = (results.rstrip()).split("\n")
return available, consumed
@@ -309,14 +299,14 @@ class SubscriptionManager(object):
'''
cmd = ['repos', '--list-enabled']
- return_out = self._sub_man_cli(cmd)[0]
+ return_out = _sub_man_cli(cmd)[0]
active_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
active_repos.append((repo.split(':')[1]).strip())
cmd = ['repos', '--list-disabled']
- return_out = self._sub_man_cli(cmd)[0]
+ return_out = _sub_man_cli(cmd)[0]
inactive_repos = []
for repo in return_out.split("\n"):
@@ -346,7 +336,7 @@ class SubscriptionManager(object):
if len(pool_list) > 0:
cmd.extend(pool_list)
try:
- self._sub_man_cli(cmd)
+ _sub_man_cli(cmd)
self.log.debug("Attached the following pools to your "
"system: %s", (", ".join(pool_list))
.replace('--pool=', ''))
@@ -423,7 +413,7 @@ class SubscriptionManager(object):
cmd.extend(enable_list)
try:
- self._sub_man_cli(cmd)
+ _sub_man_cli(cmd)
except util.ProcessExecutionError as e:
self.log_warn("Unable to alter repos due to {0}".format(e))
return False
@@ -439,4 +429,15 @@ class SubscriptionManager(object):
def is_configured(self):
return bool((self.userid and self.password) or self.activation_key)
+
+def _sub_man_cli(cmd, logstring_val=False):
+ '''
+ Uses the preferred cloud-init subprocess def of util.subp
+ and runs subscription-manager. Breaking this out into a
+ separate function for later use in mocking and unittests
+ '''
+ return util.subp(['subscription-manager'] + cmd,
+ logstring=logstring_val)
+
+
# vi: ts=4 expandtab
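
Hoisting _sub_man_cli from a method to a module-level function lets tests patch one module attribute instead of stubbing every SubscriptionManager instance. A self-contained sketch of that testing payoff (the functions are hypothetical stand-ins; RuntimeError stands in for util.ProcessExecutionError):

    import unittest
    from unittest import mock

    def _sub_man_cli(cmd, logstring_val=False):
        raise RuntimeError('would shell out to subscription-manager')

    def is_registered():
        try:
            _sub_man_cli(['identity'])
        except RuntimeError:
            return False
        return True

    class TestIsRegistered(unittest.TestCase):

        @mock.patch('__main__._sub_man_cli',  # assumes direct execution
                    return_value=('', ''))
        def test_registered(self, m_cli):
            self.assertTrue(is_registered())
            m_cli.assert_called_once_with(['identity'])

    if __name__ == '__main__':
        unittest.main()
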
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 45204a07..f8f7cb35 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -101,10 +101,6 @@ from cloudinit.distros import ug_util
from cloudinit import ssh_util
from cloudinit import util
-DISABLE_ROOT_OPTS = (
- "no-port-forwarding,no-agent-forwarding,"
- "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
- " rather than the user \\\"root\\\".\';echo;sleep 10\"")
GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
@@ -185,7 +181,7 @@ def handle(_name, cfg, cloud, log, _args):
(user, _user_config) = ug_util.extract_default(users)
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
- DISABLE_ROOT_OPTS)
+ ssh_util.DISABLE_USER_OPTS)
keys = cloud.get_public_ssh_keys() or []
if "ssh_authorized_keys" in cfg:
@@ -207,6 +203,7 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
if not user:
user = "NONE"
key_prefix = disable_root_opts.replace('$USER', user)
+ key_prefix = key_prefix.replace('$DISABLE_USER', 'root')
else:
key_prefix = ''
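
DISABLE_ROOT_OPTS moves into ssh_util as DISABLE_USER_OPTS with a second placeholder, so the same template can lock out any redirected user, not just root. A small sketch of the substitution apply_credentials performs (the template text is abbreviated from the removed constant above):

    # Abbreviated; the real ssh_util.DISABLE_USER_OPTS also sets
    # no-port-forwarding, no-agent-forwarding and no-X11-forwarding.
    DISABLE_USER_OPTS = (
        'command="echo Please login as the user $USER rather than'
        ' the user $DISABLE_USER.;sleep 10"')

    key_prefix = DISABLE_USER_OPTS.replace('$USER', 'clouduser')
    key_prefix = key_prefix.replace('$DISABLE_USER', 'root')
    print(key_prefix)
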
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index c95bdaad..c32a743a 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -52,8 +52,17 @@ config keys for an entry in ``users`` are as follows:
associated with the address, username and SSH keys will be requested from
there. Default: none
- ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's
- authkeys file. Default: none
- - ``ssh_import_id``: Optional. SSH id to import for user. Default: none
+ authkeys file. Default: none. This key cannot be combined with
+ ``ssh_redirect_user``.
+ - ``ssh_import_id``: Optional. SSH id to import for user. Default: none.
+ This key cannot be combined with ``ssh_redirect_user``.
+ - ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH
+ logins for this user. When specified, all cloud meta-data public ssh
+ keys will be set up in a disabled state for this username. Any ssh login
+ as this username will time out and prompt with a message to log in
+ instead as the configured <default_username> for this instance.
+ Default: false. This key cannot be combined with ``ssh_import_id`` or
+ ``ssh_authorized_keys``.
- ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False.
Default: none. An absence of sudo key, or a value of none or false
will result in no sudo rules being written for the user.
@@ -101,6 +110,7 @@ config keys for an entry in ``users`` are as follows:
selinux_user: <selinux username>
shell: <shell path>
snapuser: <email>
+ ssh_redirect_user: <true/false>
ssh_authorized_keys:
- <key>
- <key>
@@ -114,17 +124,44 @@ config keys for an entry in ``users`` are as follows:
# since the module attribute 'distros'
# is a list of distros that are supported, not a sub-module
from cloudinit.distros import ug_util
+from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+LOG = logging.getLogger(__name__)
+
frequency = PER_INSTANCE
def handle(name, cfg, cloud, _log, _args):
(users, groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
+ (default_user, _user_config) = ug_util.extract_default(users)
+ cloud_keys = cloud.get_public_ssh_keys() or []
for (name, members) in groups.items():
cloud.distro.create_group(name, members)
for (user, config) in users.items():
+ ssh_redirect_user = config.pop("ssh_redirect_user", False)
+ if ssh_redirect_user:
+ if 'ssh_authorized_keys' in config or 'ssh_import_id' in config:
+ raise ValueError(
+ 'Not creating user %s. ssh_redirect_user cannot be'
+ ' provided with ssh_import_id or ssh_authorized_keys' %
+ user)
+ if ssh_redirect_user not in (True, 'default'):
+ raise ValueError(
+ 'Not creating user %s. Invalid value of'
+ ' ssh_redirect_user: %s. Expected values: true, default'
+ ' or false.' % (user, ssh_redirect_user))
+ if default_user is None:
+ LOG.warning(
+ 'Ignoring ssh_redirect_user: %s for %s.'
+ ' No default_user defined.'
+ ' Perhaps missing cloud configuration users: '
+ ' [default, ..].',
+ ssh_redirect_user, user)
+ else:
+ config['ssh_redirect_user'] = default_user
+ config['cloud_public_ssh_keys'] = cloud_keys
cloud.distro.create_user(user, **config)
# vi: ts=4 expandtab
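
The new handle logic validates ssh_redirect_user before any user is created. A sketch of the inputs it accepts and rejects, using the dict form cloud-config YAML parses into (user names are hypothetical):

    # Accepted: 'ops' is created with the cloud's public keys installed in
    # a disabled state, redirecting ssh logins to the default user.
    valid_cfg = {
        'users': ['default', {'name': 'ops', 'ssh_redirect_user': True}]
    }

    # Rejected with ValueError: redirect cannot be combined with
    # ssh_import_id or ssh_authorized_keys.
    invalid_cfg = {
        'users': ['default', {'name': 'ops', 'ssh_redirect_user': True,
                              'ssh_import_id': ['lp:someuser']}]
    }
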
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index 34c80f1e..3c472891 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -162,6 +162,7 @@ class TestAddAssertions(CiTestCase):
class TestRunCommands(CiTestCase):
with_logs = True
+ allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
def setUp(self):
super(TestRunCommands, self).setUp()
@@ -424,8 +425,10 @@ class TestHandle(CiTestCase):
'snap': {'commands': ['echo "HI" >> %s' % outfile,
'echo "MOM" >> %s' % outfile]}}
mock_path = 'cloudinit.config.cc_snap.sys.stderr'
- with mock.patch(mock_path, new_callable=StringIO):
- handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+ with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
+ with mock.patch(mock_path, new_callable=StringIO):
+ handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+
self.assertEqual('HI\nMOM\n', util.load_file(outfile))
@mock.patch('cloudinit.config.cc_snap.util.subp')
diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
new file mode 100644
index 00000000..c8a4271f
--- /dev/null
+++ b/cloudinit/config/tests/test_ssh.py
@@ -0,0 +1,151 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+from cloudinit.config import cc_ssh
+from cloudinit import ssh_util
+from cloudinit.tests.helpers import CiTestCase, mock
+
+MODPATH = "cloudinit.config.cc_ssh."
+
+
+@mock.patch(MODPATH + "ssh_util.setup_user_keys")
+class TestHandleSsh(CiTestCase):
+ """Test cc_ssh handling of ssh config."""
+
+ def test_apply_credentials_with_user(self, m_setup_keys):
+ """Apply keys for the given user and root."""
+ keys = ["key1"]
+ user = "clouduser"
+ cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
+ self.assertEqual([mock.call(set(keys), user),
+ mock.call(set(keys), "root", options="")],
+ m_setup_keys.call_args_list)
+
+ def test_apply_credentials_with_no_user(self, m_setup_keys):
+ """Apply keys for root only."""
+ keys = ["key1"]
+ user = None
+ cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
+ self.assertEqual([mock.call(set(keys), "root", options="")],
+ m_setup_keys.call_args_list)
+
+ def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
+ """Apply keys for the given user and disable root ssh."""
+ keys = ["key1"]
+ user = "clouduser"
+ options = ssh_util.DISABLE_USER_OPTS
+ cc_ssh.apply_credentials(keys, user, True, options)
+ options = options.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual([mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
+ """Apply keys no user and disable root ssh."""
+ keys = ["key1"]
+ user = None
+ options = ssh_util.DISABLE_USER_OPTS
+ cc_ssh.apply_credentials(keys, user, True, options)
+ options = options.replace("$USER", "NONE")
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual([mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_no_cfg(self, m_path_exists, m_nug,
+ m_glob, m_setup_keys):
+ """Test handle with no config ignores generating existing keyfiles."""
+ cfg = {}
+ keys = ["key1"]
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+ # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ([], {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cc_ssh.handle("name", cfg, cloud, None, None)
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
+ options = options.replace("$DISABLE_USER", "root")
+ m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
+ self.assertIn(
+ [mock.call('/etc/ssh/ssh_host_rsa_key'),
+ mock.call('/etc/ssh/ssh_host_dsa_key'),
+ mock.call('/etc/ssh/ssh_host_ecdsa_key'),
+ mock.call('/etc/ssh/ssh_host_ed25519_key')],
+ m_path_exists.call_args_list)
+ self.assertEqual([mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
+ m_glob, m_setup_keys):
+ """Test handle with no config and a default distro user."""
+ cfg = {}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cc_ssh.handle("name", cfg, cloud, None, None)
+
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual([mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug,
+ m_glob, m_setup_keys):
+ """Test handle with explicit disable_root and a default distro user."""
+ # This test is identical to test_handle_no_cfg_and_default_root,
+ # except this uses an explicit cfg value
+ cfg = {"disable_root": True}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cc_ssh.handle("name", cfg, cloud, None, None)
+
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual([mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug,
+ m_glob, m_setup_keys):
+ """Test handle with disable_root == False."""
+ # When disable_root == False, the ssh redirect for root is skipped
+ cfg = {"disable_root": False}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
+ cc_ssh.handle("name", cfg, cloud, None, None)
+
+ self.assertEqual([mock.call(set(keys), user),
+ mock.call(set(keys), "root", options="")],
+ m_setup_keys.call_args_list)
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index f1beeff8..b7cf9bee 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -23,6 +23,7 @@ class FakeCloud(object):
class TestRunCommands(CiTestCase):
with_logs = True
+ allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
def setUp(self):
super(TestRunCommands, self).setUp()
@@ -234,8 +235,10 @@ class TestHandle(CiTestCase):
'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile,
'echo "MOM" >> %s' % outfile]}}
mock_path = '%s.sys.stderr' % MPATH
- with mock.patch(mock_path, new_callable=StringIO):
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
+ with mock.patch(mock_path, new_callable=StringIO):
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger,
+ args=None)
self.assertEqual('HI\nMOM\n', util.load_file(outfile))
diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
new file mode 100644
index 00000000..ba0afae3
--- /dev/null
+++ b/cloudinit/config/tests/test_users_groups.py
@@ -0,0 +1,144 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+from cloudinit.config import cc_users_groups
+from cloudinit.tests.helpers import CiTestCase, mock
+
+MODPATH = "cloudinit.config.cc_users_groups"
+
+
+@mock.patch('cloudinit.distros.ubuntu.Distro.create_group')
+@mock.patch('cloudinit.distros.ubuntu.Distro.create_user')
+class TestHandleUsersGroups(CiTestCase):
+ """Test cc_users_groups handling of config."""
+
+ with_logs = True
+
+ def test_handle_no_cfg_creates_no_users_or_groups(self, m_user, m_group):
+ """Test handle with no config will not create users or groups."""
+ cfg = {} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ m_user.assert_not_called()
+ m_group.assert_not_called()
+
+ def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group):
+ """When users in config, create users with distro.create_user."""
+ cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ self.assertItemsEqual(
+ m_user.call_args_list,
+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
+ shell='/bin/bash'),
+ mock.call('me2', default=False)])
+ m_group.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group):
+ """When ssh_redirect_user is True pass default user and cloud keys."""
+ cfg = {
+ 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]}
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {'public-keys': ['key1']}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ self.assertItemsEqual(
+ m_user.call_args_list,
+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
+ shell='/bin/bash'),
+ mock.call('me2', cloud_public_ssh_keys=['key1'], default=False,
+ ssh_redirect_user='ubuntu')])
+ m_group.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group):
+ """When ssh_redirect_user is 'default' pass default username."""
+ cfg = {
+ 'users': ['default', {'name': 'me2',
+ 'ssh_redirect_user': 'default'}]}
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {'public-keys': ['key1']}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ self.assertItemsEqual(
+ m_user.call_args_list,
+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
+ shell='/bin/bash'),
+ mock.call('me2', cloud_public_ssh_keys=['key1'], default=False,
+ ssh_redirect_user='ubuntu')])
+ m_group.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group):
+ """Warn when ssh_redirect_user is not 'default'."""
+ cfg = {
+ 'users': ['default', {'name': 'me2',
+ 'ssh_redirect_user': 'snowflake'}]}
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {'public-keys': ['key1']}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ with self.assertRaises(ValueError) as context_manager:
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ m_group.assert_not_called()
+ self.assertEqual(
+ 'Not creating user me2. Invalid value of ssh_redirect_user:'
+ ' snowflake. Expected values: true, default or false.',
+ str(context_manager.exception))
+
+ def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group):
+ """When unspecified ssh_redirect_user is false and not set up."""
+ cfg = {'users': ['default', {'name': 'me2'}]}
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {'public-keys': ['key1']}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ self.assertItemsEqual(
+ m_user.call_args_list,
+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
+ shell='/bin/bash'),
+ mock.call('me2', default=False)])
+ m_group.assert_not_called()
+
+ def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group):
+ """Warn when ssh_redirect_user is True and no default user present."""
+ cfg = {
+ 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]}
+ # System config defines *no* default user for the distro.
+ sys_cfg = {}
+ metadata = {} # no public-keys defined
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ m_user.assert_called_once_with('me2', default=False)
+ m_group.assert_not_called()
+ self.assertEqual(
+ 'WARNING: Ignoring ssh_redirect_user: True for me2. No'
+ ' default_user defined. Perhaps missing'
+ ' cloud configuration users: [default, ..].\n',
+ self.logs.getvalue())
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index ab0b0776..ef618c28 100755..100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -74,11 +74,10 @@ class Distro(object):
def install_packages(self, pkglist):
raise NotImplementedError()
- @abc.abstractmethod
def _write_network(self, settings):
- # In the future use the http://fedorahosted.org/netcf/
- # to write this blob out in a distro format
- raise NotImplementedError()
+ raise RuntimeError(
+ "Legacy function '_write_network' was called in distro '%s'.\n"
+ "_write_network_config needs implementation.\n" % self.name)
def _write_network_config(self, settings):
raise NotImplementedError()
@@ -91,7 +90,7 @@ class Distro(object):
LOG.debug("Selected renderer '%s' from priority list: %s",
name, priority)
renderer = render_cls(config=self.renderer_configs.get(name))
- renderer.render_network_config(network_config=network_config)
+ renderer.render_network_config(network_config)
return []
def _find_tz_file(self, tz):
@@ -144,7 +143,11 @@ class Distro(object):
# this applies network where 'settings' is interfaces(5) style
# it is obsolete compared to apply_network_config
# Write it out
+
+ # pylint: disable=assignment-from-no-return
+ # We have implementations in arch, freebsd and gentoo still
dev_names = self._write_network(settings)
+ # pylint: enable=assignment-from-no-return
# Now try to bring them up
if bring_up:
return self._bring_up_interfaces(dev_names)
@@ -157,7 +160,7 @@ class Distro(object):
distro)
header = '\n'.join([
"# Converted from network_config for distro %s" % distro,
- "# Implmentation of _write_network_config is needed."
+ "# Implementation of _write_network_config is needed."
])
ns = network_state.parse_net_config_data(netconfig)
contents = eni.network_state_to_eni(
@@ -381,6 +384,9 @@ class Distro(object):
"""
Add a user to the system using standard GNU tools
"""
+ # XXX need to make add_user idempotent somehow as we
+ # still want to add groups or modify ssh keys on pre-existing
+ # users in the image.
if util.is_user(name):
LOG.info("User %s already exists, skipping.", name)
return
@@ -547,10 +553,24 @@ class Distro(object):
LOG.warning("Invalid type '%s' detected for"
" 'ssh_authorized_keys', expected list,"
" string, dict, or set.", type(keys))
+ keys = []
else:
keys = set(keys) or []
- ssh_util.setup_user_keys(keys, name, options=None)
-
+ ssh_util.setup_user_keys(set(keys), name)
+ if 'ssh_redirect_user' in kwargs:
+ cloud_keys = kwargs.get('cloud_public_ssh_keys', [])
+ if not cloud_keys:
+ LOG.warning(
+ 'Unable to disable ssh logins for %s given'
+ ' ssh_redirect_user: %s. No cloud public-keys present.',
+ name, kwargs['ssh_redirect_user'])
+ else:
+ redirect_user = kwargs['ssh_redirect_user']
+ disable_option = ssh_util.DISABLE_USER_OPTS
+ disable_option = disable_option.replace('$USER', redirect_user)
+ disable_option = disable_option.replace('$DISABLE_USER', name)
+ ssh_util.setup_user_keys(
+ set(cloud_keys), name, options=disable_option)
return True
def lock_passwd(self, name):
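The ssh_redirect_user branch above composes the authorized_keys options by
substituting into ssh_util.DISABLE_USER_OPTS. A minimal sketch of that
substitution, assuming an options template with $USER and $DISABLE_USER
placeholders (the template text below is an illustrative stand-in, not the
exact cloud-init value):

    # Illustrative stand-in for ssh_util.DISABLE_USER_OPTS.
    DISABLE_USER_OPTS = (
        'no-port-forwarding,no-agent-forwarding,no-X11-forwarding,'
        'command="echo Please login as the user $USER rather than the'
        ' user $DISABLE_USER.;sleep 10"')

    def build_disable_option(redirect_user, disabled_user):
        # Substitute the redirect target and the disabled account name,
        # in the same order as Distro.create_user above.
        opts = DISABLE_USER_OPTS.replace('$USER', redirect_user)
        return opts.replace('$DISABLE_USER', disabled_user)

    print(build_disable_option('ubuntu', 'root'))
    # ...command="echo Please login as the user ubuntu rather than the
    # user root.;sleep 10"
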
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 33cc0bf1..d517fb88 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -109,11 +109,6 @@ class Distro(distros.Distro):
self.update_package_sources()
self.package_command('install', pkgs=pkglist)
- def _write_network(self, settings):
- # this is a legacy method, it will always write eni
- util.write_file(self.network_conf_fn["eni"], settings)
- return ['all']
-
def _write_network_config(self, netconfig):
_maybe_remove_legacy_eth0()
return self._supported_write_network_config(netconfig)
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
index 1ce1aa71..edfcd99d 100644
--- a/cloudinit/distros/net_util.py
+++ b/cloudinit/distros/net_util.py
@@ -67,6 +67,10 @@
# }
# }
+from cloudinit.net.network_state import (
+ net_prefix_to_ipv4_mask, mask_and_ipv4_to_bcast_addr)
+
+
def translate_network(settings):
# Get the standard cmd, args from the ubuntu format
entries = []
@@ -134,6 +138,21 @@ def translate_network(settings):
val = info[k].strip().lower()
if val:
iface_info[k] = val
+ # handle static ip configurations using
+ # ipaddress/prefix-length format
+ if 'address' in iface_info:
+ if 'netmask' not in iface_info:
+ # check if the address has a network prefix
+ addr, _, prefix = iface_info['address'].partition('/')
+ if prefix:
+ iface_info['netmask'] = (
+ net_prefix_to_ipv4_mask(prefix))
+ iface_info['address'] = addr
+ # if we set the netmask, we also can set the broadcast
+ iface_info['broadcast'] = (
+ mask_and_ipv4_to_bcast_addr(
+ iface_info['netmask'], addr))
+
# Name server info provided??
if 'dns-nameservers' in info:
iface_info['dns-nameservers'] = info['dns-nameservers'].split()
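The new static-address handling splits 'address/prefix' into separate
address, netmask and broadcast entries. Assuming the network_state helpers
behave like standard CIDR math, the same derivation can be sketched with
the stdlib ipaddress module:

    import ipaddress

    # Derive netmask and broadcast from an 'address/prefix' string,
    # mirroring the iface_info normalization above (stdlib sketch, not
    # the cloud-init helpers themselves).
    def split_address(cidr):
        addr, _, prefix = cidr.partition('/')
        if not prefix:
            return {'address': addr}
        net = ipaddress.ip_network('%s/%s' % (addr, prefix), strict=False)
        return {'address': addr,
                'netmask': str(net.netmask),
                'broadcast': str(net.broadcast_address)}

    print(split_address('192.168.2.2/24'))
    # {'address': '192.168.2.2', 'netmask': '255.255.255.0',
    #  'broadcast': '192.168.2.255'}
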
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 9f90e95e..1bfe0478 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -16,7 +16,6 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
-from cloudinit.distros import net_util
from cloudinit.distros import rhel_util as rhutil
from cloudinit.settings import PER_INSTANCE
@@ -28,13 +27,23 @@ class Distro(distros.Distro):
hostname_conf_fn = '/etc/HOSTNAME'
init_cmd = ['service']
locale_conf_fn = '/etc/sysconfig/language'
- network_conf_fn = '/etc/sysconfig/network'
+ network_conf_fn = '/etc/sysconfig/network/config'
network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
resolve_conf_fn = '/etc/resolv.conf'
route_conf_tpl = '/etc/sysconfig/network/ifroute-%s'
systemd_hostname_conf_fn = '/etc/hostname'
systemd_locale_conf_fn = '/etc/locale.conf'
tz_local_fn = '/etc/localtime'
+ renderer_configs = {
+ 'sysconfig': {
+ 'control': 'etc/sysconfig/network/config',
+ 'iface_templates': '%(base)s/network/ifcfg-%(name)s',
+ 'route_templates': {
+ 'ipv4': '%(base)s/network/ifroute-%(name)s',
+ 'ipv6': '%(base)s/network/ifroute-%(name)s',
+ }
+ }
+ }
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -162,51 +171,8 @@ class Distro(distros.Distro):
conf.set_hostname(hostname)
util.write_file(out_fn, str(conf), 0o644)
- def _write_network(self, settings):
- # Convert debian settings to ifcfg format
- entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
- # Make the intermediate format as the suse format...
- nameservers = []
- searchservers = []
- dev_names = entries.keys()
- for (dev, info) in entries.items():
- net_fn = self.network_script_tpl % (dev)
- route_fn = self.route_conf_tpl % (dev)
- mode = None
- if info.get('auto', None):
- mode = 'auto'
- else:
- mode = 'manual'
- bootproto = info.get('bootproto', None)
- gateway = info.get('gateway', None)
- net_cfg = {
- 'BOOTPROTO': bootproto,
- 'BROADCAST': info.get('broadcast'),
- 'GATEWAY': gateway,
- 'IPADDR': info.get('address'),
- 'LLADDR': info.get('hwaddress'),
- 'NETMASK': info.get('netmask'),
- 'STARTMODE': mode,
- 'USERCONTROL': 'no'
- }
- if dev != 'lo':
- net_cfg['ETHTOOL_OPTIONS'] = ''
- else:
- net_cfg['FIREWALL'] = 'no'
- rhutil.update_sysconfig_file(net_fn, net_cfg, True)
- if gateway and bootproto == 'static':
- default_route = 'default %s' % gateway
- util.write_file(route_fn, default_route, 0o644)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchservers.extend(info['dns-search'])
- if nameservers or searchservers:
- rhutil.update_resolve_conf_file(self.resolve_conf_fn,
- nameservers, searchservers)
- return dev_names
+ def _write_network_config(self, netconfig):
+ return self._supported_write_network_config(netconfig)
@property
def preferred_ntp_clients(self):
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 1fecb619..f55d96f7 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -13,7 +13,6 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
-from cloudinit.distros import net_util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@@ -39,6 +38,16 @@ class Distro(distros.Distro):
resolve_conf_fn = "/etc/resolv.conf"
tz_local_fn = "/etc/localtime"
usr_lib_exec = "/usr/libexec"
+ renderer_configs = {
+ 'sysconfig': {
+ 'control': 'etc/sysconfig/network',
+ 'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s',
+ 'route_templates': {
+ 'ipv4': '%(base)s/network-scripts/route-%(name)s',
+ 'ipv6': '%(base)s/network-scripts/route6-%(name)s'
+ }
+ }
+ }
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -55,54 +64,6 @@ class Distro(distros.Distro):
def _write_network_config(self, netconfig):
return self._supported_write_network_config(netconfig)
- def _write_network(self, settings):
- # TODO(harlowja) fix this... since this is the ubuntu format
- entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
- # Make the intermediate format as the rhel format...
- nameservers = []
- searchservers = []
- dev_names = entries.keys()
- use_ipv6 = False
- for (dev, info) in entries.items():
- net_fn = self.network_script_tpl % (dev)
- net_cfg = {
- 'DEVICE': dev,
- 'NETMASK': info.get('netmask'),
- 'IPADDR': info.get('address'),
- 'BOOTPROTO': info.get('bootproto'),
- 'GATEWAY': info.get('gateway'),
- 'BROADCAST': info.get('broadcast'),
- 'MACADDR': info.get('hwaddress'),
- 'ONBOOT': _make_sysconfig_bool(info.get('auto')),
- }
- if info.get('inet6'):
- use_ipv6 = True
- net_cfg.update({
- 'IPV6INIT': _make_sysconfig_bool(True),
- 'IPV6ADDR': info.get('ipv6').get('address'),
- 'IPV6_DEFAULTGW': info.get('ipv6').get('gateway'),
- })
- rhel_util.update_sysconfig_file(net_fn, net_cfg)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchservers.extend(info['dns-search'])
- if nameservers or searchservers:
- rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
- nameservers, searchservers)
- if dev_names:
- net_cfg = {
- 'NETWORKING': _make_sysconfig_bool(True),
- }
- # If IPv6 interface present, enable ipv6 networking
- if use_ipv6:
- net_cfg['NETWORKING_IPV6'] = _make_sysconfig_bool(True)
- net_cfg['IPV6_AUTOCONF'] = _make_sysconfig_bool(False)
- rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg)
- return dev_names
-
def apply_locale(self, locale, out_fn=None):
if self.uses_systemd():
if not out_fn:
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index c3576c04..0db75af9 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -41,7 +41,7 @@ PART_HANDLER_FN_TMPL = 'part-handler-%03d'
# For parts without filenames
PART_FN_TPL = 'part-%03d'
-# Different file beginnings to there content type
+# Different file beginnings to their content type
INCLUSION_TYPES_MAP = {
'#include': 'text/x-include-url',
'#include-once': 'text/x-include-once-url',
@@ -52,6 +52,7 @@ INCLUSION_TYPES_MAP = {
'#cloud-boothook': 'text/cloud-boothook',
'#cloud-config-archive': 'text/cloud-config-archive',
'#cloud-config-jsonp': 'text/cloud-config-jsonp',
+ '## template: jinja': 'text/jinja2',
}
# Sorted longest first
@@ -69,9 +70,13 @@ class Handler(object):
def __repr__(self):
return "%s: [%s]" % (type_utils.obj_name(self), self.list_types())
- @abc.abstractmethod
def list_types(self):
- raise NotImplementedError()
+ # Each subclass must define the supported content prefixes it handles.
+ if not hasattr(self, 'prefixes'):
+ raise NotImplementedError('Missing prefixes subclass attribute')
+ else:
+ return [INCLUSION_TYPES_MAP[prefix]
+ for prefix in getattr(self, 'prefixes')]
@abc.abstractmethod
def handle_part(self, *args, **kwargs):
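With list_types now implemented on the base Handler, a part handler only
declares the content prefixes it accepts and the base class maps them
through INCLUSION_TYPES_MAP. A minimal sketch of a conforming subclass
(the class name is made up; the prefix must already exist in the map):

    from cloudinit import handlers
    from cloudinit.settings import PER_ALWAYS

    class ExamplePartHandler(handlers.Handler):
        # list_types() in the base class derives the MIME types from
        # these prefixes via INCLUSION_TYPES_MAP.
        prefixes = ['#cloud-boothook']

        def __init__(self, paths, **_kwargs):
            handlers.Handler.__init__(self, PER_ALWAYS)

        def handle_part(self, data, ctype, filename, payload, frequency):
            pass  # process the payload here
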
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index 057b4dbc..dca50a49 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -17,10 +17,13 @@ from cloudinit import util
from cloudinit.settings import (PER_ALWAYS)
LOG = logging.getLogger(__name__)
-BOOTHOOK_PREFIX = "#cloud-boothook"
class BootHookPartHandler(handlers.Handler):
+
+ # The content prefixes this handler understands.
+ prefixes = ['#cloud-boothook']
+
def __init__(self, paths, datasource, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
self.boothook_dir = paths.get_ipath("boothooks")
@@ -28,16 +31,11 @@ class BootHookPartHandler(handlers.Handler):
if datasource:
self.instance_id = datasource.get_instance_id()
- def list_types(self):
- return [
- handlers.type_from_starts_with(BOOTHOOK_PREFIX),
- ]
-
def _write_part(self, payload, filename):
filename = util.clean_filename(filename)
filepath = os.path.join(self.boothook_dir, filename)
contents = util.strip_prefix_suffix(util.dos2unix(payload),
- prefix=BOOTHOOK_PREFIX)
+ prefix=self.prefixes[0])
util.write_file(filepath, contents.lstrip(), 0o700)
return filepath
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 178a5b9b..99bf0e61 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -42,14 +42,12 @@ DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()')
CLOUD_PREFIX = "#cloud-config"
JSONP_PREFIX = "#cloud-config-jsonp"
-# The file header -> content types this module will handle.
-CC_TYPES = {
- JSONP_PREFIX: handlers.type_from_starts_with(JSONP_PREFIX),
- CLOUD_PREFIX: handlers.type_from_starts_with(CLOUD_PREFIX),
-}
-
class CloudConfigPartHandler(handlers.Handler):
+
+ # The content prefixes this handler understands.
+ prefixes = [CLOUD_PREFIX, JSONP_PREFIX]
+
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.cloud_buf = None
@@ -58,9 +56,6 @@ class CloudConfigPartHandler(handlers.Handler):
self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
self.file_names = []
- def list_types(self):
- return list(CC_TYPES.values())
-
def _write_cloud_config(self):
if not self.cloud_fn:
return
@@ -138,7 +133,7 @@ class CloudConfigPartHandler(handlers.Handler):
# First time through, merge with an empty dict...
if self.cloud_buf is None or not self.file_names:
self.cloud_buf = {}
- if ctype == CC_TYPES[JSONP_PREFIX]:
+ if ctype == handlers.INCLUSION_TYPES_MAP[JSONP_PREFIX]:
self._merge_patch(payload)
else:
self._merge_part(payload, headers)
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
new file mode 100644
index 00000000..3fa4097e
--- /dev/null
+++ b/cloudinit/handlers/jinja_template.py
@@ -0,0 +1,137 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import re
+
+try:
+ from jinja2.exceptions import UndefinedError as JUndefinedError
+except ImportError:
+ # No jinja2 dependency
+ JUndefinedError = Exception
+
+from cloudinit import handlers
+from cloudinit import log as logging
+from cloudinit.sources import INSTANCE_JSON_FILE
+from cloudinit.templater import render_string, MISSING_JINJA_PREFIX
+from cloudinit.util import b64d, load_file, load_json, json_dumps
+
+from cloudinit.settings import PER_ALWAYS
+
+LOG = logging.getLogger(__name__)
+
+
+class JinjaTemplatePartHandler(handlers.Handler):
+
+ prefixes = ['## template: jinja']
+
+ def __init__(self, paths, **_kwargs):
+ handlers.Handler.__init__(self, PER_ALWAYS, version=3)
+ self.paths = paths
+ self.sub_handlers = {}
+ for handler in _kwargs.get('sub_handlers', []):
+ for ctype in handler.list_types():
+ self.sub_handlers[ctype] = handler
+
+ def handle_part(self, data, ctype, filename, payload, frequency, headers):
+ if ctype in handlers.CONTENT_SIGNALS:
+ return
+ jinja_json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ rendered_payload = render_jinja_payload_from_file(
+ payload, filename, jinja_json_file)
+ if not rendered_payload:
+ return
+ subtype = handlers.type_from_starts_with(rendered_payload)
+ sub_handler = self.sub_handlers.get(subtype)
+ if not sub_handler:
+ LOG.warning(
+ 'Ignoring jinja template for %s. Could not find supported'
+ ' sub-handler for type %s', filename, subtype)
+ return
+ if sub_handler.handler_version == 3:
+ sub_handler.handle_part(
+ data, ctype, filename, rendered_payload, frequency, headers)
+ elif sub_handler.handler_version == 2:
+ sub_handler.handle_part(
+ data, ctype, filename, rendered_payload, frequency)
+
+
+def render_jinja_payload_from_file(
+ payload, payload_fn, instance_data_file, debug=False):
+ """Render a jinja template payload sourcing variables from jinja_vars_path.
+
+ @param payload: String of jinja template content. Should begin with
+ ## template: jinja\n.
+ @param payload_fn: String representing the filename from which the payload
+        was read, used in error reporting. Generally in part-handling this is
+ 'part-##'.
+ @param instance_data_file: A path to a json file containing variables that
+ will be used as jinja template variables.
+
+ @return: A string of jinja-rendered content with the jinja header removed.
+ Returns None on error.
+ """
+ instance_data = {}
+ rendered_payload = None
+ if not os.path.exists(instance_data_file):
+ raise RuntimeError(
+ 'Cannot render jinja template vars. Instance data not yet'
+ ' present at %s' % instance_data_file)
+ instance_data = load_json(load_file(instance_data_file))
+ rendered_payload = render_jinja_payload(
+ payload, payload_fn, instance_data, debug)
+ if not rendered_payload:
+ return None
+ return rendered_payload
+
+
+def render_jinja_payload(payload, payload_fn, instance_data, debug=False):
+ instance_jinja_vars = convert_jinja_instance_data(
+ instance_data,
+ decode_paths=instance_data.get('base64-encoded-keys', []))
+ if debug:
+ LOG.debug('Converted jinja variables\n%s',
+ json_dumps(instance_jinja_vars))
+ try:
+ rendered_payload = render_string(payload, instance_jinja_vars)
+ except (TypeError, JUndefinedError) as e:
+ LOG.warning(
+ 'Ignoring jinja template for %s: %s', payload_fn, str(e))
+ return None
+ warnings = [
+ "'%s'" % var.replace(MISSING_JINJA_PREFIX, '')
+ for var in re.findall(
+ r'%s[^\s]+' % MISSING_JINJA_PREFIX, rendered_payload)]
+ if warnings:
+ LOG.warning(
+ "Could not render jinja template variables in file '%s': %s",
+ payload_fn, ', '.join(warnings))
+ return rendered_payload
+
+
+def convert_jinja_instance_data(data, prefix='', sep='/', decode_paths=()):
+ """Process instance-data.json dict for use in jinja templates.
+
+ Replace hyphens with underscores for jinja templates and decode any
+ base64_encoded_keys.
+ """
+ result = {}
+ decode_paths = [path.replace('-', '_') for path in decode_paths]
+ for key, value in sorted(data.items()):
+ if '-' in key:
+ # Standardize keys for use in #cloud-config/shell templates
+ key = key.replace('-', '_')
+ key_path = '{0}{1}{2}'.format(prefix, sep, key) if prefix else key
+ if key_path in decode_paths:
+ value = b64d(value)
+ if isinstance(value, dict):
+ result[key] = convert_jinja_instance_data(
+ value, key_path, sep=sep, decode_paths=decode_paths)
+ if re.match(r'v\d+', key):
+ # Copy values to top-level aliases
+ for subkey, subvalue in result[key].items():
+ result[subkey] = subvalue
+ else:
+ result[key] = value
+ return result
+
+# vi: ts=4 expandtab
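Taken together, the functions above let a user-data part that starts with
'## template: jinja' be rendered against instance data before the real
handler runs; convert_jinja_instance_data also rewrites hyphens to
underscores and copies v1 keys to top-level aliases, so both
{{ v1.cloud_name }} and {{ cloud_name }} resolve. A usage sketch, assuming
the instance-data path implied by self.paths.run_dir and
INSTANCE_JSON_FILE:

    from cloudinit.handlers.jinja_template import (
        render_jinja_payload_from_file)

    payload = (
        '## template: jinja\n'
        '#!/bin/sh\n'
        'echo "Running on {{ v1.cloud_name }}"\n')
    # Returns the rendered script with the jinja header removed, or None
    # if rendering failed; raises RuntimeError if instance data is absent.
    rendered = render_jinja_payload_from_file(
        payload, 'part-001', '/run/cloud-init/instance-data.json',
        debug=True)
    print(rendered)
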
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index e4945a23..214714bc 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -17,21 +17,18 @@ from cloudinit import util
from cloudinit.settings import (PER_ALWAYS)
LOG = logging.getLogger(__name__)
-SHELL_PREFIX = "#!"
class ShellScriptPartHandler(handlers.Handler):
+
+ prefixes = ['#!']
+
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
self.script_dir = paths.get_ipath_cur('scripts')
if 'script_path' in _kwargs:
self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
- def list_types(self):
- return [
- handlers.type_from_starts_with(SHELL_PREFIX),
- ]
-
def handle_part(self, data, ctype, filename, payload, frequency):
if ctype in handlers.CONTENT_SIGNALS:
# TODO(harlowja): maybe delete existing things here
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index dc338769..83fb0724 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -18,19 +18,16 @@ from cloudinit import util
from cloudinit.settings import (PER_INSTANCE)
LOG = logging.getLogger(__name__)
-UPSTART_PREFIX = "#upstart-job"
class UpstartJobPartHandler(handlers.Handler):
+
+ prefixes = ['#upstart-job']
+
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_INSTANCE)
self.upstart_dir = paths.upstart_conf_d
- def list_types(self):
- return [
- handlers.type_from_starts_with(UPSTART_PREFIX),
- ]
-
def handle_part(self, data, ctype, filename, payload, frequency):
if ctype in handlers.CONTENT_SIGNALS:
return
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 1979cd96..dcd2645e 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -239,6 +239,10 @@ class ConfigMerger(object):
if cc_fn and os.path.isfile(cc_fn):
try:
i_cfgs.append(util.read_conf(cc_fn))
+ except PermissionError:
+ LOG.debug(
+ 'Skipped loading cloud-config from %s due to'
+ ' non-root.', cc_fn)
except Exception:
util.logexc(LOG, 'Failed loading of cloud-config from %s',
cc_fn)
@@ -449,4 +453,8 @@ class DefaultingConfigParser(RawConfigParser):
contents = '\n'.join([header, contents, ''])
return contents
+
+def identity(object):
+ return object
+
# vi: ts=4 expandtab
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 1d75c9ff..5ae312ba 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -38,10 +38,18 @@ DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'
logging.Formatter.converter = time.gmtime
-def setupBasicLogging(level=DEBUG):
+def setupBasicLogging(level=DEBUG, formatter=None):
+ if not formatter:
+ formatter = logging.Formatter(DEF_CON_FORMAT)
root = logging.getLogger()
+ for handler in root.handlers:
+ if hasattr(handler, 'stream') and hasattr(handler.stream, 'name'):
+ if handler.stream.name == '<stderr>':
+ handler.setLevel(level)
+ return
+ # Didn't have an existing stderr handler; create a new handler
console = logging.StreamHandler(sys.stderr)
- console.setFormatter(logging.Formatter(DEF_CON_FORMAT))
+ console.setFormatter(formatter)
console.setLevel(level)
root.addHandler(console)
root.setLevel(level)
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 3ffde52c..f83d3681 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -569,6 +569,20 @@ def get_interface_mac(ifname):
return read_sys_net_safe(ifname, path)
+def get_ib_interface_hwaddr(ifname, ethernet_format):
+ """Returns the string value of an Infiniband interface's hardware
+ address. If ethernet_format is True, an Ethernet MAC-style 6 byte
+ representation of the address will be returned.
+ """
+ # Type 32 is Infiniband.
+ if read_sys_net_safe(ifname, 'type') == '32':
+ mac = get_interface_mac(ifname)
+ if mac and ethernet_format:
+ # Use bytes 13-15 and 18-20 of the hardware address.
+ mac = mac[36:-14] + mac[51:]
+ return mac
+
+
def get_interfaces_by_mac():
"""Build a dictionary of tuples {mac: name}.
@@ -580,6 +594,15 @@ def get_interfaces_by_mac():
"duplicate mac found! both '%s' and '%s' have mac '%s'" %
(name, ret[mac], mac))
ret[mac] = name
+ # Try to get an Infiniband hardware address (in 6 byte Ethernet format)
+ # for the interface.
+ ib_mac = get_ib_interface_hwaddr(name, True)
+ if ib_mac:
+ if ib_mac in ret:
+ raise RuntimeError(
+ "duplicate mac found! both '%s' and '%s' have mac '%s'" %
+ (name, ret[ib_mac], ib_mac))
+ ret[ib_mac] = name
return ret
@@ -607,6 +630,21 @@ def get_interfaces():
return ret
+def get_ib_hwaddrs_by_interface():
+ """Build a dictionary mapping Infiniband interface names to their hardware
+ address."""
+ ret = {}
+ for name, _, _, _ in get_interfaces():
+ ib_mac = get_ib_interface_hwaddr(name, False)
+ if ib_mac:
+ if ib_mac in ret:
+ raise RuntimeError(
+ "duplicate mac found! both '%s' and '%s' have mac '%s'" %
+ (name, ret[ib_mac], ib_mac))
+ ret[name] = ib_mac
+ return ret
+
+
class EphemeralIPv4Network(object):
"""Context manager which sets up temporary static network configuration.
@@ -698,6 +736,13 @@ class EphemeralIPv4Network(object):
self.interface, out.strip())
return
util.subp(
+ ['ip', '-4', 'route', 'add', self.router, 'dev', self.interface,
+ 'src', self.ip], capture=True)
+ self.cleanup_cmds.insert(
+ 0,
+ ['ip', '-4', 'route', 'del', self.router, 'dev', self.interface,
+ 'src', self.ip])
+ util.subp(
['ip', '-4', 'route', 'add', 'default', 'via', self.router,
'dev', self.interface], capture=True)
self.cleanup_cmds.insert(
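get_ib_interface_hwaddr above reduces a 20-byte Infiniband hardware address
to a 6-byte Ethernet-style MAC by keeping bytes 13-15 and 18-20
(1-indexed). The slice arithmetic can be checked against a sample address
(the value below is made up):

    # A 20-byte IB address renders as 59 characters in colon-hex form.
    ib_mac = ('80:00:00:48:fe:80:00:00:00:00:00:00:'
              '00:11:22:33:44:55:66:77')
    assert len(ib_mac) == 59
    # Chars 36:45 hold bytes 13-15 plus a trailing colon; chars 51:
    # hold bytes 18-20.
    eth_mac = ib_mac[36:-14] + ib_mac[51:]
    assert eth_mac == '00:11:22:55:66:77'
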
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index bd20a361..c6f631a9 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -247,8 +247,15 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
ifaces[currif]['bridge']['ports'] = []
for iface in split[1:]:
ifaces[currif]['bridge']['ports'].append(iface)
- elif option == "bridge_hw" and split[1].lower() == "mac":
- ifaces[currif]['bridge']['mac'] = split[2]
+ elif option == "bridge_hw":
+            # The docs are confusing; some may write a literal 'MAC':
+            #    bridge_hw MAC <address>
+            # but the correct form is:
+            #    bridge_hw <address>
+ if split[1].lower() == "mac":
+ ifaces[currif]['bridge']['mac'] = split[2]
+ else:
+ ifaces[currif]['bridge']['mac'] = split[1]
elif option == "bridge_pathcost":
if 'pathcost' not in ifaces[currif]['bridge']:
ifaces[currif]['bridge']['pathcost'] = {}
@@ -473,7 +480,7 @@ class Renderer(renderer.Renderer):
return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n"
- def render_network_state(self, network_state, target=None):
+ def render_network_state(self, network_state, templates=None, target=None):
fpeni = util.target_path(target, self.eni_path)
util.ensure_dir(os.path.dirname(fpeni))
header = self.eni_header if self.eni_header else ""
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 40143634..bc1087f9 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -189,7 +189,7 @@ class Renderer(renderer.Renderer):
self._postcmds = config.get('postcmds', False)
self.clean_default = config.get('clean_default', True)
- def render_network_state(self, network_state, target):
+ def render_network_state(self, network_state, templates=None, target=None):
# check network state for version
# if v2, then extract network_state.config
# else render_v2_from_state
@@ -291,6 +291,8 @@ class Renderer(renderer.Renderer):
if len(bond_config) > 0:
bond.update({'parameters': bond_config})
+ if ifcfg.get('mac_address'):
+ bond['macaddress'] = ifcfg.get('mac_address').lower()
slave_interfaces = ifcfg.get('bond-slaves')
if slave_interfaces == 'none':
_extract_bond_slaves_by_name(interfaces, bond, ifname)
@@ -327,6 +329,8 @@ class Renderer(renderer.Renderer):
if len(br_config) > 0:
bridge.update({'parameters': br_config})
+ if ifcfg.get('mac_address'):
+ bridge['macaddress'] = ifcfg.get('mac_address').lower()
_extract_addresses(ifcfg, bridge, ifname)
bridges.update({ifname: bridge})
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 72c803eb..f76e508a 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -483,6 +483,10 @@ class NetworkStateInterpreter(object):
interfaces.update({iface['name']: iface})
+ @ensure_command_keys(['name'])
+ def handle_infiniband(self, command):
+ self.handle_physical(command)
+
@ensure_command_keys(['address'])
def handle_nameserver(self, command):
dns = self._network_state.get('dns')
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 57652e27..5f32e90f 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -45,11 +45,14 @@ class Renderer(object):
return content.getvalue()
@abc.abstractmethod
- def render_network_state(self, network_state, target=None):
+ def render_network_state(self, network_state, templates=None,
+ target=None):
"""Render network state."""
- def render_network_config(self, network_config, target=None):
+ def render_network_config(self, network_config, templates=None,
+ target=None):
return self.render_network_state(
- network_state=parse_net_config_data(network_config), target=target)
+ network_state=parse_net_config_data(network_config),
+ templates=templates, target=target)
# vi: ts=4 expandtab
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 3d719238..9c16d3a7 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -91,19 +91,20 @@ class ConfigMap(object):
class Route(ConfigMap):
"""Represents a route configuration."""
- route_fn_tpl_ipv4 = '%(base)s/network-scripts/route-%(name)s'
- route_fn_tpl_ipv6 = '%(base)s/network-scripts/route6-%(name)s'
-
- def __init__(self, route_name, base_sysconf_dir):
+ def __init__(self, route_name, base_sysconf_dir,
+ ipv4_tpl, ipv6_tpl):
super(Route, self).__init__()
self.last_idx = 1
self.has_set_default_ipv4 = False
self.has_set_default_ipv6 = False
self._route_name = route_name
self._base_sysconf_dir = base_sysconf_dir
+ self.route_fn_tpl_ipv4 = ipv4_tpl
+ self.route_fn_tpl_ipv6 = ipv6_tpl
def copy(self):
- r = Route(self._route_name, self._base_sysconf_dir)
+ r = Route(self._route_name, self._base_sysconf_dir,
+ self.route_fn_tpl_ipv4, self.route_fn_tpl_ipv6)
r._conf = self._conf.copy()
r.last_idx = self.last_idx
r.has_set_default_ipv4 = self.has_set_default_ipv4
@@ -169,18 +170,23 @@ class Route(ConfigMap):
class NetInterface(ConfigMap):
"""Represents a sysconfig/networking-script (and its config + children)."""
- iface_fn_tpl = '%(base)s/network-scripts/ifcfg-%(name)s'
-
iface_types = {
'ethernet': 'Ethernet',
'bond': 'Bond',
'bridge': 'Bridge',
+ 'infiniband': 'InfiniBand',
}
- def __init__(self, iface_name, base_sysconf_dir, kind='ethernet'):
+ def __init__(self, iface_name, base_sysconf_dir, templates,
+ kind='ethernet'):
super(NetInterface, self).__init__()
self.children = []
- self.routes = Route(iface_name, base_sysconf_dir)
+ self.templates = templates
+ route_tpl = self.templates.get('route_templates')
+ self.routes = Route(iface_name, base_sysconf_dir,
+ ipv4_tpl=route_tpl.get('ipv4'),
+ ipv6_tpl=route_tpl.get('ipv6'))
+ self.iface_fn_tpl = self.templates.get('iface_templates')
self.kind = kind
self._iface_name = iface_name
@@ -213,7 +219,8 @@ class NetInterface(ConfigMap):
'name': self.name})
def copy(self, copy_children=False, copy_routes=False):
- c = NetInterface(self.name, self._base_sysconf_dir, kind=self._kind)
+ c = NetInterface(self.name, self._base_sysconf_dir,
+ self.templates, kind=self._kind)
c._conf = self._conf.copy()
if copy_children:
c.children = list(self.children)
@@ -251,6 +258,8 @@ class Renderer(renderer.Renderer):
('bridge_bridgeprio', 'PRIO'),
])
+ templates = {}
+
def __init__(self, config=None):
if not config:
config = {}
@@ -261,6 +270,11 @@ class Renderer(renderer.Renderer):
nm_conf_path = 'etc/NetworkManager/conf.d/99-cloud-init.conf'
self.networkmanager_conf_path = config.get('networkmanager_conf_path',
nm_conf_path)
+ self.templates = {
+ 'control': config.get('control'),
+ 'iface_templates': config.get('iface_templates'),
+ 'route_templates': config.get('route_templates'),
+ }
@classmethod
def _render_iface_shared(cls, iface, iface_cfg):
@@ -512,7 +526,7 @@ class Renderer(renderer.Renderer):
return content_str
@staticmethod
- def _render_networkmanager_conf(network_state):
+ def _render_networkmanager_conf(network_state, templates=None):
content = networkmanager_conf.NetworkManagerConf("")
# If DNS server information is provided, configure
@@ -556,20 +570,36 @@ class Renderer(renderer.Renderer):
cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
@classmethod
- def _render_sysconfig(cls, base_sysconf_dir, network_state):
+ def _render_ib_interfaces(cls, network_state, iface_contents):
+ ib_filter = renderer.filter_by_type('infiniband')
+ for iface in network_state.iter_interfaces(ib_filter):
+ iface_name = iface['name']
+ iface_cfg = iface_contents[iface_name]
+ iface_cfg.kind = 'infiniband'
+ iface_subnets = iface.get("subnets", [])
+ route_cfg = iface_cfg.routes
+ cls._render_subnets(iface_cfg, iface_subnets)
+ cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+
+ @classmethod
+ def _render_sysconfig(cls, base_sysconf_dir, network_state,
+ templates=None):
'''Given state, return /etc/sysconfig files + contents'''
+ if not templates:
+ templates = cls.templates
iface_contents = {}
for iface in network_state.iter_interfaces():
if iface['type'] == "loopback":
continue
iface_name = iface['name']
- iface_cfg = NetInterface(iface_name, base_sysconf_dir)
+ iface_cfg = NetInterface(iface_name, base_sysconf_dir, templates)
cls._render_iface_shared(iface, iface_cfg)
iface_contents[iface_name] = iface_cfg
cls._render_physical_interfaces(network_state, iface_contents)
cls._render_bond_interfaces(network_state, iface_contents)
cls._render_vlan_interfaces(network_state, iface_contents)
cls._render_bridge_interfaces(network_state, iface_contents)
+ cls._render_ib_interfaces(network_state, iface_contents)
contents = {}
for iface_name, iface_cfg in iface_contents.items():
if iface_cfg or iface_cfg.children:
@@ -578,17 +608,21 @@ class Renderer(renderer.Renderer):
if iface_cfg:
contents[iface_cfg.path] = iface_cfg.to_string()
if iface_cfg.routes:
- contents[iface_cfg.routes.path_ipv4] = \
- iface_cfg.routes.to_string("ipv4")
- contents[iface_cfg.routes.path_ipv6] = \
- iface_cfg.routes.to_string("ipv6")
+ for cpath, proto in zip([iface_cfg.routes.path_ipv4,
+ iface_cfg.routes.path_ipv6],
+ ["ipv4", "ipv6"]):
+ if cpath not in contents:
+ contents[cpath] = iface_cfg.routes.to_string(proto)
return contents
- def render_network_state(self, network_state, target=None):
+ def render_network_state(self, network_state, templates=None, target=None):
+ if not templates:
+ templates = self.templates
file_mode = 0o644
base_sysconf_dir = util.target_path(target, self.sysconf_dir)
for path, data in self._render_sysconfig(base_sysconf_dir,
- network_state).items():
+ network_state,
+ templates=templates).items():
util.write_file(path, data, file_mode)
if self.dns_path:
dns_path = util.target_path(target, self.dns_path)
@@ -598,7 +632,8 @@ class Renderer(renderer.Renderer):
if self.networkmanager_conf_path:
nm_conf_path = util.target_path(target,
self.networkmanager_conf_path)
- nm_conf_content = self._render_networkmanager_conf(network_state)
+ nm_conf_content = self._render_networkmanager_conf(network_state,
+ templates)
if nm_conf_content:
util.write_file(nm_conf_path, nm_conf_content, file_mode)
if self.netrules_path:
@@ -606,13 +641,16 @@ class Renderer(renderer.Renderer):
netrules_path = util.target_path(target, self.netrules_path)
util.write_file(netrules_path, netrules_content, file_mode)
- # always write /etc/sysconfig/network configuration
- sysconfig_path = util.target_path(target, "etc/sysconfig/network")
- netcfg = [_make_header(), 'NETWORKING=yes']
- if network_state.use_ipv6:
- netcfg.append('NETWORKING_IPV6=yes')
- netcfg.append('IPV6_AUTOCONF=no')
- util.write_file(sysconfig_path, "\n".join(netcfg) + "\n", file_mode)
+ sysconfig_path = util.target_path(target, templates.get('control'))
+        # Distros that configure /etc/sysconfig/network as a file, e.g. CentOS
+ if sysconfig_path.endswith('network'):
+ util.ensure_dir(os.path.dirname(sysconfig_path))
+ netcfg = [_make_header(), 'NETWORKING=yes']
+ if network_state.use_ipv6:
+ netcfg.append('NETWORKING_IPV6=yes')
+ netcfg.append('IPV6_AUTOCONF=no')
+ util.write_file(sysconfig_path,
+ "\n".join(netcfg) + "\n", file_mode)
def available(target=None):
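The sysconfig renderer now takes its file locations from a templates dict
instead of hard-coded network-scripts paths, which is what lets the
openSUSE and RHEL distro classes above point it at different trees. A
sketch of constructing the renderer with RHEL-style paths, mirroring the
renderer_configs shown earlier in this diff:

    from cloudinit.net.sysconfig import Renderer

    # Template values mirror the rhel.py renderer_configs hunk above.
    renderer = Renderer(config={
        'control': 'etc/sysconfig/network',
        'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s',
        'route_templates': {
            'ipv4': '%(base)s/network-scripts/route-%(name)s',
            'ipv6': '%(base)s/network-scripts/route6-%(name)s',
        },
    })
    # render_network_config(netcfg) then writes ifcfg-* and route-* files
    # under the configured paths.
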
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 5c017d15..58e0a591 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -199,6 +199,8 @@ class TestGenerateFallbackConfig(CiTestCase):
self.sysdir = self.tmp_dir() + '/'
self.m_sys_path.return_value = self.sysdir
self.addCleanup(sys_mock.stop)
+ self.add_patch('cloudinit.net.util.is_container', 'm_is_container',
+ return_value=False)
self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
def test_generate_fallback_finds_connected_eth_with_mac(self):
@@ -513,12 +515,17 @@ class TestEphemeralIPV4Network(CiTestCase):
capture=True),
mock.call(
['ip', 'route', 'show', '0.0.0.0/0'], capture=True),
+ mock.call(['ip', '-4', 'route', 'add', '192.168.2.1',
+ 'dev', 'eth0', 'src', '192.168.2.2'], capture=True),
mock.call(
['ip', '-4', 'route', 'add', 'default', 'via',
'192.168.2.1', 'dev', 'eth0'], capture=True)]
- expected_teardown_calls = [mock.call(
- ['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'],
- capture=True)]
+ expected_teardown_calls = [
+ mock.call(['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'],
+ capture=True),
+ mock.call(['ip', '-4', 'route', 'del', '192.168.2.1',
+ 'dev', 'eth0', 'src', '192.168.2.2'], capture=True),
+ ]
with net.EphemeralIPv4Network(**params):
self.assertEqual(expected_setup_calls, m_subp.call_args_list)
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index 1ed2b487..ed5c7038 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -18,7 +18,7 @@ DEFAULT_CONFIG = {
def update_configuration(config):
- """Update the instanciated_handler_registry.
+ """Update the instantiated_handler_registry.
:param config:
The dictionary containing changes to apply. If a key is given
@@ -37,6 +37,12 @@ def update_configuration(config):
instantiated_handler_registry.register_item(handler_name, instance)
+def flush_events():
+ for _, handler in instantiated_handler_registry.registered_items.items():
+ if hasattr(handler, 'flush'):
+ handler.flush()
+
+
instantiated_handler_registry = DictRegistry()
update_configuration(DEFAULT_CONFIG)
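flush_events above duck-types: any handler in the registry that exposes
flush() gets drained. A sketch of a handler opting in (the class is
illustrative, not part of cloud-init):

    from cloudinit.reporting import handlers

    class BufferingHandler(handlers.ReportingHandler):
        """Illustrative handler that buffers events until flushed."""

        def __init__(self):
            super(BufferingHandler, self).__init__()
            self.buffer = []

        def publish_event(self, event):
            self.buffer.append(event)

        def flush(self):
            # Invoked by cloudinit.reporting.flush_events().
            self.buffer = []
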
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 4066076c..6d23558e 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -1,17 +1,32 @@
# This file is part of cloud-init. See LICENSE file for license information.
import abc
+import fcntl
import json
import six
+import os
+import re
+import struct
+import threading
+import time
from cloudinit import log as logging
from cloudinit.registry import DictRegistry
from cloudinit import (url_helper, util)
+from datetime import datetime
+if six.PY2:
+ from multiprocessing.queues import JoinableQueue as JQueue
+else:
+ from queue import Queue as JQueue
LOG = logging.getLogger(__name__)
+class ReportException(Exception):
+ pass
+
+
@six.add_metaclass(abc.ABCMeta)
class ReportingHandler(object):
"""Base class for report handlers.
@@ -24,6 +39,10 @@ class ReportingHandler(object):
def publish_event(self, event):
"""Publish an event."""
+ def flush(self):
+ """Ensure ReportingHandler has published all events"""
+ pass
+
class LogHandler(ReportingHandler):
"""Publishes events to the cloud-init log at the ``DEBUG`` log level."""
@@ -85,9 +104,236 @@ class WebHookHandler(ReportingHandler):
LOG.warning("failed posting event: %s", event.as_string())
+class HyperVKvpReportingHandler(ReportingHandler):
+ """
+ Reports events to a Hyper-V host using Key-Value-Pair exchange protocol
+ and can be used to obtain high level diagnostic information from the host.
+
+ To use this facility, the KVP user-space daemon (hv_kvp_daemon) has to be
+ running. It reads the kvp_file when the host requests the guest to
+    enumerate the KVPs.
+
+ This reporter collates all events for a module (origin|name) in a single
+ json string in the dictionary.
+
+ For more information, see
+ https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
+ """
+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
+ HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
+ EVENT_PREFIX = 'CLOUD_INIT'
+ MSG_KEY = 'msg'
+ RESULT_KEY = 'result'
+ DESC_IDX_KEY = 'msg_i'
+ JSON_SEPARATORS = (',', ':')
+ KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
+
+ def __init__(self,
+ kvp_file_path=KVP_POOL_FILE_GUEST,
+ event_types=None):
+ super(HyperVKvpReportingHandler, self).__init__()
+ self._kvp_file_path = kvp_file_path
+ self._event_types = event_types
+ self.q = JQueue()
+ self.kvp_file = None
+ self.incarnation_no = self._get_incarnation_no()
+ self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
+ self.incarnation_no)
+ self._current_offset = 0
+ self.publish_thread = threading.Thread(
+ target=self._publish_event_routine)
+ self.publish_thread.daemon = True
+ self.publish_thread.start()
+
+ def _get_incarnation_no(self):
+ """
+        Use the boot time as the incarnation number.
+        The incarnation number is used to distinguish old data
+        stored in KVP from new data.
+ """
+ uptime_str = util.uptime()
+ try:
+ return int(time.time() - float(uptime_str))
+ except ValueError:
+ LOG.warning("uptime '%s' not in correct format.", uptime_str)
+ return 0
+
+ def _iterate_kvps(self, offset):
+ """iterate the kvp file from the current offset."""
+ try:
+ with open(self._kvp_file_path, 'rb+') as f:
+ self.kvp_file = f
+ fcntl.flock(f, fcntl.LOCK_EX)
+ f.seek(offset)
+ record_data = f.read(self.HV_KVP_RECORD_SIZE)
+ while len(record_data) == self.HV_KVP_RECORD_SIZE:
+ self._current_offset += self.HV_KVP_RECORD_SIZE
+ kvp_item = self._decode_kvp_item(record_data)
+ yield kvp_item
+ record_data = f.read(self.HV_KVP_RECORD_SIZE)
+ fcntl.flock(f, fcntl.LOCK_UN)
+ finally:
+ self.kvp_file = None
+
+ def _event_key(self, event):
+ """
+        The event key format is:
+ CLOUD_INIT|<incarnation number>|<event_type>|<event_name>
+ """
+ return u"{0}|{1}|{2}".format(self.event_key_prefix,
+ event.event_type, event.name)
+
+ def _encode_kvp_item(self, key, value):
+ data = (struct.pack("%ds%ds" % (
+ self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
+ key.encode('utf-8'), value.encode('utf-8')))
+ return data
+
+ def _decode_kvp_item(self, record_data):
+ record_data_len = len(record_data)
+ if record_data_len != self.HV_KVP_RECORD_SIZE:
+ raise ReportException(
+ "record_data len not correct {0} {1}."
+ .format(record_data_len, self.HV_KVP_RECORD_SIZE))
+ k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
+ .strip('\x00'))
+ v = (
+ record_data[
+ self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
+ ].decode('utf-8').strip('\x00'))
+
+ return {'key': k, 'value': v}
+
+ def _update_kvp_item(self, record_data):
+ if self.kvp_file is None:
+ raise ReportException(
+ "kvp file '{0}' not opened."
+ .format(self._kvp_file_path))
+ self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1)
+ self.kvp_file.write(record_data)
+
+ def _append_kvp_item(self, record_data):
+ with open(self._kvp_file_path, 'rb+') as f:
+ fcntl.flock(f, fcntl.LOCK_EX)
+ # seek to end of the file
+ f.seek(0, 2)
+ f.write(record_data)
+ f.flush()
+ fcntl.flock(f, fcntl.LOCK_UN)
+ self._current_offset = f.tell()
+
+ def _break_down(self, key, meta_data, description):
+ del meta_data[self.MSG_KEY]
+ des_in_json = json.dumps(description)
+ des_in_json = des_in_json[1:(len(des_in_json) - 1)]
+ i = 0
+ result_array = []
+ message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
+ while True:
+ meta_data[self.DESC_IDX_KEY] = i
+ meta_data[self.MSG_KEY] = ''
+ data_without_desc = json.dumps(meta_data,
+ separators=self.JSON_SEPARATORS)
+ room_for_desc = (
+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
+ len(data_without_desc) - 8)
+ value = data_without_desc.replace(
+ message_place_holder,
+ '"{key}":"{desc}"'.format(
+ key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
+ result_array.append(self._encode_kvp_item(key, value))
+ i += 1
+ des_in_json = des_in_json[room_for_desc:]
+ if len(des_in_json) == 0:
+ break
+ return result_array
+
+ def _encode_event(self, event):
+ """
+        Encode the event into KVP data bytes.
+        If the event content exceeds the maximum KVP value length,
+        it is split into multiple slices.
+ """
+ key = self._event_key(event)
+ meta_data = {
+ "name": event.name,
+ "type": event.event_type,
+ "ts": (datetime.utcfromtimestamp(event.timestamp)
+ .isoformat() + 'Z'),
+ }
+ if hasattr(event, self.RESULT_KEY):
+ meta_data[self.RESULT_KEY] = event.result
+ meta_data[self.MSG_KEY] = event.description
+ value = json.dumps(meta_data, separators=self.JSON_SEPARATORS)
+        # If it exceeds the maximum KVP value length,
+        # break it down into slices.
+        # This should be a rare corner case.
+ if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
+ return self._break_down(key, meta_data, event.description)
+ else:
+ data = self._encode_kvp_item(key, value)
+ return [data]
+
+ def _publish_event_routine(self):
+ while True:
+ try:
+ event = self.q.get(block=True)
+ need_append = True
+ try:
+ if not os.path.exists(self._kvp_file_path):
+ LOG.warning(
+ "skip writing events %s to %s. file not present.",
+ event.as_string(),
+ self._kvp_file_path)
+ encoded_event = self._encode_event(event)
+ # for each encoded_event
+ for encoded_data in (encoded_event):
+ for kvp in self._iterate_kvps(self._current_offset):
+ match = (
+ re.match(
+ r"^{0}\|(\d+)\|.+"
+ .format(self.EVENT_PREFIX),
+ kvp['key']
+ ))
+ if match:
+ match_groups = match.groups(0)
+ if int(match_groups[0]) < self.incarnation_no:
+ need_append = False
+ self._update_kvp_item(encoded_data)
+ continue
+ if need_append:
+ self._append_kvp_item(encoded_data)
+ except IOError as e:
+ LOG.warning(
+ "failed posting event to kvp: %s e:%s",
+ event.as_string(), e)
+ finally:
+ self.q.task_done()
+
+            # When the main process exits, q.get() will throw EOFError,
+            # indicating we should exit this thread.
+ except EOFError:
+ return
+
+    # Since saving to the KVP pool can be time-consuming,
+    # especially if the pool already contains a chunk of data,
+    # defer publishing to another thread.
+ def publish_event(self, event):
+ if (not self._event_types or event.event_type in self._event_types):
+ self.q.put(event)
+
+ def flush(self):
+ LOG.debug('HyperVReportingHandler flushing remaining events')
+ self.q.join()
+
+
available_handlers = DictRegistry()
available_handlers.register_item('log', LogHandler)
available_handlers.register_item('print', PrintHandler)
available_handlers.register_item('webhook', WebHookHandler)
+available_handlers.register_item('hyperv', HyperVKvpReportingHandler)
# vi: ts=4 expandtab
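
The handler above packs every event into a fixed-size KVP record: a NUL-padded key field followed by a NUL-padded value field, which is why _break_down must split oversized descriptions across several records. A minimal sketch of that packing, assuming the conventional Hyper-V KVP exchange sizes of 512 bytes for the key and 2048 for the value (the constants the class references):

    import struct

    # Assumed Hyper-V KVP exchange sizes (HV_KVP_EXCHANGE_MAX_KEY_SIZE /
    # HV_KVP_EXCHANGE_MAX_VALUE_SIZE in the handler above).
    KEY_SIZE = 512
    VALUE_SIZE = 2048

    def encode_kvp_record(key, value):
        """Pack key/value into one fixed-size, NUL-padded KVP record."""
        return struct.pack('%ds%ds' % (KEY_SIZE, VALUE_SIZE),
                           key.encode('utf-8'), value.encode('utf-8'))

    record = encode_kvp_record('CLOUD_INIT|0|init|x', '{"msg": ""}')
    assert len(record) == KEY_SIZE + VALUE_SIZE  # always 2560 bytes
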
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index dde5749d..b1ebaade 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -38,12 +38,13 @@ CFG_BUILTIN = {
'Scaleway',
'Hetzner',
'IBMCloud',
+ 'Oracle',
# At the end to act as a 'catch' when none of the above work...
'None',
],
'def_log_file': '/var/log/cloud-init.log',
'log_cfgs': [],
- 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'],
+ 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel', 'root:root'],
'system_info': {
'paths': {
'cloud_dir': '/var/lib/cloud',
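
Appending 'root:root' gives distros that ship neither an adm nor a wheel group a final fallback owner for the log file; the list is consumed in order and the first user:group pair that exists on the system wins. A rough sketch of that selection logic, assuming stdlib pwd/grp lookups (the helper name here is hypothetical):

    import grp
    import pwd

    def pick_log_owner(candidates):
        """Return the first user:group pair present on this system."""
        for pair in candidates:
            user, _, group = pair.partition(':')
            try:
                pwd.getpwnam(user)
                grp.getgrnam(group)
            except KeyError:
                continue  # user or group missing; try the next candidate
            return user, group
        return None

    pick_log_owner(['syslog:adm', 'root:adm', 'root:wheel', 'root:root'])
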
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 24fd65ff..8cd312d0 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -181,27 +181,18 @@ class DataSourceAltCloud(sources.DataSource):
# modprobe floppy
try:
- cmd = CMD_PROBE_FLOPPY
- (cmd_out, _err) = util.subp(cmd)
- LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
+ modprobe_floppy()
except ProcessExecutionError as e:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
- return False
- except OSError as e:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
+ util.logexc(LOG, 'Failed modprobe: %s', e)
return False
floppy_dev = '/dev/fd0'
# udevadm settle for floppy device
try:
- (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5)
- LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
- except ProcessExecutionError as e:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
- return False
- except OSError as e:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
+ util.udevadm_settle(exists=floppy_dev, timeout=5)
+ except (ProcessExecutionError, OSError) as e:
+ util.logexc(LOG, 'Failed udevadm_settle: %s\n', e)
return False
try:
@@ -258,6 +249,11 @@ class DataSourceAltCloud(sources.DataSource):
return False
+def modprobe_floppy():
+ out, _err = util.subp(CMD_PROBE_FLOPPY)
+    LOG.debug('Command: %s\nOutput: %s', ' '.join(CMD_PROBE_FLOPPY), out)
+
+
# Used to match classes to dependencies
# Source DataSourceAltCloud does not really depend on networking.
# In the future 'dsmode' like behavior can be added to offer user
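
The refactor above consolidates the duplicated try/except blocks: modprobe_floppy wraps the subp call, and a single clause now catches both ProcessExecutionError and OSError from udevadm_settle. For orientation, a sketch of what a call like util.udevadm_settle(exists=floppy_dev, timeout=5) boils down to, assuming a udevadm binary on PATH:

    import subprocess

    def settle(exists='/dev/fd0', timeout=5):
        """Block until udev settles, or until the given node exists."""
        subprocess.check_call(
            ['udevadm', 'settle',
             '--exit-if-exists=%s' % exists,
             '--timeout=%s' % timeout])
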
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 7007d9ea..783445e1 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -8,6 +8,7 @@ import base64
import contextlib
import crypt
from functools import partial
+import json
import os
import os.path
import re
@@ -17,6 +18,7 @@ import xml.etree.ElementTree as ET
from cloudinit import log as logging
from cloudinit import net
+from cloudinit.event import EventType
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
from cloudinit.sources.helpers.azure import get_metadata_from_fabric
@@ -49,7 +51,17 @@ DEFAULT_FS = 'ext4'
AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
-IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
+AGENT_SEED_DIR = '/var/lib/waagent'
+IMDS_URL = "http://169.254.169.254/metadata/"
+
+# List of static scripts and network config artifacts created by
+# stock supported Ubuntu images.
+UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
+ '/etc/netplan/90-azure-hotplug.yaml',
+ '/usr/local/sbin/ephemeral_eth.sh',
+ '/etc/udev/rules.d/10-net-device-added.rules',
+ '/run/network/interfaces.ephemeral.d',
+]
def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
@@ -185,7 +197,7 @@ if util.is_FreeBSD():
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START_BUILTIN,
- 'data_dir': "/var/lib/waagent",
+ 'data_dir': AGENT_SEED_DIR,
'set_hostname': True,
'hostname_bounce': {
'interface': DEFAULT_PRIMARY_NIC,
@@ -252,6 +264,7 @@ class DataSourceAzure(sources.DataSource):
dsname = 'Azure'
_negotiated = False
+ _metadata_imds = sources.UNSET
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -263,6 +276,8 @@ class DataSourceAzure(sources.DataSource):
BUILTIN_DS_CONFIG])
self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
self._network_config = None
+        # Regenerate network config on new-instance boot and on every boot
+ self.update_events['network'].add(EventType.BOOT)
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -336,15 +351,17 @@ class DataSourceAzure(sources.DataSource):
metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
return metadata
- def _get_data(self):
+ def crawl_metadata(self):
+ """Walk all instance metadata sources returning a dict on success.
+
+ @return: A dictionary of any metadata content for this instance.
+ @raise: InvalidMetaDataException when the expected metadata service is
+ unavailable, broken or disabled.
+ """
+ crawled_data = {}
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
- asset_tag = util.read_dmi_data('chassis-asset-tag')
- if asset_tag != AZURE_CHASSIS_ASSET_TAG:
- LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
- return False
-
ddir = self.ds_cfg['data_dir']
candidates = [self.seed_dir]
@@ -373,41 +390,84 @@ class DataSourceAzure(sources.DataSource):
except NonAzureDataSource:
continue
except BrokenAzureDataSource as exc:
- raise exc
+ msg = 'BrokenAzureDataSource: %s' % exc
+ raise sources.InvalidMetaDataException(msg)
except util.MountFailedError:
LOG.warning("%s was not mountable", cdev)
continue
if reprovision or self._should_reprovision(ret):
ret = self._reprovision()
- (md, self.userdata_raw, cfg, files) = ret
+ imds_md = get_metadata_from_imds(
+ self.fallback_interface, retries=3)
+ (md, userdata_raw, cfg, files) = ret
self.seed = cdev
- self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
- self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
+ crawled_data.update({
+ 'cfg': cfg,
+ 'files': files,
+ 'metadata': util.mergemanydict(
+ [md, {'imds': imds_md}]),
+ 'userdata_raw': userdata_raw})
found = cdev
LOG.debug("found datasource in %s", cdev)
break
if not found:
- return False
+ raise sources.InvalidMetaDataException('No Azure metadata found')
if found == ddir:
LOG.debug("using files cached in %s", ddir)
seed = _get_random_seed()
if seed:
- self.metadata['random_seed'] = seed
+ crawled_data['metadata']['random_seed'] = seed
+ crawled_data['metadata']['instance-id'] = util.read_dmi_data(
+ 'system-uuid')
+ return crawled_data
+
+ def _is_platform_viable(self):
+ """Check platform environment to report if this datasource may run."""
+ return _is_platform_viable(self.seed_dir)
+
+ def clear_cached_attrs(self, attr_defaults=()):
+ """Reset any cached class attributes to defaults."""
+ super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)
+ self._metadata_imds = sources.UNSET
+
+ def _get_data(self):
+ """Crawl and process datasource metadata caching metadata as attrs.
+
+ @return: True on success, False on error, invalid or disabled
+ datasource.
+ """
+ if not self._is_platform_viable():
+ return False
+ try:
+ crawled_data = util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self.crawl_metadata)
+ except sources.InvalidMetaDataException as e:
+ LOG.warning('Could not crawl Azure metadata: %s', e)
+ return False
+ if self.distro and self.distro.name == 'ubuntu':
+ maybe_remove_ubuntu_network_config_scripts()
+
+ # Process crawled data and augment with various config defaults
+ self.cfg = util.mergemanydict(
+ [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG])
+ self._metadata_imds = crawled_data['metadata']['imds']
+ self.metadata = util.mergemanydict(
+ [crawled_data['metadata'], DEFAULT_METADATA])
+ self.userdata_raw = crawled_data['userdata_raw']
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
# walinux agent writes files world readable, but expects
# the directory to be protected.
- write_files(ddir, files, dirmode=0o700)
-
- self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
-
+ write_files(
+ self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
return True
def device_name_to_device(self, name):
@@ -436,7 +496,7 @@ class DataSourceAzure(sources.DataSource):
def _poll_imds(self):
"""Poll IMDS for the new provisioning data until we get a valid
response. Then return the returned JSON object."""
- url = IMDS_URL + "?api-version=2017-04-02"
+ url = IMDS_URL + "reprovisiondata?api-version=2017-04-02"
headers = {"Metadata": "true"}
report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
LOG.debug("Start polling IMDS")
@@ -487,7 +547,7 @@ class DataSourceAzure(sources.DataSource):
jump back into the polling loop in order to retrieve the ovf_env."""
if not ret:
return False
- (_md, self.userdata_raw, cfg, _files) = ret
+ (_md, _userdata_raw, cfg, _files) = ret
path = REPROVISION_MARKER_FILE
if (cfg.get('PreprovisionedVm') is True or
os.path.isfile(path)):
@@ -543,22 +603,15 @@ class DataSourceAzure(sources.DataSource):
@property
def network_config(self):
"""Generate a network config like net.generate_fallback_network() with
- the following execptions.
+ the following exceptions.
1. Probe the drivers of the net-devices present and inject them in
the network configuration under params: driver: <driver> value
2. Generate a fallback network config that does not include any of
the blacklisted devices.
"""
- blacklist = ['mlx4_core']
if not self._network_config:
- LOG.debug('Azure: generating fallback configuration')
- # generate a network config, blacklist picking any mlx4_core devs
- netconfig = net.generate_fallback_config(
- blacklist_drivers=blacklist, config_driver=True)
-
- self._network_config = netconfig
-
+ self._network_config = parse_network_config(self._metadata_imds)
return self._network_config
@@ -1025,6 +1078,151 @@ def load_azure_ds_dir(source_dir):
return (md, ud, cfg, {'ovf-env.xml': contents})
+def parse_network_config(imds_metadata):
+ """Convert imds_metadata dictionary to network v2 configuration.
+
+    Parses network configuration from imds metadata if present, or
+    generates fallback network config excluding mlx4_core devices.
+
+ @param: imds_metadata: Dict of content read from IMDS network service.
+ @return: Dictionary containing network version 2 standard configuration.
+ """
+ if imds_metadata != sources.UNSET and imds_metadata:
+ netconfig = {'version': 2, 'ethernets': {}}
+ LOG.debug('Azure: generating network configuration from IMDS')
+ network_metadata = imds_metadata['network']
+ for idx, intf in enumerate(network_metadata['interface']):
+ nicname = 'eth{idx}'.format(idx=idx)
+ dev_config = {}
+ for addr4 in intf['ipv4']['ipAddress']:
+ privateIpv4 = addr4['privateIpAddress']
+ if privateIpv4:
+ if dev_config.get('dhcp4', False):
+ # Append static address config for nic > 1
+ netPrefix = intf['ipv4']['subnet'][0].get(
+ 'prefix', '24')
+ if not dev_config.get('addresses'):
+ dev_config['addresses'] = []
+ dev_config['addresses'].append(
+ '{ip}/{prefix}'.format(
+ ip=privateIpv4, prefix=netPrefix))
+ else:
+ dev_config['dhcp4'] = True
+ for addr6 in intf['ipv6']['ipAddress']:
+ privateIpv6 = addr6['privateIpAddress']
+ if privateIpv6:
+ dev_config['dhcp6'] = True
+ break
+ if dev_config:
+ mac = ':'.join(re.findall(r'..', intf['macAddress']))
+ dev_config.update(
+ {'match': {'macaddress': mac.lower()},
+ 'set-name': nicname})
+ netconfig['ethernets'][nicname] = dev_config
+ else:
+ blacklist = ['mlx4_core']
+ LOG.debug('Azure: generating fallback configuration')
+ # generate a network config, blacklist picking mlx4_core devs
+ netconfig = net.generate_fallback_config(
+ blacklist_drivers=blacklist, config_driver=True)
+ return netconfig
+
+
+def get_metadata_from_imds(fallback_nic, retries):
+ """Query Azure's network metadata service, returning a dictionary.
+
+    If network is not up, set up ephemeral DHCP on fallback_nic to talk to the
+ IMDS. For more info on IMDS:
+ https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
+
+ @param fallback_nic: String. The name of the nic which requires active
+ network in order to query IMDS.
+ @param retries: The number of retries of the IMDS_URL.
+
+ @return: A dict of instance metadata containing compute and network
+ info.
+ """
+ kwargs = {'logfunc': LOG.debug,
+ 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
+ 'func': _get_metadata_from_imds, 'args': (retries,)}
+ if net.is_up(fallback_nic):
+ return util.log_time(**kwargs)
+ else:
+ with EphemeralDHCPv4(fallback_nic):
+ return util.log_time(**kwargs)
+
+
+def _get_metadata_from_imds(retries):
+
+ def retry_on_url_error(msg, exception):
+ if isinstance(exception, UrlError) and exception.code == 404:
+ return True # Continue retries
+ return False # Stop retries on all other exceptions
+
+ url = IMDS_URL + "instance?api-version=2017-12-01"
+ headers = {"Metadata": "true"}
+ try:
+ response = readurl(
+ url, timeout=1, headers=headers, retries=retries,
+ exception_cb=retry_on_url_error)
+ except Exception as e:
+ LOG.debug('Ignoring IMDS instance metadata: %s', e)
+ return {}
+ try:
+ return util.load_json(str(response))
+ except json.decoder.JSONDecodeError:
+ LOG.warning(
+ 'Ignoring non-json IMDS instance metadata: %s', str(response))
+ return {}
+
+
+def maybe_remove_ubuntu_network_config_scripts(paths=None):
+ """Remove Azure-specific ubuntu network config for non-primary nics.
+
+ @param paths: List of networking scripts or directories to remove when
+ present.
+
+ In certain supported ubuntu images, static udev rules or netplan yaml
+ config is delivered in the base ubuntu image to support dhcp on any
+ additional interfaces which get attached by a customer at some point
+ after initial boot. Since the Azure datasource can now regenerate
+ network configuration as metadata reports these new devices, we no longer
+ want the udev rules or netplan's 90-azure-hotplug.yaml to configure
+ networking on eth1 or greater as it might collide with cloud-init's
+ configuration.
+
+    Remove any existing extended network scripts when the datasource is
+    enabled to write network config on every boot.
+ """
+ if not paths:
+ paths = UBUNTU_EXTENDED_NETWORK_SCRIPTS
+ logged = False
+ for path in paths:
+ if os.path.exists(path):
+ if not logged:
+ LOG.info(
+ 'Removing Ubuntu extended network scripts because'
+ ' cloud-init updates Azure network configuration on the'
+ ' following event: %s.',
+ EventType.BOOT)
+ logged = True
+ if os.path.isdir(path):
+ util.del_dir(path)
+ else:
+ util.del_file(path)
+
+
+def _is_platform_viable(seed_dir):
+ """Check platform environment to report if this datasource may run."""
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ if asset_tag == AZURE_CHASSIS_ASSET_TAG:
+ return True
+ LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
+ if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
+ return True
+ return False
+
+
class BrokenAzureDataSource(Exception):
pass
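
To make the new parse_network_config concrete: the first private IPv4 address on each interface becomes a DHCP subnet and any further addresses are appended statically, keyed by the lower-cased MAC. A worked example with an illustrative IMDS payload (the values are invented; the field names follow the code above):

    imds = {'network': {'interface': [{
        'macAddress': '000D3A047598',
        'ipv4': {'subnet': [{'prefix': '24', 'address': '10.0.0.0'}],
                 'ipAddress': [{'privateIpAddress': '10.0.0.4'},
                               {'privateIpAddress': '10.0.0.5'}]},
        'ipv6': {'ipAddress': []}}]}}

    # parse_network_config(imds) should yield roughly:
    expected = {'version': 2, 'ethernets': {'eth0': {
        'dhcp4': True,                 # first private IPv4 -> dhcp
        'addresses': ['10.0.0.5/24'],  # later IPv4s -> static
        'match': {'macaddress': '00:0d:3a:04:75:98'},
        'set-name': 'eth0'}}}
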
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 4cb28977..664dc4b7 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -196,7 +196,7 @@ def on_first_boot(data, distro=None, network=True):
net_conf = data.get("network_config", '')
if net_conf and distro:
LOG.warning("Updating network interfaces from config drive")
- distro.apply_network(net_conf)
+ distro.apply_network_config(eni.convert_eni_data(net_conf))
write_injected_files(data.get('files'))
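
The switch from apply_network to apply_network_config means the ENI text from the config drive is first converted to cloud-init's internal network-config dict, so any renderer (ENI, netplan, sysconfig) can consume it. A small sketch, assuming eni.convert_eni_data keeps its current version-1 output shape:

    from cloudinit.net import eni

    eni_data = "auto eth0\niface eth0 inet dhcp\n"
    netcfg = eni.convert_eni_data(eni_data)
    # Roughly: {'version': 1, 'config': [{'type': 'physical',
    #           'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}]}
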
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 01106ec0..a5358148 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -295,7 +295,7 @@ def read_md():
results = metadata_from_dir(path)
else:
results = util.mount_cb(path, metadata_from_dir)
- except BrokenMetadata as e:
+ except sources.BrokenMetadata as e:
raise RuntimeError(
"Failed reading IBM config disk (platform=%s path=%s): %s" %
(platform, path, e))
@@ -304,10 +304,6 @@ def read_md():
return ret
-class BrokenMetadata(IOError):
- pass
-
-
def metadata_from_dir(source_dir):
"""Walk source_dir extracting standardized metadata.
@@ -352,12 +348,13 @@ def metadata_from_dir(source_dir):
try:
data = transl(raw)
except Exception as e:
- raise BrokenMetadata("Failed decoding %s: %s" % (path, e))
+ raise sources.BrokenMetadata(
+ "Failed decoding %s: %s" % (path, e))
results[name] = data
if results.get('metadata_raw') is None:
- raise BrokenMetadata(
+ raise sources.BrokenMetadata(
"%s missing required file 'meta_data.json'" % source_dir)
results['metadata'] = {}
@@ -368,7 +365,7 @@ def metadata_from_dir(source_dir):
try:
md['random_seed'] = base64.b64decode(md_raw['random_seed'])
except (ValueError, TypeError) as e:
- raise BrokenMetadata(
+ raise sources.BrokenMetadata(
"Badly formatted metadata random_seed entry: %s" % e)
renames = (
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 16c10785..77ccd128 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -232,7 +232,7 @@ class OpenNebulaNetwork(object):
# Set IPv6 default gateway
gateway6 = self.get_gateway6(c_dev)
- if gateway:
+ if gateway6:
devconf['gateway6'] = gateway6
# Set DNS servers and search domains
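
The one-character fix above matters for IPv6-only contexts: the old code tested gateway when deciding whether to set gateway6, so an IPv6 gateway was silently dropped whenever no IPv4 gateway existed. A condensed illustration:

    devconf = {}
    gateway, gateway6 = None, 'fe80::1'  # IPv6-only example values
    if gateway:
        devconf['gateway'] = gateway
    if gateway6:  # previously tested 'gateway', losing this entry
        devconf['gateway6'] = gateway6
    assert devconf == {'gateway6': 'fe80::1'}
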
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 365af96a..4a015240 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -13,6 +13,7 @@ from cloudinit import url_helper
from cloudinit import util
from cloudinit.sources.helpers import openstack
+from cloudinit.sources import DataSourceOracle as oracle
LOG = logging.getLogger(__name__)
@@ -121,8 +122,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
False when unable to contact metadata service or when metadata
format is invalid or disabled.
"""
- if not detect_openstack():
+        oracle_considered = 'Oracle' in self.sys_cfg.get('datasource_list', [])
+ if not detect_openstack(accept_oracle=not oracle_considered):
return False
+
if self.perform_dhcp_setup: # Setup networking in init-local stage.
try:
with EphemeralDHCPv4(self.fallback_interface):
@@ -214,7 +217,7 @@ def read_metadata_service(base_url, ssl_details=None,
return reader.read_v2()
-def detect_openstack():
+def detect_openstack(accept_oracle=False):
"""Return True when a potential OpenStack platform is detected."""
if not util.is_x86():
return True # Non-Intel cpus don't properly report dmi product names
@@ -223,6 +226,8 @@ def detect_openstack():
return True
elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
return True
+ elif accept_oracle and oracle._is_platform_viable():
+ return True
elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
return True
return False
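
Because OCI serves an OpenStack-compatible endpoint, both datasources can match the same machine; the gate above makes OpenStack yield whenever the Oracle datasource is configured to run. A condensed sketch of that precedence (function name hypothetical):

    def openstack_claims(datasource_list, dmi_reports_oracle):
        """OpenStack only claims Oracle hardware when 'Oracle' is
        absent from the configured datasource_list."""
        accept_oracle = 'Oracle' not in datasource_list
        if dmi_reports_oracle:
            return accept_oracle
        return True  # non-Oracle platforms use the other checks

    assert not openstack_claims(['OpenStack', 'Oracle', 'None'], True)
    assert openstack_claims(['OpenStack', 'None'], True)
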
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
new file mode 100644
index 00000000..fab39af3
--- /dev/null
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -0,0 +1,233 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Datasource for Oracle (OCI/Oracle Cloud Infrastructure)
+
+OCI provides an OpenStack-like metadata service which serves only the
+'2013-10-17' and 'latest' versions.
+
+Notes:
+ * This datasource does not support OCI-Classic, which provides an
+   EC2-lookalike metadata service.
+ * The uuid provided in DMI data is not the same as the meta-data provided
+ instance-id, but has an equivalent lifespan.
+ * We do need to support upgrade from an instance that cloud-init
+ identified as OpenStack.
+ * Both bare-metal and VM instances use iSCSI root.
+ * Both bare-metal and VM instances provide a chassis-asset-tag of
+   OracleCloud.com.
+"""
+
+from cloudinit.url_helper import combine_url, readurl, UrlError
+from cloudinit.net import dhcp
+from cloudinit import net
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.net import cmdline
+from cloudinit import log as logging
+
+import json
+import re
+
+LOG = logging.getLogger(__name__)
+
+CHASSIS_ASSET_TAG = "OracleCloud.com"
+METADATA_ENDPOINT = "http://169.254.169.254/openstack/"
+
+
+class DataSourceOracle(sources.DataSource):
+
+ dsname = 'Oracle'
+ system_uuid = None
+ vendordata_pure = None
+ _network_config = sources.UNSET
+
+ def _is_platform_viable(self):
+ """Check platform environment to report if this datasource may run."""
+ return _is_platform_viable()
+
+ def _get_data(self):
+ if not self._is_platform_viable():
+ return False
+
+        # network may already be configured when using iSCSI root. If
+        # that is the case, read_kernel_cmdline_config returns non-None.
+ if _is_iscsi_root():
+ data = self.crawl_metadata()
+ else:
+ with dhcp.EphemeralDHCPv4(net.find_fallback_nic()):
+ data = self.crawl_metadata()
+
+ self._crawled_metadata = data
+ vdata = data['2013-10-17']
+
+ self.userdata_raw = vdata.get('user_data')
+ self.system_uuid = vdata['system_uuid']
+
+ vd = vdata.get('vendor_data')
+ if vd:
+ self.vendordata_pure = vd
+ try:
+ self.vendordata_raw = sources.convert_vendordata(vd)
+ except ValueError as e:
+ LOG.warning("Invalid content in vendor-data: %s", e)
+ self.vendordata_raw = None
+
+ mdcopies = ('public_keys',)
+ md = dict([(k, vdata['meta_data'].get(k))
+ for k in mdcopies if k in vdata['meta_data']])
+
+ mdtrans = (
+ # oracle meta_data.json name, cloudinit.datasource.metadata name
+ ('availability_zone', 'availability-zone'),
+ ('hostname', 'local-hostname'),
+ ('launch_index', 'launch-index'),
+ ('uuid', 'instance-id'),
+ )
+ for dsname, ciname in mdtrans:
+ if dsname in vdata['meta_data']:
+ md[ciname] = vdata['meta_data'][dsname]
+
+ self.metadata = md
+ return True
+
+ def crawl_metadata(self):
+ return read_metadata()
+
+ def check_instance_id(self, sys_cfg):
+ """quickly check (local only) if self.instance_id is still valid
+
+ On Oracle, the dmi-provided system uuid differs from the instance-id
+ but has the same life-span."""
+ return sources.instance_id_matches_system_uuid(self.system_uuid)
+
+ def get_public_ssh_keys(self):
+ return sources.normalize_pubkey_data(self.metadata.get('public_keys'))
+
+ @property
+ def network_config(self):
+ """Network config is read from initramfs provided files
+ If none is present, then we fall back to fallback configuration.
+
+ One thing to note here is that this method is not currently
+ considered at all if there is is kernel/initramfs provided
+ data. In that case, stages considers that the cmdline data
+ overrides datasource provided data and does not consult here.
+
+ We nonetheless return cmdline provided config if present
+ and fallback to generate fallback."""
+ if self._network_config == sources.UNSET:
+ cmdline_cfg = cmdline.read_kernel_cmdline_config()
+ if cmdline_cfg:
+ self._network_config = cmdline_cfg
+ else:
+ self._network_config = self.distro.generate_fallback_config()
+ return self._network_config
+
+
+def _read_system_uuid():
+ sys_uuid = util.read_dmi_data('system-uuid')
+ return None if sys_uuid is None else sys_uuid.lower()
+
+
+def _is_platform_viable():
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ return asset_tag == CHASSIS_ASSET_TAG
+
+
+def _is_iscsi_root():
+ return bool(cmdline.read_kernel_cmdline_config())
+
+
+def _load_index(content):
+ """Return a list entries parsed from content.
+
+ OpenStack's metadata service returns a newline delimited list
+    of items. Oracle's implementation has an HTML-formatted list of links.
+ The parser here just grabs targets from <a href="target">
+ and throws away "../".
+
+    Oracle has acknowledged this as a bug and may fix it in the future
+    to instead return a '\n' delimited plain text list. This function
+    will continue to work if that change is made.
+ if not content.lower().startswith("<html>"):
+ return content.splitlines()
+ items = re.findall(
+ r'href="(?P<target>[^"]*)"', content, re.MULTILINE | re.IGNORECASE)
+ return [i for i in items if not i.startswith(".")]
+
+
+def read_metadata(endpoint_base=METADATA_ENDPOINT, sys_uuid=None,
+ version='2013-10-17'):
+ """Read metadata, return a dictionary.
+
+ Each path listed in the index will be represented in the dictionary.
+ If the path ends in .json, then the content will be decoded and
+ populated into the dictionary.
+
+ The system uuid (/sys/class/dmi/id/product_uuid) is also populated.
+ Example: given paths = ('user_data', 'meta_data.json')
+ This would return:
+    {version: {'user_data': b'blob', 'meta_data': json.loads(blob.decode()),
+               'system_uuid': '3b54f2e0-3ab2-458d-b770-af9926eee3b2'}}
+ """
+ endpoint = combine_url(endpoint_base, version) + "/"
+ if sys_uuid is None:
+ sys_uuid = _read_system_uuid()
+ if not sys_uuid:
+ raise sources.BrokenMetadata("Failed to read system uuid.")
+
+ try:
+ resp = readurl(endpoint)
+ if not resp.ok():
+ raise sources.BrokenMetadata(
+ "Bad response from %s: %s" % (endpoint, resp.code))
+ except UrlError as e:
+ raise sources.BrokenMetadata(
+ "Failed to read index at %s: %s" % (endpoint, e))
+
+ entries = _load_index(resp.contents.decode('utf-8'))
+ LOG.debug("index url %s contained: %s", endpoint, entries)
+
+ # meta_data.json is required.
+ mdj = 'meta_data.json'
+ if mdj not in entries:
+ raise sources.BrokenMetadata(
+ "Required field '%s' missing in index at %s" % (mdj, endpoint))
+
+ ret = {'system_uuid': sys_uuid}
+ for path in entries:
+ response = readurl(combine_url(endpoint, path))
+ if path.endswith(".json"):
+ ret[path.rpartition(".")[0]] = (
+ json.loads(response.contents.decode('utf-8')))
+ else:
+ ret[path] = response.contents
+
+ return {version: ret}
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceOracle, (sources.DEP_FILESYSTEM,)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ import argparse
+ import os
+
+ parser = argparse.ArgumentParser(description='Query Oracle Cloud Metadata')
+ parser.add_argument("--endpoint", metavar="URL",
+ help="The url of the metadata service.",
+ default=METADATA_ENDPOINT)
+ args = parser.parse_args()
+ sys_uuid = "uuid-not-available-not-root" if os.geteuid() != 0 else None
+
+ data = read_metadata(endpoint_base=args.endpoint, sys_uuid=sys_uuid)
+ data['is_platform_viable'] = _is_platform_viable()
+ print(util.json_dumps(data))
+
+# vi: ts=4 expandtab
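
To illustrate _load_index above with both index formats it accepts (assuming the function is in scope); '../' and other dot-entries are filtered out:

    html = ('<html><body><a href="../">../</a>'
            '<a href="meta_data.json">meta_data.json</a>'
            '<a href="user_data">user_data</a></body></html>')
    assert _load_index(html) == ['meta_data.json', 'user_data']
    assert _load_index('meta_data.json\nuser_data') == [
        'meta_data.json', 'user_data']
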
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index e2502b02..9dc4ab23 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -29,7 +29,9 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
-
+from cloudinit import net
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.event import EventType
LOG = logging.getLogger(__name__)
@@ -168,8 +170,8 @@ def query_data_api(api_type, api_address, retries, timeout):
class DataSourceScaleway(sources.DataSource):
-
dsname = "Scaleway"
+    update_events = {
+        'network': set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT])}
def __init__(self, sys_cfg, distro, paths):
super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
@@ -185,11 +187,10 @@ class DataSourceScaleway(sources.DataSource):
self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
+ self._fallback_interface = None
+ self._network_config = None
- def _get_data(self):
- if not on_scaleway():
- return False
-
+ def _crawl_metadata(self):
resp = url_helper.readurl(self.metadata_address,
timeout=self.timeout,
retries=self.retries)
@@ -203,9 +204,48 @@ class DataSourceScaleway(sources.DataSource):
'vendor-data', self.vendordata_address,
self.retries, self.timeout
)
+
+ def _get_data(self):
+ if not on_scaleway():
+ return False
+
+ if self._fallback_interface is None:
+ self._fallback_interface = net.find_fallback_nic()
+ try:
+ with EphemeralDHCPv4(self._fallback_interface):
+ util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self._crawl_metadata)
+ except (NoDHCPLeaseError) as e:
+ util.logexc(LOG, str(e))
+ return False
return True
@property
+ def network_config(self):
+ """
+ Configure networking according to data received from the
+ metadata API.
+ """
+ if self._network_config:
+ return self._network_config
+
+ if self._fallback_interface is None:
+ self._fallback_interface = net.find_fallback_nic()
+
+ netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface}
+ subnets = [{'type': 'dhcp4'}]
+ if self.metadata['ipv6']:
+ subnets += [{'type': 'static',
+ 'address': '%s' % self.metadata['ipv6']['address'],
+ 'gateway': '%s' % self.metadata['ipv6']['gateway'],
+ 'netmask': '%s' % self.metadata['ipv6']['netmask'],
+ }]
+ netcfg['subnets'] = subnets
+ self._network_config = {'version': 1, 'config': [netcfg]}
+ return self._network_config
+
+ @property
def launch_index(self):
return None
@@ -228,7 +268,7 @@ class DataSourceScaleway(sources.DataSource):
datasources = [
- (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceScaleway, (sources.DEP_FILESYSTEM,)),
]
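
With the dependency reduced to DEP_FILESYSTEM, Scaleway now runs in the init-local stage and brings up its own ephemeral DHCP, and the new network_config property emits DHCPv4 plus an optional static IPv6 subnet. Given metadata like the following (illustrative values; the NIC name depends on find_fallback_nic), the property yields roughly:

    metadata = {'ipv6': {'address': '2001:db8::10',
                         'gateway': '2001:db8::1',
                         'netmask': '64'}}

    expected = {'version': 1, 'config': [{
        'type': 'physical', 'name': 'ens2',  # assumed fallback NIC
        'subnets': [{'type': 'dhcp4'},
                    {'type': 'static',
                     'address': '2001:db8::10',
                     'gateway': '2001:db8::1',
                     'netmask': '64'}]}]}
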
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index f92e8b5c..593ac91a 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -564,7 +564,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
continue
LOG.warning('Unexpected response "%s" during flush', response)
except JoyentMetadataTimeoutException:
- LOG.warning('Timeout while initializing metadata client. ' +
+ LOG.warning('Timeout while initializing metadata client. '
'Is the host metadata service running?')
LOG.debug('Got "invalid command". Flush complete.')
self.fp.timeout = timeout
@@ -683,6 +683,18 @@ def jmc_client_factory(
raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
+def identify_file(content_f):
+ cmd = ["file", "--brief", "--mime-type", content_f]
+ f_type = None
+ try:
+ (f_type, _err) = util.subp(cmd)
+ LOG.debug("script %s mime type is %s", content_f, f_type)
+ except util.ProcessExecutionError as e:
+        util.logexc(
+            LOG, "Failed to identify script type for %s" % content_f)
+ return None if f_type is None else f_type.strip()
+
+
def write_boot_content(content, content_f, link=None, shebang=False,
mode=0o400):
"""
@@ -715,18 +727,11 @@ def write_boot_content(content, content_f, link=None, shebang=False,
util.write_file(content_f, content, mode=mode)
if shebang and not content.startswith("#!"):
- try:
- cmd = ["file", "--brief", "--mime-type", content_f]
- (f_type, _err) = util.subp(cmd)
- LOG.debug("script %s mime type is %s", content_f, f_type)
- if f_type.strip() == "text/plain":
- new_content = "\n".join(["#!/bin/bash", content])
- util.write_file(content_f, new_content, mode=mode)
- LOG.debug("added shebang to file %s", content_f)
-
- except Exception as e:
- util.logexc(LOG, ("Failed to identify script type for %s" %
- content_f, e))
+ f_type = identify_file(content_f)
+ if f_type == "text/plain":
+ util.write_file(
+ content_f, "\n".join(["#!/bin/bash", content]), mode=mode)
+ LOG.debug("added shebang to file %s", content_f)
if link:
try:
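
identify_file above is a thin wrapper over file(1); a self-contained sketch of the same probe, assuming a file binary on PATH:

    import subprocess

    def identify_file(path):
        """Return the MIME type of path, e.g. 'text/plain', or None."""
        try:
            out = subprocess.check_output(
                ['file', '--brief', '--mime-type', path])
        except (OSError, subprocess.CalledProcessError):
            return None
        return out.decode('utf-8').strip()
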
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index f424316a..5ac98826 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -38,8 +38,17 @@ DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
DS_PREFIX = 'DataSource'
-# File in which instance meta-data, user-data and vendor-data is written
+EXPERIMENTAL_TEXT = (
+ "EXPERIMENTAL: The structure and format of content scoped under the 'ds'"
+ " key may change in subsequent releases of cloud-init.")
+
+
+# File in which publicly available instance meta-data is written;
+# security-sensitive key values are redacted from this world-readable file
INSTANCE_JSON_FILE = 'instance-data.json'
+# security-sensitive key values are present in this root-readable file
+INSTANCE_JSON_SENSITIVE_FILE = 'instance-data-sensitive.json'
+REDACT_SENSITIVE_VALUE = 'redacted for non-root user'
# Key which can be provide a cloud's official product name to cloud-init
METADATA_CLOUD_NAME_KEY = 'cloud-name'
@@ -58,26 +67,55 @@ class InvalidMetaDataException(Exception):
pass
-def process_base64_metadata(metadata, key_path=''):
- """Strip ci-b64 prefix and return metadata with base64-encoded-keys set."""
+def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
+ """Process all instance metadata cleaning it up for persisting as json.
+
+ Strip ci-b64 prefix and catalog any 'base64_encoded_keys' as a list
+
+ @return Dict copy of processed metadata.
+ """
md_copy = copy.deepcopy(metadata)
- md_copy['base64-encoded-keys'] = []
+ md_copy['base64_encoded_keys'] = []
+ md_copy['sensitive_keys'] = []
for key, val in metadata.items():
if key_path:
sub_key_path = key_path + '/' + key
else:
sub_key_path = key
+ if key in sensitive_keys or sub_key_path in sensitive_keys:
+ md_copy['sensitive_keys'].append(sub_key_path)
if isinstance(val, str) and val.startswith('ci-b64:'):
- md_copy['base64-encoded-keys'].append(sub_key_path)
+ md_copy['base64_encoded_keys'].append(sub_key_path)
md_copy[key] = val.replace('ci-b64:', '')
if isinstance(val, dict):
- return_val = process_base64_metadata(val, sub_key_path)
- md_copy['base64-encoded-keys'].extend(
- return_val.pop('base64-encoded-keys'))
+ return_val = process_instance_metadata(
+ val, sub_key_path, sensitive_keys)
+ md_copy['base64_encoded_keys'].extend(
+ return_val.pop('base64_encoded_keys'))
+ md_copy['sensitive_keys'].extend(
+ return_val.pop('sensitive_keys'))
md_copy[key] = return_val
return md_copy
+def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
+ """Redact any sensitive keys from to provided metadata dictionary.
+
+    Replace the values of any keys listed in 'sensitive_keys' with
+    redact_value.
+ """
+ if not metadata.get('sensitive_keys', []):
+ return metadata
+ md_copy = copy.deepcopy(metadata)
+ for key_path in metadata.get('sensitive_keys'):
+ path_parts = key_path.split('/')
+ obj = md_copy
+ for path in path_parts:
+ if isinstance(obj[path], dict) and path != path_parts[-1]:
+ obj = obj[path]
+ obj[path] = redact_value
+ return md_copy
+
+
URLParams = namedtuple(
'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
@@ -103,14 +141,14 @@ class DataSource(object):
url_timeout = 10 # timeout for each metadata url read attempt
url_retries = 5 # number of times to retry url upon 404
- # The datasource defines a list of supported EventTypes during which
+ # The datasource defines a set of supported EventTypes during which
# the datasource can react to changes in metadata and regenerate
# network configuration on metadata changes.
# A datasource which supports writing network config on each system boot
- # would set update_events = {'network': [EventType.BOOT]}
+ # would call update_events['network'].add(EventType.BOOT).
# Default: generate network config on new instance id (first boot).
- update_events = {'network': [EventType.BOOT_NEW_INSTANCE]}
+ update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
# N-tuple listing default values for any metadata-related class
    # attributes cached on an instance by process_data runs. These attribute
@@ -122,6 +160,10 @@ class DataSource(object):
_dirty_cache = False
+    # N-tuple of keypaths or keynames to redact from instance-data.json
+    # for non-root users
+ sensitive_metadata_keys = ('security-credentials',)
+
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
@@ -147,12 +189,24 @@ class DataSource(object):
def _get_standardized_metadata(self):
"""Return a dictionary of standardized metadata keys."""
- return {'v1': {
- 'local-hostname': self.get_hostname(),
- 'instance-id': self.get_instance_id(),
- 'cloud-name': self.cloud_name,
- 'region': self.region,
- 'availability-zone': self.availability_zone}}
+ local_hostname = self.get_hostname()
+ instance_id = self.get_instance_id()
+ availability_zone = self.availability_zone
+ cloud_name = self.cloud_name
+ # When adding new standard keys prefer underscore-delimited instead
+        # of hyphen-delimited to support simple variable references in jinja
+ # templates.
+ return {
+ 'v1': {
+ 'availability-zone': availability_zone,
+ 'availability_zone': availability_zone,
+ 'cloud-name': cloud_name,
+ 'cloud_name': cloud_name,
+ 'instance-id': instance_id,
+ 'instance_id': instance_id,
+ 'local-hostname': local_hostname,
+ 'local_hostname': local_hostname,
+ 'region': self.region}}
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached metadata attributes to datasource defaults.
@@ -180,15 +234,22 @@ class DataSource(object):
"""
self._dirty_cache = True
return_value = self._get_data()
- json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
if not return_value:
return return_value
+ self.persist_instance_data()
+ return return_value
+
+ def persist_instance_data(self):
+ """Process and write INSTANCE_JSON_FILE with all instance metadata.
+ Replace any hyphens with underscores in key names for use in template
+ processing.
+
+ @return True on successful write, False otherwise.
+ """
instance_data = {
- 'ds': {
- 'meta-data': self.metadata,
- 'user-data': self.get_userdata_raw(),
- 'vendor-data': self.get_vendordata_raw()}}
+ 'ds': {'_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': self.metadata}}
if hasattr(self, 'network_json'):
network_json = getattr(self, 'network_json')
if network_json != UNSET:
@@ -202,16 +263,23 @@ class DataSource(object):
try:
# Process content base64encoding unserializable values
content = util.json_dumps(instance_data)
- # Strip base64: prefix and return base64-encoded-keys
- processed_data = process_base64_metadata(json.loads(content))
+ # Strip base64: prefix and set base64_encoded_keys list.
+ processed_data = process_instance_metadata(
+ json.loads(content),
+ sensitive_keys=self.sensitive_metadata_keys)
except TypeError as e:
LOG.warning('Error persisting instance-data.json: %s', str(e))
- return return_value
+ return False
except UnicodeDecodeError as e:
LOG.warning('Error persisting instance-data.json: %s', str(e))
- return return_value
- write_json(json_file, processed_data, mode=0o600)
- return return_value
+ return False
+ json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+        # world-readable file carries the redacted copy
+        write_json(json_file, redact_sensitive_keys(processed_data))
+        json_sensitive_file = os.path.join(self.paths.run_dir,
+                                           INSTANCE_JSON_SENSITIVE_FILE)
+        write_json(json_sensitive_file, processed_data, mode=0o600)
+ return True
def _get_data(self):
"""Walk metadata sources, process crawled data and save attributes."""
@@ -475,8 +543,8 @@ class DataSource(object):
for update_scope, update_events in self.update_events.items():
if event in update_events:
if not supported_events.get(update_scope):
- supported_events[update_scope] = []
- supported_events[update_scope].append(event)
+ supported_events[update_scope] = set()
+ supported_events[update_scope].add(event)
for scope, matched_events in supported_events.items():
LOG.debug(
"Update datasource metadata and %s config due to events: %s",
@@ -490,6 +558,8 @@ class DataSource(object):
result = self.get_data()
if result:
return True
+ LOG.debug("Datasource %s not updated for events: %s", self,
+ ', '.join(source_event_types))
return False
def check_instance_id(self, sys_cfg):
@@ -669,6 +739,10 @@ def convert_vendordata(data, recurse=True):
raise ValueError("Unknown data type for vendordata: %s" % type(data))
+class BrokenMetadata(IOError):
+ pass
+
+
# 'depends' is a list of dependencies (DEP_FILESYSTEM)
# ds_list is a list of 2 item lists
# ds_list = [
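
The redaction pipeline above works in two passes: process_instance_metadata strips ci-b64 prefixes and catalogs base64-encoded and sensitive key paths, then redact_sensitive_keys blanks those paths for the world-readable copy while the root-only sensitive file keeps the full values. A small round trip with made-up values:

    md = {'ds': {'meta_data': {
        'some': {'security-credentials': {'token': 's3kr1t'}},
        'blob': 'ci-b64:SGVsbG8='}}}

    processed = process_instance_metadata(
        md, sensitive_keys=('security-credentials',))
    # processed['base64_encoded_keys'] == ['ds/meta_data/blob']
    # processed['sensitive_keys'] ==
    #     ['ds/meta_data/some/security-credentials']

    redacted = redact_sensitive_keys(processed)
    # redacted['ds']['meta_data']['some']['security-credentials']
    #     == 'redacted for non-root user'
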
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index a4cf0667..9c29ceac 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -21,6 +21,8 @@ from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
+from cloudinit.sources import BrokenMetadata
+
# See https://docs.openstack.org/user-guide/cli-config-drive.html
LOG = logging.getLogger(__name__)
@@ -36,21 +38,38 @@ KEY_COPIES = (
('local-hostname', 'hostname', False),
('instance-id', 'uuid', True),
)
+
+# Versions and names taken from nova source nova/api/metadata/base.py
OS_LATEST = 'latest'
OS_FOLSOM = '2012-08-10'
OS_GRIZZLY = '2013-04-04'
OS_HAVANA = '2013-10-17'
OS_LIBERTY = '2015-10-15'
+# NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan)
+OS_NEWTON_ONE = '2016-06-30'
+# NEWTON_TWO adds vendor_data2.json (vendordata-reboot)
+OS_NEWTON_TWO = '2016-10-06'
+# OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan)
+OS_OCATA = '2017-02-22'
+# OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs)
+OS_ROCKY = '2018-08-27'
+
+
# keep this in chronological order. new supported versions go at the end.
OS_VERSIONS = (
OS_FOLSOM,
OS_GRIZZLY,
OS_HAVANA,
OS_LIBERTY,
+ OS_NEWTON_ONE,
+ OS_NEWTON_TWO,
+ OS_OCATA,
+ OS_ROCKY,
)
PHYSICAL_TYPES = (
None,
+ 'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
'bridge',
'dvs',
'ethernet',
@@ -68,10 +87,6 @@ class NonReadable(IOError):
pass
-class BrokenMetadata(IOError):
- pass
-
-
class SourceMixin(object):
def _ec2_name_to_device(self, name):
if not self.ec2_metadata:
@@ -441,7 +456,7 @@ class MetadataReader(BaseReader):
return self._versions
found = []
version_path = self._path_join(self.base_path, "openstack")
- content = self._path_read(version_path)
+ content = self._path_read(version_path, decode=True)
for line in content.splitlines():
line = line.strip()
if not line:
@@ -589,6 +604,8 @@ def convert_net_json(network_json=None, known_macs=None):
cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
elif link['type'] in ['bond']:
params = {}
+ if link_mac_addr:
+ params['mac_address'] = link_mac_addr
for k, v in link.items():
if k == 'bond_links':
continue
@@ -658,6 +675,17 @@ def convert_net_json(network_json=None, known_macs=None):
else:
cfg[key] = fmt % link_id_info[target]['name']
+ # Infiniband interfaces may be referenced in network_data.json by a 6 byte
+ # Ethernet MAC-style address, and we use that address to look up the
+ # interface name above. Now ensure that the hardware address is set to the
+ # full 20 byte address.
+ ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface()
+ if ib_known_hwaddrs:
+ for cfg in config:
+ if cfg['name'] in ib_known_hwaddrs:
+ cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
+ cfg['type'] = 'infiniband'
+
for service in services:
cfg = service
cfg.update({'type': 'nameserver'})
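
For the Infiniband handling above, a tiny illustration with invented addresses: network_data.json refers to the port by a 6-byte MAC-style token, while the kernel reports the full 20-byte hardware address that the rendered config must carry:

    mac_style = 'fa:16:3e:11:22:33'           # as in network_data.json
    ib_hwaddr = ('80:00:00:48:fe:80:00:00:00:00:00:00:'
                 'fa:16:3e:ff:fe:11:22:33')   # full 20-byte IB address

    cfg = {'name': 'ib0', 'mac_address': mac_style, 'type': 'physical'}
    ib_known_hwaddrs = {'ib0': ib_hwaddr}
    if cfg['name'] in ib_known_hwaddrs:
        cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
        cfg['type'] = 'infiniband'
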
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 3ef8c624..e1890e23 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -164,7 +164,7 @@ class NicConfigurator(object):
return ([subnet], route_list)
# Add routes if there is no primary nic
- if not self._primaryNic:
+ if not self._primaryNic and v4.gateways:
route_list.extend(self.gen_ipv4_route(nic,
v4.gateways,
v4.netmask))
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index dcd221be..8082019e 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import inspect
import os
import six
@@ -9,7 +10,8 @@ from cloudinit.event import EventType
from cloudinit.helpers import Paths
from cloudinit import importer
from cloudinit.sources import (
- INSTANCE_JSON_FILE, DataSource, UNSET)
+ EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE,
+ REDACT_SENSITIVE_VALUE, UNSET, DataSource, redact_sensitive_keys)
from cloudinit.tests.helpers import CiTestCase, skipIf, mock
from cloudinit.user_data import UserDataProcessor
from cloudinit import util
@@ -20,24 +22,30 @@ class DataSourceTestSubclassNet(DataSource):
dsname = 'MyTestSubclass'
url_max_wait = 55
- def __init__(self, sys_cfg, distro, paths, custom_userdata=None):
+ def __init__(self, sys_cfg, distro, paths, custom_metadata=None,
+ custom_userdata=None, get_data_retval=True):
super(DataSourceTestSubclassNet, self).__init__(
sys_cfg, distro, paths)
self._custom_userdata = custom_userdata
+ self._custom_metadata = custom_metadata
+ self._get_data_retval = get_data_retval
def _get_cloud_name(self):
return 'SubclassCloudName'
def _get_data(self):
- self.metadata = {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'}
+ if self._custom_metadata:
+ self.metadata = self._custom_metadata
+ else:
+ self.metadata = {'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion'}
if self._custom_userdata:
self.userdata_raw = self._custom_userdata
else:
self.userdata_raw = 'userdata_raw'
self.vendordata_raw = 'vendordata_raw'
- return True
+ return self._get_data_retval
class InvalidDataSourceTestSubclassNet(DataSource):
@@ -264,8 +272,19 @@ class TestDataSource(CiTestCase):
self.assertEqual('fqdnhostname.domain.com',
datasource.get_hostname(fqdn=True))
- def test_get_data_write_json_instance_data(self):
- """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root."""
+ def test_get_data_does_not_write_instance_data_on_failure(self):
+ """get_data does not write INSTANCE_JSON_FILE on get_data False."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ get_data_retval=False)
+ self.assertFalse(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ self.assertFalse(
+ os.path.exists(json_file), 'Found unexpected file %s' % json_file)
+
+ def test_get_data_writes_json_instance_data_on_success(self):
+ """get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
@@ -273,40 +292,126 @@ class TestDataSource(CiTestCase):
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
expected = {
- 'base64-encoded-keys': [],
+ 'base64_encoded_keys': [],
+ 'sensitive_keys': [],
'v1': {
'availability-zone': 'myaz',
+ 'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
+ 'cloud_name': 'subclasscloudname',
'instance-id': 'iid-datasource',
+ 'instance_id': 'iid-datasource',
'local-hostname': 'test-subclass-hostname',
+ 'local_hostname': 'test-subclass-hostname',
'region': 'myregion'},
'ds': {
- 'meta-data': {'availability_zone': 'myaz',
+ '_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': {'availability_zone': 'myaz',
'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'},
- 'user-data': 'userdata_raw',
- 'vendor-data': 'vendordata_raw'}}
+ 'region': 'myregion'}}}
self.assertEqual(expected, util.load_json(content))
file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+ self.assertEqual(expected, util.load_json(content))
+
+ def test_get_data_writes_json_instance_data_sensitive(self):
+ """get_data writes INSTANCE_JSON_SENSITIVE_FILE as readonly root."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ custom_metadata={
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {'security-credentials': {
+ 'cred1': 'sekret', 'cred2': 'othersekret'}}})
+ self.assertEqual(
+ ('security-credentials',), datasource.sensitive_metadata_keys)
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+ redacted = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+            REDACT_SENSITIVE_VALUE,
+ redacted['ds']['meta_data']['some']['security-credentials'])
+ content = util.load_file(sensitive_json_file)
+ expected = {
+ 'base64_encoded_keys': [],
+ 'sensitive_keys': ['ds/meta_data/some/security-credentials'],
+ 'v1': {
+ 'availability-zone': 'myaz',
+ 'availability_zone': 'myaz',
+ 'cloud-name': 'subclasscloudname',
+ 'cloud_name': 'subclasscloudname',
+ 'instance-id': 'iid-datasource',
+ 'instance_id': 'iid-datasource',
+ 'local-hostname': 'test-subclass-hostname',
+ 'local_hostname': 'test-subclass-hostname',
+ 'region': 'myregion'},
+ 'ds': {
+ '_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': {
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+                'some': {'security-credentials': {
+                    'cred1': 'sekret', 'cred2': 'othersekret'}}}}
+ }
+ self.maxDiff = None
+ self.assertEqual(expected, util.load_json(content))
+ file_stat = os.stat(sensitive_json_file)
self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
+ self.assertEqual(expected, util.load_json(content))
def test_get_data_handles_redacted_unserializable_content(self):
"""get_data warns unserializable content in INSTANCE_JSON_FILE."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
- self.assertTrue(datasource.get_data())
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
+ datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
- expected_userdata = {
+ expected_metadata = {
'key1': 'val1',
'key2': {
'key2.1': "Warning: redacted unserializable type <class"
" 'cloudinit.helpers.Paths'>"}}
instance_json = util.load_json(content)
self.assertEqual(
- expected_userdata, instance_json['ds']['user-data'])
+ expected_metadata, instance_json['ds']['meta_data'])
+
+ def test_persist_instance_data_writes_ec2_metadata_when_set(self):
+ """When ec2_metadata class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ datasource.ec2_metadata = UNSET
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn('ec2_metadata', instance_data['ds'])
+ datasource.ec2_metadata = {'ec2stuff': 'is good'}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {'ec2stuff': 'is good'},
+ instance_data['ds']['ec2_metadata'])
+
+ def test_persist_instance_data_writes_network_json_when_set(self):
+ """When network_data.json class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn('network_json', instance_data['ds'])
+ datasource.network_json = {'network_json': 'is good'}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {'network_json': 'is good'},
+ instance_data['ds']['network_json'])
@skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes")
def test_get_data_base64encodes_unserializable_bytes(self):
@@ -314,17 +419,17 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
self.assertTrue(datasource.get_data())
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
instance_json = util.load_json(content)
- self.assertEqual(
- ['ds/user-data/key2/key2.1'],
- instance_json['base64-encoded-keys'])
+ self.assertItemsEqual(
+ ['ds/meta_data/key2/key2.1'],
+ instance_json['base64_encoded_keys'])
self.assertEqual(
{'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
- instance_json['ds']['user-data'])
+ instance_json['ds']['meta_data'])
@skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes")
def test_get_data_handles_bytes_values(self):
@@ -332,15 +437,15 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
self.assertTrue(datasource.get_data())
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
instance_json = util.load_json(content)
- self.assertEqual([], instance_json['base64-encoded-keys'])
+ self.assertEqual([], instance_json['base64_encoded_keys'])
self.assertEqual(
{'key1': 'val1', 'key2': {'key2.1': '\x123'}},
- instance_json['ds']['user-data'])
+ instance_json['ds']['meta_data'])
@skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8")
def test_non_utf8_encoding_logs_warning(self):
@@ -348,7 +453,7 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
self.assertTrue(datasource.get_data())
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
self.assertFalse(os.path.exists(json_file))
@@ -429,8 +534,9 @@ class TestDataSource(CiTestCase):
def test_update_metadata_only_acts_on_supported_update_events(self):
"""update_metadata won't get_data on unsupported update events."""
+ self.datasource.update_events['network'].discard(EventType.BOOT)
self.assertEqual(
- {'network': [EventType.BOOT_NEW_INSTANCE]},
+ {'network': set([EventType.BOOT_NEW_INSTANCE])},
self.datasource.update_events)
def fake_get_data():
@@ -461,4 +567,36 @@ class TestDataSource(CiTestCase):
self.logs.getvalue())
+class TestRedactSensitiveData(CiTestCase):
+
+ def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
+ """When sensitive_keys is absent or empty from metadata do nothing."""
+ md = {'my': 'data'}
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value='redacted'))
+ md['sensitive_keys'] = []
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value='redacted'))
+
+ def test_redact_sensitive_data_redacts_exact_match_name(self):
+ """Only exact matched sensitive_keys are redacted from metadata."""
+ md = {'sensitive_keys': ['md/secure'],
+ 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+ secure_md = copy.deepcopy(md)
+ secure_md['md']['secure'] = 'redacted'
+ self.assertEqual(
+ secure_md,
+ redact_sensitive_keys(md, redact_value='redacted'))
+
+ def test_redact_sensitive_data_does_redacts_with_default_string(self):
+ """When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
+ md = {'sensitive_keys': ['md/secure'],
+ 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+ secure_md = copy.deepcopy(md)
+ secure_md['md']['secure'] = 'redacted for non-root user'
+ self.assertEqual(
+ secure_md,
+ redact_sensitive_keys(md))
+
+
# vi: ts=4 expandtab
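
The tests above exercise the list-to-set migration of update_events; sets let a datasource opt in or out of events without duplicate entries. A condensed sketch, assuming cloudinit.event.EventType as used throughout this patch:

    from cloudinit.event import EventType

    update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
    update_events['network'].add(EventType.BOOT)      # react every boot
    update_events['network'].add(EventType.BOOT)      # set: no duplicate
    update_events['network'].discard(EventType.BOOT)  # back to default
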
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
new file mode 100644
index 00000000..7599126c
--- /dev/null
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -0,0 +1,331 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources import BrokenMetadata
+from cloudinit import helpers
+
+from cloudinit.tests import helpers as test_helpers
+
+from textwrap import dedent
+import argparse
+import httpretty
+import json
+import mock
+import os
+import six
+import uuid
+
+DS_PATH = "cloudinit.sources.DataSourceOracle"
+MD_VER = "2013-10-17"
+
+
+class TestDataSourceOracle(test_helpers.CiTestCase):
+ """Test datasource DataSourceOracle."""
+
+ ds_class = oracle.DataSourceOracle
+
+ my_uuid = str(uuid.uuid4())
+ my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
+ "name": "ci-vm1", "availability_zone": "phx-ad-3",
+ "hostname": "ci-vm1hostname",
+ "launch_index": 0, "files": [],
+ "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
+ "meta": {}}
+
+ def _patch_instance(self, inst, patches):
+ """Patch an instance of a class 'inst'.
+ for each name, kwargs in patches:
+ inst.name = mock.Mock(**kwargs)
+ returns a namespace object that has
+ namespace.name = mock.Mock(**kwargs)
+ Do not bother with cleanup as instance is assumed transient."""
+ mocks = argparse.Namespace()
+ for name, kwargs in patches.items():
+ imock = mock.Mock(name=name, spec=getattr(inst, name), **kwargs)
+ setattr(mocks, name, imock)
+ setattr(inst, name, imock)
+ return mocks
+
+ def _get_ds(self, sys_cfg=None, distro=None, paths=None, ud_proc=None,
+ patches=None):
+ if sys_cfg is None:
+ sys_cfg = {}
+ if patches is None:
+ patches = {}
+ if paths is None:
+ tmpd = self.tmp_dir()
+ dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd),
+ 'run_dir': self.tmp_path('run_dir')}
+ for d in dirs.values():
+ os.mkdir(d)
+ paths = helpers.Paths(dirs)
+
+ ds = self.ds_class(sys_cfg=sys_cfg, distro=distro,
+ paths=paths, ud_proc=ud_proc)
+
+ return ds, self._patch_instance(ds, patches)
+
+ def test_platform_not_viable_returns_false(self):
+ ds, mocks = self._get_ds(
+ patches={'_is_platform_viable': {'return_value': False}})
+ self.assertFalse(ds._get_data())
+ mocks._is_platform_viable.assert_called_once_with()
+
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_without_userdata(self, m_is_iscsi_root):
+ """If no user-data is provided, it should not be in return dict."""
+ ds, mocks = self._get_ds(patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ self.assertTrue(ds._get_data())
+ mocks._is_platform_viable.assert_called_once_with()
+ mocks.crawl_metadata.assert_called_once_with()
+ self.assertEqual(self.my_uuid, ds.system_uuid)
+ self.assertEqual(self.my_md['availability_zone'], ds.availability_zone)
+ self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
+ self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
+ self.assertIsNone(ds.userdata_raw)
+
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_with_vendordata(self, m_is_iscsi_root):
+ """Test with vendor data."""
+ vd = {'cloud-init': '#cloud-config\nkey: value'}
+ ds, mocks = self._get_ds(patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md,
+ 'vendor_data': vd}}}})
+ self.assertTrue(ds._get_data())
+ mocks._is_platform_viable.assert_called_once_with()
+ mocks.crawl_metadata.assert_called_once_with()
+ self.assertEqual(vd, ds.vendordata_pure)
+ self.assertEqual(vd['cloud-init'], ds.vendordata_raw)
+
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_with_userdata(self, m_is_iscsi_root):
+ """Ensure user-data is populated if present and is binary."""
+ my_userdata = b'abcdefg'
+ ds, mocks = self._get_ds(patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md,
+ 'user_data': my_userdata}}}})
+ self.assertTrue(ds._get_data())
+ mocks._is_platform_viable.assert_called_once_with()
+ mocks.crawl_metadata.assert_called_once_with()
+ self.assertEqual(self.my_uuid, ds.system_uuid)
+ self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
+ self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
+ self.assertEqual(my_userdata, ds.userdata_raw)
+
+ @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config")
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_network_cmdline(self, m_is_iscsi_root, m_cmdline_config):
+ """network_config should read kernel cmdline."""
+ distro = mock.MagicMock()
+ ds, _ = self._get_ds(distro=distro, patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ ncfg = {'version': 1, 'config': [{'a': 'b'}]}
+ m_cmdline_config.return_value = ncfg
+ self.assertTrue(ds._get_data())
+ self.assertEqual(ncfg, ds.network_config)
+ m_cmdline_config.assert_called_once_with()
+ self.assertFalse(distro.generate_fallback_config.called)
+
+ @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config")
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_network_fallback(self, m_is_iscsi_root, m_cmdline_config):
+ """test that fallback network is generated if no kernel cmdline."""
+ distro = mock.MagicMock()
+ ds, _ = self._get_ds(distro=distro, patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ ncfg = {'version': 1, 'config': [{'a': 'b'}]}
+ m_cmdline_config.return_value = None
+ self.assertTrue(ds._get_data())
+ ncfg = {'version': 1, 'config': [{'distro1': 'value'}]}
+ distro.generate_fallback_config.return_value = ncfg
+ self.assertEqual(ncfg, ds.network_config)
+ m_cmdline_config.assert_called_once_with()
+ distro.generate_fallback_config.assert_called_once_with()
+ self.assertEqual(1, m_cmdline_config.call_count)
+
+ # test that the result got cached, and the methods not re-called.
+ self.assertEqual(ncfg, ds.network_config)
+ self.assertEqual(1, m_cmdline_config.call_count)
+
+
+@mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4()))
+class TestReadMetaData(test_helpers.HttprettyTestCase):
+ """Test the read_metadata which interacts with http metadata service."""
+
+ mdurl = oracle.METADATA_ENDPOINT
+ my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
+ "name": "ci-vm1", "availability_zone": "phx-ad-3",
+ "hostname": "ci-vm1hostname",
+ "launch_index": 0, "files": [],
+ "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
+ "meta": {}}
+
+ def populate_md(self, data):
+ """call httppretty.register_url for each item dict 'data',
+ including valid indexes. Text values converted to bytes."""
+ httpretty.register_uri(
+ httpretty.GET, self.mdurl + MD_VER + "/",
+ '\n'.join(data.keys()).encode('utf-8'))
+ for k, v in data.items():
+ httpretty.register_uri(
+ httpretty.GET, self.mdurl + MD_VER + "/" + k,
+ v if not isinstance(v, six.text_type) else v.encode('utf-8'))
+
+ def test_broken_no_sys_uuid(self, m_read_system_uuid):
+ """Datasource requires ability to read system_uuid and true return."""
+ m_read_system_uuid.return_value = None
+ self.assertRaises(BrokenMetadata, oracle.read_metadata)
+
+ def test_broken_no_metadata_json(self, m_read_system_uuid):
+ """Datasource requires meta_data.json."""
+ httpretty.register_uri(
+ httpretty.GET, self.mdurl + MD_VER + "/",
+ '\n'.join(['user_data']).encode('utf-8'))
+ with self.assertRaises(BrokenMetadata) as cm:
+ oracle.read_metadata()
+ self.assertIn("Required field 'meta_data.json' missing",
+ str(cm.exception))
+
+ def test_with_userdata(self, m_read_system_uuid):
+ data = {'user_data': b'#!/bin/sh\necho hi world\n',
+ 'meta_data.json': json.dumps(self.my_md)}
+ self.populate_md(data)
+ result = oracle.read_metadata()[MD_VER]
+ self.assertEqual(data['user_data'], result['user_data'])
+ self.assertEqual(self.my_md, result['meta_data'])
+
+ def test_without_userdata(self, m_read_system_uuid):
+ data = {'meta_data.json': json.dumps(self.my_md)}
+ self.populate_md(data)
+ result = oracle.read_metadata()[MD_VER]
+ self.assertNotIn('user_data', result)
+ self.assertEqual(self.my_md, result['meta_data'])
+
+ def test_unknown_fields_included(self, m_read_system_uuid):
+ """Unknown fields listed in index should be included.
+ And those ending in .json should be decoded."""
+ some_data = {'key1': 'data1', 'subk1': {'subd1': 'subv'}}
+ some_vendor_data = {'cloud-init': 'foo'}
+ data = {'meta_data.json': json.dumps(self.my_md),
+ 'some_data.json': json.dumps(some_data),
+ 'vendor_data.json': json.dumps(some_vendor_data),
+ 'other_blob': b'this is blob'}
+ self.populate_md(data)
+ result = oracle.read_metadata()[MD_VER]
+ self.assertNotIn('user_data', result)
+ self.assertEqual(self.my_md, result['meta_data'])
+ self.assertEqual(some_data, result['some_data'])
+ self.assertEqual(some_vendor_data, result['vendor_data'])
+ self.assertEqual(data['other_blob'], result['other_blob'])
+
+
+class TestIsPlatformViable(test_helpers.CiTestCase):
+ @mock.patch(DS_PATH + ".util.read_dmi_data",
+ return_value=oracle.CHASSIS_ASSET_TAG)
+ def test_expected_viable(self, m_read_dmi_data):
+ """System with known chassis tag is viable."""
+ self.assertTrue(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+ @mock.patch(DS_PATH + ".util.read_dmi_data", return_value=None)
+ def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data):
+ """System without known chassis tag is not viable."""
+ self.assertFalse(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+ @mock.patch(DS_PATH + ".util.read_dmi_data", return_value="LetsGoCubs")
+ def test_expected_not_viable_other(self, m_read_dmi_data):
+ """System with unnown chassis tag is not viable."""
+ self.assertFalse(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+
+class TestLoadIndex(test_helpers.CiTestCase):
+ """_load_index handles parsing of an index into a proper list.
+ The tests here guarantee correct parsing of html version or
+ a fixed version. See the function docstring for more doc."""
+
+ _known_html_api_versions = dedent("""\
+ <html>
+ <head><title>Index of /openstack/</title></head>
+ <body bgcolor="white">
+ <h1>Index of /openstack/</h1><hr><pre><a href="../">../</a>
+ <a href="2013-10-17/">2013-10-17/</a> 27-Jun-2018 12:22 -
+ <a href="latest/">latest/</a> 27-Jun-2018 12:22 -
+ </pre><hr></body>
+ </html>""")
+
+ _known_html_contents = dedent("""\
+ <html>
+ <head><title>Index of /openstack/2013-10-17/</title></head>
+ <body bgcolor="white">
+ <h1>Index of /openstack/2013-10-17/</h1><hr><pre><a href="../">../</a>
+ <a href="meta_data.json">meta_data.json</a> 27-Jun-2018 12:22 679
+ <a href="user_data">user_data</a> 27-Jun-2018 12:22 146
+ </pre><hr></body>
+ </html>""")
+
+ def test_parse_html(self):
+ """Test parsing of lower case html."""
+ self.assertEqual(
+ ['2013-10-17/', 'latest/'],
+ oracle._load_index(self._known_html_api_versions))
+ self.assertEqual(
+ ['meta_data.json', 'user_data'],
+ oracle._load_index(self._known_html_contents))
+
+ def test_parse_html_upper(self):
+ """Test parsing of upper case html, although known content is lower."""
+ def _toupper(data):
+ return data.replace("<a", "<A").replace("html>", "HTML>")
+
+ self.assertEqual(
+ ['2013-10-17/', 'latest/'],
+ oracle._load_index(_toupper(self._known_html_api_versions)))
+ self.assertEqual(
+ ['meta_data.json', 'user_data'],
+ oracle._load_index(_toupper(self._known_html_contents)))
+
+ def test_parse_newline_list_with_endl(self):
+ """Test parsing of newline separated list with ending newline."""
+ self.assertEqual(
+ ['2013-10-17/', 'latest/'],
+ oracle._load_index("\n".join(["2013-10-17/", "latest/", ""])))
+ self.assertEqual(
+ ['meta_data.json', 'user_data'],
+ oracle._load_index("\n".join(["meta_data.json", "user_data", ""])))
+
+ def test_parse_newline_list_without_endl(self):
+ """Test parsing of newline separated list with no ending newline.
+
+ Actual openstack implementation does not include trailing newline."""
+ self.assertEqual(
+ ['2013-10-17/', 'latest/'],
+ oracle._load_index("\n".join(["2013-10-17/", "latest/"])))
+ self.assertEqual(
+ ['meta_data.json', 'user_data'],
+ oracle._load_index("\n".join(["meta_data.json", "user_data"])))
+
+
+# vi: ts=4 expandtab
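Note: the TestLoadIndex cases above fix the two accepted index formats. Below is a minimal sketch of a parser satisfying those assertions; the upstream function is oracle._load_index, and this is illustrative rather than the actual implementation.

    import re

    def _load_index(content):
        """Return entries from an openstack-style index, html or plain text."""
        if content.lower().startswith('<html>'):
            # html form: pull the link target out of each anchor tag.
            entries = re.findall(r'<a\s+href="([^"]+)"', content, re.IGNORECASE)
        else:
            # plain form: one entry per line, possibly with trailing newline.
            entries = content.splitlines()
        return [e for e in entries if e and e != '../']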
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 73c31772..3f99b58c 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -41,6 +41,12 @@ VALID_KEY_TYPES = (
)
+DISABLE_USER_OPTS = (
+ "no-port-forwarding,no-agent-forwarding,"
+ "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
+ " rather than the user \\\"$DISABLE_USER\\\".\';echo;sleep 10\"")
+
+
class AuthKeyLine(object):
def __init__(self, source, keytype=None, base64=None,
comment=None, options=None):
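Note: DISABLE_USER_OPTS is a template; before being written as authorized_keys options, the $USER and $DISABLE_USER placeholders are substituted per account (cc_ssh's credential handling performs an equivalent replacement). A sketch with a hypothetical helper:

    def disable_user_opts_for(opts_template, user, disable_user='root'):
        # '$USER' names the account to log in as; '$DISABLE_USER' names the
        # account whose key is being fenced off with these options.
        opts = opts_template.replace('$USER', user)
        return opts.replace('$DISABLE_USER', disable_user)

    # disable_user_opts_for(DISABLE_USER_OPTS, 'ubuntu') yields an
    # authorized_keys options string that prints the login hint, sleeps,
    # and closes the connection instead of granting a shell.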
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index c132b57d..8a064124 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -17,10 +17,11 @@ from cloudinit.settings import (
from cloudinit import handlers
# Default handlers (used if not overridden)
-from cloudinit.handlers import boot_hook as bh_part
-from cloudinit.handlers import cloud_config as cc_part
-from cloudinit.handlers import shell_script as ss_part
-from cloudinit.handlers import upstart_job as up_part
+from cloudinit.handlers.boot_hook import BootHookPartHandler
+from cloudinit.handlers.cloud_config import CloudConfigPartHandler
+from cloudinit.handlers.jinja_template import JinjaTemplatePartHandler
+from cloudinit.handlers.shell_script import ShellScriptPartHandler
+from cloudinit.handlers.upstart_job import UpstartJobPartHandler
from cloudinit.event import EventType
@@ -87,7 +88,7 @@ class Init(object):
# from whatever it was to a new set...
if self.datasource is not NULL_DATA_SOURCE:
self.datasource.distro = self._distro
- self.datasource.sys_cfg = system_config
+ self.datasource.sys_cfg = self.cfg
return self._distro
@property
@@ -413,12 +414,17 @@ class Init(object):
'datasource': self.datasource,
})
# TODO(harlowja) Hmmm, should we dynamically import these??
+ cloudconfig_handler = CloudConfigPartHandler(**opts)
+ shellscript_handler = ShellScriptPartHandler(**opts)
def_handlers = [
- cc_part.CloudConfigPartHandler(**opts),
- ss_part.ShellScriptPartHandler(**opts),
- bh_part.BootHookPartHandler(**opts),
- up_part.UpstartJobPartHandler(**opts),
+ cloudconfig_handler,
+ shellscript_handler,
+ BootHookPartHandler(**opts),
+ UpstartJobPartHandler(**opts),
]
+ opts.update(
+ {'sub_handlers': [cloudconfig_handler, shellscript_handler]})
+ def_handlers.append(JinjaTemplatePartHandler(**opts))
return def_handlers
def _default_userdata_handlers(self):
@@ -510,7 +516,7 @@ class Init(object):
# The default frequency if handlers don't have one
'frequency': frequency,
# This will be used when new handlers are found
- # to help write there contents to files with numbered
+ # to help write their contents to files with numbered
# names...
'handlercount': 0,
'excluded': excluded,
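Note: the wiring above registers the jinja handler alongside the defaults and hands it the cloud-config and shell-script handlers as sub_handlers: a jinja part is rendered against instance data first, then the rendered payload is dispatched to whichever sub-handler matches it. Illustrative only, a user-data part that exercises the new path; the '## template: jinja' first line is what routes it to JinjaTemplatePartHandler, and the v1.instance_id key name is assumed from the standardized v1 instance data:

    JINJA_CLOUD_CONFIG = """\
    ## template: jinja
    #cloud-config
    runcmd:
      - echo rendered for instance {{ v1.instance_id }} >> /var/tmp/ci.log
    """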
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index 7e7acb86..b668674b 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -13,6 +13,7 @@
import collections
import re
+
try:
from Cheetah.Template import Template as CTemplate
CHEETAH_AVAILABLE = True
@@ -20,23 +21,44 @@ except (ImportError, AttributeError):
CHEETAH_AVAILABLE = False
try:
- import jinja2
+ from jinja2.runtime import implements_to_string
from jinja2 import Template as JTemplate
+ from jinja2 import DebugUndefined as JUndefined
JINJA_AVAILABLE = True
except (ImportError, AttributeError):
+ from cloudinit.helpers import identity
+ implements_to_string = identity
JINJA_AVAILABLE = False
+ JUndefined = object
from cloudinit import log as logging
from cloudinit import type_utils as tu
from cloudinit import util
+
LOG = logging.getLogger(__name__)
TYPE_MATCHER = re.compile(r"##\s*template:(.*)", re.I)
BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)')
+MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/'
+
+
+@implements_to_string # Needed for python2.7. Otherwise cached super.__str__
+class UndefinedJinjaVariable(JUndefined):
+ """Class used to represent any undefined jinja template varible."""
+
+ def __str__(self):
+ return u'%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name)
+
+ def __sub__(self, other):
+ other = str(other).replace(MISSING_JINJA_PREFIX, '')
+ raise TypeError(
+ 'Undefined jinja variable: "{this}-{other}". Jinja tried'
+ ' subtraction. Perhaps you meant "{this}_{other}"?'.format(
+ this=self._undefined_name, other=other))
def basic_render(content, params):
- """This does simple replacement of bash variable like templates.
+ """This does sumple replacement of bash variable like templates.
It identifies patterns like ${a} or $a and can also identify patterns like
${a.b} or $a.b which will look for a key 'b' in the dictionary rooted
@@ -82,7 +104,7 @@ def detect_template(text):
# keep_trailing_newline is in jinja2 2.7+, not 2.6
add = "\n" if content.endswith("\n") else ""
return JTemplate(content,
- undefined=jinja2.StrictUndefined,
+ undefined=UndefinedJinjaVariable,
trim_blocks=True).render(**params) + add
if text.find("\n") != -1:
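Note: the switch from StrictUndefined to UndefinedJinjaVariable makes rendering total: an undefined variable no longer raises, it renders as a tagged placeholder that is easy to grep for. A minimal sketch of the behavior, assuming jinja2 is installed (python3 shown; the diff's implements_to_string decorator covers python2):

    from jinja2 import DebugUndefined, Template

    MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/'

    class UndefinedJinjaVariable(DebugUndefined):
        """Render any undefined template variable as a visible marker."""

        def __str__(self):
            return u'%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name)

    tmpl = Template('hostname: {{ hostname }}',
                    undefined=UndefinedJinjaVariable)
    print(tmpl.render())  # hostname: CI_MISSING_JINJA_VAR/hostname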
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 5bfe7fa4..2eb7b0cd 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -10,16 +10,16 @@ import shutil
import sys
import tempfile
import time
-import unittest
import mock
import six
import unittest2
+from unittest2.util import strclass
try:
- from contextlib import ExitStack
+ from contextlib import ExitStack, contextmanager
except ImportError:
- from contextlib2 import ExitStack
+ from contextlib2 import ExitStack, contextmanager
try:
from configparser import ConfigParser
@@ -28,11 +28,18 @@ except ImportError:
from cloudinit.config.schema import (
SchemaValidationError, validate_cloudconfig_schema)
+from cloudinit import cloud
+from cloudinit import distros
from cloudinit import helpers as ch
+from cloudinit.sources import DataSourceNone
+from cloudinit.templater import JINJA_AVAILABLE
from cloudinit import util
+_real_subp = util.subp
+
# Used for skipping tests
SkipTest = unittest2.SkipTest
+skipIf = unittest2.skipIf
# Used for detecting different python versions
PY2 = False
@@ -112,6 +119,9 @@ class TestCase(unittest2.TestCase):
super(TestCase, self).setUp()
self.reset_global_state()
+ def shortDescription(self):
+ return strclass(self.__class__) + '.' + self._testMethodName
+
def add_patch(self, target, attr, *args, **kwargs):
"""Patches specified target object and sets it as attr on test
instance also schedules cleanup"""
@@ -140,6 +150,17 @@ class CiTestCase(TestCase):
# Subclass overrides for specific test behavior
# Whether or not a unit test needs logfile setup
with_logs = False
+ allowed_subp = False
+ SUBP_SHELL_TRUE = "shell=true"
+
+ @contextmanager
+ def allow_subp(self, allowed_subp):
+ orig = self.allowed_subp
+ try:
+ self.allowed_subp = allowed_subp
+ yield
+ finally:
+ self.allowed_subp = orig
def setUp(self):
super(CiTestCase, self).setUp()
@@ -152,11 +173,41 @@ class CiTestCase(TestCase):
handler.setFormatter(formatter)
self.old_handlers = self.logger.handlers
self.logger.handlers = [handler]
+ if self.allowed_subp is True:
+ util.subp = _real_subp
+ else:
+ util.subp = self._fake_subp
+
+ def _fake_subp(self, *args, **kwargs):
+ if 'args' in kwargs:
+ cmd = kwargs['args']
+ else:
+ cmd = args[0]
+
+ if not isinstance(cmd, six.string_types):
+ cmd = cmd[0]
+ pass_through = False
+ if not isinstance(self.allowed_subp, (list, bool)):
+ raise TypeError("self.allowed_subp supports list or bool.")
+ if isinstance(self.allowed_subp, bool):
+ pass_through = self.allowed_subp
+ else:
+ pass_through = (
+ (cmd in self.allowed_subp) or
+ (self.SUBP_SHELL_TRUE in self.allowed_subp and
+ kwargs.get('shell')))
+ if pass_through:
+ return _real_subp(*args, **kwargs)
+ raise Exception(
+ "called subp. set self.allowed_subp=True to allow\n subp(%s)" %
+ ', '.join([str(repr(a)) for a in args] +
+ ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()]))
def tearDown(self):
if self.with_logs:
# Remove the handler we setup
logging.getLogger().handlers = self.old_handlers
+ util.subp = _real_subp
super(CiTestCase, self).tearDown()
def tmp_dir(self, dir=None, cleanup=True):
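Note: with the guard above, any test that shells out must opt in, either class-wide via allowed_subp or locally via the allow_subp context manager. Illustrative usage only; the class and commands are made up:

    class TestLxcInfo(CiTestCase):

        allowed_subp = ['lxc']  # these argv[0] values reach the real subp

        def test_lxc_version(self):
            util.subp(['lxc', '--version'])    # allowed by the class list
            with self.allow_subp([self.SUBP_SHELL_TRUE]):
                util.subp('true', shell=True)  # shell=True allowed here only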
@@ -187,6 +238,29 @@ class CiTestCase(TestCase):
"""
raise SystemExit(code)
+ def tmp_cloud(self, distro, sys_cfg=None, metadata=None):
+ """Create a cloud with tmp working directory paths.
+
+ @param distro: Name of the distro to attach to the cloud.
+ @param sys_cfg: Optional system configuration dict; defaults to {}.
+ @param metadata: Optional metadata to set on the datasource.
+
+ @return: The built cloud instance.
+ """
+ self.new_root = self.tmp_dir()
+ if not sys_cfg:
+ sys_cfg = {}
+ tmp_paths = {}
+ for var in ['templates_dir', 'run_dir', 'cloud_dir']:
+ tmp_paths[var] = self.tmp_path(var, dir=self.new_root)
+ util.ensure_dir(tmp_paths[var])
+ self.paths = ch.Paths(tmp_paths)
+ cls = distros.fetch(distro)
+ mydist = cls(distro, sys_cfg, self.paths)
+ myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, self.paths)
+ if metadata:
+ myds.metadata.update(metadata)
+ return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None)
+
class ResourceUsingTestCase(CiTestCase):
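Note: tmp_cloud builds a throwaway Cloud (a DataSourceNone plus a real distro object) rooted in tmp paths, which keeps config-module tests off the host filesystem. Illustrative use; the test class and assertion are made up:

    class TestExampleModule(CiTestCase):

        def test_metadata_reaches_datasource(self):
            # the distro name must be one cloudinit.distros.fetch() knows.
            mycloud = self.tmp_cloud(
                'ubuntu', metadata={'local-hostname': 'test-host'})
            self.assertEqual(
                'test-host', mycloud.datasource.metadata['local-hostname'])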
@@ -300,6 +374,13 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
self.patchOS(root)
return root
+ @contextmanager
+ def reRooted(self, root=None):
+ try:
+ yield self.reRoot(root)
+ finally:
+ self.patched_funcs.close()
+
class HttprettyTestCase(CiTestCase):
# necessary as http_proxy gets in the way of httpretty
@@ -426,21 +507,6 @@ def readResource(name, mode='r'):
try:
- skipIf = unittest.skipIf
-except AttributeError:
- # Python 2.6. Doesn't have to be high fidelity.
- def skipIf(condition, reason):
- def decorator(func):
- def wrapper(*args, **kws):
- if condition:
- return func(*args, **kws)
- else:
- print(reason, file=sys.stderr)
- return wrapper
- return decorator
-
-
-try:
import jsonschema
assert jsonschema # avoid pyflakes error F401: import unused
_missing_jsonschema_dep = False
@@ -453,6 +519,14 @@ def skipUnlessJsonSchema():
_missing_jsonschema_dep, "No python-jsonschema dependency present.")
+def skipUnlessJinja():
+ return skipIf(not JINJA_AVAILABLE, "No jinja dependency present.")
+
+
+def skipIfJinja():
+ return skipIf(JINJA_AVAILABLE, "Jinja dependency present.")
+
+
# older versions of mock do not have the useful 'assert_not_called'
if not hasattr(mock.Mock, 'assert_not_called'):
def __mock_assert_not_called(mmock):
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 6a31e505..edb0c18f 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -57,6 +57,34 @@ OS_RELEASE_CENTOS = dedent("""\
REDHAT_SUPPORT_PRODUCT_VERSION="7"
""")
+OS_RELEASE_REDHAT_7 = dedent("""\
+ NAME="Red Hat Enterprise Linux Server"
+ VERSION="7.5 (Maipo)"
+ ID="rhel"
+ ID_LIKE="fedora"
+ VARIANT="Server"
+ VARIANT_ID="server"
+ VERSION_ID="7.5"
+ PRETTY_NAME="Red Hat"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:GA:server"
+ HOME_URL="https://www.redhat.com/"
+ BUG_REPORT_URL="https://bugzilla.redhat.com/"
+
+ REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
+ REDHAT_BUGZILLA_PRODUCT_VERSION=7.5
+ REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7.5"
+""")
+
+REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
+REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
+REDHAT_RELEASE_REDHAT_6 = (
+ "Red Hat Enterprise Linux Server release 6.10 (Santiago)")
+REDHAT_RELEASE_REDHAT_7 = (
+ "Red Hat Enterprise Linux Server release 7.5 (Maipo)")
+
+
OS_RELEASE_DEBIAN = dedent("""\
PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
NAME="Debian GNU/Linux"
@@ -337,6 +365,12 @@ class TestGetLinuxDistro(CiTestCase):
if path == '/etc/os-release':
return 1
+ @classmethod
+ def redhat_release_exists(cls, path):
+ """Side effect function: report /etc/redhat-release as existing."""
+ if path == '/etc/redhat-release':
+ return 1
+
@mock.patch('cloudinit.util.load_file')
def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
"""Verify we get the correct name if the os-release file has
@@ -356,8 +390,48 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
@mock.patch('cloudinit.util.load_file')
- def test_get_linux_centos(self, m_os_release, m_path_exists):
- """Verify we get the correct name and release name on CentOS."""
+ def test_get_linux_centos6(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on CentOS 6."""
+ m_os_release.return_value = REDHAT_RELEASE_CENTOS_6
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('centos', '6.10', 'Final'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists):
+ """Verify the correct release info on CentOS 7 without os-release."""
+ m_os_release.return_value = REDHAT_RELEASE_CENTOS_7
+ m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('centos', '7.5.1804', 'Core'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 7 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_REDHAT_7
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 7 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_REDHAT_7
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 6 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_REDHAT_6
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('redhat', '6.10', 'Santiago'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_copr_centos(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on COPR CentOS."""
m_os_release.return_value = OS_RELEASE_CENTOS
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
diff --git a/cloudinit/util.py b/cloudinit/util.py
index d0b0e90a..50680960 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -576,12 +576,42 @@ def get_cfg_option_int(yobj, key, default=0):
return int(get_cfg_option_str(yobj, key, default=default))
+def _parse_redhat_release(release_file=None):
+ """Return a dictionary of distro info fields from /etc/redhat-release.
+
+ Dict keys will align with /etc/os-release keys:
+ ID, VERSION_ID, VERSION_CODENAME
+ """
+
+ if not release_file:
+ release_file = '/etc/redhat-release'
+ if not os.path.exists(release_file):
+ return {}
+ redhat_release = load_file(release_file)
+ redhat_regex = (
+ r'(?P<name>.+) release (?P<version>[\d\.]+) '
+ r'\((?P<codename>[^)]+)\)')
+ match = re.match(redhat_regex, redhat_release)
+ if match:
+ group = match.groupdict()
+ group['name'] = group['name'].lower().partition(' linux')[0]
+ if group['name'] == 'red hat enterprise':
+ group['name'] = 'redhat'
+ return {'ID': group['name'], 'VERSION_ID': group['version'],
+ 'VERSION_CODENAME': group['codename']}
+ return {}
+
+
def get_linux_distro():
distro_name = ''
distro_version = ''
flavor = ''
+ os_release = {}
if os.path.exists('/etc/os-release'):
os_release = load_shell_content(load_file('/etc/os-release'))
+ if not os_release:
+ os_release = _parse_redhat_release()
+ if os_release:
distro_name = os_release.get('ID', '')
distro_version = os_release.get('VERSION_ID', '')
if 'sles' in distro_name or 'suse' in distro_name:
@@ -594,9 +624,11 @@ def get_linux_distro():
flavor = os_release.get('VERSION_CODENAME', '')
if not flavor:
match = re.match(r'[^ ]+ \((?P<codename>[^)]+)\)',
- os_release.get('VERSION'))
+ os_release.get('VERSION', ''))
if match:
flavor = match.groupdict()['codename']
+ if distro_name == 'rhel':
+ distro_name = 'redhat'
else:
dist = ('', '', '')
try:
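Note: the fallback added above lets get_linux_distro classify RHEL/CentOS systems that ship /etc/redhat-release but no /etc/os-release. A quick check of the parsing, with the regex copied from the hunk:

    import re

    redhat_regex = (
        r'(?P<name>.+) release (?P<version>[\d\.]+) '
        r'\((?P<codename>[^)]+)\)')
    match = re.match(redhat_regex, 'CentOS Linux release 7.5.1804 (Core)')
    group = match.groupdict()
    # {'name': 'CentOS Linux', 'version': '7.5.1804', 'codename': 'Core'}
    name = group['name'].lower().partition(' linux')[0]  # -> 'centos'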
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 3b60fc49..844a02e0 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "18.3"
+__VERSION__ = "18.4"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [
diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py
index f9f7a63c..1da90c40 100644
--- a/cloudinit/warnings.py
+++ b/cloudinit/warnings.py
@@ -130,7 +130,7 @@ def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
os.path.join(_get_warn_dir(cfg), name),
topline + "\n".join(fmtlines) + "\n" + topline)
- LOG.warning(topline + "\n".join(fmtlines) + "\n" + closeline)
+ LOG.warning("%s%s\n%s", topline, "\n".join(fmtlines), closeline)
if sleep:
LOG.debug("sleeping %d seconds for warning '%s'", sleep, name)