From ceb229043cec98d79aa8e72c6eb5e79f796a96d7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 11 Feb 2015 12:57:50 -0500 Subject: provide default final message in jinja to avoid WARN in log --- doc/examples/cloud-config.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'doc/examples') diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index ed4eb7fc..1c59c2cf 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -484,7 +484,9 @@ resize_rootfs: True # final_message # default: cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds # this message is written by cloud-final when the system is finished -# its first boot +# its first boot. +# This message is rendered as if it were a template. If you +# want jinja, you have to start the line with '## template:jinja\n' final_message: "The system is finally up, after $UPTIME seconds" # configure where output will go -- cgit v1.2.3 From ac445690ab7a69ac6e9c74b136de9968b0c839df Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 4 Mar 2015 12:42:34 -0500 Subject: fix logging perms with list rather than single --- cloudinit/settings.py | 2 +- cloudinit/stages.py | 21 +++++++++++++++------ doc/examples/cloud-config.txt | 2 ++ 3 files changed, 18 insertions(+), 7 deletions(-) (limited to 'doc/examples') diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 5efcb0b0..b61e5613 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -47,7 +47,7 @@ CFG_BUILTIN = { ], 'def_log_file': '/var/log/cloud-init.log', 'log_cfgs': [], - 'syslog_fix_perms': 'syslog:adm', + 'syslog_fix_perms': ['syslog:adm', 'root:adm'], 'system_info': { 'paths': { 'cloud_dir': '/var/lib/cloud', diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 45d64823..d28e765b 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -148,16 +148,25 @@ class Init(object): def _initialize_filesystem(self): util.ensure_dirs(self._initial_subdirs()) log_file = util.get_cfg_option_str(self.cfg, 'def_log_file') - perms = util.get_cfg_option_str(self.cfg, 'syslog_fix_perms') if log_file: util.ensure_file(log_file) - if perms: - u, g = util.extract_usergroup(perms) + perms = self.cfg.get('syslog_fix_perms') + if not perms: + perms = {} + if not isinstance(perms, list): + perms = [perms] + + error = None + for perm in perms: + u, g = util.extract_usergroup(perm) try: util.chownbyname(log_file, u, g) - except OSError: - util.logexc(LOG, "Unable to change the ownership of %s to " - "user %s, group %s", log_file, u, g) + return + except OSError as e: + error = e + + LOG.warn("Failed changing perms on '%s'. tried: %s. %s", + log_file, ','.join(perms), error) def read_cfg(self, extra_fns=None): # None check so that we don't keep on re-loading if empty diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 1c59c2cf..1236796c 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -536,6 +536,8 @@ timezone: US/Eastern # # to remedy this situation, 'def_log_file' can be set to a filename # and syslog_fix_perms to a string containing ":" +# if syslog_fix_perms is a list, it will iterate through and use the +# first pair that does not raise error. 
# # the default values are '/var/log/cloud-init.log' and 'syslog:adm' # the value of 'def_log_file' should match what is configured in logging -- cgit v1.2.3 From 7ea6b43ea863c2e0774be3ef562b47fc21dc510d Mon Sep 17 00:00:00 2001 From: Brent Baude Date: Thu, 14 May 2015 16:12:48 -0500 Subject: Adding an example file for rh_subscription in doc/examples --- doc/examples/cloud-config-rh_subscription.txt | 49 +++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 doc/examples/cloud-config-rh_subscription.txt (limited to 'doc/examples') diff --git a/doc/examples/cloud-config-rh_subscription.txt b/doc/examples/cloud-config-rh_subscription.txt new file mode 100644 index 00000000..be121338 --- /dev/null +++ b/doc/examples/cloud-config-rh_subscription.txt @@ -0,0 +1,49 @@ +#cloud-config + +# register your Red Hat Enterprise Linux based operating system +# +# this cloud-init plugin is capable of registering by username +# and password *or* activation and org. Following a successfully +# registration you can: +# - auto-attach subscriptions +# - set the service level +# - add subscriptions based on its pool ID +# - enable yum repositories based on its repo id +# - disable yum repositories based on its repo id +# - alter the rhsm_baseurl and server-hostname in the +# /etc/rhsm/rhs.conf file + +rh_subscription: + username: joe@foo.bar + + ## Quote your password if it has symbols to be safe + password: '1234abcd' + + ## If you prefer, you can use the activation key and + ## org instead of username and password. Be sure to + ## comment out username and password + + #activation-key: foobar + #org: 12345 + + ## Uncomment to auto-attach subscriptions to your system + #auto-attach: True + + ## Uncomment to set the service level for your + ## subscriptions + #service-level: self-support + + ## Uncomment to add pools (needs to be a list of IDs) + #add-pool: [] + + ## Uncomment to add or remove yum repos + ## (needs to be a list of repo IDs) + #enable-repo: [] + #disable-repo: [] + + ## Uncomment to alter the baseurl in /etc/rhsm/rhsm.conf + #rhsm-baseurl: http://url + + ## Uncomment to alter the server hostname in + ## /etc/rhsm/rhsm.conf + #server-hostname: foo.bar.com -- cgit v1.2.3 From 6970029c661ab858a55dd467e5c593694ab39512 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Jul 2015 16:58:57 -0400 Subject: commit initial re-work/re-implementation of syslog config --- cloudinit/config/cc_syslog.py | 183 +++++++++++++++++++++ doc/examples/cloud-config-syslog.txt | 30 ++++ .../unittests/test_handler/test_handler_syslog.py | 32 ++++ 3 files changed, 245 insertions(+) create mode 100644 cloudinit/config/cc_syslog.py create mode 100644 doc/examples/cloud-config-syslog.txt create mode 100644 tests/unittests/test_handler/test_handler_syslog.py (limited to 'doc/examples') diff --git a/cloudinit/config/cc_syslog.py b/cloudinit/config/cc_syslog.py new file mode 100644 index 00000000..21a8e8a9 --- /dev/null +++ b/cloudinit/config/cc_syslog.py @@ -0,0 +1,183 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# +# Author: Scott Moser +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from cloudinit import log as logging +from cloudinit import util +from cloudinit.settings import PER_INSTANCE + +import re + +LOG = logging.getLogger(__name__) + +frequency = PER_INSTANCE + +BUILTIN_CFG = { + 'remotes_file': '/etc/rsyslog.d/20-cloudinit-remotes.conf', + 'remotes': {}, + 'service_name': 'rsyslog', +} + +COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*') +HOST_PORT_RE = re.compile( + r'^(?P[@]{0,2})' + '(([[](?P[^\]]*)[\]])|(?P[^:]*))' + '([:](?P[0-9]+))?$') + + +def parse_remotes_line(line, name=None): + try: + data, comment = COMMENT_RE.split(line) + comment = comment.strip() + except ValueError: + data, comment = (line, None) + + toks = data.strip().split() + match = None + if len(toks) == 1: + host_port = data + elif len(toks) == 2: + match, host_port = toks + else: + raise ValueError("line had multiple spaces: %s" % data) + + toks = HOST_PORT_RE.match(host_port) + + if not toks: + raise ValueError("Invalid host specification '%s'" % host_port) + + proto = toks.group('proto') + addr = toks.group('addr') or toks.group('bracket_addr') + port = toks.group('port') + print("host_port: %s" % addr) + print("port: %s" % port) + + if addr.startswith("[") and not addr.endswith("]"): + raise ValueError("host spec had invalid brackets: %s" % addr) + + if comment and not name: + name = comment + + t = SyslogRemotesLine(name=name, match=match, proto=proto, + addr=addr, port=port) + t.validate() + return t + + +class SyslogRemotesLine(object): + def __init__(self, name=None, match=None, proto=None, addr=None, + port=None): + if not match: + match = "*.*" + self.name = name + self.match = match + if proto == "@": + proto = "udp" + elif proto == "@@": + proto = "tcp" + self.proto = proto + + self.addr = addr + if port: + self.port = int(port) + else: + self.port = None + + def validate(self): + if self.port: + try: + int(self.port) + except ValueError: + raise ValueError("port '%s' is not an integer" % self.port) + + if not self.addr: + raise ValueError("address is required") + + def __repr__(self): + return "[name=%s match=%s proto=%s address=%s port=%s]" % ( + self.name, self.match, self.proto, self.addr, self.port + ) + + def __str__(self): + buf = self.match + " " + if self.proto == "udp": + buf += " @" + elif self.proto == "tcp": + buf += " @@" + + if ":" in self.addr: + buf += "[" + self.addr + "]" + else: + buf += self.addr + + if self.port: + buf += ":%s" % self.port + + if self.name: + buf += " # %s" % self.name + return buf + + +def remotes_to_rsyslog_cfg(remotes, header=None): + if not remotes: + return None + lines = [] + if header is not None: + lines.append(header) + for name, line in remotes.items(): + try: + lines.append(parse_remotes_line(line, name=name)) + except ValueError as e: + LOG.warn("failed loading remote %s: %s [%s]", name, line, e) + return '\n'.join(str(lines)) + '\n' + + +def reload_syslog(systemd, service='rsyslog'): + if systemd: + cmd = ['systemctl', 'reload-or-try-restart', service] + else: + cmd = ['service', service, 'reload'] + try: + util.subp(cmd, capture=True) + except util.ProcessExecutionError as e: + LOG.warn("Failed to reload syslog using '%s': %s", ' '.join(cmd), e) + + +def handle(name, cfg, cloud, log, args): + cfgin = cfg.get('syslog') + if not cfgin: + cfgin = {} + mycfg = util.mergemanydict([cfgin, BUILTIN_CFG]) + + remotes_file = mycfg.get('remotes_file') + if util.is_false(remotes_file): + LOG.debug("syslog/remotes_file empty, 
doing nothing") + return + + remotes = mycfg.get('remotes_dict', {}) + if remotes and not isinstance(remotes, dict): + LOG.warn("syslog/remotes: content is not a dictionary") + return + + config_data = remotes_to_rsyslog_cfg( + remotes, header="#cloud-init syslog module") + + util.write_file(remotes_file, config_data) + + reload_syslog( + systemd=cloud.distro.uses_systemd(), + service=mycfg.get('service_name')) diff --git a/doc/examples/cloud-config-syslog.txt b/doc/examples/cloud-config-syslog.txt new file mode 100644 index 00000000..9ec5e120 --- /dev/null +++ b/doc/examples/cloud-config-syslog.txt @@ -0,0 +1,30 @@ +## syslog module allows you to configure the systems syslog. +## configuration of syslog is under the top level cloud-config +## entry 'syslog'. +## +## "remotes" +## remotes is a dictionary. items are of 'name: remote_info' +## name is simply a name (example 'maas'). It has no importance other than +## for cloud-init merging configs +## +## remote_info is of the format +## * optional filter for log messages +## default if not present: *.* +## * optional leading '@' or '@@' (indicates udp or tcp). +## default if not present (udp): @ +## This is rsyslog format for that. if not present, is '@' which is udp +## * ipv4 or ipv6 or hostname +## ipv6 addresses must be encoded in [::1] format. example: @[fd00::1]:514 +## * optional port +## port defaults to 514 +## +## Example: +#cloud-config +syslog: + remotes: + # udp to host 'maas.mydomain' port 514 + maashost: maas.mydomain + # udp to ipv4 host on port 514 + maas: "@[10.5.1.56]:514" + # tcp to host ipv6 host on port 555 + maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555" diff --git a/tests/unittests/test_handler/test_handler_syslog.py b/tests/unittests/test_handler/test_handler_syslog.py new file mode 100644 index 00000000..bbfd521e --- /dev/null +++ b/tests/unittests/test_handler/test_handler_syslog.py @@ -0,0 +1,32 @@ +from cloudinit.config.cc_syslog import ( + parse_remotes_line, SyslogRemotesLine, remotes_to_rsyslog_cfg) +from cloudinit import util +from .. 
import helpers as t_help + + +class TestParseRemotesLine(t_help.TestCase): + def test_valid_port(self): + r = parse_remotes_line("foo:9") + self.assertEqual(9, r.port) + + def test_invalid_port(self): + with self.assertRaises(ValueError): + parse_remotes_line("*.* foo:abc") + + def test_valid_ipv6(self): + r = parse_remotes_line("*.* [::1]") + self.assertEqual("*.* [::1]", str(r)) + + def test_valid_ipv6_with_port(self): + r = parse_remotes_line("*.* [::1]:100") + self.assertEqual(r.port, 100) + self.assertEqual(r.addr, "::1") + self.assertEqual("*.* [::1]:100", str(r)) + + def test_invalid_multiple_colon(self): + with self.assertRaises(ValueError): + parse_remotes_line("*.* ::1:100") + + def test_name_in_string(self): + r = parse_remotes_line("syslog.host", name="foobar") + self.assertEqual("*.* syslog.host # foobar", str(r)) -- cgit v1.2.3 From 6dd505fd02e0933d8770c8932a927940f6a0e025 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Jul 2015 09:27:26 -0400 Subject: add support for 'remotes' --- cloudinit/config/cc_rsyslog.py | 156 ++++++++++++++++++++- cloudinit/config/cc_syslog.py | 2 +- doc/examples/cloud-config-rsyslog.txt | 37 +++++ doc/examples/cloud-config-syslog.txt | 30 ---- .../unittests/test_handler/test_handler_rsyslog.py | 38 ++++- 5 files changed, 227 insertions(+), 36 deletions(-) create mode 100644 doc/examples/cloud-config-rsyslog.txt delete mode 100644 doc/examples/cloud-config-syslog.txt (limited to 'doc/examples') diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 9599e925..8c02e826 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -37,10 +37,33 @@ Under 'rsyslog' you can define: For simply logging to an existing remote syslog server, via udp: configs: ["*.* @192.168.1.1"] + - remotes: [default={}] + This is a dictionary of name / value pairs. + In comparison to 'config's, it is more focused in that it only supports + remote syslog configuration. It is not rsyslog specific, and could + convert to other syslog implementations. + + Each entry in remotes is a 'name' and a 'value'. + * name: an string identifying the entry. good practice would indicate + using a consistent and identifiable string for the producer. + For example, the MAAS service could use 'maas' as the key. + * value consists of the following parts: + * optional filter for log messages + default if not present: *.* + * optional leading '@' or '@@' (indicates udp or tcp respectively). + default if not present (udp): @ + This is rsyslog format for that. if not present, is '@'. + * ipv4 or ipv6 or hostname + ipv6 addresses must be in [::1] format. (@[fd00::1]:514) + * optional port + port defaults to 514 + - config_filename: [default=20-cloud-config.conf] this is the file name to use if none is provided in a config entry. + - config_dir: [default=/etc/rsyslog.d] this directory is used for filenames that are not absolute paths. + - service_reload_command: [default="auto"] this command is executed if files have been written and thus the syslog daemon needs to be told. 
@@ -61,9 +84,12 @@ Example config: configs: - "*.* @@192.158.1.1" - content: "*.* @@192.0.2.1:10514" - - filename: 01-examplecom.conf + filename: 01-example.conf - content: | *.* @@syslogd.example.com + remotes: + maas: "192.168.1.1" + juju: "10.0.4.1" config_dir: config_dir config_filename: config_filename service_reload_command: [your, syslog, restart, command] @@ -77,6 +103,7 @@ Example Legacy config: """ import os +import re import six from cloudinit import log as logging @@ -85,6 +112,7 @@ from cloudinit import util DEF_FILENAME = "20-cloud-config.conf" DEF_DIR = "/etc/rsyslog.d" DEF_RELOAD = "auto" +DEF_REMOTES = {} KEYNAME_CONFIGS = 'configs' KEYNAME_FILENAME = 'config_filename' @@ -92,9 +120,15 @@ KEYNAME_DIR = 'config_dir' KEYNAME_RELOAD = 'service_reload_command' KEYNAME_LEGACY_FILENAME = 'rsyslog_filename' KEYNAME_LEGACY_DIR = 'rsyslog_dir' +KEYNAME_REMOTES = 'remotes' LOG = logging.getLogger(__name__) +COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*') +HOST_PORT_RE = re.compile( + r'^(?P[@]{0,2})' + '(([[](?P[^\]]*)[\]])|(?P[^:]*))' + '([:](?P[0-9]+))?$') def reload_syslog(command=DEF_RELOAD, systemd=False): service = 'rsyslog' @@ -124,7 +158,8 @@ def load_config(cfg): (KEYNAME_CONFIGS, [], list), (KEYNAME_DIR, DEF_DIR, six.string_types), (KEYNAME_FILENAME, DEF_FILENAME, six.string_types), - (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,))) + (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)), + (KEYNAME_REMOTES, DEF_REMOTES, dict)) for key, default, vtypes in fillup: if key not in mycfg or not isinstance(mycfg[key], vtypes): @@ -171,6 +206,113 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir): return files +def parse_remotes_line(line, name=None): + try: + data, comment = COMMENT_RE.split(line) + comment = comment.strip() + except ValueError: + data, comment = (line, None) + + toks = data.strip().split() + match = None + if len(toks) == 1: + host_port = data + elif len(toks) == 2: + match, host_port = toks + else: + raise ValueError("line had multiple spaces: %s" % data) + + toks = HOST_PORT_RE.match(host_port) + + if not toks: + raise ValueError("Invalid host specification '%s'" % host_port) + + proto = toks.group('proto') + addr = toks.group('addr') or toks.group('bracket_addr') + port = toks.group('port') + + if addr.startswith("[") and not addr.endswith("]"): + raise ValueError("host spec had invalid brackets: %s" % addr) + + if comment and not name: + name = comment + + t = SyslogRemotesLine(name=name, match=match, proto=proto, + addr=addr, port=port) + t.validate() + return t + + +class SyslogRemotesLine(object): + def __init__(self, name=None, match=None, proto=None, addr=None, + port=None): + if not match: + match = "*.*" + self.name = name + self.match = match + if proto == "@": + proto = "udp" + elif proto == "@@": + proto = "tcp" + self.proto = proto + + self.addr = addr + if port: + self.port = int(port) + else: + self.port = None + + def validate(self): + if self.port: + try: + int(self.port) + except ValueError: + raise ValueError("port '%s' is not an integer" % self.port) + + if not self.addr: + raise ValueError("address is required") + + def __repr__(self): + return "[name=%s match=%s proto=%s address=%s port=%s]" % ( + self.name, self.match, self.proto, self.addr, self.port + ) + + def __str__(self): + buf = self.match + " " + if self.proto == "udp": + buf += " @" + elif self.proto == "tcp": + buf += " @@" + + if ":" in self.addr: + buf += "[" + self.addr + "]" + else: + buf += self.addr + + if self.port: + buf += ":%s" % self.port + + if 
self.name: + buf += " # %s" % self.name + return buf + + +def remotes_to_rsyslog_cfg(remotes, header=None, footer=None): + if not remotes: + return None + lines = [] + if header is not None: + lines.append(header) + for name, line in remotes.items(): + try: + lines.append(parse_remotes_line(line, name=name)) + except ValueError as e: + LOG.warn("failed loading remote %s: %s [%s]", name, line, e) + if footer is not None: + lines.append(footer) + return '\n'.join(str(lines)) + '\n' + + def handle(name, cfg, cloud, log, _args): if 'rsyslog' not in cfg: log.debug(("Skipping module named %s," @@ -178,6 +320,16 @@ def handle(name, cfg, cloud, log, _args): return mycfg = load_config(cfg) + configs = mycfg[KEYNAME_CONFIGS] + + if mycfg[KEYNAME_REMOTES]: + configs.append( + remotes_to_rsyslog_cfg( + mycfg[KEYNAME_REMOTES], + header="# begin remotes", + footer="# end remotes", + )) + if not mycfg['configs']: log.debug("Empty config rsyslog['configs'], nothing to do") return diff --git a/cloudinit/config/cc_syslog.py b/cloudinit/config/cc_syslog.py index 21a8e8a9..27793f8b 100644 --- a/cloudinit/config/cc_syslog.py +++ b/cloudinit/config/cc_syslog.py @@ -168,7 +168,7 @@ def handle(name, cfg, cloud, log, args): LOG.debug("syslog/remotes_file empty, doing nothing") return - remotes = mycfg.get('remotes_dict', {}) + remotes = mycfg.get('remotes', {}) if remotes and not isinstance(remotes, dict): LOG.warn("syslog/remotes: content is not a dictionary") return diff --git a/doc/examples/cloud-config-rsyslog.txt b/doc/examples/cloud-config-rsyslog.txt new file mode 100644 index 00000000..ff60e3a8 --- /dev/null +++ b/doc/examples/cloud-config-rsyslog.txt @@ -0,0 +1,37 @@ +## the rsyslog module allows you to configure the systems syslog. +## configuration of syslog is under the top level cloud-config +## entry 'rsyslog'. +## +## Example: +#cloud-config +rsyslog: + remotes: + # udp to host 'maas.mydomain' port 514 + maashost: maas.mydomain + # udp to ipv4 host on port 514 + maas: "@[10.5.1.56]:514" + # tcp to host ipv6 host on port 555 + maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555" + configs: + - "*.* @@192.158.1.1" + - content: "*.* @@192.0.2.1:10514" + filename: 01-example.conf + - content: | + *.* @@syslogd.example.com + config_dir: /etc/rsyslog.d + config_filename: 20-cloud-config.conf + service_reload_command: [your, syslog, reload, command] + +## Additionally the following legacy format is supported +## it is converted into the format above before use. +## rsyslog_filename -> rsyslog/config_filename +## rsyslog_dir -> rsyslog/config_dir +## rsyslog -> rsyslog/configs +# rsyslog: +# - "*.* @@192.158.1.1" +# - content: "*.* @@192.0.2.1:10514" +# filename: 01-example.conf +# - content: | +# *.* @@syslogd.example.com +# rsyslog_filename: 20-cloud-config.conf +# rsyslog_dir: /etc/rsyslog.d diff --git a/doc/examples/cloud-config-syslog.txt b/doc/examples/cloud-config-syslog.txt deleted file mode 100644 index 9ec5e120..00000000 --- a/doc/examples/cloud-config-syslog.txt +++ /dev/null @@ -1,30 +0,0 @@ -## syslog module allows you to configure the systems syslog. -## configuration of syslog is under the top level cloud-config -## entry 'syslog'. -## -## "remotes" -## remotes is a dictionary. items are of 'name: remote_info' -## name is simply a name (example 'maas'). It has no importance other than -## for cloud-init merging configs -## -## remote_info is of the format -## * optional filter for log messages -## default if not present: *.* -## * optional leading '@' or '@@' (indicates udp or tcp). 
-## default if not present (udp): @ -## This is rsyslog format for that. if not present, is '@' which is udp -## * ipv4 or ipv6 or hostname -## ipv6 addresses must be encoded in [::1] format. example: @[fd00::1]:514 -## * optional port -## port defaults to 514 -## -## Example: -#cloud-config -syslog: - remotes: - # udp to host 'maas.mydomain' port 514 - maashost: maas.mydomain - # udp to ipv4 host on port 514 - maas: "@[10.5.1.56]:514" - # tcp to host ipv6 host on port 555 - maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555" diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/test_handler/test_handler_rsyslog.py index 3501ff95..0bace685 100644 --- a/tests/unittests/test_handler/test_handler_rsyslog.py +++ b/tests/unittests/test_handler/test_handler_rsyslog.py @@ -3,7 +3,8 @@ import shutil import tempfile from cloudinit.config.cc_rsyslog import ( - load_config, DEF_FILENAME, DEF_DIR, DEF_RELOAD, apply_rsyslog_changes) + apply_rsyslog_changes, DEF_DIR, DEF_FILENAME, DEF_RELOAD, load_config, + parse_remotes_line) from cloudinit import util from .. import helpers as t_help @@ -17,6 +18,7 @@ class TestLoadConfig(t_help.TestCase): 'config_dir': DEF_DIR, 'service_reload_command': DEF_RELOAD, 'configs': [], + 'remotes': {}, } def test_legacy_full(self): @@ -24,12 +26,14 @@ class TestLoadConfig(t_help.TestCase): 'rsyslog': ['*.* @192.168.1.1'], 'rsyslog_dir': "mydir", 'rsyslog_filename': "myfilename"}) - expected = { + self.basecfg.update({ 'configs': ['*.* @192.168.1.1'], 'config_dir': "mydir", 'config_filename': 'myfilename', 'service_reload_command': 'auto'} - self.assertEqual(found, expected) + ) + + self.assertEqual(found, self.basecfg) def test_legacy_defaults(self): found = load_config({ @@ -111,3 +115,31 @@ class TestApplyChanges(t_help.TestCase): expected_content = '\n'.join([c for c in configs]) + '\n' found_content = util.load_file(fname) self.assertEqual(expected_content, found_content) + + +class TestParseRemotesLine(t_help.TestCase): + def test_valid_port(self): + r = parse_remotes_line("foo:9") + self.assertEqual(9, r.port) + + def test_invalid_port(self): + with self.assertRaises(ValueError): + parse_remotes_line("*.* foo:abc") + + def test_valid_ipv6(self): + r = parse_remotes_line("*.* [::1]") + self.assertEqual("*.* [::1]", str(r)) + + def test_valid_ipv6_with_port(self): + r = parse_remotes_line("*.* [::1]:100") + self.assertEqual(r.port, 100) + self.assertEqual(r.addr, "::1") + self.assertEqual("*.* [::1]:100", str(r)) + + def test_invalid_multiple_colon(self): + with self.assertRaises(ValueError): + parse_remotes_line("*.* ::1:100") + + def test_name_in_string(self): + r = parse_remotes_line("syslog.host", name="foobar") + self.assertEqual("*.* syslog.host # foobar", str(r)) -- cgit v1.2.3 From 8c6a61bb87119affc047af3ec94c1eaae324f4f6 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Jul 2015 10:48:49 -0400 Subject: doc: mention how to run syslog server --- doc/examples/cloud-config-rsyslog.txt | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'doc/examples') diff --git a/doc/examples/cloud-config-rsyslog.txt b/doc/examples/cloud-config-rsyslog.txt index ff60e3a8..d54960e8 100644 --- a/doc/examples/cloud-config-rsyslog.txt +++ b/doc/examples/cloud-config-rsyslog.txt @@ -35,3 +35,12 @@ rsyslog: # *.* @@syslogd.example.com # rsyslog_filename: 20-cloud-config.conf # rsyslog_dir: /etc/rsyslog.d + +## to configure rsyslog to accept remote logging on Ubuntu +## write the following into /etc/rsyslog.d/20-remote-udp.conf +## $ModLoad imudp 
+## $UDPServerRun 514 +## $template LogRemote,"/var/log/maas/rsyslog/%HOSTNAME%/messages" +## :fromhost-ip, !isequal, "127.0.0.1" ?LogRemote +## then: +## sudo service rsyslog restart -- cgit v1.2.3 From 95bfe5d5150e2bf0a26dd1b97578c4fd04152365 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 7 Aug 2015 14:44:00 -0500 Subject: add doc, remove some debug / print statements. --- bin/cloud-init | 1 - cloudinit/reporting/__init__.py | 1 - cloudinit/reporting/handlers.py | 16 ++++++++++++++-- doc/examples/cloud-config-reporting.txt | 17 +++++++++++++++++ 4 files changed, 31 insertions(+), 4 deletions(-) create mode 100644 doc/examples/cloud-config-reporting.txt (limited to 'doc/examples') diff --git a/bin/cloud-init b/bin/cloud-init index 86780408..1f64461e 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -648,7 +648,6 @@ def main(): "running single module %s" % args.name) report_on = args.report - reporting.update_configuration({'print': {'type': 'print'}}) args.reporter = reporting.ReportEventStack( rname, rdesc, reporting_enabled=report_on) with args.reporter: diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py index a3b8332f..e23fab32 100644 --- a/cloudinit/reporting/__init__.py +++ b/cloudinit/reporting/__init__.py @@ -18,7 +18,6 @@ START_EVENT_TYPE = 'start' DEFAULT_CONFIG = { 'logging': {'type': 'log'}, - 'print': {'type': 'print'}, } diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index 9cf8bd2b..1343311f 100644 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -28,17 +28,29 @@ class ReportingHandler(object): class LogHandler(ReportingHandler): """Publishes events to the cloud-init log at the ``INFO`` log level.""" + def __init__(self, level="DEBUG"): + super(LogHandler, self).__init__() + if isinstance(level, int): + pass + else: + input_level = level + try: + level = gettattr(logging, level.upper()) + except: + LOG.warn("invalid level '%s', using WARN", input_level) + level = logging.WARN + self.level = level + def publish_event(self, event): """Publish an event to the ``INFO`` log level.""" logger = logging.getLogger( '.'.join(['cloudinit', 'reporting', event.event_type, event.name])) - logger.info(event.as_string()) + logger.log(self.level, event.as_string()) class PrintHandler(ReportingHandler): def publish_event(self, event): """Publish an event to the ``INFO`` log level.""" - print(event.as_string()) class WebHookHandler(ReportingHandler): diff --git a/doc/examples/cloud-config-reporting.txt b/doc/examples/cloud-config-reporting.txt new file mode 100644 index 00000000..ee00078f --- /dev/null +++ b/doc/examples/cloud-config-reporting.txt @@ -0,0 +1,17 @@ +#cloud-config +## +## The following sets up 2 reporting end points. +## A 'webhook' and a 'log' type. +## It also disables the built in default 'log' +reporting: + smtest: + type: webhook + endpoint: "http://myhost:8000/" + consumer_key: "ckey_foo" + consumer_secret: "csecret_foo" + token_key: "tkey_foo" + token_secret: "tkey_foo" + smlogger: + type: log + level: WARN + log: null -- cgit v1.2.3 From ba3e59cbb5ae58a2267fcbcd23eecaaa26f2c396 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 8 Sep 2015 16:53:59 -0400 Subject: power_state: support 'condition' argument if 'condition' is provided to config in power_state, then consult it before powering off. This allows the user to shut down only if a condition is met, and leave the system in a debuggable state otherwise. 
An example is as simple as: power_state: mode: poweroff condition: ['sh', '-c', '[ -f /disable-poweroff ]'] --- ChangeLog | 1 + cloudinit/config/cc_power_state_change.py | 57 +++++++++++++++++++--- doc/examples/cloud-config-power-state.txt | 9 ++++ .../test_handler/test_handler_power_state.py | 48 ++++++++++++++++-- 4 files changed, 105 insertions(+), 10 deletions(-) (limited to 'doc/examples') diff --git a/ChangeLog b/ChangeLog index 6fb70696..bbb7e990 100644 --- a/ChangeLog +++ b/ChangeLog @@ -61,6 +61,7 @@ - status_wrapper in main: fix use of print_exc when handling exception - reporting: add reporting module for web hook or logging of events. - NoCloud: fix consumption of vendordata (LP: #1493453) + - power_state_change: support 'condition' to disable or enable poweroff 0.7.6: - open 0.7.6 - Enable vendordata on CloudSigma datasource (LP: #1303986) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 09d37371..7d9567e3 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -22,6 +22,7 @@ from cloudinit import util import errno import os import re +import six import subprocess import time @@ -48,10 +49,40 @@ def givecmdline(pid): return None +def check_condition(cond, log=None): + if isinstance(cond, bool): + if log: + log.debug("Static Condition: %s" % cond) + return cond + + pre = "check_condition command (%s): " % cond + try: + proc = subprocess.Popen(cond, shell=not isinstance(cond, list)) + proc.communicate() + ret = proc.returncode + if ret == 0: + if log: + log.debug(pre + "exited 0. condition met.") + return True + elif ret == 1: + if log: + log.debug(pre + "exited 1. condition not met.") + return False + else: + if log: + log.warn(pre + "unexpected exit %s. " % ret + + "do not apply change.") + return False + except Exception as e: + if log: + log.warn(pre + "Unexpected error: %s" % e) + return False + + def handle(_name, cfg, _cloud, log, _args): try: - (args, timeout) = load_power_state(cfg) + (args, timeout, condition) = load_power_state(cfg) if args is None: log.debug("no power_state provided. doing nothing") return @@ -59,6 +90,10 @@ def handle(_name, cfg, _cloud, log, _args): log.warn("%s Not performing power state change!" % str(e)) return + if condition is False: + log.debug("Condition was false. Will not perform state change.") + return + mypid = os.getpid() cmdline = givecmdline(mypid) @@ -70,8 +105,8 @@ def handle(_name, cfg, _cloud, log, _args): log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) - util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, execmd, - [args, devnull_fp]) + util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, + condition, execmd, [args, devnull_fp]) def load_power_state(cfg): @@ -80,7 +115,7 @@ def load_power_state(cfg): pstate = cfg.get('power_state') if pstate is None: - return (None, None) + return (None, None, None) if not isinstance(pstate, dict): raise TypeError("power_state is not a dict.") @@ -115,7 +150,10 @@ def load_power_state(cfg): raise ValueError("failed to convert timeout '%s' to float." % pstate['timeout']) - return (args, timeout) + condition = pstate.get("condition", True) + if not isinstance(condition, six.string_types + (list, bool)): + raise TypeError("condition type %s invalid. 
must be list, bool, str") + return (args, timeout, condition) def doexit(sysexit): @@ -133,7 +171,7 @@ def execmd(exe_args, output=None, data_in=None): doexit(ret) -def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): +def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args): # wait until pid, with /proc/pid/cmdline contents of pidcmdline # is no longer alive. After it is gone, or timeout has passed # execute func(args) @@ -175,4 +213,11 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): if log: log.debug(msg) + + try: + if not check_condition(condition, log): + return + except Exception as e: + fatal("Unexpected Exception when checking condition: %s" % e) + func(*args) diff --git a/doc/examples/cloud-config-power-state.txt b/doc/examples/cloud-config-power-state.txt index 8df14366..b470153d 100644 --- a/doc/examples/cloud-config-power-state.txt +++ b/doc/examples/cloud-config-power-state.txt @@ -23,9 +23,18 @@ # message: provided as the message argument to 'shutdown'. default is none. # timeout: the amount of time to give the cloud-init process to finish # before executing shutdown. +# condition: apply state change only if condition is met. +# May be boolean True (always met), or False (never met), +# or a command string or list to be executed. +# command's exit code indicates: +# 0: condition met +# 1: condition not met +# other exit codes will result in 'not met', but are reserved +# for future use. # power_state: delay: "+30" mode: poweroff message: Bye Bye timeout: 30 + condition: True diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py index 2f86b8f8..5687b10d 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -1,6 +1,9 @@ +import sys + from cloudinit.config import cc_power_state_change as psc from .. 
import helpers as t_help +from ..helpers import mock class TestLoadPowerState(t_help.TestCase): @@ -9,12 +12,12 @@ class TestLoadPowerState(t_help.TestCase): def test_no_config(self): # completely empty config should mean do nothing - (cmd, _timeout) = psc.load_power_state({}) + (cmd, _timeout, _condition) = psc.load_power_state({}) self.assertEqual(cmd, None) def test_irrelevant_config(self): # no power_state field in config should return None for cmd - (cmd, _timeout) = psc.load_power_state({'foo': 'bar'}) + (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'}) self.assertEqual(cmd, None) def test_invalid_mode(self): @@ -53,23 +56,60 @@ class TestLoadPowerState(t_help.TestCase): def test_no_message(self): # if message is not present, then no argument should be passed for it cfg = {'power_state': {'mode': 'poweroff'}} - (cmd, _timeout) = psc.load_power_state(cfg) + (cmd, _timeout, _condition) = psc.load_power_state(cfg) self.assertNotIn("", cmd) check_lps_ret(psc.load_power_state(cfg)) self.assertTrue(len(cmd) == 3) + def test_condition_null_raises(self): + cfg = {'power_state': {'mode': 'poweroff', 'condition': None}} + self.assertRaises(TypeError, psc.load_power_state, cfg) + + def test_condition_default_is_true(self): + cfg = {'power_state': {'mode': 'poweroff'}} + _cmd, _timeout, cond = psc.load_power_state(cfg) + self.assertEqual(cond, True) + + +class TestCheckCondition(t_help.TestCase): + def cmd_with_exit(self, rc): + return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc]) + + def test_true_is_true(self): + self.assertEqual(psc.check_condition(True), True) + + def test_false_is_false(self): + self.assertEqual(psc.check_condition(False), False) + + def test_cmd_exit_zero_true(self): + self.assertEqual(psc.check_condition(self.cmd_with_exit(0)), True) + + def test_cmd_exit_one_false(self): + self.assertEqual(psc.check_condition(self.cmd_with_exit(1)), False) + + def test_cmd_exit_nonzero_warns(self): + mocklog = mock.Mock() + self.assertEqual( + psc.check_condition(self.cmd_with_exit(2), mocklog), False) + self.assertEqual(mocklog.warn.call_count, 1) + + def check_lps_ret(psc_return, mode=None): - if len(psc_return) != 2: + if len(psc_return) != 3: raise TypeError("length returned = %d" % len(psc_return)) errs = [] cmd = psc_return[0] timeout = psc_return[1] + condition = psc_return[2] if 'shutdown' not in psc_return[0][0]: errs.append("string 'shutdown' not in cmd") + if 'condition' is None: + errs.append("condition was not returned") + if mode is not None: opt = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}[mode] if opt not in psc_return[0]: -- cgit v1.2.3 From 601535f5cb457a50180c99010fa6406b99da2b13 Mon Sep 17 00:00:00 2001 From: Robert Jennings Date: Fri, 29 Jan 2016 11:47:48 -0600 Subject: Correct lock_passwd in docs --- doc/examples/cloud-config-user-groups.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'doc/examples') diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 31491faf..0e8ed243 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -15,14 +15,14 @@ users: selinux-user: staff_u expiredate: 2012-09-01 ssh-import-id: foobar - lock-passwd: false + lock_passwd: false passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ - name: barfoo gecos: Bar B. 
Foo sudo: ALL=(ALL) NOPASSWD:ALL groups: users, admin ssh-import-id: None - lock-passwd: true + lock_passwd: true ssh-authorized-keys: - - @@ -42,7 +42,7 @@ users: # selinux-user: Optional. The SELinux user for the user's login, such as # "staff_u". When this is omitted the system will select the default # SELinux user. -# lock-passwd: Defaults to true. Lock the password to disable password login +# lock_passwd: Defaults to true. Lock the password to disable password login # inactive: Create the user as inactive # passwd: The hash -- not the password itself -- of the password you want # to use for this user. You can generate a safe hash via: -- cgit v1.2.3 From 75ba44d2730b89f13b2069961ea8de63f65ea780 Mon Sep 17 00:00:00 2001 From: Robert Jennings Date: Thu, 4 Feb 2016 15:52:08 -0600 Subject: SmartOS: Add support for Joyent LX-Brand Zones (LP: #1540965) LX-brand zones on Joyent's SmartOS use a different metadata source (socket file) than the KVM-based SmartOS virtualization (serial port). This patch adds support for recognizing the different flavors of virtualization on SmartOS and setting up a metadata source file object. After the file object is created, the rest of the code for the datasource LP: #1540965 --- cloudinit/sources/DataSourceSmartOS.py | 257 ++++++++++++++---------- doc/examples/cloud-config-datasources.txt | 7 + tests/unittests/test_datasource/test_smartos.py | 85 +++++--- 3 files changed, 216 insertions(+), 133 deletions(-) (limited to 'doc/examples') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index c9b497df..7453379a 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -20,10 +20,13 @@ # Datasource for provisioning on SmartOS. This works on Joyent # and public/private Clouds using SmartOS. # -# SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests. +# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests # The meta-data is transmitted via key/value pairs made by # requests on the console. For example, to get the hostname, you # would send "GET hostname" on /dev/ttyS1. +# For Linux Guests running in LX-Brand Zones on SmartOS hosts +# a socket (/native/.zonecontrol/metadata.sock) is used instead +# of a serial console. 
# # Certain behavior is defined by the DataDictionary # http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html @@ -34,6 +37,8 @@ import contextlib import os import random import re +import socket +import stat import serial @@ -46,6 +51,7 @@ LOG = logging.getLogger(__name__) SMARTOS_ATTRIB_MAP = { # Cloud-init Key : (SmartOS Key, Strip line endings) + 'instance-id': ('sdc:uuid', True), 'local-hostname': ('hostname', True), 'public-keys': ('root_authorized_keys', True), 'user-script': ('user-script', False), @@ -76,6 +82,7 @@ DS_CFG_PATH = ['datasource', DS_NAME] # BUILTIN_DS_CONFIG = { 'serial_device': '/dev/ttyS1', + 'metadata_sockfile': '/native/.zonecontrol/metadata.sock', 'seed_timeout': 60, 'no_base64_decode': ['root_authorized_keys', 'motd_sys_info', @@ -83,6 +90,7 @@ BUILTIN_DS_CONFIG = { 'user-data', 'user-script', 'sdc:datacenter_name', + 'sdc:uuid', ], 'base64_keys': [], 'base64_all': False, @@ -150,17 +158,27 @@ class DataSourceSmartOS(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.is_smartdc = None - self.ds_cfg = util.mergemanydict([ self.ds_cfg, util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]) self.metadata = {} - self.cfg = BUILTIN_CLOUD_CONFIG - self.seed = self.ds_cfg.get("serial_device") - self.seed_timeout = self.ds_cfg.get("serial_timeout") + # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but + # report 'BrandZ virtual linux' as the kernel version + if os.uname()[3].lower() == 'brandz virtual linux': + LOG.debug("Host is SmartOS, guest in Zone") + self.is_smartdc = True + self.smartos_type = 'lx-brand' + self.cfg = {} + self.seed = self.ds_cfg.get("metadata_sockfile") + else: + self.is_smartdc = True + self.smartos_type = 'kvm' + self.seed = self.ds_cfg.get("serial_device") + self.cfg = BUILTIN_CLOUD_CONFIG + self.seed_timeout = self.ds_cfg.get("serial_timeout") self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode') self.b64_keys = self.ds_cfg.get('base64_keys') self.b64_all = self.ds_cfg.get('base64_all') @@ -170,12 +188,49 @@ class DataSourceSmartOS(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + def _get_seed_file_object(self): + if not self.seed: + raise AttributeError("seed device is not set") + + if self.smartos_type == 'lx-brand': + if not stat.S_ISSOCK(os.stat(self.seed).st_mode): + LOG.debug("Seed %s is not a socket", self.seed) + return None + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(self.seed) + return sock.makefile('rwb') + else: + if not stat.S_ISCHR(os.stat(self.seed).st_mode): + LOG.debug("Seed %s is not a character device") + return None + ser = serial.Serial(self.seed, timeout=self.seed_timeout) + if not ser.isOpen(): + raise SystemError("Unable to open %s" % self.seed) + return ser + return None + + def _set_provisioned(self): + '''Mark the instance provisioning state as successful. + + When run in a zone, the host OS will look for /var/svc/provisioning + to be renamed as /var/svc/provision_success. This should be done + after meta-data is successfully retrieved and from this point + the host considers the provision of the zone to be a success and + keeps the zone running. 
+ ''' + + LOG.debug('Instance provisioning state set as successful') + svc_path = '/var/svc' + if os.path.exists('/'.join([svc_path, 'provisioning'])): + os.rename('/'.join([svc_path, 'provisioning']), + '/'.join([svc_path, 'provision_success'])) + def get_data(self): md = {} ud = "" if not device_exists(self.seed): - LOG.debug("No serial device '%s' found for SmartOS datasource", + LOG.debug("No metadata device '%s' found for SmartOS datasource", self.seed) return False @@ -185,29 +240,36 @@ class DataSourceSmartOS(sources.DataSource): LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)") return False - dmi_info = dmi_data() - if dmi_info is False: - LOG.debug("No dmidata utility found") - return False - - system_uuid, system_type = tuple(dmi_info) - if 'smartdc' not in system_type.lower(): - LOG.debug("Host is not on SmartOS. system_type=%s", system_type) + # SDC KVM instances will provide dmi data, LX-brand does not + if self.smartos_type == 'kvm': + dmi_info = dmi_data() + if dmi_info is False: + LOG.debug("No dmidata utility found") + return False + + system_type = dmi_info + if 'smartdc' not in system_type.lower(): + LOG.debug("Host is not on SmartOS. system_type=%s", + system_type) + return False + LOG.debug("Host is SmartOS, guest in KVM") + + seed_obj = self._get_seed_file_object() + if seed_obj is None: + LOG.debug('Seed file object not found.') return False - self.is_smartdc = True - md['instance-id'] = system_uuid + with contextlib.closing(seed_obj) as seed: + b64_keys = self.query('base64_keys', seed, strip=True, b64=False) + if b64_keys is not None: + self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] - b64_keys = self.query('base64_keys', strip=True, b64=False) - if b64_keys is not None: - self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] + b64_all = self.query('base64_all', seed, strip=True, b64=False) + if b64_all is not None: + self.b64_all = util.is_true(b64_all) - b64_all = self.query('base64_all', strip=True, b64=False) - if b64_all is not None: - self.b64_all = util.is_true(b64_all) - - for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): - smartos_noun, strip = attribute - md[ci_noun] = self.query(smartos_noun, strip=strip) + for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): + smartos_noun, strip = attribute + md[ci_noun] = self.query(smartos_noun, seed, strip=strip) # @datadictionary: This key may contain a program that is written # to a file in the filesystem of the guest on each boot and then @@ -240,7 +302,7 @@ class DataSourceSmartOS(sources.DataSource): # Handle the cloud-init regular meta if not md['local-hostname']: - md['local-hostname'] = system_uuid + md['local-hostname'] = md['instance-id'] ud = None if md['user-data']: @@ -257,6 +319,8 @@ class DataSourceSmartOS(sources.DataSource): self.metadata = util.mergemanydict([md, self.metadata]) self.userdata_raw = ud self.vendordata_raw = md['vendor-data'] + + self._set_provisioned() return True def device_name_to_device(self, name): @@ -268,40 +332,64 @@ class DataSourceSmartOS(sources.DataSource): def get_instance_id(self): return self.metadata['instance-id'] - def query(self, noun, strip=False, default=None, b64=None): + def query(self, noun, seed_file, strip=False, default=None, b64=None): if b64 is None: if noun in self.smartos_no_base64: b64 = False elif self.b64_all or noun in self.b64_keys: b64 = True - return query_data(noun=noun, strip=strip, seed_device=self.seed, - seed_timeout=self.seed_timeout, default=default, - b64=b64) + return self._query_data(noun, 
seed_file, strip=strip, + default=default, b64=b64) + def _query_data(self, noun, seed_file, strip=False, + default=None, b64=None): + """Makes a request via "GET " -def device_exists(device): - """Symplistic method to determine if the device exists or not""" - return os.path.exists(device) + In the response, the first line is the status, while subsequent + lines are is the value. A blank line with a "." is used to + indicate end of response. + If the response is expected to be base64 encoded, then set + b64encoded to true. Unfortantely, there is no way to know if + something is 100% encoded, so this method relies on being told + if the data is base64 or not. + """ -def get_serial(seed_device, seed_timeout): - """This is replaced in unit testing, allowing us to replace - serial.Serial with a mocked class. + if not noun: + return False - The timeout value of 60 seconds should never be hit. The value - is taken from SmartOS own provisioning tools. Since we are reading - each line individually up until the single ".", the transfer is - usually very fast (i.e. microseconds) to get the response. - """ - if not seed_device: - raise AttributeError("seed_device value is not set") + response = JoyentMetadataClient(seed_file).get_metadata(noun) + + if response is None: + return default + + if b64 is None: + b64 = self._query_data('b64-%s' % noun, seed_file, b64=False, + default=False, strip=True) + b64 = util.is_true(b64) + + resp = None + if b64 or strip: + resp = "".join(response).rstrip() + else: + resp = "".join(response) - ser = serial.Serial(seed_device, timeout=seed_timeout) - if not ser.isOpen(): - raise SystemError("Unable to open %s" % seed_device) + if b64: + try: + return util.b64d(resp) + # Bogus input produces different errors in Python 2 and 3; + # catch both. + except (TypeError, binascii.Error): + LOG.warn("Failed base64 decoding key '%s'", noun) + return resp - return ser + return resp + + +def device_exists(device): + """Symplistic method to determine if the device exists or not""" + return os.path.exists(device) class JoyentMetadataFetchException(Exception): @@ -320,8 +408,8 @@ class JoyentMetadataClient(object): r' (?P(?P[0-9a-f]+) (?PSUCCESS|NOTFOUND)' r'( (?P.+))?)') - def __init__(self, serial): - self.serial = serial + def __init__(self, metasource): + self.metasource = metasource def _checksum(self, body): return '{0:08x}'.format( @@ -356,67 +444,30 @@ class JoyentMetadataClient(object): util.b64e(metadata_key)) msg = 'V2 {0} {1} {2}\n'.format( len(message_body), self._checksum(message_body), message_body) - LOG.debug('Writing "%s" to serial port.', msg) - self.serial.write(msg.encode('ascii')) - response = self.serial.readline().decode('ascii') - LOG.debug('Read "%s" from serial port.', response) - return self._get_value_from_frame(request_id, response) - - -def query_data(noun, seed_device, seed_timeout, strip=False, default=None, - b64=None): - """Makes a request to via the serial console via "GET " - - In the response, the first line is the status, while subsequent lines - are is the value. A blank line with a "." is used to indicate end of - response. - - If the response is expected to be base64 encoded, then set b64encoded - to true. Unfortantely, there is no way to know if something is 100% - encoded, so this method relies on being told if the data is base64 or - not. 
- """ - if not noun: - return False - - with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser: - client = JoyentMetadataClient(ser) - response = client.get_metadata(noun) - - if response is None: - return default - - if b64 is None: - b64 = query_data('b64-%s' % noun, seed_device=seed_device, - seed_timeout=seed_timeout, b64=False, - default=False, strip=True) - b64 = util.is_true(b64) - - resp = None - if b64 or strip: - resp = "".join(response).rstrip() - else: - resp = "".join(response) - - if b64: - try: - return util.b64d(resp) - # Bogus input produces different errors in Python 2 and 3; catch both. - except (TypeError, binascii.Error): - LOG.warn("Failed base64 decoding key '%s'", noun) - return resp + LOG.debug('Writing "%s" to metadata transport.', msg) + self.metasource.write(msg.encode('ascii')) + self.metasource.flush() + + response = bytearray() + response.extend(self.metasource.read(1)) + while response[-1:] != b'\n': + response.extend(self.metasource.read(1)) + response = response.rstrip().decode('ascii') + LOG.debug('Read "%s" from metadata transport.', response) + + if 'SUCCESS' not in response: + return None - return resp + return self._get_value_from_frame(request_id, response) def dmi_data(): - sys_uuid = util.read_dmi_data("system-uuid") sys_type = util.read_dmi_data("system-product-name") - if not sys_uuid or not sys_type: + if not sys_type: return None - return (sys_uuid.lower(), sys_type) + return sys_type def write_boot_content(content, content_f, link=None, shebang=False, diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index 3bde4aac..2651c027 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -51,12 +51,19 @@ datasource: policy: on # [can be 'on', 'off' or 'force'] SmartOS: + # For KVM guests: # Smart OS datasource works over a serial console interacting with # a server on the other end. By default, the second serial console is the # device. SmartOS also uses a serial timeout of 60 seconds. serial_device: /dev/ttyS1 serial_timeout: 60 + # For LX-Brand Zones guests: + # Smart OS datasource works over a socket interacting with + # the host on the other end. By default, the socket file is in + # the native .zoncontrol directory. 
+ metadata_sockfile: /native/.zonecontrol/metadata.sock + # a list of keys that will not be base64 decoded even if base64_all no_base64_decode: ['root_authorized_keys', 'motd_sys_info', 'iptables_disable'] diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index adee9019..1235436d 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -31,6 +31,7 @@ import shutil import stat import tempfile import uuid +import unittest from binascii import crc32 import serial @@ -56,12 +57,13 @@ MOCK_RETURNS = { 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), 'sdc:datacenter_name': 'somewhere2', 'sdc:operator-script': '\n'.join(['bin/true', '']), + 'sdc:uuid': str(uuid.uuid4()), 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']), 'user-data': '\n'.join(['something', '']), 'user-script': '\n'.join(['/bin/true', '']), } -DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc') +DMI_DATA_RETURN = 'smartdc' def get_mock_client(mockdata): @@ -111,7 +113,8 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): ret = apply_patches(patches) self.unapply += ret - def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None): + def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None, + is_lxbrand=False): mod = DataSourceSmartOS if mockdata is None: @@ -124,9 +127,13 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): return dmi_data def _os_uname(): - # LP: #1243287. tests assume this runs, but running test on - # arm would cause them all to fail. - return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64') + if not is_lxbrand: + # LP: #1243287. tests assume this runs, but running test on + # arm would cause them all to fail. 
+ return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64') + else: + return ('LINUX', 'NODENAME', 'RELEASE', 'BRANDZ VIRTUAL LINUX', + 'X86_64') if sys_cfg is None: sys_cfg = {} @@ -136,7 +143,6 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): sys_cfg['datasource']['SmartOS'] = ds_cfg self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)]) - self.apply_patches([(mod, 'get_serial', mock.MagicMock())]) self.apply_patches([ (mod, 'JoyentMetadataClient', get_mock_client(mockdata))]) self.apply_patches([(mod, 'dmi_data', _dmi_data)]) @@ -144,6 +150,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): self.apply_patches([(mod, 'device_exists', lambda d: True)]) dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None, paths=self.paths) + self.apply_patches([(dsrc, '_get_seed_file_object', mock.MagicMock())]) return dsrc def test_seed(self): @@ -151,14 +158,29 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): dsrc = self._get_ds() ret = dsrc.get_data() self.assertTrue(ret) + self.assertEquals('kvm', dsrc.smartos_type) self.assertEquals('/dev/ttyS1', dsrc.seed) + def test_seed_lxbrand(self): + # default seed should be /dev/ttyS1 + dsrc = self._get_ds(is_lxbrand=True) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals('lx-brand', dsrc.smartos_type) + self.assertEquals('/native/.zonecontrol/metadata.sock', dsrc.seed) + def test_issmartdc(self): dsrc = self._get_ds() ret = dsrc.get_data() self.assertTrue(ret) self.assertTrue(dsrc.is_smartdc) + def test_issmartdc_lxbrand(self): + dsrc = self._get_ds(is_lxbrand=True) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue(dsrc.is_smartdc) + def test_no_base64(self): ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} dsrc = self._get_ds(ds_cfg=ds_cfg) @@ -169,7 +191,8 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(DMI_DATA_RETURN[0], dsrc.metadata['instance-id']) + self.assertEquals(MOCK_RETURNS['sdc:uuid'], + dsrc.metadata['instance-id']) def test_root_keys(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) @@ -407,18 +430,6 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): self.assertEqual(dsrc.device_name_to_device('FOO'), mydscfg['disk_aliases']['FOO']) - @mock.patch('cloudinit.sources.DataSourceSmartOS.JoyentMetadataClient') - @mock.patch('cloudinit.sources.DataSourceSmartOS.get_serial') - def test_serial_console_closed_on_error(self, get_serial, metadata_client): - class OurException(Exception): - pass - metadata_client.side_effect = OurException - try: - DataSourceSmartOS.query_data('noun', 'device', 0) - except OurException: - pass - self.assertEqual(1, get_serial.return_value.close.call_count) - def apply_patches(patches): ret = [] @@ -447,14 +458,25 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase): } def make_response(): - payload = '' - if self.response_parts['payload']: - payload = ' {0}'.format(self.response_parts['payload']) - del self.response_parts['payload'] - return ( - 'V2 {length} {crc} {request_id} {command}{payload}\n'.format( - payload=payload, **self.response_parts).encode('ascii')) - self.serial.readline.side_effect = make_response + payloadstr = '' + if 'payload' in self.response_parts: + payloadstr = ' {0}'.format(self.response_parts['payload']) + return ('V2 {length} {crc} {request_id} ' + '{command}{payloadstr}\n'.format( + payloadstr=payloadstr, + 
From 814e04654736db6b7443d72d5daa62b56f7a61b9 Mon Sep 17 00:00:00 2001
From: Wesley Wiedenmeier
Date: Thu, 4 Feb 2016 16:14:15 -0600
Subject: Added example cc_lxd config

---
 doc/examples/cloud-config-lxd.txt | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
 create mode 100644 doc/examples/cloud-config-lxd.txt

(limited to 'doc/examples')

diff --git a/doc/examples/cloud-config-lxd.txt b/doc/examples/cloud-config-lxd.txt
new file mode 100644
index 00000000..f66da4c3
--- /dev/null
+++ b/doc/examples/cloud-config-lxd.txt
@@ -0,0 +1,21 @@
+#cloud-config
+
+# configure lxd
+# default: none
+# all options default to none if not specified
+# lxd: config sections for lxd
+#   init: dict of options for lxd init, see 'man lxd'
+#     network_address: address for lxd to listen on
+#     network_port: port for lxd to listen on
+#     storage_backend: either 'zfs' or 'dir'
+#     storage_create_device: device based storage using specified device
+#     storage_create_loop: set up loop based storage with size in GB
+#     storage_pool: name of storage pool to use or create
+#     trust_password: password required to add new clients
+
+lxd:
+  init:
+    network_address: 0.0.0.0
+    network_port: 8443
+    storage_backend: zfs
+    storage_pool: datapool
-- cgit v1.2.3
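Since every key in the example is optional, anything consuming this
cloud-config has to cope with the 'lxd' section or its 'init' sub-dictionary
being absent or malformed. A minimal sketch of that defensive lookup (the
function name and the plain dict handling are illustrative; cloud-init's own
module performs its own validation and logging):

    def get_lxd_init(cfg):
        # Return the lxd/init mapping from merged cloud-config, or {} when the
        # section is missing or not a dictionary.
        lxd = cfg.get('lxd')
        if not isinstance(lxd, dict):
            return {}
        init = lxd.get('init')
        return init if isinstance(init, dict) else {}

    cfg = {'lxd': {'init': {'network_address': '0.0.0.0',
                            'network_port': 8443,
                            'storage_backend': 'zfs',
                            'storage_pool': 'datapool'}}}
    assert get_lxd_init(cfg)['storage_backend'] == 'zfs'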
From 14915526ca67bbf7842028d48170015b85f87469 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Tue, 1 Mar 2016 00:19:55 -0500
Subject: lxd: general fix after testing

A few changes:
 a.) change to using '--name=value' rather than '--name' 'value'
 b.) make sure only strings are passed to command
     (useful for storage_create_loop: which is likely an integer)
 c.) document simple working example
 d.) support installing zfs if not present and storage_backend requires it.
---
 cloudinit/config/cc_lxd.py                        | 35 ++++++++++++++++++------
 doc/examples/cloud-config-lxd.txt                 |  7 +++++
 tests/unittests/test_handler/test_handler_lxd.py  |  9 +++---
 3 files changed, 38 insertions(+), 13 deletions(-)

(limited to 'doc/examples')

diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index aaafb643..84eec7a5 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -38,16 +38,36 @@ from cloudinit import util
 def handle(name, cfg, cloud, log, args):
     # Get config
     lxd_cfg = cfg.get('lxd')
-    if not lxd_cfg and isinstance(lxd_cfg, dict):
+    if not lxd_cfg:
         log.debug("Skipping module named %s, not present or disabled by cfg")
         return
+    if not isinstance(lxd_cfg, dict):
+        log.warn("lxd config must be a dictionary. found a '%s'",
+                 type(lxd_cfg))
+        return
+
+    init_cfg = lxd_cfg.get('init')
+    if not init_cfg:
+        init_cfg = {}
+
+    if not isinstance(init_cfg, dict):
+        log.warn("lxd/init config must be a dictionary. found a '%s'",
+                 type(init_cfg))
+        init_cfg = {}
+
+    packages = []
+    if (init_cfg.get("storage_backend") == "zfs" and not util.which('zfs')):
+        packages.append('zfs')
 
     # Ensure lxd is installed
     if not util.which("lxd"):
+        packages.append('lxd')
+
+    if len(packages):
         try:
-            cloud.distro.install_packages(("lxd",))
+            cloud.distro.install_packages(packages)
         except util.ProcessExecutionError as e:
-            log.warn("no lxd executable and could not install lxd:", e)
+            log.warn("failed to install packages %s: %s", packages, e)
             return
 
     # Set up lxd if init config is given
@@ -55,14 +75,11 @@ def handle(name, cfg, cloud, log, args):
                  'network_address', 'network_port', 'storage_backend',
                  'storage_create_device', 'storage_create_loop',
                  'storage_pool', 'trust_password')
-    init_cfg = lxd_cfg.get('init')
+
     if init_cfg:
-        if not isinstance(init_cfg, dict):
-            log.warn("lxd/init config must be a dictionary. found a '%s'",
-                     type(f))
-            return
         cmd = ['lxd', 'init', '--auto']
         for k in init_keys:
             if init_cfg.get(k):
-                cmd.extend(["--%s" % k.replace('_', '-'), init_cfg[k]])
+                cmd.extend(["--%s=%s" %
+                            (k.replace('_', '-'), str(init_cfg[k]))])
         util.subp(cmd)
diff --git a/doc/examples/cloud-config-lxd.txt b/doc/examples/cloud-config-lxd.txt
index f66da4c3..b9bb4aa5 100644
--- a/doc/examples/cloud-config-lxd.txt
+++ b/doc/examples/cloud-config-lxd.txt
@@ -19,3 +19,10 @@ lxd:
     network_port: 8443
     storage_backend: zfs
     storage_pool: datapool
+    storage_create_loop: 10
+
+
+# The simplest working configuration is
+# lxd:
+#   init:
+#     storage_backend: dir
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index 4d858b8f..65794a41 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -43,9 +43,10 @@ class TestLxd(t_help.TestCase):
         self.assertTrue(mock_util.which.called)
         init_call = mock_util.subp.call_args_list[0][0][0]
         self.assertEquals(init_call,
-                          ['lxd', 'init', '--auto', '--network-address',
-                           '0.0.0.0', '--storage-backend', 'zfs',
-                           '--storage-pool', 'poolname'])
+                          ['lxd', 'init', '--auto',
+                           '--network-address=0.0.0.0',
+                           '--storage-backend=zfs',
+                           '--storage-pool=poolname'])
 
     @mock.patch("cloudinit.config.cc_lxd.util")
     def test_lxd_install(self, mock_util):
@@ -55,4 +56,4 @@ class TestLxd(t_help.TestCase):
         cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, LOG, [])
         self.assertTrue(cc.distro.install_packages.called)
         install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
-        self.assertEquals(install_pkg, ('lxd',))
+        self.assertEquals(sorted(install_pkg), ['lxd', 'zfs'])
-- cgit v1.2.3
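Condensing the patched handle() above: the init options are rendered as single
'--key=value' arguments (point a.), values are coerced to strings so integers
such as storage_create_loop survive (point b.), and zfs is added to the package
list when the chosen backend needs it but the tool is missing (point d.). A
stand-alone sketch of the same flag building, with shutil.which standing in for
cloud-init's util.which and the package install and util.subp call left out:

    import shutil

    INIT_KEYS = ('network_address', 'network_port', 'storage_backend',
                 'storage_create_device', 'storage_create_loop',
                 'storage_pool', 'trust_password')

    def lxd_init_cmd(init_cfg):
        # Build ['lxd', 'init', '--auto', '--key=value', ...] from the init dict.
        cmd = ['lxd', 'init', '--auto']
        for key in INIT_KEYS:
            if init_cfg.get(key):
                cmd.append('--%s=%s' % (key.replace('_', '-'),
                                        str(init_cfg[key])))
        return cmd

    init_cfg = {'storage_backend': 'zfs', 'storage_create_loop': 10,
                'storage_pool': 'datapool'}
    packages = ['zfs'] if (init_cfg.get('storage_backend') == 'zfs'
                           and not shutil.which('zfs')) else []
    print(packages, lxd_init_cmd(init_cfg))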
found a '%s'", - type(f)) - return cmd = ['lxd', 'init', '--auto'] for k in init_keys: if init_cfg.get(k): - cmd.extend(["--%s" % k.replace('_', '-'), init_cfg[k]]) + cmd.extend(["--%s=%s" % + (k.replace('_', '-'), str(init_cfg[k]))]) util.subp(cmd) diff --git a/doc/examples/cloud-config-lxd.txt b/doc/examples/cloud-config-lxd.txt index f66da4c3..b9bb4aa5 100644 --- a/doc/examples/cloud-config-lxd.txt +++ b/doc/examples/cloud-config-lxd.txt @@ -19,3 +19,10 @@ lxd: network_port: 8443 storage_backend: zfs storage_pool: datapool + storage_create_loop: 10 + + +# The simplist working configuration is +# lxd: +# init: +# storage_backend: dir diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index 4d858b8f..65794a41 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -43,9 +43,10 @@ class TestLxd(t_help.TestCase): self.assertTrue(mock_util.which.called) init_call = mock_util.subp.call_args_list[0][0][0] self.assertEquals(init_call, - ['lxd', 'init', '--auto', '--network-address', - '0.0.0.0', '--storage-backend', 'zfs', - '--storage-pool', 'poolname']) + ['lxd', 'init', '--auto', + '--network-address=0.0.0.0', + '--storage-backend=zfs', + '--storage-pool=poolname']) @mock.patch("cloudinit.config.cc_lxd.util") def test_lxd_install(self, mock_util): @@ -55,4 +56,4 @@ class TestLxd(t_help.TestCase): cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, LOG, []) self.assertTrue(cc.distro.install_packages.called) install_pkg = cc.distro.install_packages.call_args_list[0][0][0] - self.assertEquals(install_pkg, ('lxd',)) + self.assertEquals(sorted(install_pkg), ['lxd', 'zfs']) -- cgit v1.2.3 From f39e9b337778a0348ab08161d19c116408de5312 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 8 Mar 2016 11:20:41 -0500 Subject: add doc --- doc/examples/cloud-config-seed-random.txt | 32 +++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 doc/examples/cloud-config-seed-random.txt (limited to 'doc/examples') diff --git a/doc/examples/cloud-config-seed-random.txt b/doc/examples/cloud-config-seed-random.txt new file mode 100644 index 00000000..08f69a9f --- /dev/null +++ b/doc/examples/cloud-config-seed-random.txt @@ -0,0 +1,32 @@ +#cloud-config +# +# random_seed is a dictionary. +# +# The config module will write seed data from the datasource +# to 'file' described below. +# +# Entries in this dictionary are: +# file: the file to write random data to (default is /dev/urandom) +# data: this data will be written to 'file' before data from +# the datasource +# encoding: this will be used to decode 'data' provided. +# allowed values are 'encoding', 'raw', 'base64', 'b64' +# 'gzip', or 'gz'. Default is 'raw' +# +# command: execute this command to seed random. +# the command will have RANDOM_SEED_FILE in its environment +# set to the value of 'file' above. +# command_required: default False +# if true, and 'command' is not available to be run +# then exception is raised and cloud-init will record failure. +# Otherwise, only debug error is mentioned. +# +# Note: command could be ['pollinate', +# '--server=http://local.pollinate.server'] +# which would have pollinate populate /dev/urandom from provided server +seed_random: + file: '/dev/urandom' + data: 'my random string' + encoding: 'raw' + command: ['sh', '-c', 'dd if=/dev/urandom of=$RANDOM_SEED_FILE'] + command_required: True -- cgit v1.2.3