Diffstat (limited to 'cloudinit/cmd')
-rw-r--r--  cloudinit/cmd/clean.py                   |  65
-rwxr-xr-x  cloudinit/cmd/cloud_id.py                |  83
-rw-r--r--  cloudinit/cmd/devel/__init__.py          |   3
-rw-r--r--  cloudinit/cmd/devel/hotplug_hook.py      | 291
-rw-r--r--  cloudinit/cmd/devel/logs.py              | 134
-rwxr-xr-x  cloudinit/cmd/devel/make_mime.py         | 113
-rwxr-xr-x  cloudinit/cmd/devel/net_convert.py       | 150
-rw-r--r--  cloudinit/cmd/devel/parser.py            |  45
-rwxr-xr-x  cloudinit/cmd/devel/render.py            |  54
-rw-r--r--  cloudinit/cmd/devel/tests/__init__.py    |   0
-rw-r--r--  cloudinit/cmd/devel/tests/test_logs.py   | 167
-rw-r--r--  cloudinit/cmd/devel/tests/test_render.py | 144
-rw-r--r--  cloudinit/cmd/main.py                    | 630
-rw-r--r--  cloudinit/cmd/query.py                   | 286
-rw-r--r--  cloudinit/cmd/status.py                  | 141
-rw-r--r--  cloudinit/cmd/tests/__init__.py          |   0
-rw-r--r--  cloudinit/cmd/tests/test_clean.py        | 178
-rw-r--r--  cloudinit/cmd/tests/test_cloud_id.py     | 127
-rw-r--r--  cloudinit/cmd/tests/test_main.py         | 162
-rw-r--r--  cloudinit/cmd/tests/test_query.py        | 341
-rw-r--r--  cloudinit/cmd/tests/test_status.py       | 391
21 files changed, 1400 insertions, 2105 deletions
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 928a8eea..0e1db118 100644
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -10,12 +10,14 @@ import os
import sys
from cloudinit.stages import Init
-from cloudinit.subp import (ProcessExecutionError, subp)
-from cloudinit.util import (del_dir, del_file, get_config_logfiles, is_link)
-
-
-def error(msg):
- sys.stderr.write("ERROR: " + msg + "\n")
+from cloudinit.subp import ProcessExecutionError, subp
+from cloudinit.util import (
+ del_dir,
+ del_file,
+ error,
+ get_config_logfiles,
+ is_link,
+)
def get_parser(parser=None):
@@ -29,18 +31,35 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='clean',
- description=('Remove logs and artifacts so cloud-init re-runs on '
- 'a clean system'))
+ prog="clean",
+ description=(
+ "Remove logs and artifacts so cloud-init re-runs on "
+ "a clean system"
+ ),
+ )
parser.add_argument(
- '-l', '--logs', action='store_true', default=False, dest='remove_logs',
- help='Remove cloud-init logs.')
+ "-l",
+ "--logs",
+ action="store_true",
+ default=False,
+ dest="remove_logs",
+ help="Remove cloud-init logs.",
+ )
parser.add_argument(
- '-r', '--reboot', action='store_true', default=False,
- help='Reboot system after logs are cleaned so cloud-init re-runs.')
+ "-r",
+ "--reboot",
+ action="store_true",
+ default=False,
+ help="Reboot system after logs are cleaned so cloud-init re-runs.",
+ )
parser.add_argument(
- '-s', '--seed', action='store_true', default=False, dest='remove_seed',
- help='Remove cloud-init seed directory /var/lib/cloud/seed.')
+ "-s",
+ "--seed",
+ action="store_true",
+ default=False,
+ dest="remove_seed",
+ help="Remove cloud-init seed directory /var/lib/cloud/seed.",
+ )
return parser
@@ -61,8 +80,8 @@ def remove_artifacts(remove_logs, remove_seed=False):
if not os.path.isdir(init.paths.cloud_dir):
return 0 # Artifacts dir already cleaned
- seed_path = os.path.join(init.paths.cloud_dir, 'seed')
- for path in glob.glob('%s/*' % init.paths.cloud_dir):
+ seed_path = os.path.join(init.paths.cloud_dir, "seed")
+ for path in glob.glob("%s/*" % init.paths.cloud_dir):
if path == seed_path and not remove_seed:
continue
try:
@@ -71,7 +90,7 @@ def remove_artifacts(remove_logs, remove_seed=False):
else:
del_file(path)
except OSError as e:
- error('Could not remove {0}: {1}'.format(path, str(e)))
+ error("Could not remove {0}: {1}".format(path, str(e)))
return 1
return 0
@@ -80,13 +99,15 @@ def handle_clean_args(name, args):
"""Handle calls to 'cloud-init clean' as a subcommand."""
exit_code = remove_artifacts(args.remove_logs, args.remove_seed)
if exit_code == 0 and args.reboot:
- cmd = ['shutdown', '-r', 'now']
+ cmd = ["shutdown", "-r", "now"]
try:
subp(cmd, capture=False)
except ProcessExecutionError as e:
error(
'Could not reboot this system using "{0}": {1}'.format(
- cmd, str(e)))
+ cmd, str(e)
+ )
+ )
exit_code = 1
return exit_code
@@ -94,10 +115,10 @@ def handle_clean_args(name, args):
def main():
"""Tool to collect and tar all cloud-init related logs."""
parser = get_parser()
- sys.exit(handle_clean_args('clean', parser.parse_args()))
+ sys.exit(handle_clean_args("clean", parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
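The local error() helper deleted above is not dropped: it and the copy removed from cloud_id.py below are consolidated into cloudinit.util, which both files now import. A minimal sketch of what the shared helper plausibly looks like, inferred from the two removed definitions (a stderr write plus a return code the caller can propagate); the exact upstream signature may differ:

    import sys

    def error(msg, rc=1):
        # Write the message to stderr and hand back a code the caller
        # can return or sys.exit() with.
        sys.stderr.write("ERROR: %s\n" % msg)
        return rc
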
diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py
index 97608921..b9c30fb4 100755
--- a/cloudinit/cmd/cloud_id.py
+++ b/cloudinit/cmd/cloud_id.py
@@ -6,12 +6,17 @@ import argparse
import json
import sys
+from cloudinit.cmd.status import UXAppStatus, get_status_details
from cloudinit.sources import (
- INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id)
+ INSTANCE_JSON_FILE,
+ METADATA_UNKNOWN,
+ canonical_cloud_id,
+)
+from cloudinit.util import error
-DEFAULT_INSTANCE_JSON = '/run/cloud-init/%s' % INSTANCE_JSON_FILE
+DEFAULT_INSTANCE_JSON = "/run/cloud-init/%s" % INSTANCE_JSON_FILE
-NAME = 'cloud-id'
+NAME = "cloud-id"
def get_parser(parser=None):
@@ -26,55 +31,75 @@ def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
prog=NAME,
- description='Report the canonical cloud-id for this instance')
+ description="Report the canonical cloud-id for this instance",
+ )
parser.add_argument(
- '-j', '--json', action='store_true', default=False,
- help='Report all standardized cloud-id information as json.')
+ "-j",
+ "--json",
+ action="store_true",
+ default=False,
+ help="Report all standardized cloud-id information as json.",
+ )
parser.add_argument(
- '-l', '--long', action='store_true', default=False,
- help='Report extended cloud-id information as tab-delimited string.')
+ "-l",
+ "--long",
+ action="store_true",
+ default=False,
+ help="Report extended cloud-id information as tab-delimited string.",
+ )
parser.add_argument(
- '-i', '--instance-data', type=str, default=DEFAULT_INSTANCE_JSON,
- help=('Path to instance-data.json file. Default is %s' %
- DEFAULT_INSTANCE_JSON))
+ "-i",
+ "--instance-data",
+ type=str,
+ default=DEFAULT_INSTANCE_JSON,
+ help="Path to instance-data.json file. Default is %s"
+ % DEFAULT_INSTANCE_JSON,
+ )
return parser
-def error(msg):
- sys.stderr.write('ERROR: %s\n' % msg)
- return 1
-
-
def handle_args(name, args):
"""Handle calls to 'cloud-id' cli.
Print the canonical cloud-id on which the instance is running.
- @return: 0 on success, 1 otherwise.
+ @return: 0 on success, 1 on error, 2 on disabled, 3 on cloud-init not run.
"""
+ status, _status_details, _time = get_status_details()
+ if status == UXAppStatus.DISABLED:
+ sys.stdout.write("{0}\n".format(status.value))
+ return 2
+ elif status == UXAppStatus.NOT_RUN:
+ sys.stdout.write("{0}\n".format(status.value))
+ return 3
+
try:
instance_data = json.load(open(args.instance_data))
except IOError:
return error(
"File not found '%s'. Provide a path to instance data json file"
- ' using --instance-data' % args.instance_data)
+ " using --instance-data" % args.instance_data
+ )
except ValueError as e:
return error(
- "File '%s' is not valid json. %s" % (args.instance_data, e))
- v1 = instance_data.get('v1', {})
+ "File '%s' is not valid json. %s" % (args.instance_data, e)
+ )
+ v1 = instance_data.get("v1", {})
cloud_id = canonical_cloud_id(
- v1.get('cloud_name', METADATA_UNKNOWN),
- v1.get('region', METADATA_UNKNOWN),
- v1.get('platform', METADATA_UNKNOWN))
+ v1.get("cloud_name", METADATA_UNKNOWN),
+ v1.get("region", METADATA_UNKNOWN),
+ v1.get("platform", METADATA_UNKNOWN),
+ )
if args.json:
- v1['cloud_id'] = cloud_id
- response = json.dumps( # Pretty, sorted json
- v1, indent=1, sort_keys=True, separators=(',', ': '))
+ v1["cloud_id"] = cloud_id
+ response = json.dumps( # Pretty, sorted json
+ v1, indent=1, sort_keys=True, separators=(",", ": ")
+ )
elif args.long:
- response = '%s\t%s' % (cloud_id, v1.get('region', METADATA_UNKNOWN))
+ response = "%s\t%s" % (cloud_id, v1.get("region", METADATA_UNKNOWN))
else:
response = cloud_id
- sys.stdout.write('%s\n' % response)
+ sys.stdout.write("%s\n" % response)
return 0
@@ -84,7 +109,7 @@ def main():
sys.exit(handle_args(NAME, parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
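With the get_status_details() check added above, cloud-id's exit status now distinguishes four cases rather than pass/fail. A hypothetical caller branching on the contract documented in the new docstring (0 success, 1 error, 2 disabled, 3 cloud-init has not run):

    import subprocess

    result = subprocess.run(["cloud-id"], capture_output=True, text=True)
    if result.returncode == 0:
        print("canonical cloud-id: %s" % result.stdout.strip())
    elif result.returncode == 2:
        print("cloud-init is disabled on this system")
    elif result.returncode == 3:
        print("cloud-init has not run yet")
    else:
        print("cloud-id failed: %s" % result.stderr.strip())
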
diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py
index 3ae28b69..ead5f7a9 100644
--- a/cloudinit/cmd/devel/__init__.py
+++ b/cloudinit/cmd/devel/__init__.py
@@ -11,7 +11,7 @@ from cloudinit.stages import Init
def addLogHandlerCLI(logger, log_level):
"""Add a commandline logging handler to emit messages to stderr."""
- formatter = logging.Formatter('%(levelname)s: %(message)s')
+ formatter = logging.Formatter("%(levelname)s: %(message)s")
log.setupBasicLogging(log_level, formatter=formatter)
return logger
@@ -22,4 +22,5 @@ def read_cfg_paths():
init.read_cfg()
return init.paths
+
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
new file mode 100644
index 00000000..a9be0379
--- /dev/null
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -0,0 +1,291 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Handle reconfiguration on hotplug events"""
+import abc
+import argparse
+import os
+import sys
+import time
+
+from cloudinit import log, reporting, stages
+from cloudinit.event import EventScope, EventType
+from cloudinit.net import activators, read_sys_net_safe
+from cloudinit.net.network_state import parse_net_config_data
+from cloudinit.reporting import events
+from cloudinit.sources import DataSource # noqa: F401
+from cloudinit.sources import DataSourceNotFoundException
+from cloudinit.stages import Init
+
+LOG = log.getLogger(__name__)
+NAME = "hotplug-hook"
+
+
+def get_parser(parser=None):
+ """Build or extend an arg parser for hotplug-hook utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ subcommand which will be extended to support the args of this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
+
+ parser.description = __doc__
+ parser.add_argument(
+ "-s",
+ "--subsystem",
+ required=True,
+ help="subsystem to act on",
+ choices=["net"],
+ )
+
+ subparsers = parser.add_subparsers(
+ title="Hotplug Action", dest="hotplug_action"
+ )
+ subparsers.required = True
+
+ subparsers.add_parser(
+ "query", help="query if hotplug is enabled for given subsystem"
+ )
+
+ parser_handle = subparsers.add_parser(
+ "handle", help="handle the hotplug event"
+ )
+ parser_handle.add_argument(
+ "-d",
+ "--devpath",
+ required=True,
+ metavar="PATH",
+ help="sysfs path to hotplugged device",
+ )
+ parser_handle.add_argument(
+ "-u",
+ "--udevaction",
+ required=True,
+ help="action to take",
+ choices=["add", "remove"],
+ )
+
+ return parser
+
+
+class UeventHandler(abc.ABC):
+ def __init__(self, id, datasource, devpath, action, success_fn):
+ self.id = id
+ self.datasource = datasource # type: DataSource
+ self.devpath = devpath
+ self.action = action
+ self.success_fn = success_fn
+
+ @abc.abstractmethod
+ def apply(self):
+ raise NotImplementedError()
+
+ @property
+ @abc.abstractmethod
+ def config(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def device_detected(self) -> bool:
+ raise NotImplementedError()
+
+ def detect_hotplugged_device(self):
+ detect_presence = None
+ if self.action == "add":
+ detect_presence = True
+ elif self.action == "remove":
+ detect_presence = False
+ else:
+ raise ValueError("Unknown action: %s" % self.action)
+
+ if detect_presence != self.device_detected():
+ raise RuntimeError(
+ "Failed to detect %s in updated metadata" % self.id
+ )
+
+ def success(self):
+ return self.success_fn()
+
+ def update_metadata(self):
+ result = self.datasource.update_metadata_if_supported(
+ [EventType.HOTPLUG]
+ )
+ if not result:
+ raise RuntimeError(
+ "Datasource %s not updated for event %s"
+ % (self.datasource, EventType.HOTPLUG)
+ )
+ return result
+
+
+class NetHandler(UeventHandler):
+ def __init__(self, datasource, devpath, action, success_fn):
+ # convert devpath to mac address
+ id = read_sys_net_safe(os.path.basename(devpath), "address")
+ super().__init__(id, datasource, devpath, action, success_fn)
+
+ def apply(self):
+ self.datasource.distro.apply_network_config(
+ self.config,
+ bring_up=False,
+ )
+ interface_name = os.path.basename(self.devpath)
+ activator = activators.select_activator()
+ if self.action == "add":
+ if not activator.bring_up_interface(interface_name):
+ raise RuntimeError(
+ "Failed to bring up device: {}".format(self.devpath)
+ )
+ elif self.action == "remove":
+ if not activator.bring_down_interface(interface_name):
+ raise RuntimeError(
+ "Failed to bring down device: {}".format(self.devpath)
+ )
+
+ @property
+ def config(self):
+ return self.datasource.network_config
+
+ def device_detected(self) -> bool:
+ netstate = parse_net_config_data(self.config)
+ found = [
+ iface
+ for iface in netstate.iter_interfaces()
+ if iface.get("mac_address") == self.id
+ ]
+ LOG.debug("Ifaces with ID=%s : %s", self.id, found)
+ return len(found) > 0
+
+
+SUBSYSTEM_PROPERTES_MAP = {
+ "net": (NetHandler, EventScope.NETWORK),
+}
+
+
+def is_enabled(hotplug_init, subsystem):
+ try:
+ scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1]
+ except KeyError as e:
+ raise Exception(
+ "hotplug-hook: cannot handle events for subsystem: {}".format(
+ subsystem
+ )
+ ) from e
+
+ return stages.update_event_enabled(
+ datasource=hotplug_init.datasource,
+ cfg=hotplug_init.cfg,
+ event_source_type=EventType.HOTPLUG,
+ scope=scope,
+ )
+
+
+def initialize_datasource(hotplug_init, subsystem):
+ LOG.debug("Fetching datasource")
+ datasource = hotplug_init.fetch(existing="trust")
+
+ if not datasource.get_supported_events([EventType.HOTPLUG]):
+ LOG.debug("hotplug not supported for event of type %s", subsystem)
+ return
+
+ if not is_enabled(hotplug_init, subsystem):
+ LOG.debug("hotplug not enabled for event of type %s", subsystem)
+ return
+ return datasource
+
+
+def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction):
+ datasource = initialize_datasource(hotplug_init, subsystem)
+ if not datasource:
+ return
+ handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0]
+ LOG.debug("Creating %s event handler", subsystem)
+ event_handler = handler_cls(
+ datasource=datasource,
+ devpath=devpath,
+ action=udevaction,
+ success_fn=hotplug_init._write_to_cache,
+ ) # type: UeventHandler
+ wait_times = [1, 3, 5, 10, 30]
+ for attempt, wait in enumerate(wait_times):
+ LOG.debug(
+ "subsystem=%s update attempt %s/%s",
+ subsystem,
+ attempt,
+ len(wait_times),
+ )
+ try:
+ LOG.debug("Refreshing metadata")
+ event_handler.update_metadata()
+ LOG.debug("Detecting device in updated metadata")
+ event_handler.detect_hotplugged_device()
+ LOG.debug("Applying config change")
+ event_handler.apply()
+ LOG.debug("Updating cache")
+ event_handler.success()
+ break
+ except Exception as e:
+ LOG.debug("Exception while processing hotplug event. %s", e)
+ time.sleep(wait)
+ last_exception = e
+ else:
+ raise last_exception # type: ignore
+
+
+def handle_args(name, args):
+    # Note that if an exception happens between now and when logging is
+    # set up, we'll only see it in the journal
+ hotplug_reporter = events.ReportEventStack(
+ name, __doc__, reporting_enabled=True
+ )
+
+ hotplug_init = Init(ds_deps=[], reporter=hotplug_reporter)
+ hotplug_init.read_cfg()
+
+ log.setupLogging(hotplug_init.cfg)
+ if "reporting" in hotplug_init.cfg:
+ reporting.update_configuration(hotplug_init.cfg.get("reporting"))
+    # Logging isn't going to be set up until now
+ LOG.debug(
+ "%s called with the following arguments: {"
+ "hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}",
+ name,
+ args.hotplug_action,
+ args.subsystem,
+ args.udevaction if "udevaction" in args else None,
+ args.devpath if "devpath" in args else None,
+ )
+
+ with hotplug_reporter:
+ try:
+ if args.hotplug_action == "query":
+ try:
+ datasource = initialize_datasource(
+ hotplug_init, args.subsystem
+ )
+ except DataSourceNotFoundException:
+ print(
+ "Unable to determine hotplug state. No datasource "
+ "detected"
+ )
+ sys.exit(1)
+ print("enabled" if datasource else "disabled")
+ else:
+ handle_hotplug(
+ hotplug_init=hotplug_init,
+ devpath=args.devpath,
+ subsystem=args.subsystem,
+ udevaction=args.udevaction,
+ )
+ except Exception:
+ LOG.exception("Received fatal exception handling hotplug!")
+ raise
+
+ LOG.debug("Exiting hotplug handler")
+ reporting.flush_events()
+
+
+if __name__ == "__main__":
+ args = get_parser().parse_args()
+ handle_args(NAME, args)
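handle_hotplug() above retries the refresh/detect/apply sequence once per entry in wait_times, sleeping between attempts, and relies on Python's for/else so the last exception is re-raised only after every attempt fails. A standalone sketch of that retry idiom, with illustrative names:

    import time

    def retry_with_backoff(operation, wait_times=(1, 3, 5, 10, 30)):
        for wait in wait_times:
            try:
                result = operation()
                break  # success: stop retrying
            except Exception as e:
                last_exception = e
                time.sleep(wait)
        else:
            # Loop finished without break: every attempt failed.
            raise last_exception
        return result
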
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 51c61cca..d54b809a 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -5,20 +5,19 @@
"""Define 'collect-logs' utility and handler to include in cloud-init cmd."""
import argparse
-from datetime import datetime
import os
import shutil
import sys
+from datetime import datetime
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.subp import ProcessExecutionError, subp
from cloudinit.temp_utils import tempdir
-from cloudinit.subp import (ProcessExecutionError, subp)
-from cloudinit.util import (chdir, copy, ensure_dir, write_file)
+from cloudinit.util import chdir, copy, ensure_dir, write_file
-
-CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
-CLOUDINIT_RUN_DIR = '/run/cloud-init'
-USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional
+CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"]
+CLOUDINIT_RUN_DIR = "/run/cloud-init"
+USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional
def get_parser(parser=None):
@@ -32,27 +31,49 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='collect-logs',
- description='Collect and tar all cloud-init debug info')
- parser.add_argument('--verbose', '-v', action='count', default=0,
- dest='verbosity', help="Be more verbose.")
+ prog="collect-logs",
+ description="Collect and tar all cloud-init debug info",
+ )
+ parser.add_argument(
+ "--verbose",
+ "-v",
+ action="count",
+ default=0,
+ dest="verbosity",
+ help="Be more verbose.",
+ )
parser.add_argument(
- "--tarfile", '-t', default='cloud-init.tar.gz',
- help=('The tarfile to create containing all collected logs.'
- ' Default: cloud-init.tar.gz'))
+ "--tarfile",
+ "-t",
+ default="cloud-init.tar.gz",
+ help=(
+ "The tarfile to create containing all collected logs."
+ " Default: cloud-init.tar.gz"
+ ),
+ )
parser.add_argument(
- "--include-userdata", '-u', default=False, action='store_true',
- dest='userdata', help=(
- 'Optionally include user-data from {0} which could contain'
- ' sensitive information.'.format(USER_DATA_FILE)))
+ "--include-userdata",
+ "-u",
+ default=False,
+ action="store_true",
+ dest="userdata",
+ help=(
+ "Optionally include user-data from {0} which could contain"
+ " sensitive information.".format(USER_DATA_FILE)
+ ),
+ )
return parser
-def _copytree_ignore_sensitive_files(curdir, files):
- """Return a list of files to ignore if we are non-root"""
- if os.getuid() == 0:
- return ()
- return (INSTANCE_JSON_SENSITIVE_FILE,) # Ignore root-permissioned files
+def _copytree_rundir_ignore_files(curdir, files):
+ """Return a list of files to ignore for /run/cloud-init directory"""
+ ignored_files = [
+ "hook-hotplug-cmd", # named pipe for hotplug
+ ]
+ if os.getuid() != 0:
+ # Ignore root-permissioned files
+ ignored_files.append(INSTANCE_JSON_SENSITIVE_FILE)
+ return ignored_files
def _write_command_output_to_file(cmd, filename, msg, verbosity):
@@ -90,48 +111,67 @@ def collect_logs(tarfile, include_userdata, verbosity=0):
if include_userdata and os.getuid() != 0:
sys.stderr.write(
"To include userdata, root user is required."
- " Try sudo cloud-init collect-logs\n")
+ " Try sudo cloud-init collect-logs\n"
+ )
return 1
tarfile = os.path.abspath(tarfile)
- date = datetime.utcnow().date().strftime('%Y-%m-%d')
- log_dir = 'cloud-init-logs-{0}'.format(date)
- with tempdir(dir='/tmp') as tmp_dir:
+ date = datetime.utcnow().date().strftime("%Y-%m-%d")
+ log_dir = "cloud-init-logs-{0}".format(date)
+ with tempdir(dir="/tmp") as tmp_dir:
log_dir = os.path.join(tmp_dir, log_dir)
version = _write_command_output_to_file(
- ['cloud-init', '--version'],
- os.path.join(log_dir, 'version'),
- "cloud-init --version", verbosity)
+ ["cloud-init", "--version"],
+ os.path.join(log_dir, "version"),
+ "cloud-init --version",
+ verbosity,
+ )
dpkg_ver = _write_command_output_to_file(
- ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
- os.path.join(log_dir, 'dpkg-version'),
- "dpkg version", verbosity)
+ ["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"],
+ os.path.join(log_dir, "dpkg-version"),
+ "dpkg version",
+ verbosity,
+ )
if not version:
version = dpkg_ver if dpkg_ver else "not-available"
_debug("collected cloud-init version: %s\n" % version, 1, verbosity)
_write_command_output_to_file(
- ['dmesg'], os.path.join(log_dir, 'dmesg.txt'),
- "dmesg output", verbosity)
+ ["dmesg"],
+ os.path.join(log_dir, "dmesg.txt"),
+ "dmesg output",
+ verbosity,
+ )
_write_command_output_to_file(
- ['journalctl', '--boot=0', '-o', 'short-precise'],
- os.path.join(log_dir, 'journal.txt'),
- "systemd journal of current boot", verbosity)
+ ["journalctl", "--boot=0", "-o", "short-precise"],
+ os.path.join(log_dir, "journal.txt"),
+ "systemd journal of current boot",
+ verbosity,
+ )
for log in CLOUDINIT_LOGS:
_collect_file(log, log_dir, verbosity)
if include_userdata:
_collect_file(USER_DATA_FILE, log_dir, verbosity)
- run_dir = os.path.join(log_dir, 'run')
+ run_dir = os.path.join(log_dir, "run")
ensure_dir(run_dir)
if os.path.exists(CLOUDINIT_RUN_DIR):
- shutil.copytree(CLOUDINIT_RUN_DIR,
- os.path.join(run_dir, 'cloud-init'),
- ignore=_copytree_ignore_sensitive_files)
+ try:
+ shutil.copytree(
+ CLOUDINIT_RUN_DIR,
+ os.path.join(run_dir, "cloud-init"),
+ ignore=_copytree_rundir_ignore_files,
+ )
+ except shutil.Error as e:
+ sys.stderr.write("Failed collecting file(s) due to error:\n")
+ sys.stderr.write(str(e) + "\n")
_debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity)
else:
- _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1,
- verbosity)
+ _debug(
+ "directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR,
+ 1,
+ verbosity,
+ )
with chdir(tmp_dir):
- subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
+ subp(["tar", "czvf", tarfile, log_dir.replace(tmp_dir + "/", "")])
sys.stderr.write("Wrote %s\n" % tarfile)
return 0
@@ -144,10 +184,10 @@ def handle_collect_logs_args(name, args):
def main():
"""Tool to collect and tar all cloud-init related logs."""
parser = get_parser()
- return handle_collect_logs_args('collect-logs', parser.parse_args())
+ return handle_collect_logs_args("collect-logs", parser.parse_args())
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
# vi: ts=4 expandtab
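The renamed _copytree_rundir_ignore_files() is a standard shutil.copytree ignore callback: copytree invokes it once per visited directory with that directory and its entries, and skips whatever names it returns, which is how the hotplug named pipe is kept out of the tarball. A small sketch of the mechanism, with illustrative paths:

    import shutil

    def ignore_rundir_files(curdir, files):
        # Called once per directory; returned names are not copied.
        return [f for f in files if f == "hook-hotplug-cmd"]

    shutil.copytree(
        "/run/cloud-init", "/tmp/run-copy", ignore=ignore_rundir_files
    )
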
diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py
index 4e6a5778..c7671a93 100755
--- a/cloudinit/cmd/devel/make_mime.py
+++ b/cloudinit/cmd/devel/make_mime.py
@@ -9,19 +9,44 @@ from email.mime.text import MIMEText
from cloudinit import log
from cloudinit.handlers import INCLUSION_TYPES_MAP
+
from . import addLogHandlerCLI
-NAME = 'make-mime'
+NAME = "make-mime"
LOG = log.getLogger(NAME)
-EPILOG = ("Example: make-mime -a config.yaml:cloud-config "
- "-a script.sh:x-shellscript > user-data")
+EPILOG = (
+ "Example: make-mime -a config.yaml:cloud-config "
+ "-a script.sh:x-shellscript > user-data"
+)
+
+
+def create_mime_message(files):
+ sub_messages = []
+ errors = []
+ for i, (fh, filename, format_type) in enumerate(files):
+ contents = fh.read()
+ sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
+ sub_message.add_header(
+ "Content-Disposition", 'attachment; filename="%s"' % (filename)
+ )
+ content_type = sub_message.get_content_type().lower()
+ if content_type not in get_content_types():
+ msg = (
+ "content type %r for attachment %s " "may be incorrect!"
+ ) % (content_type, i + 1)
+ errors.append(msg)
+ sub_messages.append(sub_message)
+ combined_message = MIMEMultipart()
+ for msg in sub_messages:
+ combined_message.attach(msg)
+ return (combined_message, errors)
def file_content_type(text):
- """ Return file content type by reading the first line of the input. """
+ """Return file content type by reading the first line of the input."""
try:
filename, content_type = text.split(":", 1)
- return (open(filename, 'r'), filename, content_type.strip())
+ return (open(filename, "r"), filename, content_type.strip())
except ValueError as e:
raise argparse.ArgumentError(
text, "Invalid value for %r" % (text)
@@ -41,26 +66,43 @@ def get_parser(parser=None):
# update the parser's doc and add an epilog to show an example
parser.description = __doc__
parser.epilog = EPILOG
- parser.add_argument("-a", "--attach", dest="files", type=file_content_type,
- action='append', default=[],
- metavar="<file>:<content-type>",
- help=("attach the given file as the specified "
- "content-type"))
- parser.add_argument('-l', '--list-types', action='store_true',
- default=False,
- help='List support cloud-init content types.')
- parser.add_argument('-f', '--force', action='store_true',
- default=False,
- help='Ignore unknown content-type warnings')
+ parser.add_argument(
+ "-a",
+ "--attach",
+ dest="files",
+ type=file_content_type,
+ action="append",
+ default=[],
+ metavar="<file>:<content-type>",
+ help="attach the given file as the specified content-type",
+ )
+ parser.add_argument(
+ "-l",
+ "--list-types",
+ action="store_true",
+ default=False,
+        help="List supported cloud-init content types.",
+ )
+ parser.add_argument(
+ "-f",
+ "--force",
+ action="store_true",
+ default=False,
+ help="Ignore unknown content-type warnings",
+ )
return parser
def get_content_types(strip_prefix=False):
- """ Return a list of cloud-init supported content types. Optionally
- strip out the leading 'text/' of the type if strip_prefix=True.
+ """Return a list of cloud-init supported content types. Optionally
+ strip out the leading 'text/' of the type if strip_prefix=True.
"""
- return sorted([ctype.replace("text/", "") if strip_prefix else ctype
- for ctype in INCLUSION_TYPES_MAP.values()])
+ return sorted(
+ [
+ ctype.replace("text/", "") if strip_prefix else ctype
+ for ctype in INCLUSION_TYPES_MAP.values()
+ ]
+ )
def handle_args(name, args):
@@ -77,37 +119,24 @@ def handle_args(name, args):
print("\n".join(get_content_types(strip_prefix=True)))
return 0
- sub_messages = []
- errors = []
- for i, (fh, filename, format_type) in enumerate(args.files):
- contents = fh.read()
- sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
- sub_message.add_header('Content-Disposition',
- 'attachment; filename="%s"' % (filename))
- content_type = sub_message.get_content_type().lower()
- if content_type not in get_content_types():
- level = "WARNING" if args.force else "ERROR"
- msg = (level + ": content type %r for attachment %s "
- "may be incorrect!") % (content_type, i + 1)
- sys.stderr.write(msg + '\n')
- errors.append(msg)
- sub_messages.append(sub_message)
- if len(errors) and not args.force:
+ combined_message, errors = create_mime_message(args.files)
+ if errors:
+ level = "WARNING" if args.force else "ERROR"
+ for error in errors:
+ sys.stderr.write(f"{level}: {error}\n")
sys.stderr.write("Invalid content-types, override with --force\n")
- return 1
- combined_message = MIMEMultipart()
- for msg in sub_messages:
- combined_message.attach(msg)
+ if not args.force:
+ return 1
print(combined_message)
return 0
def main():
args = get_parser().parse_args()
- return(handle_args(NAME, args))
+ return handle_args(NAME, args)
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
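The attachment loop moves into create_mime_message(), which accepts (filehandle, filename, content-type) tuples and returns the combined multipart message plus any content-type warnings, leaving the --force policy to the caller. A hypothetical direct use, mirroring --attach config.yaml:cloud-config:

    from cloudinit.cmd.devel.make_mime import create_mime_message

    with open("config.yaml") as fh:
        message, errors = create_mime_message(
            [(fh, "config.yaml", "cloud-config")]
        )
    for err in errors:
        print("WARNING: %s" % err)
    print(message)
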
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index 80d217ca..18b1e7ff 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -6,15 +6,13 @@ import json
import os
import sys
-from cloudinit.sources.helpers import openstack
+from cloudinit import distros, log, safeyaml
+from cloudinit.net import eni, netplan, network_state, networkd, sysconfig
from cloudinit.sources import DataSourceAzure as azure
from cloudinit.sources import DataSourceOVF as ovf
+from cloudinit.sources.helpers import openstack
-from cloudinit import distros, safeyaml
-from cloudinit.net import eni, netplan, network_state, sysconfig
-from cloudinit import log
-
-NAME = 'net-convert'
+NAME = "net-convert"
def get_parser(parser=None):
@@ -27,30 +25,59 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
- parser.add_argument("-p", "--network-data", type=open,
- metavar="PATH", required=True)
- parser.add_argument("-k", "--kind",
- choices=['eni', 'network_data.json', 'yaml',
- 'azure-imds', 'vmware-imc'],
- required=True)
- parser.add_argument("-d", "--directory",
- metavar="PATH",
- help="directory to place output in",
- required=True)
- parser.add_argument("-D", "--distro",
- choices=[item for sublist in
- distros.OSFAMILIES.values()
- for item in sublist],
- required=True)
- parser.add_argument("-m", "--mac",
- metavar="name,mac",
- action='append',
- help="interface name to mac mapping")
- parser.add_argument("--debug", action='store_true',
- help='enable debug logging to stderr.')
- parser.add_argument("-O", "--output-kind",
- choices=['eni', 'netplan', 'sysconfig'],
- required=True)
+ parser.add_argument(
+ "-p",
+ "--network-data",
+ type=open,
+ metavar="PATH",
+ required=True,
+ help="The network configuration to read",
+ )
+ parser.add_argument(
+ "-k",
+ "--kind",
+ choices=[
+ "eni",
+ "network_data.json",
+ "yaml",
+ "azure-imds",
+ "vmware-imc",
+ ],
+ required=True,
+ help="The format of the given network config",
+ )
+ parser.add_argument(
+ "-d",
+ "--directory",
+ metavar="PATH",
+ help="directory to place output in",
+ required=True,
+ )
+ parser.add_argument(
+ "-D",
+ "--distro",
+ choices=[
+ item for sublist in distros.OSFAMILIES.values() for item in sublist
+ ],
+ required=True,
+ )
+ parser.add_argument(
+ "-m",
+ "--mac",
+ metavar="name,mac",
+ action="append",
+ help="interface name to mac mapping",
+ )
+ parser.add_argument(
+ "--debug", action="store_true", help="enable debug logging to stderr."
+ )
+ parser.add_argument(
+ "-O",
+ "--output-kind",
+ choices=["eni", "netplan", "networkd", "sysconfig"],
+ required=True,
+ help="The network config format to emit",
+ )
return parser
@@ -78,57 +105,68 @@ def handle_args(name, args):
pre_ns = eni.convert_eni_data(net_data)
elif args.kind == "yaml":
pre_ns = safeyaml.load(net_data)
- if 'network' in pre_ns:
- pre_ns = pre_ns.get('network')
+ if "network" in pre_ns:
+ pre_ns = pre_ns.get("network")
if args.debug:
- sys.stderr.write('\n'.join(
- ["Input YAML", safeyaml.dumps(pre_ns), ""]))
- elif args.kind == 'network_data.json':
+ sys.stderr.write(
+ "\n".join(["Input YAML", safeyaml.dumps(pre_ns), ""])
+ )
+ elif args.kind == "network_data.json":
pre_ns = openstack.convert_net_json(
- json.loads(net_data), known_macs=known_macs)
- elif args.kind == 'azure-imds':
+ json.loads(net_data), known_macs=known_macs
+ )
+ elif args.kind == "azure-imds":
pre_ns = azure.parse_network_config(json.loads(net_data))
- elif args.kind == 'vmware-imc':
+ elif args.kind == "vmware-imc":
config = ovf.Config(ovf.ConfigFile(args.network_data.name))
pre_ns = ovf.get_network_config_from_conf(config, False)
ns = network_state.parse_net_config_data(pre_ns)
- if not ns:
- raise RuntimeError("No valid network_state object created from"
- " input data")
if args.debug:
- sys.stderr.write('\n'.join(
- ["", "Internal State", safeyaml.dumps(ns), ""]))
+ sys.stderr.write(
+ "\n".join(["", "Internal State", safeyaml.dumps(ns), ""])
+ )
distro_cls = distros.fetch(args.distro)
distro = distro_cls(args.distro, {}, None)
config = {}
if args.output_kind == "eni":
r_cls = eni.Renderer
- config = distro.renderer_configs.get('eni')
+ config = distro.renderer_configs.get("eni")
elif args.output_kind == "netplan":
r_cls = netplan.Renderer
- config = distro.renderer_configs.get('netplan')
+ config = distro.renderer_configs.get("netplan")
# don't run netplan generate/apply
- config['postcmds'] = False
+ config["postcmds"] = False
# trim leading slash
- config['netplan_path'] = config['netplan_path'][1:]
+ config["netplan_path"] = config["netplan_path"][1:]
# enable some netplan features
- config['features'] = ['dhcp-use-domains', 'ipv6-mtu']
- else:
+ config["features"] = ["dhcp-use-domains", "ipv6-mtu"]
+ elif args.output_kind == "networkd":
+ r_cls = networkd.Renderer
+ config = distro.renderer_configs.get("networkd")
+ elif args.output_kind == "sysconfig":
r_cls = sysconfig.Renderer
- config = distro.renderer_configs.get('sysconfig')
+ config = distro.renderer_configs.get("sysconfig")
+ else:
+ raise RuntimeError("Invalid output_kind")
r = r_cls(config=config)
- sys.stderr.write(''.join([
- "Read input format '%s' from '%s'.\n" % (
- args.kind, args.network_data.name),
- "Wrote output format '%s' to '%s'\n" % (
- args.output_kind, args.directory)]) + "\n")
+ sys.stderr.write(
+ "".join(
+ [
+ "Read input format '%s' from '%s'.\n"
+ % (args.kind, args.network_data.name),
+ "Wrote output format '%s' to '%s'\n"
+ % (args.output_kind, args.directory),
+ ]
+ )
+ + "\n"
+ )
r.render_network_state(network_state=ns, target=args.directory)
-if __name__ == '__main__':
+if __name__ == "__main__":
args = get_parser().parse_args()
handle_args(NAME, args)
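Besides the reformat, the renderer selection above gains a networkd branch and an explicit RuntimeError for unknown kinds. A condensed sketch of just the class dispatch, written as a mapping rather than if/elif (the per-renderer config tweaks, such as netplan's postcmds, are omitted):

    from cloudinit.net import eni, netplan, networkd, sysconfig

    RENDERERS = {
        "eni": eni.Renderer,
        "netplan": netplan.Renderer,
        "networkd": networkd.Renderer,
        "sysconfig": sysconfig.Renderer,
    }

    def pick_renderer(output_kind):
        try:
            return RENDERERS[output_kind]
        except KeyError:
            raise RuntimeError("Invalid output_kind") from None
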
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index 1a3c46a4..76b16c2e 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -5,30 +5,47 @@
"""Define 'devel' subcommand argument parsers to include in cloud-init cmd."""
import argparse
+
from cloudinit.config import schema
-from . import net_convert
-from . import render
-from . import make_mime
+from . import hotplug_hook, make_mime, net_convert, render
def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
- prog='cloudinit-devel',
- description='Run development cloud-init tools')
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ prog="cloudinit-devel",
+ description="Run development cloud-init tools",
+ )
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
subcmds = [
- ('schema', 'Validate cloud-config files for document schema',
- schema.get_parser, schema.handle_schema_args),
- (net_convert.NAME, net_convert.__doc__,
- net_convert.get_parser, net_convert.handle_args),
- (render.NAME, render.__doc__,
- render.get_parser, render.handle_args),
- (make_mime.NAME, make_mime.__doc__,
- make_mime.get_parser, make_mime.handle_args),
+ (
+ hotplug_hook.NAME,
+ hotplug_hook.__doc__,
+ hotplug_hook.get_parser,
+ hotplug_hook.handle_args,
+ ),
+ (
+ "schema",
+ "Validate cloud-config files for document schema",
+ schema.get_parser,
+ schema.handle_schema_args,
+ ),
+ (
+ net_convert.NAME,
+ net_convert.__doc__,
+ net_convert.get_parser,
+ net_convert.handle_args,
+ ),
+ (render.NAME, render.__doc__, render.get_parser, render.handle_args),
+ (
+ make_mime.NAME,
+ make_mime.__doc__,
+ make_mime.get_parser,
+ make_mime.handle_args,
+ ),
]
for (subcmd, helpmsg, get_parser, handler) in subcmds:
parser = subparsers.add_parser(subcmd, help=helpmsg)
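The hunk ends at unchanged context, so the rest of the registration loop is not shown. For orientation, it plausibly completes by letting each tool extend its own subparser and recording its handler; the set_defaults wiring below is an assumption, not part of this diff:

    for (subcmd, helpmsg, get_parser, handler) in subcmds:
        parser = subparsers.add_parser(subcmd, help=helpmsg)
        parser = get_parser(parser)  # each tool adds its own arguments
        parser.set_defaults(action=(subcmd, handler))  # assumed wiring
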
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index 1090aa16..2f9a22a8 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -6,12 +6,13 @@ import argparse
import os
import sys
-from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
from cloudinit import log
+from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
+
from . import addLogHandlerCLI, read_cfg_paths
-NAME = 'render'
+NAME = "render"
LOG = log.getLogger(NAME)
@@ -27,13 +28,24 @@ def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
- 'user_data', type=str, help='Path to the user-data file to render')
+ "user_data", type=str, help="Path to the user-data file to render"
+ )
+ parser.add_argument(
+ "-i",
+ "--instance-data",
+ type=str,
+ help=(
+ "Optional path to instance-data.json file. Defaults to"
+ " /run/cloud-init/instance-data.json"
+ ),
+ )
parser.add_argument(
- '-i', '--instance-data', type=str,
- help=('Optional path to instance-data.json file. Defaults to'
- ' /run/cloud-init/instance-data.json'))
- parser.add_argument('-d', '--debug', action='store_true', default=False,
- help='Add verbose messages during template render')
+ "-d",
+ "--debug",
+ action="store_true",
+ default=False,
+ help="Add verbose messages during template render",
+ )
return parser
@@ -54,34 +66,38 @@ def handle_args(name, args):
redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
if uid == 0:
instance_data_fn = os.path.join(
- paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
if not os.path.exists(instance_data_fn):
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- instance_data_fn, redacted_data_fn
+ "Missing root-readable %s. Using redacted %s instead.",
+ instance_data_fn,
+ redacted_data_fn,
)
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
if not os.path.exists(instance_data_fn):
- LOG.error('Missing instance-data.json file: %s', instance_data_fn)
+ LOG.error("Missing instance-data.json file: %s", instance_data_fn)
return 1
try:
with open(args.user_data) as stream:
user_data = stream.read()
except IOError:
- LOG.error('Missing user-data file: %s', args.user_data)
+ LOG.error("Missing user-data file: %s", args.user_data)
return 1
try:
rendered_payload = render_jinja_payload_from_file(
- payload=user_data, payload_fn=args.user_data,
+ payload=user_data,
+ payload_fn=args.user_data,
instance_data_file=instance_data_fn,
- debug=True if args.debug else False)
+ debug=True if args.debug else False,
+ )
except RuntimeError as e:
- LOG.error('Cannot render from instance data: %s', str(e))
+ LOG.error("Cannot render from instance data: %s", str(e))
return 1
if not rendered_payload:
- LOG.error('Unable to render user-data file: %s', args.user_data)
+ LOG.error("Unable to render user-data file: %s", args.user_data)
return 1
sys.stdout.write(rendered_payload)
return 0
@@ -89,10 +105,10 @@ def handle_args(name, args):
def main():
args = get_parser().parse_args()
- return(handle_args(NAME, args))
+ return handle_args(NAME, args)
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
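handle_args() above funnels into render_jinja_payload_from_file() with exactly the keyword arguments shown in the hunk. A minimal standalone sketch of the same call, with illustrative paths:

    from cloudinit.handlers.jinja_template import (
        render_jinja_payload_from_file,
    )

    with open("user-data") as stream:
        payload = stream.read()
    rendered = render_jinja_payload_from_file(
        payload=payload,
        payload_fn="user-data",
        instance_data_file="/run/cloud-init/instance-data.json",
        debug=False,
    )
    if rendered:
        print(rendered)
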
diff --git a/cloudinit/cmd/devel/tests/__init__.py b/cloudinit/cmd/devel/tests/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/cloudinit/cmd/devel/tests/__init__.py
+++ /dev/null
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
deleted file mode 100644
index ddfd58e1..00000000
--- a/cloudinit/cmd/devel/tests/test_logs.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from datetime import datetime
-import os
-from io import StringIO
-
-from cloudinit.cmd.devel import logs
-from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
-from cloudinit.tests.helpers import (
- FilesystemMockingTestCase, mock, wrap_and_call)
-from cloudinit.subp import subp
-from cloudinit.util import ensure_dir, load_file, write_file
-
-
-@mock.patch('cloudinit.cmd.devel.logs.os.getuid')
-class TestCollectLogs(FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestCollectLogs, self).setUp()
- self.new_root = self.tmp_dir()
- self.run_dir = self.tmp_path('run', self.new_root)
-
- def test_collect_logs_with_userdata_requires_root_user(self, m_getuid):
- """collect-logs errors when non-root user collects userdata ."""
- m_getuid.return_value = 100 # non-root
- output_tarfile = self.tmp_path('logs.tgz')
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- self.assertEqual(
- 1, logs.collect_logs(output_tarfile, include_userdata=True))
- self.assertEqual(
- 'To include userdata, root user is required.'
- ' Try sudo cloud-init collect-logs\n',
- m_stderr.getvalue())
-
- def test_collect_logs_creates_tarfile(self, m_getuid):
- """collect-logs creates a tarfile with all related cloud-init info."""
- m_getuid.return_value = 100
- log1 = self.tmp_path('cloud-init.log', self.new_root)
- write_file(log1, 'cloud-init-log')
- log2 = self.tmp_path('cloud-init-output.log', self.new_root)
- write_file(log2, 'cloud-init-output-log')
- ensure_dir(self.run_dir)
- write_file(self.tmp_path('results.json', self.run_dir), 'results')
- write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
- 'sensitive')
- output_tarfile = self.tmp_path('logs.tgz')
-
- date = datetime.utcnow().date().strftime('%Y-%m-%d')
- date_logdir = 'cloud-init-logs-{0}'.format(date)
-
- version_out = '/usr/bin/cloud-init 18.2fake\n'
- expected_subp = {
- ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
- '0.7fake\n',
- ('cloud-init', '--version'): version_out,
- ('dmesg',): 'dmesg-out\n',
- ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
- ('tar', 'czvf', output_tarfile, date_logdir): ''
- }
-
- def fake_subp(cmd):
- cmd_tuple = tuple(cmd)
- if cmd_tuple not in expected_subp:
- raise AssertionError(
- 'Unexpected command provided to subp: {0}'.format(cmd))
- if cmd == ['tar', 'czvf', output_tarfile, date_logdir]:
- subp(cmd) # Pass through tar cmd so we can check output
- return expected_subp[cmd_tuple], ''
-
- fake_stderr = mock.MagicMock()
-
- wrap_and_call(
- 'cloudinit.cmd.devel.logs',
- {'subp': {'side_effect': fake_subp},
- 'sys.stderr': {'new': fake_stderr},
- 'CLOUDINIT_LOGS': {'new': [log1, log2]},
- 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}},
- logs.collect_logs, output_tarfile, include_userdata=False)
- # unpack the tarfile and check file contents
- subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root])
- out_logdir = self.tmp_path(date_logdir, self.new_root)
- self.assertFalse(
- os.path.exists(
- os.path.join(out_logdir, 'run', 'cloud-init',
- INSTANCE_JSON_SENSITIVE_FILE)),
- 'Unexpected file found: %s' % INSTANCE_JSON_SENSITIVE_FILE)
- self.assertEqual(
- '0.7fake\n',
- load_file(os.path.join(out_logdir, 'dpkg-version')))
- self.assertEqual(version_out,
- load_file(os.path.join(out_logdir, 'version')))
- self.assertEqual(
- 'cloud-init-log',
- load_file(os.path.join(out_logdir, 'cloud-init.log')))
- self.assertEqual(
- 'cloud-init-output-log',
- load_file(os.path.join(out_logdir, 'cloud-init-output.log')))
- self.assertEqual(
- 'dmesg-out\n',
- load_file(os.path.join(out_logdir, 'dmesg.txt')))
- self.assertEqual(
- 'journal-out\n',
- load_file(os.path.join(out_logdir, 'journal.txt')))
- self.assertEqual(
- 'results',
- load_file(
- os.path.join(out_logdir, 'run', 'cloud-init', 'results.json')))
- fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
-
- def test_collect_logs_includes_optional_userdata(self, m_getuid):
- """collect-logs include userdata when --include-userdata is set."""
- m_getuid.return_value = 0
- log1 = self.tmp_path('cloud-init.log', self.new_root)
- write_file(log1, 'cloud-init-log')
- log2 = self.tmp_path('cloud-init-output.log', self.new_root)
- write_file(log2, 'cloud-init-output-log')
- userdata = self.tmp_path('user-data.txt', self.new_root)
- write_file(userdata, 'user-data')
- ensure_dir(self.run_dir)
- write_file(self.tmp_path('results.json', self.run_dir), 'results')
- write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
- 'sensitive')
- output_tarfile = self.tmp_path('logs.tgz')
-
- date = datetime.utcnow().date().strftime('%Y-%m-%d')
- date_logdir = 'cloud-init-logs-{0}'.format(date)
-
- version_out = '/usr/bin/cloud-init 18.2fake\n'
- expected_subp = {
- ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
- '0.7fake',
- ('cloud-init', '--version'): version_out,
- ('dmesg',): 'dmesg-out\n',
- ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
- ('tar', 'czvf', output_tarfile, date_logdir): ''
- }
-
- def fake_subp(cmd):
- cmd_tuple = tuple(cmd)
- if cmd_tuple not in expected_subp:
- raise AssertionError(
- 'Unexpected command provided to subp: {0}'.format(cmd))
- if cmd == ['tar', 'czvf', output_tarfile, date_logdir]:
- subp(cmd) # Pass through tar cmd so we can check output
- return expected_subp[cmd_tuple], ''
-
- fake_stderr = mock.MagicMock()
-
- wrap_and_call(
- 'cloudinit.cmd.devel.logs',
- {'subp': {'side_effect': fake_subp},
- 'sys.stderr': {'new': fake_stderr},
- 'CLOUDINIT_LOGS': {'new': [log1, log2]},
- 'CLOUDINIT_RUN_DIR': {'new': self.run_dir},
- 'USER_DATA_FILE': {'new': userdata}},
- logs.collect_logs, output_tarfile, include_userdata=True)
- # unpack the tarfile and check file contents
- subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root])
- out_logdir = self.tmp_path(date_logdir, self.new_root)
- self.assertEqual(
- 'user-data',
- load_file(os.path.join(out_logdir, 'user-data.txt')))
- self.assertEqual(
- 'sensitive',
- load_file(os.path.join(out_logdir, 'run', 'cloud-init',
- INSTANCE_JSON_SENSITIVE_FILE)))
- fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py
deleted file mode 100644
index a7fcf2ce..00000000
--- a/cloudinit/cmd/devel/tests/test_render.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import os
-from io import StringIO
-
-from collections import namedtuple
-from cloudinit.cmd.devel import render
-from cloudinit.helpers import Paths
-from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
-from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja
-from cloudinit.util import ensure_dir, write_file
-
-
-class TestRender(CiTestCase):
-
- with_logs = True
-
- args = namedtuple('renderargs', 'user_data instance_data debug')
-
- def setUp(self):
- super(TestRender, self).setUp()
- self.tmp = self.tmp_dir()
-
- def test_handle_args_error_on_missing_user_data(self):
- """When user_data file path does not exist, log an error."""
- absent_file = self.tmp_path('user-data', dir=self.tmp)
- instance_data = self.tmp_path('instance-data', dir=self.tmp)
- write_file(instance_data, '{}')
- args = self.args(
- user_data=absent_file, instance_data=instance_data, debug=False)
- with mock.patch('sys.stderr', new_callable=StringIO):
- self.assertEqual(1, render.handle_args('anyname', args))
- self.assertIn(
- 'Missing user-data file: %s' % absent_file,
- self.logs.getvalue())
-
- def test_handle_args_error_on_missing_instance_data(self):
- """When instance_data file path does not exist, log an error."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- absent_file = self.tmp_path('instance-data', dir=self.tmp)
- args = self.args(
- user_data=user_data, instance_data=absent_file, debug=False)
- with mock.patch('sys.stderr', new_callable=StringIO):
- self.assertEqual(1, render.handle_args('anyname', args))
- self.assertIn(
- 'Missing instance-data.json file: %s' % absent_file,
- self.logs.getvalue())
-
- def test_handle_args_defaults_instance_data(self):
- """When no instance_data argument, default to configured run_dir."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- ensure_dir(run_dir)
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
- args = self.args(
- user_data=user_data, instance_data=None, debug=False)
- with mock.patch('sys.stderr', new_callable=StringIO):
- self.assertEqual(1, render.handle_args('anyname', args))
- json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
- self.assertIn(
- 'Missing instance-data.json file: %s' % json_file,
- self.logs.getvalue())
-
- def test_handle_args_root_fallback_from_sensitive_instance_data(self):
- """When root user defaults to sensitive.json."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- ensure_dir(run_dir)
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
- args = self.args(
- user_data=user_data, instance_data=None, debug=False)
- with mock.patch('sys.stderr', new_callable=StringIO):
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 0
- self.assertEqual(1, render.handle_args('anyname', args))
- json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
- json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
- self.assertIn(
- 'WARNING: Missing root-readable %s. Using redacted %s' % (
- json_sensitive, json_file), self.logs.getvalue())
- self.assertIn(
- 'ERROR: Missing instance-data.json file: %s' % json_file,
- self.logs.getvalue())
-
- def test_handle_args_root_uses_sensitive_instance_data(self):
- """When root user, and no instance-data arg, use sensitive.json."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- write_file(user_data, '##template: jinja\nrendering: {{ my_var }}')
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- ensure_dir(run_dir)
- json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
- write_file(json_sensitive, '{"my-var": "jinja worked"}')
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
- args = self.args(
- user_data=user_data, instance_data=None, debug=False)
- with mock.patch('sys.stderr', new_callable=StringIO):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 0
- self.assertEqual(0, render.handle_args('anyname', args))
- self.assertIn('rendering: jinja worked', m_stdout.getvalue())
-
- @skipUnlessJinja()
- def test_handle_args_renders_instance_data_vars_in_template(self):
- """If user_data file is a jinja template render instance-data vars."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- write_file(user_data, '##template: jinja\nrendering: {{ my_var }}')
- instance_data = self.tmp_path('instance-data', dir=self.tmp)
- write_file(instance_data, '{"my-var": "jinja worked"}')
- args = self.args(
- user_data=user_data, instance_data=instance_data, debug=True)
- with mock.patch('sys.stderr', new_callable=StringIO) as m_console_err:
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, render.handle_args('anyname', args))
- self.assertIn(
- 'DEBUG: Converted jinja variables\n{', self.logs.getvalue())
- self.assertIn(
- 'DEBUG: Converted jinja variables\n{', m_console_err.getvalue())
- self.assertEqual('rendering: jinja worked', m_stdout.getvalue())
-
- @skipUnlessJinja()
- def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self):
- """If user_data file has invalid jinja operations log warnings."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- write_file(user_data, '##template: jinja\nrendering: {{ my-var }}')
- instance_data = self.tmp_path('instance-data', dir=self.tmp)
- write_file(instance_data, '{"my-var": "jinja worked"}')
- args = self.args(
- user_data=user_data, instance_data=instance_data, debug=True)
- with mock.patch('sys.stderr', new_callable=StringIO):
- self.assertEqual(1, render.handle_args('anyname', args))
- self.assertIn(
- 'WARNING: Ignoring jinja template for %s: Undefined jinja'
- ' variable: "my-var". Jinja tried subtraction. Perhaps you meant'
- ' "my_var"?' % user_data,
- self.logs.getvalue())
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index a5446da7..c9be41b3 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
@@ -20,8 +19,10 @@ import time
import traceback
from cloudinit import patcher
-patcher.patch() # noqa
+patcher.patch_logging()
+
+from cloudinit.config.schema import validate_cloudconfig_schema
from cloudinit import log as logging
from cloudinit import netinfo
from cloudinit import signal_handler
@@ -35,8 +36,7 @@ from cloudinit import warnings
from cloudinit import reporting
from cloudinit.reporting import events
-from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
- CLOUD_CONFIG)
+from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG
from cloudinit import atomic_helper
@@ -45,8 +45,10 @@ from cloudinit import dhclient_hook
# Welcome message template
-WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at "
- "{timestamp}. Up {uptime} seconds.")
+WELCOME_MSG_TPL = (
+ "Cloud-init v. {version} running '{action}' at "
+ "{timestamp}. Up {uptime} seconds."
+)
# Module section template
MOD_SECTION_TPL = "cloud_%s_modules"
@@ -54,9 +56,9 @@ MOD_SECTION_TPL = "cloud_%s_modules"
# Frequency shortname to full name
# (so users don't have to remember the full name...)
FREQ_SHORT_NAMES = {
- 'instance': PER_INSTANCE,
- 'always': PER_ALWAYS,
- 'once': PER_ONCE,
+ "instance": PER_INSTANCE,
+ "always": PER_ALWAYS,
+ "once": PER_ONCE,
}
LOG = logging.getLogger()
@@ -64,21 +66,20 @@ LOG = logging.getLogger()
# Used for when a logger may not be active
# and we still want to print exceptions...
-def print_exc(msg=''):
+def print_exc(msg=""):
if msg:
sys.stderr.write("%s\n" % (msg))
- sys.stderr.write('-' * 60)
+ sys.stderr.write("-" * 60)
sys.stderr.write("\n")
traceback.print_exc(file=sys.stderr)
- sys.stderr.write('-' * 60)
+ sys.stderr.write("-" * 60)
sys.stderr.write("\n")
def welcome(action, msg=None):
if not msg:
msg = welcome_format(action)
- util.multi_log("%s\n" % (msg),
- console=False, stderr=True, log=LOG)
+ util.multi_log("%s\n" % (msg), console=False, stderr=True, log=LOG)
return msg
@@ -87,7 +88,8 @@ def welcome_format(action):
version=version.version_string(),
uptime=util.uptime(),
timestamp=util.time_rfc2822(),
- action=action)
+ action=action,
+ )
def extract_fns(args):
@@ -108,29 +110,31 @@ def run_module_section(mods, action_name, section):
(which_ran, failures) = mods.run_section(full_section_name)
total_attempted = len(which_ran) + len(failures)
if total_attempted == 0:
- msg = ("No '%s' modules to run"
- " under section '%s'") % (action_name, full_section_name)
+ msg = "No '%s' modules to run under section '%s'" % (
+ action_name,
+ full_section_name,
+ )
sys.stderr.write("%s\n" % (msg))
LOG.debug(msg)
return []
else:
- LOG.debug("Ran %s modules with %s failures",
- len(which_ran), len(failures))
+ LOG.debug(
+ "Ran %s modules with %s failures", len(which_ran), len(failures)
+ )
return failures
def apply_reporting_cfg(cfg):
- if cfg.get('reporting'):
- reporting.update_configuration(cfg.get('reporting'))
+ if cfg.get("reporting"):
+ reporting.update_configuration(cfg.get("reporting"))
-def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')):
+def parse_cmdline_url(cmdline, names=("cloud-config-url", "url")):
data = util.keyval_str_to_dict(cmdline)
for key in names:
if key in data:
return key, data[key]
- raise KeyError("No keys (%s) found in string '%s'" %
- (cmdline, names))
+ raise KeyError("No keys (%s) found in string '%s'" % (cmdline, names))
def attempt_cmdline_url(path, network=True, cmdline=None):
@@ -164,51 +168,96 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
if path_is_local and os.path.exists(path):
if network:
- m = ("file '%s' existed, possibly from local stage download"
- " of command line url '%s'. Not re-writing." % (path, url))
+ m = (
+ "file '%s' existed, possibly from local stage download"
+ " of command line url '%s'. Not re-writing." % (path, url)
+ )
level = logging.INFO
if path_is_local:
level = logging.DEBUG
else:
- m = ("file '%s' existed, possibly from previous boot download"
- " of command line url '%s'. Not re-writing." % (path, url))
+ m = (
+ "file '%s' existed, possibly from previous boot download"
+ " of command line url '%s'. Not re-writing." % (path, url)
+ )
level = logging.WARN
return (level, m)
- kwargs = {'url': url, 'timeout': 10, 'retries': 2}
+ kwargs = {"url": url, "timeout": 10, "retries": 2}
if network or path_is_local:
level = logging.WARN
- kwargs['sec_between'] = 1
+ kwargs["sec_between"] = 1
else:
level = logging.DEBUG
- kwargs['sec_between'] = .1
+ kwargs["sec_between"] = 0.1
data = None
- header = b'#cloud-config'
+ header = b"#cloud-config"
try:
resp = url_helper.read_file_or_url(**kwargs)
if resp.ok():
data = resp.contents
if not resp.contents.startswith(header):
- if cmdline_name == 'cloud-config-url':
+ if cmdline_name == "cloud-config-url":
level = logging.WARN
else:
level = logging.INFO
return (
level,
- "contents of '%s' did not start with %s" % (url, header))
+ "contents of '%s' did not start with %s" % (url, header),
+ )
else:
- return (level,
- "url '%s' returned code %s. Ignoring." % (url, resp.code))
+ return (
+ level,
+ "url '%s' returned code %s. Ignoring." % (url, resp.code),
+ )
except url_helper.UrlError as e:
return (level, "retrieving url '%s' failed: %s" % (url, e))
util.write_file(path, data, mode=0o600)
- return (logging.INFO,
- "wrote cloud-config data from %s='%s' to %s" %
- (cmdline_name, url, path))
+ return (
+ logging.INFO,
+ "wrote cloud-config data from %s='%s' to %s"
+ % (cmdline_name, url, path),
+ )
+
+
+def purge_cache_on_python_version_change(init):
+ """Purge the cache if python version changed on us.
+
+ There could be changes not represented in our cache (obj.pkl) after we
+ upgrade to a new version of Python, so at that point we clear the cache.
+ """
+ current_python_version = "%d.%d" % (
+ sys.version_info.major,
+ sys.version_info.minor,
+ )
+ python_version_path = os.path.join(
+ init.paths.get_cpath("data"), "python-version"
+ )
+ if os.path.exists(python_version_path):
+ cached_python_version = open(python_version_path).read()
+ # The Python version has changed out from under us, anything that was
+ # pickled previously is likely useless due to API changes.
+ if cached_python_version != current_python_version:
+ LOG.debug("Python version change detected. Purging cache")
+ init.purge_cache(True)
+ util.write_file(python_version_path, current_python_version)
+ else:
+ if os.path.exists(init.paths.get_ipath_cur("obj_pkl")):
+ LOG.info(
+ "Writing python-version file. "
+ "Cache compatibility status is currently unknown."
+ )
+ util.write_file(python_version_path, current_python_version)
+
+
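Why this matters: obj.pkl is a pickled datasource object, and pickles produced under one Python version may fail to load under another. A self-contained sketch of the marker-file check, using a temporary directory instead of cloud-init's real data path:

```python
# Sketch of the python-version marker check, assuming a temp dir
# stands in for /var/lib/cloud/data.
import os
import sys
import tempfile

data_dir = tempfile.mkdtemp()
marker = os.path.join(data_dir, "python-version")
current = "%d.%d" % (sys.version_info.major, sys.version_info.minor)

if os.path.exists(marker):
    with open(marker) as f:
        if f.read() != current:
            print("Python version changed; cache would be purged")
with open(marker, "w") as f:
    f.write(current)
```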
+def _should_bring_up_interfaces(init, args):
+ if util.get_cfg_option_bool(init.cfg, "disable_network_activation"):
+ return False
+ return not args.local
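The new disable_network_activation switch gives operators a global off-switch for interface bring-up. A sketch of the resulting decision table, mirroring the helper above (note the real code uses get_cfg_option_bool, which also coerces truthy strings; plain dict.get is a simplification here):

```python
# Mirrors _should_bring_up_interfaces: config wins, then stage.
def should_bring_up(cfg: dict, local: bool) -> bool:
    if cfg.get("disable_network_activation"):
        return False
    return not local

assert should_bring_up({}, local=True) is False    # local stage: never
assert should_bring_up({}, local=False) is True    # network stage: yes
assert should_bring_up({"disable_network_activation": True}, local=False) is False
```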
def main_init(name, args):
@@ -216,10 +265,14 @@ def main_init(name, args):
if args.local:
deps = [sources.DEP_FILESYSTEM]
- early_logs = [attempt_cmdline_url(
- path=os.path.join("%s.d" % CLOUD_CONFIG,
- "91_kernel_cmdline_url.cfg"),
- network=not args.local)]
+ early_logs = [
+ attempt_cmdline_url(
+ path=os.path.join(
+ "%s.d" % CLOUD_CONFIG, "91_kernel_cmdline_url.cfg"
+ ),
+ network=not args.local,
+ )
+ ]
# Cloud-init 'init' stage is broken up into the following sub-stages
# 1. Ensure that the init object fetches its config without errors
@@ -255,8 +308,9 @@ def main_init(name, args):
early_logs.append((logging.WARN, msg))
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(init.cfg)
apply_reporting_cfg(init.cfg)
@@ -277,14 +331,17 @@ def main_init(name, args):
util.logexc(LOG, "Failed to initialize, likely bad things to come!")
# Stage 4
path_helper = init.paths
+ purge_cache_on_python_version_change(init)
mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK
if mode == sources.DSMODE_NETWORK:
existing = "trust"
sys.stderr.write("%s\n" % (netinfo.debug_info()))
- LOG.debug(("Checking to see if files that we need already"
- " exist from a previous run that would allow us"
- " to stop early."))
+ LOG.debug(
+ "Checking to see if files that we need already"
+ " exist from a previous run that would allow us"
+ " to stop early."
+ )
# no-net is written by upstart cloud-init-nonet when network failed
# to come up
stop_files = [
@@ -296,15 +353,18 @@ def main_init(name, args):
existing_files.append(fn)
if existing_files:
- LOG.debug("[%s] Exiting. stop file %s existed",
- mode, existing_files)
+ LOG.debug(
+ "[%s] Exiting. stop file %s existed", mode, existing_files
+ )
return (None, [])
else:
- LOG.debug("Execution continuing, no previous run detected that"
- " would allow us to stop early.")
+ LOG.debug(
+ "Execution continuing, no previous run detected that"
+ " would allow us to stop early."
+ )
else:
existing = "check"
- mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False)
+ mcfg = util.get_cfg_option_bool(init.cfg, "manual_cache_clean", False)
if mcfg:
LOG.debug("manual cache clean set from config")
existing = "trust"
@@ -319,13 +379,17 @@ def main_init(name, args):
util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
# Stage 5
+ bring_up_interfaces = _should_bring_up_interfaces(init, args)
try:
init.fetch(existing=existing)
# if in network mode, and the datasource is local
# then work was done at that stage.
if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s in local mode",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] Exiting. datasource %s in local mode",
+ mode,
+ init.datasource,
+ )
return (None, [])
except sources.DataSourceNotFoundException:
# In the case of 'cloud-init init' without '--local' it is a bit
@@ -335,56 +399,71 @@ def main_init(name, args):
if mode == sources.DSMODE_LOCAL:
LOG.debug("No local datasource found")
else:
- util.logexc(LOG, ("No instance datasource found!"
- " Likely bad things to come!"))
+ util.logexc(
+ LOG, "No instance datasource found! Likely bad things to come!"
+ )
if not args.force:
- init.apply_network_config(bring_up=not args.local)
+ init.apply_network_config(bring_up=bring_up_interfaces)
LOG.debug("[%s] Exiting without datasource", mode)
if mode == sources.DSMODE_LOCAL:
return (None, [])
else:
return (None, ["No instance datasource found."])
else:
- LOG.debug("[%s] barreling on in force mode without datasource",
- mode)
+ LOG.debug(
+ "[%s] barreling on in force mode without datasource", mode
+ )
_maybe_persist_instance_data(init)
# Stage 6
iid = init.instancify()
- LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
- mode, name, iid, init.is_new_instance())
+ LOG.debug(
+ "[%s] %s will now be targeting instance id: %s. new=%s",
+ mode,
+ name,
+ iid,
+ init.is_new_instance(),
+ )
if mode == sources.DSMODE_LOCAL:
# Before network comes up, set any configured hostname to allow
# dhcp clients to advertise this hostname to any DDNS services
# LP: #1746455.
- _maybe_set_hostname(init, stage='local', retry_stage='network')
- init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))
+ _maybe_set_hostname(init, stage="local", retry_stage="network")
+ init.apply_network_config(bring_up=bring_up_interfaces)
if mode == sources.DSMODE_LOCAL:
if init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s not in local mode.",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] Exiting. datasource %s not in local mode.",
+ mode,
+ init.datasource,
+ )
return (init.datasource, [])
else:
- LOG.debug("[%s] %s is in local mode, will apply init modules now.",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] %s is in local mode, will apply init modules now.",
+ mode,
+ init.datasource,
+ )
# Give the datasource a chance to use network resources.
# This is used on Azure to communicate with the fabric over network.
init.setup_datasource()
# update fully realizes user-data (pulling in #include if necessary)
init.update()
- _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config')
+ _maybe_set_hostname(init, stage="init-net", retry_stage="modules:config")
# Stage 7
try:
# Attempt to consume the data per instance.
# This may run user-data handlers and/or perform
# url downloads and such as needed.
- (ran, _results) = init.cloudify().run('consume_data',
- init.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ (ran, _results) = init.cloudify().run(
+ "consume_data",
+ init.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
if not ran:
# Just consume anything that is set to run per-always
# if nothing ran in the per-instance code
@@ -396,6 +475,12 @@ def main_init(name, args):
util.logexc(LOG, "Consuming user data failed!")
return (init.datasource, ["Consuming user data failed!"])
+ # Validate user-data adheres to schema definition
+ if os.path.exists(init.paths.get_ipath_cur("userdata_raw")):
+ validate_cloudconfig_schema(config=init.cfg, strict=False)
+ else:
+ LOG.debug("Skipping user-data validation. No user-data found.")
+
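With strict=False the validation above only logs warnings, so a malformed cloud-config cannot abort boot. A hedged sketch of strict mode, assuming this era's cloudinit.config.schema API and assuming the aggregate schema flags the example key:

```python
# Assumed example: strict=True raises SchemaValidationError instead
# of logging; the invalid 'ntp' value here is invented to trip it.
from cloudinit.config.schema import (
    SchemaValidationError,
    validate_cloudconfig_schema,
)

try:
    validate_cloudconfig_schema(config={"ntp": {"pools": 123}}, strict=True)
except SchemaValidationError as e:
    print("schema errors:", e)
```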
apply_reporting_cfg(init.cfg)
# Stage 8 - re-read and apply relevant cloud-config to include user-data
@@ -406,8 +491,7 @@ def main_init(name, args):
errfmt_orig = errfmt
(outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
if outfmt_orig != outfmt or errfmt_orig != errfmt:
- LOG.warning("Stdout, stderr changing to (%s, %s)",
- outfmt, errfmt)
+ LOG.warning("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
(outfmt, errfmt) = util.fixup_output(mods.cfg, name)
except Exception:
util.logexc(LOG, "Failed to re-adjust output redirection!")
@@ -423,11 +507,11 @@ def main_init(name, args):
def di_report_warn(datasource, cfg):
- if 'di_report' not in cfg:
+ if "di_report" not in cfg:
LOG.debug("no di_report found in config.")
return
- dicfg = cfg['di_report']
+ dicfg = cfg["di_report"]
if dicfg is None:
# ds-identify may write 'di_report:\n #comment\n'
# which reads as {'di_report': None}
@@ -438,7 +522,7 @@ def di_report_warn(datasource, cfg):
LOG.warning("di_report config not a dictionary: %s", dicfg)
return
- dslist = dicfg.get('datasource_list')
+ dslist = dicfg.get("datasource_list")
if dslist is None:
LOG.warning("no 'datasource_list' found in di_report.")
return
@@ -450,18 +534,26 @@ def di_report_warn(datasource, cfg):
# where Name is the thing that shows up in datasource_list.
modname = datasource.__module__.rpartition(".")[2]
if modname.startswith(sources.DS_PREFIX):
- modname = modname[len(sources.DS_PREFIX):]
+ modname = modname[len(sources.DS_PREFIX) :]
else:
- LOG.warning("Datasource '%s' came from unexpected module '%s'.",
- datasource, modname)
+ LOG.warning(
+ "Datasource '%s' came from unexpected module '%s'.",
+ datasource,
+ modname,
+ )
if modname in dslist:
- LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s",
- datasource, modname, dslist)
+ LOG.debug(
+ "used datasource '%s' from '%s' was in di_report's list: %s",
+ datasource,
+ modname,
+ dslist,
+ )
return
- warnings.show_warning('dsid_missing_source', cfg,
- source=modname, dslist=str(dslist))
+ warnings.show_warning(
+ "dsid_missing_source", cfg, source=modname, dslist=str(dslist)
+ )
def main_modules(action_name, args):
@@ -485,8 +577,10 @@ def main_modules(action_name, args):
init.fetch(existing="trust")
except sources.DataSourceNotFoundException:
# There was no datasource found, there's nothing to do
- msg = ('Can not apply stage %s, no datasource found! Likely bad '
- 'things to come!' % name)
+ msg = (
+ "Can not apply stage %s, no datasource found! Likely bad "
+ "things to come!" % name
+ )
util.logexc(LOG, msg)
print_exc(msg)
if not args.force:
@@ -503,8 +597,9 @@ def main_modules(action_name, args):
util.logexc(LOG, "Failed to setup output redirection!")
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(mods.cfg)
apply_reporting_cfg(init.cfg)
@@ -537,10 +632,12 @@ def main_single(name, args):
# There was no datasource found,
# that might be bad (or ok) depending on
# the module being run (so continue on)
- util.logexc(LOG, ("Failed to fetch your datasource,"
- " likely bad things to come!"))
- print_exc(("Failed to fetch your datasource,"
- " likely bad things to come!"))
+ util.logexc(
+ LOG, "Failed to fetch your datasource, likely bad things to come!"
+ )
+ print_exc(
+ "Failed to fetch your datasource, likely bad things to come!"
+ )
if not args.force:
return 1
_maybe_persist_instance_data(init)
@@ -562,8 +659,9 @@ def main_single(name, args):
util.logexc(LOG, "Failed to setup output redirection!")
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(mods.cfg)
apply_reporting_cfg(init.cfg)
@@ -572,9 +670,7 @@ def main_single(name, args):
welcome(name, msg=w_msg)
# Stage 5
- (which_ran, failures) = mods.run_single(mod_name,
- mod_args,
- mod_freq)
+ (which_ran, failures) = mods.run_single(mod_name, mod_args, mod_freq)
if failures:
LOG.warning("Ran %s but it failed!", mod_name)
return 1
@@ -597,7 +693,12 @@ def status_wrapper(name, args, data_d=None, link_d=None):
result_path = os.path.join(data_d, "result.json")
result_link = os.path.join(link_d, "result.json")
- util.ensure_dirs((data_d, link_d,))
+ util.ensure_dirs(
+ (
+ data_d,
+ link_d,
+ )
+ )
(_name, functor) = args.action
@@ -611,14 +712,20 @@ def status_wrapper(name, args, data_d=None, link_d=None):
else:
raise ValueError("unknown name: %s" % name)
- modes = ('init', 'init-local', 'modules-init', 'modules-config',
- 'modules-final')
+ modes = (
+ "init",
+ "init-local",
+ "modules-init",
+ "modules-config",
+ "modules-final",
+ )
if mode not in modes:
raise ValueError(
- "Invalid cloud init mode specified '{0}'".format(mode))
+ "Invalid cloud init mode specified '{0}'".format(mode)
+ )
status = None
- if mode == 'init-local':
+ if mode == "init-local":
for f in (status_link, result_link, status_path, result_path):
util.del_file(f)
else:
@@ -628,45 +735,46 @@ def status_wrapper(name, args, data_d=None, link_d=None):
pass
nullstatus = {
- 'errors': [],
- 'start': None,
- 'finished': None,
+ "errors": [],
+ "start": None,
+ "finished": None,
}
if status is None:
- status = {'v1': {}}
- status['v1']['datasource'] = None
+ status = {"v1": {}}
+ status["v1"]["datasource"] = None
for m in modes:
- if m not in status['v1']:
- status['v1'][m] = nullstatus.copy()
+ if m not in status["v1"]:
+ status["v1"][m] = nullstatus.copy()
- v1 = status['v1']
- v1['stage'] = mode
- v1[mode]['start'] = time.time()
+ v1 = status["v1"]
+ v1["stage"] = mode
+ v1[mode]["start"] = time.time()
atomic_helper.write_json(status_path, status)
- util.sym_link(os.path.relpath(status_path, link_d), status_link,
- force=True)
+ util.sym_link(
+ os.path.relpath(status_path, link_d), status_link, force=True
+ )
try:
ret = functor(name, args)
- if mode in ('init', 'init-local'):
+ if mode in ("init", "init-local"):
(datasource, errors) = ret
if datasource is not None:
- v1['datasource'] = str(datasource)
+ v1["datasource"] = str(datasource)
else:
errors = ret
- v1[mode]['errors'] = [str(e) for e in errors]
+ v1[mode]["errors"] = [str(e) for e in errors]
except Exception as e:
util.logexc(LOG, "failed stage %s", mode)
print_exc("failed run of stage %s" % mode)
- v1[mode]['errors'] = [str(e)]
+ v1[mode]["errors"] = [str(e)]
- v1[mode]['finished'] = time.time()
- v1['stage'] = None
+ v1[mode]["finished"] = time.time()
+ v1["stage"] = None
atomic_helper.write_json(status_path, status)
@@ -674,23 +782,26 @@ def status_wrapper(name, args, data_d=None, link_d=None):
# write the 'finished' file
errors = []
for m in modes:
- if v1[m]['errors']:
- errors.extend(v1[m].get('errors', []))
+ if v1[m]["errors"]:
+ errors.extend(v1[m].get("errors", []))
atomic_helper.write_json(
- result_path, {'v1': {'datasource': v1['datasource'],
- 'errors': errors}})
- util.sym_link(os.path.relpath(result_path, link_d), result_link,
- force=True)
+ result_path,
+ {"v1": {"datasource": v1["datasource"], "errors": errors}},
+ )
+ util.sym_link(
+ os.path.relpath(result_path, link_d), result_link, force=True
+ )
- return len(v1[mode]['errors'])
+ return len(v1[mode]["errors"])
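For reference, the shapes status_wrapper persists (all field values invented): status.json carries per-stage timing and errors keyed by the mode names above, while result.json is a one-shot summary written when the final stage finishes.

```python
# Illustrative contents only; timestamps and datasource are made up.
status = {
    "v1": {
        "datasource": "DataSourceNoCloud",
        "stage": None,  # set to the mode name while a stage runs
        "init-local": {"start": 1.0, "finished": 2.0, "errors": []},
        "init": {"start": 3.0, "finished": 4.0, "errors": []},
        "modules-init": {"start": None, "finished": None, "errors": []},
        "modules-config": {"start": None, "finished": None, "errors": []},
        "modules-final": {"start": None, "finished": None, "errors": []},
    }
}
result = {"v1": {"datasource": "DataSourceNoCloud", "errors": []}}
```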
def _maybe_persist_instance_data(init):
"""Write instance-data.json file if absent and datasource is restored."""
if init.ds_restored:
instance_data_file = os.path.join(
- init.paths.run_dir, sources.INSTANCE_JSON_FILE)
+ init.paths.run_dir, sources.INSTANCE_JSON_FILE
+ )
if not os.path.exists(instance_data_file):
init.datasource.persist_instance_data()
@@ -703,18 +814,23 @@ def _maybe_set_hostname(init, stage, retry_stage):
"""
cloud = init.cloudify()
(hostname, _fqdn) = util.get_hostname_fqdn(
- init.cfg, cloud, metadata_only=True)
+ init.cfg, cloud, metadata_only=True
+ )
if hostname: # meta-data or user-data hostname content
try:
- cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None)
+ cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None)
except cc_set_hostname.SetHostnameError as e:
LOG.debug(
- 'Failed setting hostname in %s stage. Will'
- ' retry in %s stage. Error: %s.', stage, retry_stage, str(e))
+ "Failed setting hostname in %s stage. Will"
+ " retry in %s stage. Error: %s.",
+ stage,
+ retry_stage,
+ str(e),
+ )
def main_features(name, args):
- sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n')
+ sys.stdout.write("\n".join(sorted(version.FEATURES)) + "\n")
def main(sysv_args=None):
@@ -724,129 +840,182 @@ def main(sysv_args=None):
sysv_args = sysv_args[1:]
# Top level args
- parser.add_argument('--version', '-v', action='version',
- version='%(prog)s ' + (version.version_string()))
- parser.add_argument('--file', '-f', action='append',
- dest='files',
- help=('additional yaml configuration'
- ' files to use'),
- type=argparse.FileType('rb'))
- parser.add_argument('--debug', '-d', action='store_true',
- help=('show additional pre-action'
- ' logging (default: %(default)s)'),
- default=False)
- parser.add_argument('--force', action='store_true',
- help=('force running even if no datasource is'
- ' found (use at your own risk)'),
- dest='force',
- default=False)
+ parser.add_argument(
+ "--version",
+ "-v",
+ action="version",
+ version="%(prog)s " + (version.version_string()),
+ )
+ parser.add_argument(
+ "--file",
+ "-f",
+ action="append",
+ dest="files",
+ help="additional yaml configuration files to use",
+ type=argparse.FileType("rb"),
+ )
+ parser.add_argument(
+ "--debug",
+ "-d",
+ action="store_true",
+ help="show additional pre-action logging (default: %(default)s)",
+ default=False,
+ )
+ parser.add_argument(
+ "--force",
+ action="store_true",
+ help=(
+ "force running even if no datasource is"
+ " found (use at your own risk)"
+ ),
+ dest="force",
+ default=False,
+ )
parser.set_defaults(reporter=None)
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
# Each action and its sub-options (if any)
- parser_init = subparsers.add_parser('init',
- help=('initializes cloud-init and'
- ' performs initial modules'))
- parser_init.add_argument("--local", '-l', action='store_true',
- help="start in local mode (default: %(default)s)",
- default=False)
+ parser_init = subparsers.add_parser(
+ "init", help="initializes cloud-init and performs initial modules"
+ )
+ parser_init.add_argument(
+ "--local",
+ "-l",
+ action="store_true",
+ help="start in local mode (default: %(default)s)",
+ default=False,
+ )
# This is used so that we can know which action is selected +
# the functor to use to run this subcommand
- parser_init.set_defaults(action=('init', main_init))
+ parser_init.set_defaults(action=("init", main_init))
# These settings are used for the 'config' and 'final' stages
- parser_mod = subparsers.add_parser('modules',
- help=('activates modules using '
- 'a given configuration key'))
- parser_mod.add_argument("--mode", '-m', action='store',
- help=("module configuration name "
- "to use (default: %(default)s)"),
- default='config',
- choices=('init', 'config', 'final'))
- parser_mod.set_defaults(action=('modules', main_modules))
+ parser_mod = subparsers.add_parser(
+ "modules", help="activates modules using a given configuration key"
+ )
+ parser_mod.add_argument(
+ "--mode",
+ "-m",
+ action="store",
+ help="module configuration name to use (default: %(default)s)",
+ default="config",
+ choices=("init", "config", "final"),
+ )
+ parser_mod.set_defaults(action=("modules", main_modules))
# This subcommand allows you to run a single module
- parser_single = subparsers.add_parser('single',
- help=('run a single module '))
- parser_single.add_argument("--name", '-n', action="store",
- help="module name to run",
- required=True)
- parser_single.add_argument("--frequency", action="store",
- help=("frequency of the module"),
- required=False,
- choices=list(FREQ_SHORT_NAMES.keys()))
- parser_single.add_argument("--report", action="store_true",
- help="enable reporting",
- required=False)
- parser_single.add_argument("module_args", nargs="*",
- metavar='argument',
- help=('any additional arguments to'
- ' pass to this module'))
- parser_single.set_defaults(action=('single', main_single))
+ parser_single = subparsers.add_parser(
+ "single", help="run a single module "
+ )
+ parser_single.add_argument(
+ "--name",
+ "-n",
+ action="store",
+ help="module name to run",
+ required=True,
+ )
+ parser_single.add_argument(
+ "--frequency",
+ action="store",
+ help="frequency of the module",
+ required=False,
+ choices=list(FREQ_SHORT_NAMES.keys()),
+ )
+ parser_single.add_argument(
+ "--report",
+ action="store_true",
+ help="enable reporting",
+ required=False,
+ )
+ parser_single.add_argument(
+ "module_args",
+ nargs="*",
+ metavar="argument",
+ help="any additional arguments to pass to this module",
+ )
+ parser_single.set_defaults(action=("single", main_single))
parser_query = subparsers.add_parser(
- 'query',
- help='Query standardized instance metadata from the command line.')
+ "query",
+ help="Query standardized instance metadata from the command line.",
+ )
parser_dhclient = subparsers.add_parser(
- dhclient_hook.NAME, help=dhclient_hook.__doc__)
+ dhclient_hook.NAME, help=dhclient_hook.__doc__
+ )
dhclient_hook.get_parser(parser_dhclient)
- parser_features = subparsers.add_parser('features',
- help=('list defined features'))
- parser_features.set_defaults(action=('features', main_features))
+ parser_features = subparsers.add_parser(
+ "features", help="list defined features"
+ )
+ parser_features.set_defaults(action=("features", main_features))
parser_analyze = subparsers.add_parser(
- 'analyze', help='Devel tool: Analyze cloud-init logs and data')
+ "analyze", help="Devel tool: Analyze cloud-init logs and data"
+ )
- parser_devel = subparsers.add_parser(
- 'devel', help='Run development tools')
+ parser_devel = subparsers.add_parser("devel", help="Run development tools")
parser_collect_logs = subparsers.add_parser(
- 'collect-logs', help='Collect and tar all cloud-init debug info')
+ "collect-logs", help="Collect and tar all cloud-init debug info"
+ )
parser_clean = subparsers.add_parser(
- 'clean', help='Remove logs and artifacts so cloud-init can re-run.')
+ "clean", help="Remove logs and artifacts so cloud-init can re-run."
+ )
parser_status = subparsers.add_parser(
- 'status', help='Report cloud-init status or wait on completion.')
+ "status", help="Report cloud-init status or wait on completion."
+ )
if sysv_args:
# Only load subparsers if subcommand is specified to avoid load cost
- if sysv_args[0] == 'analyze':
+ if sysv_args[0] == "analyze":
from cloudinit.analyze.__main__ import get_parser as analyze_parser
+
# Construct analyze subcommand parser
analyze_parser(parser_analyze)
- elif sysv_args[0] == 'devel':
+ elif sysv_args[0] == "devel":
from cloudinit.cmd.devel.parser import get_parser as devel_parser
+
# Construct devel subcommand parser
devel_parser(parser_devel)
- elif sysv_args[0] == 'collect-logs':
+ elif sysv_args[0] == "collect-logs":
from cloudinit.cmd.devel.logs import (
- get_parser as logs_parser, handle_collect_logs_args)
+ get_parser as logs_parser,
+ handle_collect_logs_args,
+ )
+
logs_parser(parser_collect_logs)
parser_collect_logs.set_defaults(
- action=('collect-logs', handle_collect_logs_args))
- elif sysv_args[0] == 'clean':
+ action=("collect-logs", handle_collect_logs_args)
+ )
+ elif sysv_args[0] == "clean":
from cloudinit.cmd.clean import (
- get_parser as clean_parser, handle_clean_args)
+ get_parser as clean_parser,
+ handle_clean_args,
+ )
+
clean_parser(parser_clean)
- parser_clean.set_defaults(
- action=('clean', handle_clean_args))
- elif sysv_args[0] == 'query':
+ parser_clean.set_defaults(action=("clean", handle_clean_args))
+ elif sysv_args[0] == "query":
from cloudinit.cmd.query import (
- get_parser as query_parser, handle_args as handle_query_args)
+ get_parser as query_parser,
+ handle_args as handle_query_args,
+ )
+
query_parser(parser_query)
- parser_query.set_defaults(
- action=('render', handle_query_args))
- elif sysv_args[0] == 'status':
+ parser_query.set_defaults(action=("render", handle_query_args))
+ elif sysv_args[0] == "status":
from cloudinit.cmd.status import (
- get_parser as status_parser, handle_status_args)
+ get_parser as status_parser,
+ handle_status_args,
+ )
+
status_parser(parser_status)
- parser_status.set_defaults(
- action=('status', handle_status_args))
+ parser_status.set_defaults(action=("status", handle_status_args))
args = parser.parse_args(args=sysv_args)
@@ -870,14 +1039,20 @@ def main(sysv_args=None):
if args.local:
rname, rdesc = ("init-local", "searching for local datasources")
else:
- rname, rdesc = ("init-network",
- "searching for network datasources")
+ rname, rdesc = (
+ "init-network",
+ "searching for network datasources",
+ )
elif name == "modules":
- rname, rdesc = ("modules-%s" % args.mode,
- "running modules for %s" % args.mode)
+ rname, rdesc = (
+ "modules-%s" % args.mode,
+ "running modules for %s" % args.mode,
+ )
elif name == "single":
- rname, rdesc = ("single/%s" % args.name,
- "running single module %s" % args.name)
+ rname, rdesc = (
+ "single/%s" % args.name,
+ "running single module %s" % args.name,
+ )
report_on = args.report
else:
rname = name
@@ -885,19 +1060,24 @@ def main(sysv_args=None):
report_on = False
args.reporter = events.ReportEventStack(
- rname, rdesc, reporting_enabled=report_on)
+ rname, rdesc, reporting_enabled=report_on
+ )
with args.reporter:
retval = util.log_time(
- logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
- get_uptime=True, func=functor, args=(name, args))
+ logfunc=LOG.debug,
+ msg="cloud-init mode '%s'" % name,
+ get_uptime=True,
+ func=functor,
+ args=(name, args),
+ )
reporting.flush_events()
return retval
-if __name__ == '__main__':
- if 'TZ' not in os.environ:
- os.environ['TZ'] = ":/etc/localtime"
+if __name__ == "__main__":
+ if "TZ" not in os.environ:
+ os.environ["TZ"] = ":/etc/localtime"
return_value = main(sys.argv)
if return_value:
sys.exit(return_value)
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index 07db9552..46f17699 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -14,19 +14,24 @@ output; if this fails, they are treated as binary.
"""
import argparse
-from errno import EACCES
import os
import sys
+from errno import EACCES
-from cloudinit.handlers.jinja_template import (
- convert_jinja_instance_data, render_jinja_payload)
+from cloudinit import log, util
from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
-from cloudinit import log
+from cloudinit.handlers.jinja_template import (
+ convert_jinja_instance_data,
+ get_jinja_variable_alias,
+ render_jinja_payload,
+)
from cloudinit.sources import (
- INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE)
-from cloudinit import util
+ INSTANCE_JSON_FILE,
+ INSTANCE_JSON_SENSITIVE_FILE,
+ REDACT_SENSITIVE_VALUE,
+)
-NAME = 'query'
+NAME = "query"
LOG = log.getLogger(NAME)
@@ -40,41 +45,79 @@ def get_parser(parser=None):
@returns: ArgumentParser with proper argument configuration.
"""
if not parser:
- parser = argparse.ArgumentParser(
- prog=NAME, description=__doc__)
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
- '-d', '--debug', action='store_true', default=False,
- help='Add verbose messages during template render')
+ "-d",
+ "--debug",
+ action="store_true",
+ default=False,
+ help="Add verbose messages during template render",
+ )
parser.add_argument(
- '-i', '--instance-data', type=str,
- help=('Path to instance-data.json file. Default is /run/cloud-init/%s'
- % INSTANCE_JSON_FILE))
+ "-i",
+ "--instance-data",
+ type=str,
+ help="Path to instance-data.json file. Default is /run/cloud-init/%s"
+ % INSTANCE_JSON_FILE,
+ )
parser.add_argument(
- '-l', '--list-keys', action='store_true', default=False,
- help=('List query keys available at the provided instance-data'
- ' <varname>.'))
+ "-l",
+ "--list-keys",
+ action="store_true",
+ default=False,
+ help=(
+ "List query keys available at the provided instance-data"
+ " <varname>."
+ ),
+ )
parser.add_argument(
- '-u', '--user-data', type=str,
- help=('Path to user-data file. Default is'
- ' /var/lib/cloud/instance/user-data.txt'))
+ "-u",
+ "--user-data",
+ type=str,
+ help=(
+ "Path to user-data file. Default is"
+ " /var/lib/cloud/instance/user-data.txt"
+ ),
+ )
parser.add_argument(
- '-v', '--vendor-data', type=str,
- help=('Path to vendor-data file. Default is'
- ' /var/lib/cloud/instance/vendor-data.txt'))
+ "-v",
+ "--vendor-data",
+ type=str,
+ help=(
+ "Path to vendor-data file. Default is"
+ " /var/lib/cloud/instance/vendor-data.txt"
+ ),
+ )
parser.add_argument(
- 'varname', type=str, nargs='?',
- help=('A dot-delimited specific variable to query from'
- ' instance-data. For example: v1.local_hostname. If the'
- ' value is not JSON serializable, it will be base64-encoded and'
- ' will contain the prefix "ci-b64:". '))
+ "varname",
+ type=str,
+ nargs="?",
+ help=(
+ "A dot-delimited specific variable to query from"
+ " instance-data. For example: v1.local_hostname. If the"
+ " value is not JSON serializable, it will be base64-encoded and"
+ ' will contain the prefix "ci-b64:". '
+ ),
+ )
parser.add_argument(
- '-a', '--all', action='store_true', default=False, dest='dump_all',
- help='Dump all available instance-data')
+ "-a",
+ "--all",
+ action="store_true",
+ default=False,
+ dest="dump_all",
+ help="Dump all available instance-data",
+ )
parser.add_argument(
- '-f', '--format', type=str, dest='format',
- help=('Optionally specify a custom output format string. Any'
- ' instance-data variable can be specified between double-curly'
- ' braces. For example -f "{{ v2.cloud_name }}"'))
+ "-f",
+ "--format",
+ type=str,
+ dest="format",
+ help=(
+ "Optionally specify a custom output format string. Any"
+ " instance-data variable can be specified between double-curly"
+ ' braces. For example -f "{{ v2.cloud_name }}"'
+ ),
+ )
return parser
@@ -88,50 +131,54 @@ def load_userdata(ud_file_path):
"""
bdata = util.load_file(ud_file_path, decode=False)
try:
- return bdata.decode('utf-8')
+ return bdata.decode("utf-8")
except UnicodeDecodeError:
return util.decomp_gzip(bdata, quiet=False, decode=True)
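The UnicodeDecodeError fallback exists because user-data may arrive gzip-compressed. A self-contained sketch of the same decode-or-decompress logic, using the stdlib gzip module in place of util.decomp_gzip:

```python
# Stand-alone version of the decode/decompress fallback.
import gzip

def load_userdata_sketch(blob: bytes) -> str:
    try:
        return blob.decode("utf-8")
    except UnicodeDecodeError:
        # gzip magic bytes (0x1f 0x8b) are not valid UTF-8, so
        # compressed input always lands here.
        return gzip.decompress(blob).decode("utf-8")

packed = gzip.compress(b"#cloud-config\nhostname: example\n")
print(load_userdata_sketch(packed))  # -> the original YAML text
```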
-def handle_args(name, args):
- """Handle calls to 'cloud-init query' as a subcommand."""
- paths = None
- addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
- if not any([args.list_keys, args.varname, args.format, args.dump_all]):
- LOG.error(
- 'Expected one of the options: --all, --format,'
- ' --list-keys or varname')
- get_parser().print_help()
- return 1
+def _read_instance_data(instance_data, user_data, vendor_data) -> dict:
+ """Return a dict of merged instance-data, vendordata and userdata.
+
+ The dict will contain supplemental userdata and vendordata keys sourced
+ from default user-data and vendor-data files.
+ Non-root users will have redacted INSTANCE_JSON_FILE content and redacted
+ vendordata and userdata values.
+
+ :raise: IOError/OSError on absence of instance-data.json file or invalid
+ access perms.
+ """
+ paths = None
uid = os.getuid()
- if not all([args.instance_data, args.user_data, args.vendor_data]):
+ if not all([instance_data, user_data, vendor_data]):
paths = read_cfg_paths()
- if args.instance_data:
- instance_data_fn = args.instance_data
+ if instance_data:
+ instance_data_fn = instance_data
else:
redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
if uid == 0:
sensitive_data_fn = os.path.join(
- paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
if os.path.exists(sensitive_data_fn):
instance_data_fn = sensitive_data_fn
else:
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- sensitive_data_fn, redacted_data_fn
+ "Missing root-readable %s. Using redacted %s instead.",
+ sensitive_data_fn,
+ redacted_data_fn,
)
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
- if args.user_data:
- user_data_fn = args.user_data
+ if user_data:
+ user_data_fn = user_data
else:
- user_data_fn = os.path.join(paths.instance_link, 'user-data.txt')
- if args.vendor_data:
- vendor_data_fn = args.vendor_data
+ user_data_fn = os.path.join(paths.instance_link, "user-data.txt")
+ if vendor_data:
+ vendor_data_fn = vendor_data
else:
- vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt')
+ vendor_data_fn = os.path.join(paths.instance_link, "vendor-data.txt")
try:
instance_json = util.load_file(instance_data_fn)
@@ -139,44 +186,123 @@ def handle_args(name, args):
if e.errno == EACCES:
LOG.error("No read permission on '%s'. Try sudo", instance_data_fn)
else:
- LOG.error('Missing instance-data file: %s', instance_data_fn)
- return 1
+ LOG.error("Missing instance-data file: %s", instance_data_fn)
+ raise
instance_data = util.load_json(instance_json)
if uid != 0:
- instance_data['userdata'] = (
- '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn))
- instance_data['vendordata'] = (
- '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn))
+ instance_data["userdata"] = "<%s> file:%s" % (
+ REDACT_SENSITIVE_VALUE,
+ user_data_fn,
+ )
+ instance_data["vendordata"] = "<%s> file:%s" % (
+ REDACT_SENSITIVE_VALUE,
+ vendor_data_fn,
+ )
else:
- instance_data['userdata'] = load_userdata(user_data_fn)
- instance_data['vendordata'] = load_userdata(vendor_data_fn)
+ instance_data["userdata"] = load_userdata(user_data_fn)
+ instance_data["vendordata"] = load_userdata(vendor_data_fn)
+ return instance_data
+
+
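The merged dict always carries userdata and vendordata keys; for non-root callers they are replaced by a redaction marker pointing at the backing file. A hypothetical non-root result, with every value invented for illustration:

```python
# Hypothetical merged result of _read_instance_data for a non-root
# caller; cloud name, hostname, and paths are invented examples.
instance_data = {
    "v1": {"cloud_name": "nocloud", "local_hostname": "myhost"},
    "userdata": "<redacted for non-root user>"
    " file:/var/lib/cloud/instance/user-data.txt",
    "vendordata": "<redacted for non-root user>"
    " file:/var/lib/cloud/instance/vendor-data.txt",
}
```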
+def _find_instance_data_leaf_by_varname_path(
+ jinja_vars_without_aliases: dict,
+ jinja_vars_with_aliases: dict,
+ varname: str,
+ list_keys: bool,
+):
+ """Return the value of the dot-delimited varname path in instance-data
+
+ Split a dot-delimited jinja variable name path into components, walk the
+ path components into the instance_data and look up a matching jinja
+ variable name or cloud-init's underscore-delimited key aliases.
+
+ :raises: ValueError when varname represents an invalid key name or path or
+ if list_keys is provided but varname isn't a dict object.
+ """
+ walked_key_path = ""
+ response = jinja_vars_without_aliases
+ for key_path_part in varname.split("."):
+ try:
+ # Walk the key path using the complete aliases dict, but the
+ # response should only contain jinja_vars_without_aliases values
+ jinja_vars_with_aliases = jinja_vars_with_aliases[key_path_part]
+ except KeyError as e:
+ if walked_key_path:
+ msg = "instance-data '{key_path}' has no '{leaf}'".format(
+ leaf=key_path_part, key_path=walked_key_path
+ )
+ else:
+ msg = "Undefined instance-data key '{}'".format(varname)
+ raise ValueError(msg) from e
+ if key_path_part in response:
+ response = response[key_path_part]
+ else: # We are an underscore_delimited key alias
+ for key in response:
+ if get_jinja_variable_alias(key) == key_path_part:
+ response = response[key]
+ break
+ if walked_key_path:
+ walked_key_path += "."
+ walked_key_path += key_path_part
+ return response
+
+
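Aliases aside, the walk is a plain dict descent over the dot-separated path. A toy version without alias handling:

```python
# Toy dict walk for 'v1.local_hostname'; alias resolution omitted
# and the instance-data values are invented.
jinja_vars = {"v1": {"local_hostname": "myhost", "region": "dfw"}}
response = jinja_vars
for part in "v1.local_hostname".split("."):
    if part not in response:
        raise ValueError("Undefined instance-data key '%s'" % part)
    response = response[part]
print(response)  # -> myhost
```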
+def handle_args(name, args):
+ """Handle calls to 'cloud-init query' as a subcommand."""
+ addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
+ if not any([args.list_keys, args.varname, args.format, args.dump_all]):
+ LOG.error(
+ "Expected one of the options: --all, --format,"
+ " --list-keys or varname"
+ )
+ get_parser().print_help()
+ return 1
+ try:
+ instance_data = _read_instance_data(
+ args.instance_data, args.user_data, args.vendor_data
+ )
+ except (IOError, OSError):
+ return 1
if args.format:
- payload = '## template: jinja\n{fmt}'.format(fmt=args.format)
+ payload = "## template: jinja\n{fmt}".format(fmt=args.format)
rendered_payload = render_jinja_payload(
- payload=payload, payload_fn='query commandline',
+ payload=payload,
+ payload_fn="query commandline",
instance_data=instance_data,
- debug=True if args.debug else False)
+ debug=True if args.debug else False,
+ )
if rendered_payload:
print(rendered_payload)
return 0
return 1
+ # If not rendering a structured format above, query output will be either:
+ # - JSON dump of all instance-data/jinja variables
+ # - JSON dump of a value at a dict path into the instance-data dict.
+ # - a list of keys for a specific dict path into the instance-data dict.
response = convert_jinja_instance_data(instance_data)
if args.varname:
+ jinja_vars_with_aliases = convert_jinja_instance_data(
+ instance_data, include_key_aliases=True
+ )
try:
- for var in args.varname.split('.'):
- response = response[var]
- except KeyError:
- LOG.error('Undefined instance-data key %s', args.varname)
+ response = _find_instance_data_leaf_by_varname_path(
+ jinja_vars_without_aliases=response,
+ jinja_vars_with_aliases=jinja_vars_with_aliases,
+ varname=args.varname,
+ list_keys=args.list_keys,
+ )
+ except (KeyError, ValueError) as e:
+ LOG.error(e)
+ return 1
+ if args.list_keys:
+ if not isinstance(response, dict):
+ LOG.error(
+ "--list-keys provided but '%s' is not a dict", args.varname
+ )
return 1
- if args.list_keys:
- if not isinstance(response, dict):
- LOG.error("--list-keys provided but '%s' is not a dict", var)
- return 1
- response = '\n'.join(sorted(response.keys()))
- elif args.list_keys:
- response = '\n'.join(sorted(response.keys()))
+ response = "\n".join(sorted(response.keys()))
if not isinstance(response, str):
response = util.json_dumps(response)
print(response)
@@ -189,7 +315,7 @@ def main():
sys.exit(handle_args(NAME, parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py
index ea79a85b..5176549d 100644
--- a/cloudinit/cmd/status.py
+++ b/cloudinit/cmd/status.py
@@ -5,22 +5,28 @@
"""Define 'status' utility and handler as part of cloud-init commandline."""
import argparse
+import enum
import os
import sys
-from time import gmtime, strftime, sleep
+from time import gmtime, sleep, strftime
from cloudinit.distros import uses_systemd
from cloudinit.stages import Init
from cloudinit.util import get_cmdline, load_file, load_json
-CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled'
+CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled"
+
# customer-visible status messages
-STATUS_ENABLED_NOT_RUN = 'not run'
-STATUS_RUNNING = 'running'
-STATUS_DONE = 'done'
-STATUS_ERROR = 'error'
-STATUS_DISABLED = 'disabled'
+@enum.unique
+class UXAppStatus(enum.Enum):
+ """Enum representing user-visible cloud-init application status."""
+
+ NOT_RUN = "not run"
+ RUNNING = "running"
+ DONE = "done"
+ ERROR = "error"
+ DISABLED = "disabled"
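Call sites compare enum members but print .value, so the strings shown to users ("done", "error", ...) are unchanged from the old STATUS_* constants. A small sketch of that split:

```python
# Members for comparison, .value for display; re-declares a reduced
# copy of the enum above so the snippet runs on its own.
import enum

@enum.unique
class UXAppStatus(enum.Enum):
    NOT_RUN = "not run"
    RUNNING = "running"
    DONE = "done"
    ERROR = "error"

status = UXAppStatus.DONE
print("status: {0}".format(status.value))  # -> status: done
exit_code = 1 if status is UXAppStatus.ERROR else 0
```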
def get_parser(parser=None):
@@ -34,15 +40,25 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='status',
- description='Report run status of cloud init')
+ prog="status", description="Report run status of cloud init"
+ )
parser.add_argument(
- '-l', '--long', action='store_true', default=False,
- help=('Report long format of statuses including run stage name and'
- ' error messages'))
+ "-l",
+ "--long",
+ action="store_true",
+ default=False,
+ help=(
+ "Report long format of statuses including run stage name and"
+ " error messages"
+ ),
+ )
parser.add_argument(
- '-w', '--wait', action='store_true', default=False,
- help='Block waiting on cloud-init to complete')
+ "-w",
+ "--wait",
+ action="store_true",
+ default=False,
+ help="Block waiting on cloud-init to complete",
+ )
return parser
@@ -51,23 +67,20 @@ def handle_status_args(name, args):
# Read configured paths
init = Init(ds_deps=[])
init.read_cfg()
-
- status, status_detail, time = _get_status_details(init.paths)
+ status, status_detail, time = get_status_details(init.paths)
if args.wait:
- while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING):
- sys.stdout.write('.')
+ while status in (UXAppStatus.NOT_RUN, UXAppStatus.RUNNING):
+ sys.stdout.write(".")
sys.stdout.flush()
- status, status_detail, time = _get_status_details(init.paths)
+ status, status_detail, time = get_status_details(init.paths)
sleep(0.25)
- sys.stdout.write('\n')
+ sys.stdout.write("\n")
+ print("status: {0}".format(status.value))
if args.long:
- print('status: {0}'.format(status))
if time:
- print('time: {0}'.format(time))
- print('detail:\n{0}'.format(status_detail))
- else:
- print('status: {0}'.format(status))
- return 1 if status == STATUS_ERROR else 0
+ print("time: {0}".format(time))
+ print("detail:\n{0}".format(status_detail))
+ return 1 if status == UXAppStatus.ERROR else 0
def _is_cloudinit_disabled(disable_file, paths):
@@ -81,83 +94,91 @@ def _is_cloudinit_disabled(disable_file, paths):
is_disabled = False
cmdline_parts = get_cmdline().split()
if not uses_systemd():
- reason = 'Cloud-init enabled on sysvinit'
- elif 'cloud-init=enabled' in cmdline_parts:
- reason = 'Cloud-init enabled by kernel command line cloud-init=enabled'
+ reason = "Cloud-init enabled on sysvinit"
+ elif "cloud-init=enabled" in cmdline_parts:
+ reason = "Cloud-init enabled by kernel command line cloud-init=enabled"
elif os.path.exists(disable_file):
is_disabled = True
- reason = 'Cloud-init disabled by {0}'.format(disable_file)
- elif 'cloud-init=disabled' in cmdline_parts:
+ reason = "Cloud-init disabled by {0}".format(disable_file)
+ elif "cloud-init=disabled" in cmdline_parts:
is_disabled = True
- reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled'
- elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')):
+ reason = "Cloud-init disabled by kernel parameter cloud-init=disabled"
+ elif os.path.exists(os.path.join(paths.run_dir, "disabled")):
is_disabled = True
- reason = 'Cloud-init disabled by cloud-init-generator'
+ reason = "Cloud-init disabled by cloud-init-generator"
+ elif os.path.exists(os.path.join(paths.run_dir, "enabled")):
+ reason = "Cloud-init enabled by systemd cloud-init-generator"
else:
- reason = 'Cloud-init enabled by systemd cloud-init-generator'
+ reason = "Systemd generator may not have run yet."
return (is_disabled, reason)
-def _get_status_details(paths):
+def get_status_details(paths=None):
"""Return a 3-tuple of status, status_details and time of last event.
@param paths: An initialized cloudinit.helpers.paths object.
Values are obtained from parsing paths.run_dir/status.json.
"""
- status = STATUS_ENABLED_NOT_RUN
- status_detail = ''
+ if not paths:
+ init = Init(ds_deps=[])
+ init.read_cfg()
+ paths = init.paths
+
+ status = UXAppStatus.NOT_RUN
+ status_detail = ""
status_v1 = {}
- status_file = os.path.join(paths.run_dir, 'status.json')
- result_file = os.path.join(paths.run_dir, 'result.json')
+ status_file = os.path.join(paths.run_dir, "status.json")
+ result_file = os.path.join(paths.run_dir, "result.json")
(is_disabled, reason) = _is_cloudinit_disabled(
- CLOUDINIT_DISABLED_FILE, paths)
+ CLOUDINIT_DISABLED_FILE, paths
+ )
if is_disabled:
- status = STATUS_DISABLED
+ status = UXAppStatus.DISABLED
status_detail = reason
if os.path.exists(status_file):
if not os.path.exists(result_file):
- status = STATUS_RUNNING
- status_v1 = load_json(load_file(status_file)).get('v1', {})
+ status = UXAppStatus.RUNNING
+ status_v1 = load_json(load_file(status_file)).get("v1", {})
errors = []
latest_event = 0
for key, value in sorted(status_v1.items()):
- if key == 'stage':
+ if key == "stage":
if value:
- status = STATUS_RUNNING
- status_detail = 'Running in stage: {0}'.format(value)
- elif key == 'datasource':
+ status = UXAppStatus.RUNNING
+ status_detail = "Running in stage: {0}".format(value)
+ elif key == "datasource":
status_detail = value
elif isinstance(value, dict):
- errors.extend(value.get('errors', []))
- start = value.get('start') or 0
- finished = value.get('finished') or 0
+ errors.extend(value.get("errors", []))
+ start = value.get("start") or 0
+ finished = value.get("finished") or 0
if finished == 0 and start != 0:
- status = STATUS_RUNNING
+ status = UXAppStatus.RUNNING
event_time = max(start, finished)
if event_time > latest_event:
latest_event = event_time
if errors:
- status = STATUS_ERROR
- status_detail = '\n'.join(errors)
- elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0:
- status = STATUS_DONE
+ status = UXAppStatus.ERROR
+ status_detail = "\n".join(errors)
+ elif status == UXAppStatus.NOT_RUN and latest_event > 0:
+ status = UXAppStatus.DONE
if latest_event:
- time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event))
+ time = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event))
else:
- time = ''
+ time = ""
return status, status_detail, time
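Dropping the leading underscore makes this a small public API; with no paths argument it builds an Init itself and reads the default run directory. A hedged usage sketch, only meaningful on a host where cloud-init has actually run:

```python
# Assumes a cloud-init host with /run/cloud-init/status.json present.
from cloudinit.cmd.status import UXAppStatus, get_status_details

status, detail, last_event = get_status_details()
if status is UXAppStatus.ERROR:
    print("cloud-init failed:", detail)
else:
    print("cloud-init is", status.value, "as of", last_event)
```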
def main():
"""Tool to report status of cloud-init."""
parser = get_parser()
- sys.exit(handle_status_args('status', parser.parse_args()))
+ sys.exit(handle_status_args("status", parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/tests/__init__.py b/cloudinit/cmd/tests/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/cloudinit/cmd/tests/__init__.py
+++ /dev/null
diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py
deleted file mode 100644
index a848a810..00000000
--- a/cloudinit/cmd/tests/test_clean.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.cmd import clean
-from cloudinit.util import ensure_dir, sym_link, write_file
-from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock
-from collections import namedtuple
-import os
-from io import StringIO
-
-mypaths = namedtuple('MyPaths', 'cloud_dir')
-
-
-class TestClean(CiTestCase):
-
- def setUp(self):
- super(TestClean, self).setUp()
- self.new_root = self.tmp_dir()
- self.artifact_dir = self.tmp_path('artifacts', self.new_root)
- self.log1 = self.tmp_path('cloud-init.log', self.new_root)
- self.log2 = self.tmp_path('cloud-init-output.log', self.new_root)
-
- class FakeInit(object):
- cfg = {'def_log_file': self.log1,
- 'output': {'all': '|tee -a {0}'.format(self.log2)}}
- # Ensure cloud_dir has a trailing slash, to match real behaviour
- paths = mypaths(cloud_dir='{}/'.format(self.artifact_dir))
-
- def __init__(self, ds_deps):
- pass
-
- def read_cfg(self):
- pass
-
- self.init_class = FakeInit
-
- def test_remove_artifacts_removes_logs(self):
- """remove_artifacts removes logs when remove_logs is True."""
- write_file(self.log1, 'cloud-init-log')
- write_file(self.log2, 'cloud-init-output-log')
-
- self.assertFalse(
- os.path.exists(self.artifact_dir), 'Unexpected artifacts dir')
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=True)
- self.assertFalse(os.path.exists(self.log1), 'Unexpected file')
- self.assertFalse(os.path.exists(self.log2), 'Unexpected file')
- self.assertEqual(0, retcode)
-
- def test_remove_artifacts_preserves_logs(self):
- """remove_artifacts leaves logs when remove_logs is False."""
- write_file(self.log1, 'cloud-init-log')
- write_file(self.log2, 'cloud-init-output-log')
-
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=False)
- self.assertTrue(os.path.exists(self.log1), 'Missing expected file')
- self.assertTrue(os.path.exists(self.log2), 'Missing expected file')
- self.assertEqual(0, retcode)
-
- def test_remove_artifacts_removes_unlinks_symlinks(self):
- """remove_artifacts cleans artifacts dir unlinking any symlinks."""
- dir1 = os.path.join(self.artifact_dir, 'dir1')
- ensure_dir(dir1)
- symlink = os.path.join(self.artifact_dir, 'mylink')
- sym_link(dir1, symlink)
-
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=False)
- self.assertEqual(0, retcode)
- for path in (dir1, symlink):
- self.assertFalse(
- os.path.exists(path),
- 'Unexpected {0} dir'.format(path))
-
- def test_remove_artifacts_removes_artifacts_skipping_seed(self):
- """remove_artifacts cleans artifacts dir with exception of seed dir."""
- dirs = [
- self.artifact_dir,
- os.path.join(self.artifact_dir, 'seed'),
- os.path.join(self.artifact_dir, 'dir1'),
- os.path.join(self.artifact_dir, 'dir2')]
- for _dir in dirs:
- ensure_dir(_dir)
-
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=False)
- self.assertEqual(0, retcode)
- for expected_dir in dirs[:2]:
- self.assertTrue(
- os.path.exists(expected_dir),
- 'Missing {0} dir'.format(expected_dir))
- for deleted_dir in dirs[2:]:
- self.assertFalse(
- os.path.exists(deleted_dir),
- 'Unexpected {0} dir'.format(deleted_dir))
-
- def test_remove_artifacts_removes_artifacts_removes_seed(self):
- """remove_artifacts removes seed dir when remove_seed is True."""
- dirs = [
- self.artifact_dir,
- os.path.join(self.artifact_dir, 'seed'),
- os.path.join(self.artifact_dir, 'dir1'),
- os.path.join(self.artifact_dir, 'dir2')]
- for _dir in dirs:
- ensure_dir(_dir)
-
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=False, remove_seed=True)
- self.assertEqual(0, retcode)
- self.assertTrue(
- os.path.exists(self.artifact_dir), 'Missing artifact dir')
- for deleted_dir in dirs[1:]:
- self.assertFalse(
- os.path.exists(deleted_dir),
- 'Unexpected {0} dir'.format(deleted_dir))
-
- def test_remove_artifacts_returns_one_on_errors(self):
- """remove_artifacts returns non-zero on failure and prints an error."""
- ensure_dir(self.artifact_dir)
- ensure_dir(os.path.join(self.artifact_dir, 'dir1'))
-
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'del_dir': {'side_effect': OSError('oops')},
- 'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=False)
- self.assertEqual(1, retcode)
- self.assertEqual(
- 'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir,
- m_stderr.getvalue())
-
- def test_handle_clean_args_reboots(self):
- """handle_clean_args_reboots when reboot arg is provided."""
-
- called_cmds = []
-
- def fake_subp(cmd, capture):
- called_cmds.append((cmd, capture))
- return '', ''
-
- myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot')
- cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True)
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'subp': {'side_effect': fake_subp},
- 'Init': {'side_effect': self.init_class}},
- clean.handle_clean_args, name='does not matter', args=cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual(
- [(['shutdown', '-r', 'now'], False)], called_cmds)
-
- def test_status_main(self):
- '''clean.main can be run as a standalone script.'''
- write_file(self.log1, 'cloud-init-log')
- with self.assertRaises(SystemExit) as context_manager:
- wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class},
- 'sys.argv': {'new': ['clean', '--logs']}},
- clean.main)
-
- self.assertEqual(0, context_manager.exception.code)
- self.assertFalse(
- os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1))
-
-
-# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/cmd/tests/test_cloud_id.py b/cloudinit/cmd/tests/test_cloud_id.py
deleted file mode 100644
index 3f3727fd..00000000
--- a/cloudinit/cmd/tests/test_cloud_id.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests for cloud-id command line utility."""
-
-from cloudinit import util
-from collections import namedtuple
-from io import StringIO
-
-from cloudinit.cmd import cloud_id
-
-from cloudinit.tests.helpers import CiTestCase, mock
-
-
-class TestCloudId(CiTestCase):
-
- args = namedtuple('cloudidargs', ('instance_data json long'))
-
- def setUp(self):
- super(TestCloudId, self).setUp()
- self.tmp = self.tmp_dir()
- self.instance_data = self.tmp_path('instance-data.json', dir=self.tmp)
-
- def test_cloud_id_arg_parser_defaults(self):
- """Validate the argument defaults when not provided by the end-user."""
- cmd = ['cloud-id']
- with mock.patch('sys.argv', cmd):
- args = cloud_id.get_parser().parse_args()
- self.assertEqual(
- '/run/cloud-init/instance-data.json',
- args.instance_data)
- self.assertEqual(False, args.long)
- self.assertEqual(False, args.json)
-
- def test_cloud_id_arg_parse_overrides(self):
- """Override argument defaults by specifying values for each param."""
- util.write_file(self.instance_data, '{}')
- cmd = ['cloud-id', '--instance-data', self.instance_data, '--long',
- '--json']
- with mock.patch('sys.argv', cmd):
- args = cloud_id.get_parser().parse_args()
- self.assertEqual(self.instance_data, args.instance_data)
- self.assertEqual(True, args.long)
- self.assertEqual(True, args.json)
-
- def test_cloud_id_missing_instance_data_json(self):
- """Exit error when the provided instance-data.json does not exist."""
- cmd = ['cloud-id', '--instance-data', self.instance_data]
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(1, context_manager.exception.code)
- self.assertIn(
- "ERROR: File not found '%s'" % self.instance_data,
- m_stderr.getvalue())
-
- def test_cloud_id_non_json_instance_data(self):
- """Exit error when the provided instance-data.json is not json."""
- cmd = ['cloud-id', '--instance-data', self.instance_data]
- util.write_file(self.instance_data, '{')
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(1, context_manager.exception.code)
- self.assertIn(
- "ERROR: File '%s' is not valid json." % self.instance_data,
- m_stderr.getvalue())
-
- def test_cloud_id_from_cloud_name_in_instance_data(self):
- """Report canonical cloud-id from cloud_name in instance-data."""
- util.write_file(
- self.instance_data,
- '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}')
- cmd = ['cloud-id', '--instance-data', self.instance_data]
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(0, context_manager.exception.code)
- self.assertEqual("mycloud\n", m_stdout.getvalue())
-
- def test_cloud_id_long_name_from_instance_data(self):
- """Report long cloud-id format from cloud_name and region."""
- util.write_file(
- self.instance_data,
- '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}')
- cmd = ['cloud-id', '--instance-data', self.instance_data, '--long']
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(0, context_manager.exception.code)
- self.assertEqual("mycloud\tsomereg\n", m_stdout.getvalue())
-
- def test_cloud_id_lookup_from_instance_data_region(self):
- """Report discovered canonical cloud_id when region lookup matches."""
- util.write_file(
- self.instance_data,
- '{"v1": {"cloud_name": "aws", "region": "cn-north-1",'
- ' "platform": "ec2"}}')
- cmd = ['cloud-id', '--instance-data', self.instance_data, '--long']
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(0, context_manager.exception.code)
- self.assertEqual("aws-china\tcn-north-1\n", m_stdout.getvalue())
-
- def test_cloud_id_lookup_json_instance_data_adds_cloud_id_to_json(self):
- """Report v1 instance-data content with cloud_id when --json set."""
- util.write_file(
- self.instance_data,
- '{"v1": {"cloud_name": "unknown", "region": "dfw",'
- ' "platform": "openstack", "public_ssh_keys": []}}')
- expected = util.json_dumps({
- 'cloud_id': 'openstack', 'cloud_name': 'unknown',
- 'platform': 'openstack', 'public_ssh_keys': [], 'region': 'dfw'})
- cmd = ['cloud-id', '--instance-data', self.instance_data, '--json']
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(0, context_manager.exception.code)
- self.assertEqual(expected + '\n', m_stdout.getvalue())
-
-# vi: ts=4 expandtab
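The cases above pin down the cloud-id CLI contract: a bare invocation prints the canonical cloud id, --long appends the region, and known region lookups (ec2 in cn-north-1) resolve to partition-specific ids such as aws-china. A minimal pytest-style sketch of exercising that same contract, assuming only the cloud_id.main() entry point and the v1 instance-data layout shown above:

import json
from unittest import mock

import pytest

from cloudinit.cmd import cloud_id


def test_cloud_id_long_output(tmpdir, capsys):
    # Write a v1 instance-data payload like the ones used above.
    instance_data = tmpdir.join("instance-data.json")
    instance_data.write(
        json.dumps({"v1": {"cloud_name": "mycloud", "region": "somereg"}})
    )
    cmd = ["cloud-id", "--instance-data", instance_data.strpath, "--long"]
    with mock.patch("sys.argv", cmd):
        with pytest.raises(SystemExit) as exc_info:
            cloud_id.main()  # exits 0, printing "<cloud>\t<region>"
    assert 0 == exc_info.value.code
    assert "mycloud\tsomereg\n" == capsys.readouterr().out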
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
deleted file mode 100644
index 585b3b0e..00000000
--- a/cloudinit/cmd/tests/test_main.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from collections import namedtuple
-import copy
-import os
-from io import StringIO
-
-from cloudinit.cmd import main
-from cloudinit import safeyaml
-from cloudinit.util import (
- ensure_dir, load_file, write_file)
-from cloudinit.tests.helpers import (
- FilesystemMockingTestCase, wrap_and_call)
-
-mypaths = namedtuple('MyPaths', 'run_dir')
-myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand')
-
-
-class TestMain(FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestMain, self).setUp()
- self.new_root = self.tmp_dir()
- self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root)
- os.makedirs(self.cloud_dir)
- self.replicateTestRoot('simple_ubuntu', self.new_root)
- self.cfg = {
- 'datasource_list': ['None'],
- 'runcmd': ['ls /etc'], # test ALL_DISTROS
- 'system_info': {'paths': {'cloud_dir': self.cloud_dir,
- 'run_dir': self.new_root}},
- 'write_files': [
- {
- 'path': '/etc/blah.ini',
- 'content': 'blah',
- 'permissions': 0o755,
- },
- ],
- 'cloud_init_modules': ['write-files', 'runcmd'],
- }
- cloud_cfg = safeyaml.dumps(self.cfg)
- ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
- self.cloud_cfg_file = os.path.join(
- self.new_root, 'etc', 'cloud', 'cloud.cfg')
- write_file(self.cloud_cfg_file, cloud_cfg)
- self.patchOS(self.new_root)
- self.patchUtils(self.new_root)
- self.stderr = StringIO()
- self.patchStdoutAndStderr(stderr=self.stderr)
-
- def test_main_init_run_net_stops_on_file_no_net(self):
- """When no-net file is present, main_init does not process modules."""
- stop_file = os.path.join(self.cloud_dir, 'data', 'no-net') # stop file
- write_file(stop_file, '')
- cmdargs = myargs(
- debug=False, files=None, force=False, local=False, reporter=None,
- subcommand='init')
- (_item1, item2) = wrap_and_call(
- 'cloudinit.cmd.main',
- {'util.close_stdin': True,
- 'netinfo.debug_info': 'my net debug info',
- 'util.fixup_output': ('outfmt', 'errfmt')},
- main.main_init, 'init', cmdargs)
- # We should not run write_files module
- self.assertFalse(
- os.path.exists(os.path.join(self.new_root, 'etc/blah.ini')),
- 'Unexpected run of write_files module produced blah.ini')
- self.assertEqual([], item2)
- # Instancify is not called
- instance_id_path = 'var/lib/cloud/data/instance-id'
- self.assertFalse(
- os.path.exists(os.path.join(self.new_root, instance_id_path)),
- 'Unexpected call to datasource.instancify produced instance-id')
- expected_logs = [
- "Exiting. stop file ['{stop_file}'] existed\n".format(
- stop_file=stop_file),
- 'my net debug info' # netinfo.debug_info
- ]
- for log in expected_logs:
- self.assertIn(log, self.stderr.getvalue())
-
- def test_main_init_run_net_runs_modules(self):
- """Modules like write_files are run in 'net' mode."""
- cmdargs = myargs(
- debug=False, files=None, force=False, local=False, reporter=None,
- subcommand='init')
- (_item1, item2) = wrap_and_call(
- 'cloudinit.cmd.main',
- {'util.close_stdin': True,
- 'netinfo.debug_info': 'my net debug info',
- 'util.fixup_output': ('outfmt', 'errfmt')},
- main.main_init, 'init', cmdargs)
- self.assertEqual([], item2)
- # Instancify is called
- instance_id_path = 'var/lib/cloud/data/instance-id'
- self.assertEqual(
- 'iid-datasource-none\n',
- load_file(
- os.path.join(self.new_root, instance_id_path)))
- # modules are run (including write_files)
- self.assertEqual(
- 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini')))
- expected_logs = [
- 'network config is disabled by fallback', # apply_network_config
- 'my net debug info', # netinfo.debug_info
- 'no previous run detected'
- ]
- for log in expected_logs:
- self.assertIn(log, self.stderr.getvalue())
-
- def test_main_init_run_net_calls_set_hostname_when_metadata_present(self):
- """When local-hostname metadata is present, call cc_set_hostname."""
- self.cfg['datasource'] = {
- 'None': {'metadata': {'local-hostname': 'md-hostname'}}}
- cloud_cfg = safeyaml.dumps(self.cfg)
- write_file(self.cloud_cfg_file, cloud_cfg)
- cmdargs = myargs(
- debug=False, files=None, force=False, local=False, reporter=None,
- subcommand='init')
-
- def set_hostname(name, cfg, cloud, log, args):
- self.assertEqual('set-hostname', name)
- updated_cfg = copy.deepcopy(self.cfg)
- updated_cfg.update(
- {'def_log_file': '/var/log/cloud-init.log',
- 'log_cfgs': [],
- 'syslog_fix_perms': [
- 'syslog:adm', 'root:adm', 'root:wheel', 'root:root'
- ],
- 'vendor_data': {'enabled': True, 'prefix': []}})
- updated_cfg.pop('system_info')
-
- self.assertEqual(updated_cfg, cfg)
- self.assertEqual(main.LOG, log)
- self.assertIsNone(args)
-
- (_item1, item2) = wrap_and_call(
- 'cloudinit.cmd.main',
- {'util.close_stdin': True,
- 'netinfo.debug_info': 'my net debug info',
- 'cc_set_hostname.handle': {'side_effect': set_hostname},
- 'util.fixup_output': ('outfmt', 'errfmt')},
- main.main_init, 'init', cmdargs)
- self.assertEqual([], item2)
- # Instancify is called
- instance_id_path = 'var/lib/cloud/data/instance-id'
- self.assertEqual(
- 'iid-datasource-none\n',
- load_file(
- os.path.join(self.new_root, instance_id_path)))
- # modules are run (including write_files)
- self.assertEqual(
- 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini')))
- expected_logs = [
- 'network config is disabled by fallback', # apply_network_config
- 'my net debug info', # netinfo.debug_info
- 'no previous run detected'
- ]
- for log in expected_logs:
- self.assertIn(log, self.stderr.getvalue())
-
-# vi: ts=4 expandtab
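Every test_main case above drives main.main_init through wrap_and_call, a test helper that patches several names under one module prefix for the duration of a single call, treating a plain value as a mock return_value and a dict as mock.patch keyword arguments. A rough standard-library-only sketch of that pattern (illustrative; the real helper in cloudinit.tests.helpers may differ in details):

from contextlib import ExitStack
from unittest import mock


def wrap_and_call_sketch(prefix, mocks, func, *args, **kwargs):
    # Patch each '<prefix>.<name>' entry, call func, then unpatch.
    with ExitStack() as stack:
        for name, value in mocks.items():
            target = "%s.%s" % (prefix, name)
            if isinstance(value, dict):
                # e.g. {'side_effect': ...} passed through verbatim
                stack.enter_context(mock.patch(target, **value))
            else:
                # plain values become the patched callable's result
                stack.enter_context(mock.patch(target, return_value=value))
        return func(*args, **kwargs)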
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
deleted file mode 100644
index c258d321..00000000
--- a/cloudinit/cmd/tests/test_query.py
+++ /dev/null
@@ -1,341 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import errno
-import gzip
-from io import BytesIO
-import json
-from textwrap import dedent
-
-import pytest
-
-from collections import namedtuple
-from cloudinit.cmd import query
-from cloudinit.helpers import Paths
-from cloudinit.sources import (
- REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE)
-from cloudinit.tests.helpers import mock
-
-from cloudinit.util import b64e, write_file
-
-
-def _gzip_data(data):
- with BytesIO() as iobuf:
- with gzip.GzipFile(mode="wb", fileobj=iobuf) as gzfp:
- gzfp.write(data)
- return iobuf.getvalue()
-
-
-@mock.patch("cloudinit.cmd.query.addLogHandlerCLI", lambda *args: "")
-class TestQuery:
-
- args = namedtuple(
- 'queryargs',
- ('debug dump_all format instance_data list_keys user_data vendor_data'
- ' varname'))
-
- def _setup_paths(self, tmpdir, ud_val=None, vd_val=None):
- """Write userdata and vendordata into a tmpdir.
-
- Return:
- 4-tuple : (paths, run_dir_path, userdata_path, vendordata_path)
- """
- if ud_val:
- user_data = tmpdir.join('user-data')
- write_file(user_data.strpath, ud_val)
- else:
- user_data = None
- if vd_val:
- vendor_data = tmpdir.join('vendor-data')
- write_file(vendor_data.strpath, vd_val)
- else:
- vendor_data = None
- run_dir = tmpdir.join('run_dir')
- run_dir.ensure_dir()
- return (
- Paths({'run_dir': run_dir.strpath}),
- run_dir,
- user_data,
- vendor_data
- )
-
- def test_handle_args_error_on_missing_param(self, caplog, capsys):
- """Error when missing required parameters and print usage."""
- args = self.args(
- debug=False, dump_all=False, format=None, instance_data=None,
- list_keys=False, user_data=None, vendor_data=None, varname=None)
- with mock.patch(
- "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
- ) as m_cli_log:
- assert 1 == query.handle_args('anyname', args)
- expected_error = (
- 'Expected one of the options: --all, --format, --list-keys'
- ' or varname\n')
- assert expected_error in caplog.text
- out, _err = capsys.readouterr()
- assert 'usage: query' in out
- assert 1 == m_cli_log.call_count
-
- def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir):
- """When instance_data file path does not exist, log an error."""
- absent_fn = tmpdir.join('absent')
- args = self.args(
- debug=False, dump_all=True, format=None,
- instance_data=absent_fn.strpath,
- list_keys=False, user_data='ud', vendor_data='vd', varname=None)
- assert 1 == query.handle_args('anyname', args)
-
- msg = 'Missing instance-data file: %s' % absent_fn
- assert msg in caplog.text
-
- def test_handle_args_error_when_no_read_permission_instance_data(
- self, caplog, tmpdir
- ):
- """When instance_data file is unreadable, log an error."""
- noread_fn = tmpdir.join('unreadable')
- noread_fn.write('thou shall not pass')
- args = self.args(
- debug=False, dump_all=True, format=None,
- instance_data=noread_fn.strpath,
- list_keys=False, user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('cloudinit.cmd.query.util.load_file') as m_load:
- m_load.side_effect = OSError(errno.EACCES, 'Not allowed')
- assert 1 == query.handle_args('anyname', args)
- msg = "No read permission on '%s'. Try sudo" % noread_fn
- assert msg in caplog.text
-
- def test_handle_args_defaults_instance_data(self, caplog, tmpdir):
- """When no instance_data argument, default to configured run_dir."""
- args = self.args(
- debug=False, dump_all=True, format=None, instance_data=None,
- list_keys=False, user_data=None, vendor_data=None, varname=None)
- paths, run_dir, _, _ = self._setup_paths(tmpdir)
- with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
- m_paths.return_value = paths
- assert 1 == query.handle_args('anyname', args)
- json_file = run_dir.join(INSTANCE_JSON_FILE)
- msg = 'Missing instance-data file: %s' % json_file.strpath
- assert msg in caplog.text
-
- def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir):
- """When no instance_data argument, root falls back to redacted json."""
- args = self.args(
- debug=False, dump_all=True, format=None, instance_data=None,
- list_keys=False, user_data=None, vendor_data=None, varname=None)
- paths, run_dir, _, _ = self._setup_paths(tmpdir)
- with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
- m_paths.return_value = paths
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 0
- assert 1 == query.handle_args('anyname', args)
- json_file = run_dir.join(INSTANCE_JSON_FILE)
- sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
- msg = (
- 'Missing root-readable %s. Using redacted %s instead.' %
- (
- sensitive_file.strpath, json_file.strpath
- )
- )
- assert msg in caplog.text
-
- @pytest.mark.parametrize(
- 'ud_src,ud_expected,vd_src,vd_expected',
- (
- ('hi mom', 'hi mom', 'hi pops', 'hi pops'),
- ('ud'.encode('utf-8'), 'ud', 'vd'.encode('utf-8'), 'vd'),
- (_gzip_data(b'ud'), 'ud', _gzip_data(b'vd'), 'vd'),
- (_gzip_data('ud'.encode('utf-8')), 'ud', _gzip_data(b'vd'), 'vd'),
- )
- )
- def test_handle_args_root_processes_user_data(
- self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir
- ):
- """Support reading multiple user-data file content types"""
- paths, run_dir, user_data, vendor_data = self._setup_paths(
- tmpdir, ud_val=ud_src, vd_val=vd_src
- )
- sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
- sensitive_file.write('{"my-var": "it worked"}')
- args = self.args(
- debug=False, dump_all=True, format=None, instance_data=None,
- list_keys=False, user_data=user_data.strpath,
- vendor_data=vendor_data.strpath, varname=None)
- with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
- m_paths.return_value = paths
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 0
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- cmd_output = json.loads(out)
- assert "it worked" == cmd_output['my_var']
- if ud_expected == "ci-b64:":
- ud_expected = "ci-b64:{}".format(b64e(ud_src))
- if vd_expected == "ci-b64:":
- vd_expected = "ci-b64:{}".format(b64e(vd_src))
- assert ud_expected == cmd_output['userdata']
- assert vd_expected == cmd_output['vendordata']
-
- def test_handle_args_root_uses_instance_sensitive_data(
- self, capsys, tmpdir
- ):
- """When no instance_data argument, root uses sensitive json."""
- paths, run_dir, user_data, vendor_data = self._setup_paths(
- tmpdir, ud_val='ud', vd_val='vd'
- )
- sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
- sensitive_file.write('{"my-var": "it worked"}')
- args = self.args(
- debug=False, dump_all=True, format=None, instance_data=None,
- list_keys=False, user_data=user_data.strpath,
- vendor_data=vendor_data.strpath, varname=None)
- with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
- m_paths.return_value = paths
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 0
- assert 0 == query.handle_args('anyname', args)
- expected = (
- '{\n "my_var": "it worked",\n "userdata": "ud",\n '
- '"vendordata": "vd"\n}\n'
- )
- out, _err = capsys.readouterr()
- assert expected == out
-
- def test_handle_args_dumps_all_instance_data(self, capsys, tmpdir):
- """When --all is specified query will dump all instance data vars."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write('{"my-var": "it worked"}')
- args = self.args(
- debug=False, dump_all=True, format=None,
- instance_data=instance_data.strpath, list_keys=False,
- user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- expected = (
- '{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n'
- ' "vendordata": "<%s> file:vd"\n}\n' % (
- REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE
- )
- )
- out, _err = capsys.readouterr()
- assert expected == out
-
- def test_handle_args_returns_top_level_varname(self, capsys, tmpdir):
- """When the argument varname is passed, report its value."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write('{"my-var": "it worked"}')
- args = self.args(
- debug=False, dump_all=True, format=None,
- instance_data=instance_data.strpath, list_keys=False,
- user_data='ud', vendor_data='vd', varname='my_var')
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- assert 'it worked\n' == out
-
- def test_handle_args_returns_nested_varname(self, capsys, tmpdir):
- """If user_data file is a jinja template render instance-data vars."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write(
- '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}'
- )
- args = self.args(
- debug=False, dump_all=False, format=None,
- instance_data=instance_data.strpath, user_data='ud',
- vendor_data='vd', list_keys=False, varname='v1.key_2')
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- assert 'value-2\n' == out
-
- def test_handle_args_returns_standardized_vars_to_top_level_aliases(
- self, capsys, tmpdir
- ):
- """Any standardized vars under v# are promoted as top-level aliases."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write(
- '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
- ' "top": "gun"}')
- expected = dedent("""\
- {
- "top": "gun",
- "userdata": "<redacted for non-root user> file:ud",
- "v1": {
- "v1_1": "val1.1"
- },
- "v1_1": "val1.1",
- "v2": {
- "v2_2": "val2.2"
- },
- "v2_2": "val2.2",
- "vendordata": "<redacted for non-root user> file:vd"
- }
- """)
- args = self.args(
- debug=False, dump_all=True, format=None,
- instance_data=instance_data.strpath, user_data='ud',
- vendor_data='vd', list_keys=False, varname=None)
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- assert expected == out
-
- def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(
- self, capsys, tmpdir
- ):
- """Sort all top-level keys when only --list-keys provided."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write(
- '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
- ' "top": "gun"}')
- expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n'
- args = self.args(
- debug=False, dump_all=False, format=None,
- instance_data=instance_data.strpath, list_keys=True,
- user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- assert expected == out
-
- def test_handle_args_list_keys_sorts_nested_keys_when_varname(
- self, capsys, tmpdir
- ):
- """Sort all nested keys of varname object when --list-keys provided."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write(
- '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' +
- ' {"v2_2": "val2.2"}, "top": "gun"}')
- expected = 'v1_1\nv1_2\n'
- args = self.args(
- debug=False, dump_all=False, format=None,
- instance_data=instance_data.strpath, list_keys=True,
- user_data='ud', vendor_data='vd', varname='v1')
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- assert expected == out
-
- def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(
- self, caplog, tmpdir
- ):
- """Raise an error when --list-keys and varname specify a non-list."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write(
- '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' +
- '{"v2_2": "val2.2"}, "top": "gun"}')
- expected_error = "--list-keys provided but 'top' is not a dict"
- args = self.args(
- debug=False, dump_all=False, format=None,
- instance_data=instance_data.strpath, list_keys=True,
- user_data='ud', vendor_data='vd', varname='top')
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 1 == query.handle_args('anyname', args)
- assert expected_error in caplog.text
-
-# vi: ts=4 expandtab
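test_handle_args_root_processes_user_data feeds query plain text, utf-8 bytes, and gzip-compressed payloads and expects all of them to surface as decoded text, with a ci-b64: base64 marker reserved for undecodable content (the fallback branch in the test). A sketch of that normalization, under the assumption that decompression is attempted before decoding; the helper name is illustrative, not cloud-init API:

import base64
import gzip


def normalize_payload(data: bytes) -> str:
    # Try gunzip first; non-gzip input is used as-is.
    try:
        data = gzip.decompress(data)
    except OSError:
        pass
    # Decode to text, else mark the undecodable bytes as base64.
    try:
        return data.decode("utf-8")
    except UnicodeDecodeError:
        return "ci-b64:" + base64.b64encode(data).decode("ascii")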
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
deleted file mode 100644
index 1c9eec37..00000000
--- a/cloudinit/cmd/tests/test_status.py
+++ /dev/null
@@ -1,391 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from collections import namedtuple
-import os
-from io import StringIO
-from textwrap import dedent
-
-from cloudinit.atomic_helper import write_json
-from cloudinit.cmd import status
-from cloudinit.util import ensure_file
-from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock
-
-mypaths = namedtuple('MyPaths', 'run_dir')
-myargs = namedtuple('MyArgs', 'long wait')
-
-
-class TestStatus(CiTestCase):
-
- def setUp(self):
- super(TestStatus, self).setUp()
- self.new_root = self.tmp_dir()
- self.status_file = self.tmp_path('status.json', self.new_root)
- self.disable_file = self.tmp_path('cloudinit-disable', self.new_root)
- self.paths = mypaths(run_dir=self.new_root)
-
- class FakeInit(object):
- paths = self.paths
-
- def __init__(self, ds_deps):
- pass
-
- def read_cfg(self):
- pass
-
- self.init_class = FakeInit
-
- def test__is_cloudinit_disabled_false_on_sysvinit(self):
- '''When not in an environment using systemd, return False.'''
- ensure_file(self.disable_file) # Create the ignored disable file
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': False,
- 'get_cmdline': "root=/dev/my-root not-important"},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertFalse(
- is_disabled, 'expected enabled cloud-init on sysvinit')
- self.assertEqual('Cloud-init enabled on sysvinit', reason)
-
- def test__is_cloudinit_disabled_true_on_disable_file(self):
- '''When using systemd and disable_file is present return disabled.'''
- ensure_file(self.disable_file) # Create observed disable file
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': True,
- 'get_cmdline': "root=/dev/my-root not-important"},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertTrue(is_disabled, 'expected disabled cloud-init')
- self.assertEqual(
- 'Cloud-init disabled by {0}'.format(self.disable_file), reason)
-
- def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self):
- '''Not disabled when using systemd and enabled via kernel command line.'''
- ensure_file(self.disable_file) # Create ignored disable file
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': True,
- 'get_cmdline': 'something cloud-init=enabled else'},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertFalse(is_disabled, 'expected enabled cloud-init')
- self.assertEqual(
- 'Cloud-init enabled by kernel command line cloud-init=enabled',
- reason)
-
- def test__is_cloudinit_disabled_true_on_kernel_cmdline(self):
- '''When using systemd and cloud-init=disabled in cmdline, return disabled.'''
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': True,
- 'get_cmdline': 'something cloud-init=disabled else'},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertTrue(is_disabled, 'expected disabled cloud-init')
- self.assertEqual(
- 'Cloud-init disabled by kernel parameter cloud-init=disabled',
- reason)
-
- def test__is_cloudinit_disabled_true_when_generator_disables(self):
- '''When cloud-init-generator does not write the enabled file, return True.'''
- enabled_file = os.path.join(self.paths.run_dir, 'enabled')
- self.assertFalse(os.path.exists(enabled_file))
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': True,
- 'get_cmdline': 'something'},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertTrue(is_disabled, 'expected disabled cloud-init')
- self.assertEqual('Cloud-init disabled by cloud-init-generator', reason)
-
- def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self):
- '''Report enabled when systemd generator creates the enabled file.'''
- enabled_file = os.path.join(self.paths.run_dir, 'enabled')
- ensure_file(enabled_file)
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': True,
- 'get_cmdline': 'something ignored'},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertFalse(is_disabled, 'expected enabled cloud-init')
- self.assertEqual(
- 'Cloud-init enabled by systemd cloud-init-generator', reason)
-
- def test_status_returns_not_run(self):
- '''When status.json does not exist yet, return 'not run'.'''
- self.assertFalse(
- os.path.exists(self.status_file), 'Unexpected status.json found')
- cmdargs = myargs(long=False, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual('status: not run\n', m_stdout.getvalue())
-
- def test_status_returns_disabled_long_on_presence_of_disable_file(self):
- '''When cloud-init is disabled, report the disabled reason.'''
-
- checked_files = []
-
- def fakeexists(filepath):
- checked_files.append(filepath)
- status_file = os.path.join(self.paths.run_dir, 'status.json')
- return filepath != status_file
-
- cmdargs = myargs(long=True, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'os.path.exists': {'side_effect': fakeexists},
- '_is_cloudinit_disabled': (True, 'disabled for some reason'),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual(
- [os.path.join(self.paths.run_dir, 'status.json')],
- checked_files)
- expected = dedent('''\
- status: disabled
- detail:
- disabled for some reason
- ''')
- self.assertEqual(expected, m_stdout.getvalue())
-
- def test_status_returns_running_on_no_results_json(self):
- '''Report running when status.json exists but result.json does not.'''
- result_file = self.tmp_path('result.json', self.new_root)
- write_json(self.status_file, {})
- self.assertFalse(
- os.path.exists(result_file), 'Unexpected result.json found')
- cmdargs = myargs(long=False, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual('status: running\n', m_stdout.getvalue())
-
- def test_status_returns_running(self):
- '''Report running when status exists with an unfinished stage.'''
- ensure_file(self.tmp_path('result.json', self.new_root))
- write_json(self.status_file,
- {'v1': {'init': {'start': 1, 'finished': None}}})
- cmdargs = myargs(long=False, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual('status: running\n', m_stdout.getvalue())
-
- def test_status_returns_done(self):
- '''Report done when result.json exists and no stages are unfinished.'''
- ensure_file(self.tmp_path('result.json', self.new_root))
- write_json(
- self.status_file,
- {'v1': {'stage': None, # No current stage running
- 'datasource': (
- 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
- '[dsmode=net]'),
- 'blah': {'finished': 123.456},
- 'init': {'errors': [], 'start': 124.567,
- 'finished': 125.678},
- 'init-local': {'start': 123.45, 'finished': 123.46}}})
- cmdargs = myargs(long=False, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual('status: done\n', m_stdout.getvalue())
-
- def test_status_returns_done_long(self):
- '''Long format of done status includes datasource info.'''
- ensure_file(self.tmp_path('result.json', self.new_root))
- write_json(
- self.status_file,
- {'v1': {'stage': None,
- 'datasource': (
- 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
- '[dsmode=net]'),
- 'init': {'start': 124.567, 'finished': 125.678},
- 'init-local': {'start': 123.45, 'finished': 123.46}}})
- cmdargs = myargs(long=True, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- expected = dedent('''\
- status: done
- time: Thu, 01 Jan 1970 00:02:05 +0000
- detail:
- DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net]
- ''')
- self.assertEqual(expected, m_stdout.getvalue())
-
- def test_status_on_errors(self):
- '''Reports error when any stage has errors.'''
- write_json(
- self.status_file,
- {'v1': {'stage': None,
- 'blah': {'errors': [], 'finished': 123.456},
- 'init': {'errors': ['error1'], 'start': 124.567,
- 'finished': 125.678},
- 'init-local': {'start': 123.45, 'finished': 123.46}}})
- cmdargs = myargs(long=False, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(1, retcode)
- self.assertEqual('status: error\n', m_stdout.getvalue())
-
- def test_status_on_errors_long(self):
- '''Long format of error status includes all error messages.'''
- write_json(
- self.status_file,
- {'v1': {'stage': None,
- 'datasource': (
- 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
- '[dsmode=net]'),
- 'init': {'errors': ['error1'], 'start': 124.567,
- 'finished': 125.678},
- 'init-local': {'errors': ['error2', 'error3'],
- 'start': 123.45, 'finished': 123.46}}})
- cmdargs = myargs(long=True, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(1, retcode)
- expected = dedent('''\
- status: error
- time: Thu, 01 Jan 1970 00:02:05 +0000
- detail:
- error1
- error2
- error3
- ''')
- self.assertEqual(expected, m_stdout.getvalue())
-
- def test_status_returns_running_long_format(self):
- '''Long format reports the stage in which we are running.'''
- write_json(
- self.status_file,
- {'v1': {'stage': 'init',
- 'init': {'start': 124.456, 'finished': None},
- 'init-local': {'start': 123.45, 'finished': 123.46}}})
- cmdargs = myargs(long=True, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- expected = dedent('''\
- status: running
- time: Thu, 01 Jan 1970 00:02:04 +0000
- detail:
- Running in stage: init
- ''')
- self.assertEqual(expected, m_stdout.getvalue())
-
- def test_status_wait_blocks_until_done(self):
- '''Specifying wait will poll every 1/4 second until done state.'''
- running_json = {
- 'v1': {'stage': 'init',
- 'init': {'start': 124.456, 'finished': None},
- 'init-local': {'start': 123.45, 'finished': 123.46}}}
- done_json = {
- 'v1': {'stage': None,
- 'init': {'start': 124.456, 'finished': 125.678},
- 'init-local': {'start': 123.45, 'finished': 123.46}}}
-
- self.sleep_calls = 0
-
- def fake_sleep(interval):
- self.assertEqual(0.25, interval)
- self.sleep_calls += 1
- if self.sleep_calls == 2:
- write_json(self.status_file, running_json)
- elif self.sleep_calls == 3:
- write_json(self.status_file, done_json)
- result_file = self.tmp_path('result.json', self.new_root)
- ensure_file(result_file)
-
- cmdargs = myargs(long=False, wait=True)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'sleep': {'side_effect': fake_sleep},
- '_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual(4, self.sleep_calls)
- self.assertEqual('....\nstatus: done\n', m_stdout.getvalue())
-
- def test_status_wait_blocks_until_error(self):
- '''Specifying wait will poll every 1/4 second until error state.'''
- running_json = {
- 'v1': {'stage': 'init',
- 'init': {'start': 124.456, 'finished': None},
- 'init-local': {'start': 123.45, 'finished': 123.46}}}
- error_json = {
- 'v1': {'stage': None,
- 'init': {'errors': ['error1'], 'start': 124.456,
- 'finished': 125.678},
- 'init-local': {'start': 123.45, 'finished': 123.46}}}
-
- self.sleep_calls = 0
-
- def fake_sleep(interval):
- self.assertEqual(0.25, interval)
- self.sleep_calls += 1
- if self.sleep_calls == 2:
- write_json(self.status_file, running_json)
- elif self.sleep_calls == 3:
- write_json(self.status_file, error_json)
-
- cmdargs = myargs(long=False, wait=True)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'sleep': {'side_effect': fake_sleep},
- '_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(1, retcode)
- self.assertEqual(4, self.sleep_calls)
- self.assertEqual('....\nstatus: error\n', m_stdout.getvalue())
-
- def test_status_main(self):
- '''status.main can be run as a standalone script.'''
- write_json(self.status_file,
- {'v1': {'init': {'start': 1, 'finished': None}}})
- with self.assertRaises(SystemExit) as context_manager:
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- wrap_and_call(
- 'cloudinit.cmd.status',
- {'sys.argv': {'new': ['status']},
- '_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.main)
- self.assertEqual(0, context_manager.exception.code)
- self.assertEqual('status: running\n', m_stdout.getvalue())
-
-# vi: ts=4 expandtab syntax=python
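The two wait tests above encode status --wait's polling contract: sleep 0.25 seconds between checks, emit one '.' per poll, and stop once the state reaches a terminal done or error value, with the exit code reflecting which. A condensed sketch of that loop, assuming a get_status() callable that derives the state from status.json and result.json (both the callable and the terminal-state set are illustrative):

import sys
import time


def wait_for_terminal_status(get_status, sleep=time.sleep):
    # Poll every quarter second, echoing a dot per attempt, until
    # the reported state is terminal; the return value mirrors the
    # 0 (done) / 1 (error) exit codes asserted in the tests above.
    while True:
        state = get_status()
        if state in ("done", "error"):
            sys.stdout.write("\n")
            return 1 if state == "error" else 0
        sys.stdout.write(".")
        sys.stdout.flush()
        sleep(0.25)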