Diffstat (limited to 'cloudinit/cmd')
-rw-r--r--  cloudinit/cmd/clean.py               |  59
-rwxr-xr-x  cloudinit/cmd/cloud_id.py            |  68
-rw-r--r--  cloudinit/cmd/devel/__init__.py      |   3
-rw-r--r--  cloudinit/cmd/devel/hotplug_hook.py  | 138
-rw-r--r--  cloudinit/cmd/devel/logs.py          | 120
-rwxr-xr-x  cloudinit/cmd/devel/make_mime.py     |  76
-rwxr-xr-x  cloudinit/cmd/devel/net_convert.py   | 145
-rw-r--r--  cloudinit/cmd/devel/parser.py        |  48
-rwxr-xr-x  cloudinit/cmd/devel/render.py        |  54
-rw-r--r--  cloudinit/cmd/main.py                | 595
-rw-r--r--  cloudinit/cmd/query.py               | 170
-rw-r--r--  cloudinit/cmd/status.py              | 101
12 files changed, 972 insertions, 605 deletions
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 3502dd56..0e1db118 100644
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -10,9 +10,13 @@ import os
import sys
from cloudinit.stages import Init
-from cloudinit.subp import (ProcessExecutionError, subp)
+from cloudinit.subp import ProcessExecutionError, subp
from cloudinit.util import (
- del_dir, del_file, get_config_logfiles, is_link, error
+ del_dir,
+ del_file,
+ error,
+ get_config_logfiles,
+ is_link,
)
@@ -27,18 +31,35 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='clean',
- description=('Remove logs and artifacts so cloud-init re-runs on '
- 'a clean system'))
+ prog="clean",
+ description=(
+ "Remove logs and artifacts so cloud-init re-runs on "
+ "a clean system"
+ ),
+ )
parser.add_argument(
- '-l', '--logs', action='store_true', default=False, dest='remove_logs',
- help='Remove cloud-init logs.')
+ "-l",
+ "--logs",
+ action="store_true",
+ default=False,
+ dest="remove_logs",
+ help="Remove cloud-init logs.",
+ )
parser.add_argument(
- '-r', '--reboot', action='store_true', default=False,
- help='Reboot system after logs are cleaned so cloud-init re-runs.')
+ "-r",
+ "--reboot",
+ action="store_true",
+ default=False,
+ help="Reboot system after logs are cleaned so cloud-init re-runs.",
+ )
parser.add_argument(
- '-s', '--seed', action='store_true', default=False, dest='remove_seed',
- help='Remove cloud-init seed directory /var/lib/cloud/seed.')
+ "-s",
+ "--seed",
+ action="store_true",
+ default=False,
+ dest="remove_seed",
+ help="Remove cloud-init seed directory /var/lib/cloud/seed.",
+ )
return parser
@@ -59,8 +80,8 @@ def remove_artifacts(remove_logs, remove_seed=False):
if not os.path.isdir(init.paths.cloud_dir):
return 0 # Artifacts dir already cleaned
- seed_path = os.path.join(init.paths.cloud_dir, 'seed')
- for path in glob.glob('%s/*' % init.paths.cloud_dir):
+ seed_path = os.path.join(init.paths.cloud_dir, "seed")
+ for path in glob.glob("%s/*" % init.paths.cloud_dir):
if path == seed_path and not remove_seed:
continue
try:
@@ -69,7 +90,7 @@ def remove_artifacts(remove_logs, remove_seed=False):
else:
del_file(path)
except OSError as e:
- error('Could not remove {0}: {1}'.format(path, str(e)))
+ error("Could not remove {0}: {1}".format(path, str(e)))
return 1
return 0
@@ -78,13 +99,15 @@ def handle_clean_args(name, args):
"""Handle calls to 'cloud-init clean' as a subcommand."""
exit_code = remove_artifacts(args.remove_logs, args.remove_seed)
if exit_code == 0 and args.reboot:
- cmd = ['shutdown', '-r', 'now']
+ cmd = ["shutdown", "-r", "now"]
try:
subp(cmd, capture=False)
except ProcessExecutionError as e:
error(
'Could not reboot this system using "{0}": {1}'.format(
- cmd, str(e)))
+ cmd, str(e)
+ )
+ )
exit_code = 1
return exit_code
@@ -92,10 +115,10 @@ def handle_clean_args(name, args):
def main():
"""Tool to collect and tar all cloud-init related logs."""
parser = get_parser()
- sys.exit(handle_clean_args('clean', parser.parse_args()))
+ sys.exit(handle_clean_args("clean", parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
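
The clean.py changes above are purely mechanical (quote style and argument layout); behavior is unchanged. A minimal sketch of exercising the reformatted parser directly, assuming the module layout shown in the diff:

    from cloudinit.cmd.clean import get_parser, handle_clean_args

    # Parse flags exactly as the 'cloud-init clean' subcommand would.
    args = get_parser().parse_args(["--logs", "--seed"])
    # Returns 0 on success, 1 if an artifact could not be removed.
    exit_code = handle_clean_args("clean", args)
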
diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py
index 0cdc9675..b92b03a8 100755
--- a/cloudinit/cmd/cloud_id.py
+++ b/cloudinit/cmd/cloud_id.py
@@ -6,13 +6,16 @@ import argparse
import json
import sys
-from cloudinit.util import error
from cloudinit.sources import (
- INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id)
+ INSTANCE_JSON_FILE,
+ METADATA_UNKNOWN,
+ canonical_cloud_id,
+)
+from cloudinit.util import error
-DEFAULT_INSTANCE_JSON = '/run/cloud-init/%s' % INSTANCE_JSON_FILE
+DEFAULT_INSTANCE_JSON = "/run/cloud-init/%s" % INSTANCE_JSON_FILE
-NAME = 'cloud-id'
+NAME = "cloud-id"
def get_parser(parser=None):
@@ -27,17 +30,30 @@ def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
prog=NAME,
- description='Report the canonical cloud-id for this instance')
+ description="Report the canonical cloud-id for this instance",
+ )
parser.add_argument(
- '-j', '--json', action='store_true', default=False,
- help='Report all standardized cloud-id information as json.')
+ "-j",
+ "--json",
+ action="store_true",
+ default=False,
+ help="Report all standardized cloud-id information as json.",
+ )
parser.add_argument(
- '-l', '--long', action='store_true', default=False,
- help='Report extended cloud-id information as tab-delimited string.')
+ "-l",
+ "--long",
+ action="store_true",
+ default=False,
+ help="Report extended cloud-id information as tab-delimited string.",
+ )
parser.add_argument(
- '-i', '--instance-data', type=str, default=DEFAULT_INSTANCE_JSON,
- help=('Path to instance-data.json file. Default is %s' %
- DEFAULT_INSTANCE_JSON))
+ "-i",
+ "--instance-data",
+ type=str,
+ default=DEFAULT_INSTANCE_JSON,
+ help="Path to instance-data.json file. Default is %s"
+ % DEFAULT_INSTANCE_JSON,
+ )
return parser
@@ -53,24 +69,28 @@ def handle_args(name, args):
except IOError:
return error(
"File not found '%s'. Provide a path to instance data json file"
- ' using --instance-data' % args.instance_data)
+ " using --instance-data" % args.instance_data
+ )
except ValueError as e:
return error(
- "File '%s' is not valid json. %s" % (args.instance_data, e))
- v1 = instance_data.get('v1', {})
+ "File '%s' is not valid json. %s" % (args.instance_data, e)
+ )
+ v1 = instance_data.get("v1", {})
cloud_id = canonical_cloud_id(
- v1.get('cloud_name', METADATA_UNKNOWN),
- v1.get('region', METADATA_UNKNOWN),
- v1.get('platform', METADATA_UNKNOWN))
+ v1.get("cloud_name", METADATA_UNKNOWN),
+ v1.get("region", METADATA_UNKNOWN),
+ v1.get("platform", METADATA_UNKNOWN),
+ )
if args.json:
- v1['cloud_id'] = cloud_id
- response = json.dumps( # Pretty, sorted json
- v1, indent=1, sort_keys=True, separators=(',', ': '))
+ v1["cloud_id"] = cloud_id
+ response = json.dumps( # Pretty, sorted json
+ v1, indent=1, sort_keys=True, separators=(",", ": ")
+ )
elif args.long:
- response = '%s\t%s' % (cloud_id, v1.get('region', METADATA_UNKNOWN))
+ response = "%s\t%s" % (cloud_id, v1.get("region", METADATA_UNKNOWN))
else:
response = cloud_id
- sys.stdout.write('%s\n' % response)
+ sys.stdout.write("%s\n" % response)
return 0
@@ -80,7 +100,7 @@ def main():
sys.exit(handle_args(NAME, parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
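
For reference, the json.dumps() call kept by this hunk emits one-space-indented, key-sorted output. A small standalone sketch with hypothetical instance values:

    import json

    v1 = {"cloud_name": "aws", "region": "us-east-1",
          "platform": "ec2", "cloud_id": "aws"}
    print(json.dumps(v1, indent=1, sort_keys=True, separators=(",", ": ")))
    # {
    #  "cloud_id": "aws",
    #  "cloud_name": "aws",
    #  "platform": "ec2",
    #  "region": "us-east-1"
    # }
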
diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py
index 3ae28b69..ead5f7a9 100644
--- a/cloudinit/cmd/devel/__init__.py
+++ b/cloudinit/cmd/devel/__init__.py
@@ -11,7 +11,7 @@ from cloudinit.stages import Init
def addLogHandlerCLI(logger, log_level):
"""Add a commandline logging handler to emit messages to stderr."""
- formatter = logging.Formatter('%(levelname)s: %(message)s')
+ formatter = logging.Formatter("%(levelname)s: %(message)s")
log.setupBasicLogging(log_level, formatter=formatter)
return logger
@@ -22,4 +22,5 @@ def read_cfg_paths():
init.read_cfg()
return init.paths
+
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
index f6f36a00..a9be0379 100644
--- a/cloudinit/cmd/devel/hotplug_hook.py
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -6,20 +6,17 @@ import os
import sys
import time
-from cloudinit import log
-from cloudinit import reporting
-from cloudinit import stages
+from cloudinit import log, reporting, stages
from cloudinit.event import EventScope, EventType
from cloudinit.net import activators, read_sys_net_safe
from cloudinit.net.network_state import parse_net_config_data
from cloudinit.reporting import events
-from cloudinit.stages import Init
from cloudinit.sources import DataSource # noqa: F401
from cloudinit.sources import DataSourceNotFoundException
-
+from cloudinit.stages import Init
LOG = log.getLogger(__name__)
-NAME = 'hotplug-hook'
+NAME = "hotplug-hook"
def get_parser(parser=None):
@@ -35,33 +32,38 @@ def get_parser(parser=None):
parser.description = __doc__
parser.add_argument(
- "-s", "--subsystem", required=True,
+ "-s",
+ "--subsystem",
+ required=True,
help="subsystem to act on",
- choices=['net']
+ choices=["net"],
)
subparsers = parser.add_subparsers(
- title='Hotplug Action',
- dest='hotplug_action'
+ title="Hotplug Action", dest="hotplug_action"
)
subparsers.required = True
subparsers.add_parser(
- 'query',
- help='query if hotplug is enabled for given subsystem'
+ "query", help="query if hotplug is enabled for given subsystem"
)
parser_handle = subparsers.add_parser(
- 'handle', help='handle the hotplug event')
+ "handle", help="handle the hotplug event"
+ )
parser_handle.add_argument(
- "-d", "--devpath", required=True,
+ "-d",
+ "--devpath",
+ required=True,
metavar="PATH",
- help="sysfs path to hotplugged device"
+ help="sysfs path to hotplugged device",
)
parser_handle.add_argument(
- "-u", "--udevaction", required=True,
+ "-u",
+ "--udevaction",
+ required=True,
help="action to take",
- choices=['add', 'remove']
+ choices=["add", "remove"],
)
return parser
@@ -90,27 +92,29 @@ class UeventHandler(abc.ABC):
def detect_hotplugged_device(self):
detect_presence = None
- if self.action == 'add':
+ if self.action == "add":
detect_presence = True
- elif self.action == 'remove':
+ elif self.action == "remove":
detect_presence = False
else:
- raise ValueError('Unknown action: %s' % self.action)
+ raise ValueError("Unknown action: %s" % self.action)
if detect_presence != self.device_detected():
raise RuntimeError(
- 'Failed to detect %s in updated metadata' % self.id)
+ "Failed to detect %s in updated metadata" % self.id
+ )
def success(self):
return self.success_fn()
def update_metadata(self):
- result = self.datasource.update_metadata_if_supported([
- EventType.HOTPLUG])
+ result = self.datasource.update_metadata_if_supported(
+ [EventType.HOTPLUG]
+ )
if not result:
raise RuntimeError(
- 'Datasource %s not updated for '
- 'event %s' % (self.datasource, EventType.HOTPLUG)
+ "Datasource %s not updated for event %s"
+ % (self.datasource, EventType.HOTPLUG)
)
return result
@@ -118,7 +122,7 @@ class UeventHandler(abc.ABC):
class NetHandler(UeventHandler):
def __init__(self, datasource, devpath, action, success_fn):
# convert devpath to mac address
- id = read_sys_net_safe(os.path.basename(devpath), 'address')
+ id = read_sys_net_safe(os.path.basename(devpath), "address")
super().__init__(id, datasource, devpath, action, success_fn)
def apply(self):
@@ -128,14 +132,16 @@ class NetHandler(UeventHandler):
)
interface_name = os.path.basename(self.devpath)
activator = activators.select_activator()
- if self.action == 'add':
+ if self.action == "add":
if not activator.bring_up_interface(interface_name):
raise RuntimeError(
- 'Failed to bring up device: {}'.format(self.devpath))
- elif self.action == 'remove':
+ "Failed to bring up device: {}".format(self.devpath)
+ )
+ elif self.action == "remove":
if not activator.bring_down_interface(interface_name):
raise RuntimeError(
- 'Failed to bring down device: {}'.format(self.devpath))
+ "Failed to bring down device: {}".format(self.devpath)
+ )
@property
def config(self):
@@ -144,15 +150,16 @@ class NetHandler(UeventHandler):
def device_detected(self) -> bool:
netstate = parse_net_config_data(self.config)
found = [
- iface for iface in netstate.iter_interfaces()
- if iface.get('mac_address') == self.id
+ iface
+ for iface in netstate.iter_interfaces()
+ if iface.get("mac_address") == self.id
]
- LOG.debug('Ifaces with ID=%s : %s', self.id, found)
+ LOG.debug("Ifaces with ID=%s : %s", self.id, found)
return len(found) > 0
SUBSYSTEM_PROPERTES_MAP = {
- 'net': (NetHandler, EventScope.NETWORK),
+ "net": (NetHandler, EventScope.NETWORK),
}
@@ -161,66 +168,65 @@ def is_enabled(hotplug_init, subsystem):
scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1]
except KeyError as e:
raise Exception(
- 'hotplug-hook: cannot handle events for subsystem: {}'.format(
- subsystem)
+ "hotplug-hook: cannot handle events for subsystem: {}".format(
+ subsystem
+ )
) from e
return stages.update_event_enabled(
datasource=hotplug_init.datasource,
cfg=hotplug_init.cfg,
event_source_type=EventType.HOTPLUG,
- scope=scope
+ scope=scope,
)
def initialize_datasource(hotplug_init, subsystem):
- LOG.debug('Fetching datasource')
+ LOG.debug("Fetching datasource")
datasource = hotplug_init.fetch(existing="trust")
if not datasource.get_supported_events([EventType.HOTPLUG]):
- LOG.debug('hotplug not supported for event of type %s', subsystem)
+ LOG.debug("hotplug not supported for event of type %s", subsystem)
return
if not is_enabled(hotplug_init, subsystem):
- LOG.debug('hotplug not enabled for event of type %s', subsystem)
+ LOG.debug("hotplug not enabled for event of type %s", subsystem)
return
return datasource
-def handle_hotplug(
- hotplug_init: Init, devpath, subsystem, udevaction
-):
+def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction):
datasource = initialize_datasource(hotplug_init, subsystem)
if not datasource:
return
handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0]
- LOG.debug('Creating %s event handler', subsystem)
+ LOG.debug("Creating %s event handler", subsystem)
event_handler = handler_cls(
datasource=datasource,
devpath=devpath,
action=udevaction,
- success_fn=hotplug_init._write_to_cache
+ success_fn=hotplug_init._write_to_cache,
) # type: UeventHandler
wait_times = [1, 3, 5, 10, 30]
for attempt, wait in enumerate(wait_times):
LOG.debug(
- 'subsystem=%s update attempt %s/%s',
+ "subsystem=%s update attempt %s/%s",
subsystem,
attempt,
- len(wait_times)
+ len(wait_times),
)
try:
- LOG.debug('Refreshing metadata')
+ LOG.debug("Refreshing metadata")
event_handler.update_metadata()
- LOG.debug('Detecting device in updated metadata')
+ LOG.debug("Detecting device in updated metadata")
event_handler.detect_hotplugged_device()
- LOG.debug('Applying config change')
+ LOG.debug("Applying config change")
event_handler.apply()
- LOG.debug('Updating cache')
+ LOG.debug("Updating cache")
event_handler.success()
break
except Exception as e:
- LOG.debug('Exception while processing hotplug event. %s', e)
+ LOG.debug("Exception while processing hotplug event. %s", e)
time.sleep(wait)
last_exception = e
else:
@@ -238,31 +244,33 @@ def handle_args(name, args):
hotplug_init.read_cfg()
log.setupLogging(hotplug_init.cfg)
- if 'reporting' in hotplug_init.cfg:
- reporting.update_configuration(hotplug_init.cfg.get('reporting'))
+ if "reporting" in hotplug_init.cfg:
+ reporting.update_configuration(hotplug_init.cfg.get("reporting"))
# Logging isn't going to be setup until now
LOG.debug(
- '%s called with the following arguments: {'
- 'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}',
+ "%s called with the following arguments: {"
+ "hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}",
name,
args.hotplug_action,
args.subsystem,
- args.udevaction if 'udevaction' in args else None,
- args.devpath if 'devpath' in args else None,
+ args.udevaction if "udevaction" in args else None,
+ args.devpath if "devpath" in args else None,
)
with hotplug_reporter:
try:
- if args.hotplug_action == 'query':
+ if args.hotplug_action == "query":
try:
datasource = initialize_datasource(
- hotplug_init, args.subsystem)
+ hotplug_init, args.subsystem
+ )
except DataSourceNotFoundException:
print(
"Unable to determine hotplug state. No datasource "
- "detected")
+ "detected"
+ )
sys.exit(1)
- print('enabled' if datasource else 'disabled')
+ print("enabled" if datasource else "disabled")
else:
handle_hotplug(
hotplug_init=hotplug_init,
@@ -271,13 +279,13 @@ def handle_args(name, args):
udevaction=args.udevaction,
)
except Exception:
- LOG.exception('Received fatal exception handling hotplug!')
+ LOG.exception("Received fatal exception handling hotplug!")
raise
- LOG.debug('Exiting hotplug handler')
+ LOG.debug("Exiting hotplug handler")
reporting.flush_events()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = get_parser().parse_args()
handle_args(NAME, args)
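
handle_hotplug() above relies on Python's for/else: the else branch runs only when the loop exhausts wait_times without hitting break. A self-contained sketch of that retry idiom, where do_update() is a hypothetical stand-in for the metadata refresh and device detection steps:

    import random
    import time

    def do_update():
        # Hypothetical stand-in; raises until the update succeeds.
        if random.random() < 0.5:
            raise RuntimeError("metadata not ready")

    wait_times = [1, 3, 5, 10, 30]
    last_exception = None
    for attempt, wait in enumerate(wait_times):
        try:
            do_update()
            break          # success: the else clause is skipped
        except Exception as e:
            last_exception = e
            time.sleep(wait)
    else:
        raise last_exception  # every attempt failed
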
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 31ade73d..d54b809a 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -5,20 +5,19 @@
"""Define 'collect-logs' utility and handler to include in cloud-init cmd."""
import argparse
-from datetime import datetime
import os
import shutil
import sys
+from datetime import datetime
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.subp import ProcessExecutionError, subp
from cloudinit.temp_utils import tempdir
-from cloudinit.subp import (ProcessExecutionError, subp)
-from cloudinit.util import (chdir, copy, ensure_dir, write_file)
+from cloudinit.util import chdir, copy, ensure_dir, write_file
-
-CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
-CLOUDINIT_RUN_DIR = '/run/cloud-init'
-USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional
+CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"]
+CLOUDINIT_RUN_DIR = "/run/cloud-init"
+USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional
def get_parser(parser=None):
@@ -32,26 +31,44 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='collect-logs',
- description='Collect and tar all cloud-init debug info')
- parser.add_argument('--verbose', '-v', action='count', default=0,
- dest='verbosity', help="Be more verbose.")
+ prog="collect-logs",
+ description="Collect and tar all cloud-init debug info",
+ )
+ parser.add_argument(
+ "--verbose",
+ "-v",
+ action="count",
+ default=0,
+ dest="verbosity",
+ help="Be more verbose.",
+ )
parser.add_argument(
- "--tarfile", '-t', default='cloud-init.tar.gz',
- help=('The tarfile to create containing all collected logs.'
- ' Default: cloud-init.tar.gz'))
+ "--tarfile",
+ "-t",
+ default="cloud-init.tar.gz",
+ help=(
+ "The tarfile to create containing all collected logs."
+ " Default: cloud-init.tar.gz"
+ ),
+ )
parser.add_argument(
- "--include-userdata", '-u', default=False, action='store_true',
- dest='userdata', help=(
- 'Optionally include user-data from {0} which could contain'
- ' sensitive information.'.format(USER_DATA_FILE)))
+ "--include-userdata",
+ "-u",
+ default=False,
+ action="store_true",
+ dest="userdata",
+ help=(
+ "Optionally include user-data from {0} which could contain"
+ " sensitive information.".format(USER_DATA_FILE)
+ ),
+ )
return parser
def _copytree_rundir_ignore_files(curdir, files):
"""Return a list of files to ignore for /run/cloud-init directory"""
ignored_files = [
- 'hook-hotplug-cmd', # named pipe for hotplug
+ "hook-hotplug-cmd", # named pipe for hotplug
]
if os.getuid() != 0:
# Ignore root-permissioned files
@@ -94,52 +111,67 @@ def collect_logs(tarfile, include_userdata, verbosity=0):
if include_userdata and os.getuid() != 0:
sys.stderr.write(
"To include userdata, root user is required."
- " Try sudo cloud-init collect-logs\n")
+ " Try sudo cloud-init collect-logs\n"
+ )
return 1
tarfile = os.path.abspath(tarfile)
- date = datetime.utcnow().date().strftime('%Y-%m-%d')
- log_dir = 'cloud-init-logs-{0}'.format(date)
- with tempdir(dir='/tmp') as tmp_dir:
+ date = datetime.utcnow().date().strftime("%Y-%m-%d")
+ log_dir = "cloud-init-logs-{0}".format(date)
+ with tempdir(dir="/tmp") as tmp_dir:
log_dir = os.path.join(tmp_dir, log_dir)
version = _write_command_output_to_file(
- ['cloud-init', '--version'],
- os.path.join(log_dir, 'version'),
- "cloud-init --version", verbosity)
+ ["cloud-init", "--version"],
+ os.path.join(log_dir, "version"),
+ "cloud-init --version",
+ verbosity,
+ )
dpkg_ver = _write_command_output_to_file(
- ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
- os.path.join(log_dir, 'dpkg-version'),
- "dpkg version", verbosity)
+ ["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"],
+ os.path.join(log_dir, "dpkg-version"),
+ "dpkg version",
+ verbosity,
+ )
if not version:
version = dpkg_ver if dpkg_ver else "not-available"
_debug("collected cloud-init version: %s\n" % version, 1, verbosity)
_write_command_output_to_file(
- ['dmesg'], os.path.join(log_dir, 'dmesg.txt'),
- "dmesg output", verbosity)
+ ["dmesg"],
+ os.path.join(log_dir, "dmesg.txt"),
+ "dmesg output",
+ verbosity,
+ )
_write_command_output_to_file(
- ['journalctl', '--boot=0', '-o', 'short-precise'],
- os.path.join(log_dir, 'journal.txt'),
- "systemd journal of current boot", verbosity)
+ ["journalctl", "--boot=0", "-o", "short-precise"],
+ os.path.join(log_dir, "journal.txt"),
+ "systemd journal of current boot",
+ verbosity,
+ )
for log in CLOUDINIT_LOGS:
_collect_file(log, log_dir, verbosity)
if include_userdata:
_collect_file(USER_DATA_FILE, log_dir, verbosity)
- run_dir = os.path.join(log_dir, 'run')
+ run_dir = os.path.join(log_dir, "run")
ensure_dir(run_dir)
if os.path.exists(CLOUDINIT_RUN_DIR):
try:
- shutil.copytree(CLOUDINIT_RUN_DIR,
- os.path.join(run_dir, 'cloud-init'),
- ignore=_copytree_rundir_ignore_files)
+ shutil.copytree(
+ CLOUDINIT_RUN_DIR,
+ os.path.join(run_dir, "cloud-init"),
+ ignore=_copytree_rundir_ignore_files,
+ )
except shutil.Error as e:
sys.stderr.write("Failed collecting file(s) due to error:\n")
- sys.stderr.write(str(e) + '\n')
+ sys.stderr.write(str(e) + "\n")
_debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity)
else:
- _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1,
- verbosity)
+ _debug(
+ "directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR,
+ 1,
+ verbosity,
+ )
with chdir(tmp_dir):
- subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
+ subp(["tar", "czvf", tarfile, log_dir.replace(tmp_dir + "/", "")])
sys.stderr.write("Wrote %s\n" % tarfile)
return 0
@@ -152,10 +184,10 @@ def handle_collect_logs_args(name, args):
def main():
"""Tool to collect and tar all cloud-init related logs."""
parser = get_parser()
- return handle_collect_logs_args('collect-logs', parser.parse_args())
+ return handle_collect_logs_args("collect-logs", parser.parse_args())
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
# vi: ts=4 expandtab
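
_copytree_rundir_ignore_files() plugs into shutil.copytree's ignore hook, which is called once per directory with that directory's path and entries and returns the names to skip. A minimal sketch of the same mechanism, with hypothetical destination paths:

    import shutil

    def ignore_named_pipes(curdir, files):
        # Skip the hotplug named pipe, mirroring the collector above.
        return [f for f in files if f == "hook-hotplug-cmd"]

    shutil.copytree("/run/cloud-init", "/tmp/collected/cloud-init",
                    ignore=ignore_named_pipes)
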
diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py
index 4e6a5778..a7493c74 100755
--- a/cloudinit/cmd/devel/make_mime.py
+++ b/cloudinit/cmd/devel/make_mime.py
@@ -9,19 +9,22 @@ from email.mime.text import MIMEText
from cloudinit import log
from cloudinit.handlers import INCLUSION_TYPES_MAP
+
from . import addLogHandlerCLI
-NAME = 'make-mime'
+NAME = "make-mime"
LOG = log.getLogger(NAME)
-EPILOG = ("Example: make-mime -a config.yaml:cloud-config "
- "-a script.sh:x-shellscript > user-data")
+EPILOG = (
+ "Example: make-mime -a config.yaml:cloud-config "
+ "-a script.sh:x-shellscript > user-data"
+)
def file_content_type(text):
- """ Return file content type by reading the first line of the input. """
+ """Return file content type by reading the first line of the input."""
try:
filename, content_type = text.split(":", 1)
- return (open(filename, 'r'), filename, content_type.strip())
+ return (open(filename, "r"), filename, content_type.strip())
except ValueError as e:
raise argparse.ArgumentError(
text, "Invalid value for %r" % (text)
@@ -41,26 +44,43 @@ def get_parser(parser=None):
# update the parser's doc and add an epilog to show an example
parser.description = __doc__
parser.epilog = EPILOG
- parser.add_argument("-a", "--attach", dest="files", type=file_content_type,
- action='append', default=[],
- metavar="<file>:<content-type>",
- help=("attach the given file as the specified "
- "content-type"))
- parser.add_argument('-l', '--list-types', action='store_true',
- default=False,
- help='List support cloud-init content types.')
- parser.add_argument('-f', '--force', action='store_true',
- default=False,
- help='Ignore unknown content-type warnings')
+ parser.add_argument(
+ "-a",
+ "--attach",
+ dest="files",
+ type=file_content_type,
+ action="append",
+ default=[],
+ metavar="<file>:<content-type>",
+ help="attach the given file as the specified content-type",
+ )
+ parser.add_argument(
+ "-l",
+ "--list-types",
+ action="store_true",
+ default=False,
+ help="List support cloud-init content types.",
+ )
+ parser.add_argument(
+ "-f",
+ "--force",
+ action="store_true",
+ default=False,
+ help="Ignore unknown content-type warnings",
+ )
return parser
def get_content_types(strip_prefix=False):
- """ Return a list of cloud-init supported content types. Optionally
- strip out the leading 'text/' of the type if strip_prefix=True.
+ """Return a list of cloud-init supported content types. Optionally
+ strip out the leading 'text/' of the type if strip_prefix=True.
"""
- return sorted([ctype.replace("text/", "") if strip_prefix else ctype
- for ctype in INCLUSION_TYPES_MAP.values()])
+ return sorted(
+ [
+ ctype.replace("text/", "") if strip_prefix else ctype
+ for ctype in INCLUSION_TYPES_MAP.values()
+ ]
+ )
def handle_args(name, args):
@@ -82,14 +102,16 @@ def handle_args(name, args):
for i, (fh, filename, format_type) in enumerate(args.files):
contents = fh.read()
sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
- sub_message.add_header('Content-Disposition',
- 'attachment; filename="%s"' % (filename))
+ sub_message.add_header(
+ "Content-Disposition", 'attachment; filename="%s"' % (filename)
+ )
content_type = sub_message.get_content_type().lower()
if content_type not in get_content_types():
level = "WARNING" if args.force else "ERROR"
- msg = (level + ": content type %r for attachment %s "
- "may be incorrect!") % (content_type, i + 1)
- sys.stderr.write(msg + '\n')
+ msg = (
+ level + ": content type %r for attachment %s may be incorrect!"
+ ) % (content_type, i + 1)
+ sys.stderr.write(msg + "\n")
errors.append(msg)
sub_messages.append(sub_message)
if len(errors) and not args.force:
@@ -104,10 +126,10 @@ def handle_args(name, args):
def main():
args = get_parser().parse_args()
- return(handle_args(NAME, args))
+ return handle_args(NAME, args)
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
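
The attachment handling above builds one MIMEText part per file; combining the parts into the final multipart user-data is standard email-package usage. A hedged sketch (the cloud-config body and filename are illustrative):

    import sys
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    combined = MIMEMultipart()
    part = MIMEText("#cloud-config\npackages: [htop]\n",
                    "cloud-config", sys.getdefaultencoding())
    part.add_header("Content-Disposition",
                    'attachment; filename="config.yaml"')
    combined.attach(part)
    print(combined.as_string())  # multipart MIME ready for user-data
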
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index f4a98e5e..18b1e7ff 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -6,15 +6,13 @@ import json
import os
import sys
-from cloudinit.sources.helpers import openstack
+from cloudinit import distros, log, safeyaml
+from cloudinit.net import eni, netplan, network_state, networkd, sysconfig
from cloudinit.sources import DataSourceAzure as azure
from cloudinit.sources import DataSourceOVF as ovf
+from cloudinit.sources.helpers import openstack
-from cloudinit import distros, safeyaml
-from cloudinit.net import eni, netplan, networkd, network_state, sysconfig
-from cloudinit import log
-
-NAME = 'net-convert'
+NAME = "net-convert"
def get_parser(parser=None):
@@ -27,33 +25,59 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
- parser.add_argument("-p", "--network-data", type=open,
- metavar="PATH", required=True,
- help="The network configuration to read")
- parser.add_argument("-k", "--kind",
- choices=['eni', 'network_data.json', 'yaml',
- 'azure-imds', 'vmware-imc'],
- required=True,
- help="The format of the given network config")
- parser.add_argument("-d", "--directory",
- metavar="PATH",
- help="directory to place output in",
- required=True)
- parser.add_argument("-D", "--distro",
- choices=[item for sublist in
- distros.OSFAMILIES.values()
- for item in sublist],
- required=True)
- parser.add_argument("-m", "--mac",
- metavar="name,mac",
- action='append',
- help="interface name to mac mapping")
- parser.add_argument("--debug", action='store_true',
- help='enable debug logging to stderr.')
- parser.add_argument("-O", "--output-kind",
- choices=['eni', 'netplan', 'networkd', 'sysconfig'],
- required=True,
- help="The network config format to emit")
+ parser.add_argument(
+ "-p",
+ "--network-data",
+ type=open,
+ metavar="PATH",
+ required=True,
+ help="The network configuration to read",
+ )
+ parser.add_argument(
+ "-k",
+ "--kind",
+ choices=[
+ "eni",
+ "network_data.json",
+ "yaml",
+ "azure-imds",
+ "vmware-imc",
+ ],
+ required=True,
+ help="The format of the given network config",
+ )
+ parser.add_argument(
+ "-d",
+ "--directory",
+ metavar="PATH",
+ help="directory to place output in",
+ required=True,
+ )
+ parser.add_argument(
+ "-D",
+ "--distro",
+ choices=[
+ item for sublist in distros.OSFAMILIES.values() for item in sublist
+ ],
+ required=True,
+ )
+ parser.add_argument(
+ "-m",
+ "--mac",
+ metavar="name,mac",
+ action="append",
+ help="interface name to mac mapping",
+ )
+ parser.add_argument(
+ "--debug", action="store_true", help="enable debug logging to stderr."
+ )
+ parser.add_argument(
+ "-O",
+ "--output-kind",
+ choices=["eni", "netplan", "networkd", "sysconfig"],
+ required=True,
+ help="The network config format to emit",
+ )
return parser
@@ -81,59 +105,68 @@ def handle_args(name, args):
pre_ns = eni.convert_eni_data(net_data)
elif args.kind == "yaml":
pre_ns = safeyaml.load(net_data)
- if 'network' in pre_ns:
- pre_ns = pre_ns.get('network')
+ if "network" in pre_ns:
+ pre_ns = pre_ns.get("network")
if args.debug:
- sys.stderr.write('\n'.join(
- ["Input YAML", safeyaml.dumps(pre_ns), ""]))
- elif args.kind == 'network_data.json':
+ sys.stderr.write(
+ "\n".join(["Input YAML", safeyaml.dumps(pre_ns), ""])
+ )
+ elif args.kind == "network_data.json":
pre_ns = openstack.convert_net_json(
- json.loads(net_data), known_macs=known_macs)
- elif args.kind == 'azure-imds':
+ json.loads(net_data), known_macs=known_macs
+ )
+ elif args.kind == "azure-imds":
pre_ns = azure.parse_network_config(json.loads(net_data))
- elif args.kind == 'vmware-imc':
+ elif args.kind == "vmware-imc":
config = ovf.Config(ovf.ConfigFile(args.network_data.name))
pre_ns = ovf.get_network_config_from_conf(config, False)
ns = network_state.parse_net_config_data(pre_ns)
if args.debug:
- sys.stderr.write('\n'.join(
- ["", "Internal State", safeyaml.dumps(ns), ""]))
+ sys.stderr.write(
+ "\n".join(["", "Internal State", safeyaml.dumps(ns), ""])
+ )
distro_cls = distros.fetch(args.distro)
distro = distro_cls(args.distro, {}, None)
config = {}
if args.output_kind == "eni":
r_cls = eni.Renderer
- config = distro.renderer_configs.get('eni')
+ config = distro.renderer_configs.get("eni")
elif args.output_kind == "netplan":
r_cls = netplan.Renderer
- config = distro.renderer_configs.get('netplan')
+ config = distro.renderer_configs.get("netplan")
# don't run netplan generate/apply
- config['postcmds'] = False
+ config["postcmds"] = False
# trim leading slash
- config['netplan_path'] = config['netplan_path'][1:]
+ config["netplan_path"] = config["netplan_path"][1:]
# enable some netplan features
- config['features'] = ['dhcp-use-domains', 'ipv6-mtu']
+ config["features"] = ["dhcp-use-domains", "ipv6-mtu"]
elif args.output_kind == "networkd":
r_cls = networkd.Renderer
- config = distro.renderer_configs.get('networkd')
+ config = distro.renderer_configs.get("networkd")
elif args.output_kind == "sysconfig":
r_cls = sysconfig.Renderer
- config = distro.renderer_configs.get('sysconfig')
+ config = distro.renderer_configs.get("sysconfig")
else:
raise RuntimeError("Invalid output_kind")
r = r_cls(config=config)
- sys.stderr.write(''.join([
- "Read input format '%s' from '%s'.\n" % (
- args.kind, args.network_data.name),
- "Wrote output format '%s' to '%s'\n" % (
- args.output_kind, args.directory)]) + "\n")
+ sys.stderr.write(
+ "".join(
+ [
+ "Read input format '%s' from '%s'.\n"
+ % (args.kind, args.network_data.name),
+ "Wrote output format '%s' to '%s'\n"
+ % (args.output_kind, args.directory),
+ ]
+ )
+ + "\n"
+ )
r.render_network_state(network_state=ns, target=args.directory)
-if __name__ == '__main__':
+if __name__ == "__main__":
args = get_parser().parse_args()
handle_args(NAME, args)
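
Read end to end, handle_args() is a three-step pipeline: load the input into a dict, normalize it with parse_net_config_data(), then hand the result to a distro-configured renderer. A condensed sketch of the YAML-to-netplan path, with hypothetical file paths:

    from cloudinit import distros, safeyaml
    from cloudinit.net import netplan, network_state

    pre_ns = safeyaml.load(open("net.yaml").read())
    if "network" in pre_ns:
        pre_ns = pre_ns.get("network")
    ns = network_state.parse_net_config_data(pre_ns)

    distro_cls = distros.fetch("ubuntu")
    distro = distro_cls("ubuntu", {}, None)
    config = distro.renderer_configs.get("netplan")
    config["postcmds"] = False  # don't run netplan generate/apply
    config["netplan_path"] = config["netplan_path"][1:]  # relative to target

    netplan.Renderer(config=config).render_network_state(
        network_state=ns, target="/tmp/out"
    )
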
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index be304630..76b16c2e 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -5,33 +5,47 @@
"""Define 'devel' subcommand argument parsers to include in cloud-init cmd."""
import argparse
+
from cloudinit.config import schema
-from . import hotplug_hook
-from . import net_convert
-from . import render
-from . import make_mime
+from . import hotplug_hook, make_mime, net_convert, render
def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
- prog='cloudinit-devel',
- description='Run development cloud-init tools')
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ prog="cloudinit-devel",
+ description="Run development cloud-init tools",
+ )
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
subcmds = [
- (hotplug_hook.NAME, hotplug_hook.__doc__,
- hotplug_hook.get_parser, hotplug_hook.handle_args),
- ('schema', 'Validate cloud-config files for document schema',
- schema.get_parser, schema.handle_schema_args),
- (net_convert.NAME, net_convert.__doc__,
- net_convert.get_parser, net_convert.handle_args),
- (render.NAME, render.__doc__,
- render.get_parser, render.handle_args),
- (make_mime.NAME, make_mime.__doc__,
- make_mime.get_parser, make_mime.handle_args),
+ (
+ hotplug_hook.NAME,
+ hotplug_hook.__doc__,
+ hotplug_hook.get_parser,
+ hotplug_hook.handle_args,
+ ),
+ (
+ "schema",
+ "Validate cloud-config files for document schema",
+ schema.get_parser,
+ schema.handle_schema_args,
+ ),
+ (
+ net_convert.NAME,
+ net_convert.__doc__,
+ net_convert.get_parser,
+ net_convert.handle_args,
+ ),
+ (render.NAME, render.__doc__, render.get_parser, render.handle_args),
+ (
+ make_mime.NAME,
+ make_mime.__doc__,
+ make_mime.get_parser,
+ make_mime.handle_args,
+ ),
]
for (subcmd, helpmsg, get_parser, handler) in subcmds:
parser = subparsers.add_parser(subcmd, help=helpmsg)
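
The subcmds table above is the registration pattern used throughout cloud-init's CLI: each entry pairs a name and help string with a parser-builder and a handler, and dispatch later pulls the handler back out of args.action (as status_wrapper does in main.py below). A generic, self-contained sketch of the pattern; the hello subcommand is hypothetical:

    import argparse

    def hello_parser(parser):
        parser.add_argument("who")
        return parser

    def hello_handler(name, args):
        print("hello,", args.who)
        return 0

    SUBCMDS = [("hello", "Say hello", hello_parser, hello_handler)]

    parser = argparse.ArgumentParser(prog="demo")
    subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
    subparsers.required = True
    for (subcmd, helpmsg, get_parser, handler) in SUBCMDS:
        sub = subparsers.add_parser(subcmd, help=helpmsg)
        get_parser(sub)
        sub.set_defaults(action=(subcmd, handler))

    args = parser.parse_args(["hello", "world"])
    name, functor = args.action
    functor(name, args)  # prints: hello, world
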
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index 1090aa16..2f9a22a8 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -6,12 +6,13 @@ import argparse
import os
import sys
-from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
from cloudinit import log
+from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
+
from . import addLogHandlerCLI, read_cfg_paths
-NAME = 'render'
+NAME = "render"
LOG = log.getLogger(NAME)
@@ -27,13 +28,24 @@ def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
- 'user_data', type=str, help='Path to the user-data file to render')
+ "user_data", type=str, help="Path to the user-data file to render"
+ )
+ parser.add_argument(
+ "-i",
+ "--instance-data",
+ type=str,
+ help=(
+ "Optional path to instance-data.json file. Defaults to"
+ " /run/cloud-init/instance-data.json"
+ ),
+ )
parser.add_argument(
- '-i', '--instance-data', type=str,
- help=('Optional path to instance-data.json file. Defaults to'
- ' /run/cloud-init/instance-data.json'))
- parser.add_argument('-d', '--debug', action='store_true', default=False,
- help='Add verbose messages during template render')
+ "-d",
+ "--debug",
+ action="store_true",
+ default=False,
+ help="Add verbose messages during template render",
+ )
return parser
@@ -54,34 +66,38 @@ def handle_args(name, args):
redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
if uid == 0:
instance_data_fn = os.path.join(
- paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
if not os.path.exists(instance_data_fn):
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- instance_data_fn, redacted_data_fn
+ "Missing root-readable %s. Using redacted %s instead.",
+ instance_data_fn,
+ redacted_data_fn,
)
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
if not os.path.exists(instance_data_fn):
- LOG.error('Missing instance-data.json file: %s', instance_data_fn)
+ LOG.error("Missing instance-data.json file: %s", instance_data_fn)
return 1
try:
with open(args.user_data) as stream:
user_data = stream.read()
except IOError:
- LOG.error('Missing user-data file: %s', args.user_data)
+ LOG.error("Missing user-data file: %s", args.user_data)
return 1
try:
rendered_payload = render_jinja_payload_from_file(
- payload=user_data, payload_fn=args.user_data,
+ payload=user_data,
+ payload_fn=args.user_data,
instance_data_file=instance_data_fn,
- debug=True if args.debug else False)
+ debug=True if args.debug else False,
+ )
except RuntimeError as e:
- LOG.error('Cannot render from instance data: %s', str(e))
+ LOG.error("Cannot render from instance data: %s", str(e))
return 1
if not rendered_payload:
- LOG.error('Unable to render user-data file: %s', args.user_data)
+ LOG.error("Unable to render user-data file: %s", args.user_data)
return 1
sys.stdout.write(rendered_payload)
return 0
@@ -89,10 +105,10 @@ def handle_args(name, args):
def main():
args = get_parser().parse_args()
- return(handle_args(NAME, args))
+ return handle_args(NAME, args)
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
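
render_jinja_payload_from_file() takes the template text, its filename (used in messages), and the instance-data path, per the signature visible in this hunk. A hedged usage sketch; the "## template: jinja" header marks user-data as a Jinja template:

    from cloudinit.handlers.jinja_template import render_jinja_payload_from_file

    user_data = (
        "## template: jinja\n"
        "#cloud-config\n"
        "hostname: {{ v1.cloud_name }}-host\n"
    )
    rendered = render_jinja_payload_from_file(
        payload=user_data,
        payload_fn="user-data.txt",
        instance_data_file="/run/cloud-init/instance-data.json",
        debug=False,
    )
    print(rendered)  # falsy when rendering failed
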
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 63186d34..e67edbc3 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -19,6 +19,7 @@ import time
import traceback
from cloudinit import patcher
+
patcher.patch_logging()
from cloudinit import log as logging
@@ -34,8 +35,7 @@ from cloudinit import warnings
from cloudinit import reporting
from cloudinit.reporting import events
-from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
- CLOUD_CONFIG)
+from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG
from cloudinit import atomic_helper
@@ -44,8 +44,10 @@ from cloudinit import dhclient_hook
# Welcome message template
-WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at "
- "{timestamp}. Up {uptime} seconds.")
+WELCOME_MSG_TPL = (
+ "Cloud-init v. {version} running '{action}' at "
+ "{timestamp}. Up {uptime} seconds."
+)
# Module section template
MOD_SECTION_TPL = "cloud_%s_modules"
@@ -53,9 +55,9 @@ MOD_SECTION_TPL = "cloud_%s_modules"
# Frequency shortname to full name
# (so users don't have to remember the full name...)
FREQ_SHORT_NAMES = {
- 'instance': PER_INSTANCE,
- 'always': PER_ALWAYS,
- 'once': PER_ONCE,
+ "instance": PER_INSTANCE,
+ "always": PER_ALWAYS,
+ "once": PER_ONCE,
}
LOG = logging.getLogger()
@@ -63,21 +65,20 @@ LOG = logging.getLogger()
# Used for when a logger may not be active
# and we still want to print exceptions...
-def print_exc(msg=''):
+def print_exc(msg=""):
if msg:
sys.stderr.write("%s\n" % (msg))
- sys.stderr.write('-' * 60)
+ sys.stderr.write("-" * 60)
sys.stderr.write("\n")
traceback.print_exc(file=sys.stderr)
- sys.stderr.write('-' * 60)
+ sys.stderr.write("-" * 60)
sys.stderr.write("\n")
def welcome(action, msg=None):
if not msg:
msg = welcome_format(action)
- util.multi_log("%s\n" % (msg),
- console=False, stderr=True, log=LOG)
+ util.multi_log("%s\n" % (msg), console=False, stderr=True, log=LOG)
return msg
@@ -86,7 +87,8 @@ def welcome_format(action):
version=version.version_string(),
uptime=util.uptime(),
timestamp=util.time_rfc2822(),
- action=action)
+ action=action,
+ )
def extract_fns(args):
@@ -107,29 +109,31 @@ def run_module_section(mods, action_name, section):
(which_ran, failures) = mods.run_section(full_section_name)
total_attempted = len(which_ran) + len(failures)
if total_attempted == 0:
- msg = ("No '%s' modules to run"
- " under section '%s'") % (action_name, full_section_name)
+ msg = "No '%s' modules to run under section '%s'" % (
+ action_name,
+ full_section_name,
+ )
sys.stderr.write("%s\n" % (msg))
LOG.debug(msg)
return []
else:
- LOG.debug("Ran %s modules with %s failures",
- len(which_ran), len(failures))
+ LOG.debug(
+ "Ran %s modules with %s failures", len(which_ran), len(failures)
+ )
return failures
def apply_reporting_cfg(cfg):
- if cfg.get('reporting'):
- reporting.update_configuration(cfg.get('reporting'))
+ if cfg.get("reporting"):
+ reporting.update_configuration(cfg.get("reporting"))
-def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')):
+def parse_cmdline_url(cmdline, names=("cloud-config-url", "url")):
data = util.keyval_str_to_dict(cmdline)
for key in names:
if key in data:
return key, data[key]
- raise KeyError("No keys (%s) found in string '%s'" %
- (cmdline, names))
+ raise KeyError("No keys (%s) found in string '%s'" % (cmdline, names))
def attempt_cmdline_url(path, network=True, cmdline=None):
@@ -163,51 +167,60 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
if path_is_local and os.path.exists(path):
if network:
- m = ("file '%s' existed, possibly from local stage download"
- " of command line url '%s'. Not re-writing." % (path, url))
+ m = (
+ "file '%s' existed, possibly from local stage download"
+ " of command line url '%s'. Not re-writing." % (path, url)
+ )
level = logging.INFO
if path_is_local:
level = logging.DEBUG
else:
- m = ("file '%s' existed, possibly from previous boot download"
- " of command line url '%s'. Not re-writing." % (path, url))
+ m = (
+ "file '%s' existed, possibly from previous boot download"
+ " of command line url '%s'. Not re-writing." % (path, url)
+ )
level = logging.WARN
return (level, m)
- kwargs = {'url': url, 'timeout': 10, 'retries': 2}
+ kwargs = {"url": url, "timeout": 10, "retries": 2}
if network or path_is_local:
level = logging.WARN
- kwargs['sec_between'] = 1
+ kwargs["sec_between"] = 1
else:
level = logging.DEBUG
- kwargs['sec_between'] = .1
+ kwargs["sec_between"] = 0.1
data = None
- header = b'#cloud-config'
+ header = b"#cloud-config"
try:
resp = url_helper.read_file_or_url(**kwargs)
if resp.ok():
data = resp.contents
if not resp.contents.startswith(header):
- if cmdline_name == 'cloud-config-url':
+ if cmdline_name == "cloud-config-url":
level = logging.WARN
else:
level = logging.INFO
return (
level,
- "contents of '%s' did not start with %s" % (url, header))
+ "contents of '%s' did not start with %s" % (url, header),
+ )
else:
- return (level,
- "url '%s' returned code %s. Ignoring." % (url, resp.code))
+ return (
+ level,
+ "url '%s' returned code %s. Ignoring." % (url, resp.code),
+ )
except url_helper.UrlError as e:
return (level, "retrieving url '%s' failed: %s" % (url, e))
util.write_file(path, data, mode=0o600)
- return (logging.INFO,
- "wrote cloud-config data from %s='%s' to %s" %
- (cmdline_name, url, path))
+ return (
+ logging.INFO,
+ "wrote cloud-config data from %s='%s' to %s"
+ % (cmdline_name, url, path),
+ )
def purge_cache_on_python_version_change(init):
@@ -216,31 +229,32 @@ def purge_cache_on_python_version_change(init):
There could be changes not represented in our cache (obj.pkl) after we
upgrade to a new version of python, so at that point clear the cache
"""
- current_python_version = '%d.%d' % (
- sys.version_info.major, sys.version_info.minor
+ current_python_version = "%d.%d" % (
+ sys.version_info.major,
+ sys.version_info.minor,
)
python_version_path = os.path.join(
- init.paths.get_cpath('data'), 'python-version'
+ init.paths.get_cpath("data"), "python-version"
)
if os.path.exists(python_version_path):
cached_python_version = open(python_version_path).read()
# The Python version has changed out from under us, anything that was
# pickled previously is likely useless due to API changes.
if cached_python_version != current_python_version:
- LOG.debug('Python version change detected. Purging cache')
+ LOG.debug("Python version change detected. Purging cache")
init.purge_cache(True)
util.write_file(python_version_path, current_python_version)
else:
- if os.path.exists(init.paths.get_ipath_cur('obj_pkl')):
+ if os.path.exists(init.paths.get_ipath_cur("obj_pkl")):
LOG.info(
- 'Writing python-version file. '
- 'Cache compatibility status is currently unknown.'
+ "Writing python-version file. "
+ "Cache compatibility status is currently unknown."
)
util.write_file(python_version_path, current_python_version)
def _should_bring_up_interfaces(init, args):
- if util.get_cfg_option_bool(init.cfg, 'disable_network_activation'):
+ if util.get_cfg_option_bool(init.cfg, "disable_network_activation"):
return False
return not args.local
@@ -250,10 +264,14 @@ def main_init(name, args):
if args.local:
deps = [sources.DEP_FILESYSTEM]
- early_logs = [attempt_cmdline_url(
- path=os.path.join("%s.d" % CLOUD_CONFIG,
- "91_kernel_cmdline_url.cfg"),
- network=not args.local)]
+ early_logs = [
+ attempt_cmdline_url(
+ path=os.path.join(
+ "%s.d" % CLOUD_CONFIG, "91_kernel_cmdline_url.cfg"
+ ),
+ network=not args.local,
+ )
+ ]
# Cloud-init 'init' stage is broken up into the following sub-stages
# 1. Ensure that the init object fetches its config without errors
@@ -289,8 +307,9 @@ def main_init(name, args):
early_logs.append((logging.WARN, msg))
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(init.cfg)
apply_reporting_cfg(init.cfg)
@@ -317,9 +336,11 @@ def main_init(name, args):
if mode == sources.DSMODE_NETWORK:
existing = "trust"
sys.stderr.write("%s\n" % (netinfo.debug_info()))
- LOG.debug(("Checking to see if files that we need already"
- " exist from a previous run that would allow us"
- " to stop early."))
+ LOG.debug(
+ "Checking to see if files that we need already"
+ " exist from a previous run that would allow us"
+ " to stop early."
+ )
# no-net is written by upstart cloud-init-nonet when network failed
# to come up
stop_files = [
@@ -331,15 +352,18 @@ def main_init(name, args):
existing_files.append(fn)
if existing_files:
- LOG.debug("[%s] Exiting. stop file %s existed",
- mode, existing_files)
+ LOG.debug(
+ "[%s] Exiting. stop file %s existed", mode, existing_files
+ )
return (None, [])
else:
- LOG.debug("Execution continuing, no previous run detected that"
- " would allow us to stop early.")
+ LOG.debug(
+ "Execution continuing, no previous run detected that"
+ " would allow us to stop early."
+ )
else:
existing = "check"
- mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False)
+ mcfg = util.get_cfg_option_bool(init.cfg, "manual_cache_clean", False)
if mcfg:
LOG.debug("manual cache clean set from config")
existing = "trust"
@@ -360,8 +384,11 @@ def main_init(name, args):
# if in network mode, and the datasource is local
# then work was done at that stage.
if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s in local mode",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] Exiting. datasource %s in local mode",
+ mode,
+ init.datasource,
+ )
return (None, [])
except sources.DataSourceNotFoundException:
# In the case of 'cloud-init init' without '--local' it is a bit
@@ -371,8 +398,9 @@ def main_init(name, args):
if mode == sources.DSMODE_LOCAL:
LOG.debug("No local datasource found")
else:
- util.logexc(LOG, ("No instance datasource found!"
- " Likely bad things to come!"))
+ util.logexc(
+ LOG, "No instance datasource found! Likely bad things to come!"
+ )
if not args.force:
init.apply_network_config(bring_up=bring_up_interfaces)
LOG.debug("[%s] Exiting without datasource", mode)
@@ -381,46 +409,60 @@ def main_init(name, args):
else:
return (None, ["No instance datasource found."])
else:
- LOG.debug("[%s] barreling on in force mode without datasource",
- mode)
+ LOG.debug(
+ "[%s] barreling on in force mode without datasource", mode
+ )
_maybe_persist_instance_data(init)
# Stage 6
iid = init.instancify()
- LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
- mode, name, iid, init.is_new_instance())
+ LOG.debug(
+ "[%s] %s will now be targeting instance id: %s. new=%s",
+ mode,
+ name,
+ iid,
+ init.is_new_instance(),
+ )
if mode == sources.DSMODE_LOCAL:
# Before network comes up, set any configured hostname to allow
# dhcp clients to advertize this hostname to any DDNS services
# LP: #1746455.
- _maybe_set_hostname(init, stage='local', retry_stage='network')
+ _maybe_set_hostname(init, stage="local", retry_stage="network")
init.apply_network_config(bring_up=bring_up_interfaces)
if mode == sources.DSMODE_LOCAL:
if init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s not in local mode.",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] Exiting. datasource %s not in local mode.",
+ mode,
+ init.datasource,
+ )
return (init.datasource, [])
else:
- LOG.debug("[%s] %s is in local mode, will apply init modules now.",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] %s is in local mode, will apply init modules now.",
+ mode,
+ init.datasource,
+ )
# Give the datasource a chance to use network resources.
# This is used on Azure to communicate with the fabric over network.
init.setup_datasource()
# update fully realizes user-data (pulling in #include if necessary)
init.update()
- _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config')
+ _maybe_set_hostname(init, stage="init-net", retry_stage="modules:config")
# Stage 7
try:
# Attempt to consume the data per instance.
# This may run user-data handlers and/or perform
# url downloads and such as needed.
- (ran, _results) = init.cloudify().run('consume_data',
- init.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ (ran, _results) = init.cloudify().run(
+ "consume_data",
+ init.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
if not ran:
# Just consume anything that is set to run per-always
# if nothing ran in the per-instance code
@@ -442,8 +484,7 @@ def main_init(name, args):
errfmt_orig = errfmt
(outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
if outfmt_orig != outfmt or errfmt_orig != errfmt:
- LOG.warning("Stdout, stderr changing to (%s, %s)",
- outfmt, errfmt)
+ LOG.warning("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
(outfmt, errfmt) = util.fixup_output(mods.cfg, name)
except Exception:
util.logexc(LOG, "Failed to re-adjust output redirection!")
@@ -459,11 +500,11 @@ def main_init(name, args):
def di_report_warn(datasource, cfg):
- if 'di_report' not in cfg:
+ if "di_report" not in cfg:
LOG.debug("no di_report found in config.")
return
- dicfg = cfg['di_report']
+ dicfg = cfg["di_report"]
if dicfg is None:
# ds-identify may write 'di_report:\n #comment\n'
# which reads as {'di_report': None}
@@ -474,7 +515,7 @@ def di_report_warn(datasource, cfg):
LOG.warning("di_report config not a dictionary: %s", dicfg)
return
- dslist = dicfg.get('datasource_list')
+ dslist = dicfg.get("datasource_list")
if dslist is None:
LOG.warning("no 'datasource_list' found in di_report.")
return
@@ -486,18 +527,26 @@ def di_report_warn(datasource, cfg):
# where Name is the thing that shows up in datasource_list.
modname = datasource.__module__.rpartition(".")[2]
if modname.startswith(sources.DS_PREFIX):
- modname = modname[len(sources.DS_PREFIX):]
+ modname = modname[len(sources.DS_PREFIX) :]
else:
- LOG.warning("Datasource '%s' came from unexpected module '%s'.",
- datasource, modname)
+ LOG.warning(
+ "Datasource '%s' came from unexpected module '%s'.",
+ datasource,
+ modname,
+ )
if modname in dslist:
- LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s",
- datasource, modname, dslist)
+ LOG.debug(
+ "used datasource '%s' from '%s' was in di_report's list: %s",
+ datasource,
+ modname,
+ dslist,
+ )
return
- warnings.show_warning('dsid_missing_source', cfg,
- source=modname, dslist=str(dslist))
+ warnings.show_warning(
+ "dsid_missing_source", cfg, source=modname, dslist=str(dslist)
+ )
def main_modules(action_name, args):
@@ -521,8 +570,10 @@ def main_modules(action_name, args):
init.fetch(existing="trust")
except sources.DataSourceNotFoundException:
# There was no datasource found, theres nothing to do
- msg = ('Can not apply stage %s, no datasource found! Likely bad '
- 'things to come!' % name)
+ msg = (
+ "Can not apply stage %s, no datasource found! Likely bad "
+ "things to come!" % name
+ )
util.logexc(LOG, msg)
print_exc(msg)
if not args.force:
@@ -539,8 +590,9 @@ def main_modules(action_name, args):
util.logexc(LOG, "Failed to setup output redirection!")
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(mods.cfg)
apply_reporting_cfg(init.cfg)
@@ -573,10 +625,12 @@ def main_single(name, args):
# There was no datasource found,
# that might be bad (or ok) depending on
# the module being ran (so continue on)
- util.logexc(LOG, ("Failed to fetch your datasource,"
- " likely bad things to come!"))
- print_exc(("Failed to fetch your datasource,"
- " likely bad things to come!"))
+ util.logexc(
+ LOG, "Failed to fetch your datasource, likely bad things to come!"
+ )
+ print_exc(
+ "Failed to fetch your datasource, likely bad things to come!"
+ )
if not args.force:
return 1
_maybe_persist_instance_data(init)
@@ -598,8 +652,9 @@ def main_single(name, args):
util.logexc(LOG, "Failed to setup output redirection!")
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(mods.cfg)
apply_reporting_cfg(init.cfg)
@@ -608,9 +663,7 @@ def main_single(name, args):
welcome(name, msg=w_msg)
# Stage 5
- (which_ran, failures) = mods.run_single(mod_name,
- mod_args,
- mod_freq)
+ (which_ran, failures) = mods.run_single(mod_name, mod_args, mod_freq)
if failures:
LOG.warning("Ran %s but it failed!", mod_name)
return 1
@@ -633,7 +686,12 @@ def status_wrapper(name, args, data_d=None, link_d=None):
result_path = os.path.join(data_d, "result.json")
result_link = os.path.join(link_d, "result.json")
- util.ensure_dirs((data_d, link_d,))
+ util.ensure_dirs(
+ (
+ data_d,
+ link_d,
+ )
+ )
(_name, functor) = args.action
@@ -647,14 +705,20 @@ def status_wrapper(name, args, data_d=None, link_d=None):
else:
raise ValueError("unknown name: %s" % name)
- modes = ('init', 'init-local', 'modules-init', 'modules-config',
- 'modules-final')
+ modes = (
+ "init",
+ "init-local",
+ "modules-init",
+ "modules-config",
+ "modules-final",
+ )
if mode not in modes:
raise ValueError(
- "Invalid cloud init mode specified '{0}'".format(mode))
+ "Invalid cloud init mode specified '{0}'".format(mode)
+ )
status = None
- if mode == 'init-local':
+ if mode == "init-local":
for f in (status_link, result_link, status_path, result_path):
util.del_file(f)
else:
@@ -664,45 +728,46 @@ def status_wrapper(name, args, data_d=None, link_d=None):
pass
nullstatus = {
- 'errors': [],
- 'start': None,
- 'finished': None,
+ "errors": [],
+ "start": None,
+ "finished": None,
}
if status is None:
- status = {'v1': {}}
- status['v1']['datasource'] = None
+ status = {"v1": {}}
+ status["v1"]["datasource"] = None
for m in modes:
- if m not in status['v1']:
- status['v1'][m] = nullstatus.copy()
+ if m not in status["v1"]:
+ status["v1"][m] = nullstatus.copy()
- v1 = status['v1']
- v1['stage'] = mode
- v1[mode]['start'] = time.time()
+ v1 = status["v1"]
+ v1["stage"] = mode
+ v1[mode]["start"] = time.time()
atomic_helper.write_json(status_path, status)
- util.sym_link(os.path.relpath(status_path, link_d), status_link,
- force=True)
+ util.sym_link(
+ os.path.relpath(status_path, link_d), status_link, force=True
+ )
try:
ret = functor(name, args)
- if mode in ('init', 'init-local'):
+ if mode in ("init", "init-local"):
(datasource, errors) = ret
if datasource is not None:
- v1['datasource'] = str(datasource)
+ v1["datasource"] = str(datasource)
else:
errors = ret
- v1[mode]['errors'] = [str(e) for e in errors]
+ v1[mode]["errors"] = [str(e) for e in errors]
except Exception as e:
util.logexc(LOG, "failed stage %s", mode)
print_exc("failed run of stage %s" % mode)
- v1[mode]['errors'] = [str(e)]
+ v1[mode]["errors"] = [str(e)]
- v1[mode]['finished'] = time.time()
- v1['stage'] = None
+ v1[mode]["finished"] = time.time()
+ v1["stage"] = None
atomic_helper.write_json(status_path, status)
@@ -710,23 +775,26 @@ def status_wrapper(name, args, data_d=None, link_d=None):
# write the 'finished' file
errors = []
for m in modes:
- if v1[m]['errors']:
- errors.extend(v1[m].get('errors', []))
+ if v1[m]["errors"]:
+ errors.extend(v1[m].get("errors", []))
atomic_helper.write_json(
- result_path, {'v1': {'datasource': v1['datasource'],
- 'errors': errors}})
- util.sym_link(os.path.relpath(result_path, link_d), result_link,
- force=True)
+ result_path,
+ {"v1": {"datasource": v1["datasource"], "errors": errors}},
+ )
+ util.sym_link(
+ os.path.relpath(result_path, link_d), result_link, force=True
+ )
- return len(v1[mode]['errors'])
+ return len(v1[mode]["errors"])
def _maybe_persist_instance_data(init):
"""Write instance-data.json file if absent and datasource is restored."""
if init.ds_restored:
instance_data_file = os.path.join(
- init.paths.run_dir, sources.INSTANCE_JSON_FILE)
+ init.paths.run_dir, sources.INSTANCE_JSON_FILE
+ )
if not os.path.exists(instance_data_file):
init.datasource.persist_instance_data()
@@ -739,18 +807,23 @@ def _maybe_set_hostname(init, stage, retry_stage):
"""
cloud = init.cloudify()
(hostname, _fqdn) = util.get_hostname_fqdn(
- init.cfg, cloud, metadata_only=True)
+ init.cfg, cloud, metadata_only=True
+ )
if hostname: # meta-data or user-data hostname content
try:
- cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None)
+ cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None)
except cc_set_hostname.SetHostnameError as e:
LOG.debug(
- 'Failed setting hostname in %s stage. Will'
- ' retry in %s stage. Error: %s.', stage, retry_stage, str(e))
+ "Failed setting hostname in %s stage. Will"
+ " retry in %s stage. Error: %s.",
+ stage,
+ retry_stage,
+ str(e),
+ )
def main_features(name, args):
- sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n')
+ sys.stdout.write("\n".join(sorted(version.FEATURES)) + "\n")
def main(sysv_args=None):
@@ -760,129 +833,182 @@ def main(sysv_args=None):
sysv_args = sysv_args[1:]
# Top level args
- parser.add_argument('--version', '-v', action='version',
- version='%(prog)s ' + (version.version_string()))
- parser.add_argument('--file', '-f', action='append',
- dest='files',
- help=('additional yaml configuration'
- ' files to use'),
- type=argparse.FileType('rb'))
- parser.add_argument('--debug', '-d', action='store_true',
- help=('show additional pre-action'
- ' logging (default: %(default)s)'),
- default=False)
- parser.add_argument('--force', action='store_true',
- help=('force running even if no datasource is'
- ' found (use at your own risk)'),
- dest='force',
- default=False)
+ parser.add_argument(
+ "--version",
+ "-v",
+ action="version",
+ version="%(prog)s " + (version.version_string()),
+ )
+ parser.add_argument(
+ "--file",
+ "-f",
+ action="append",
+ dest="files",
+ help="additional yaml configuration files to use",
+ type=argparse.FileType("rb"),
+ )
+ parser.add_argument(
+ "--debug",
+ "-d",
+ action="store_true",
+ help="show additional pre-action logging (default: %(default)s)",
+ default=False,
+ )
+ parser.add_argument(
+ "--force",
+ action="store_true",
+ help=(
+ "force running even if no datasource is"
+ " found (use at your own risk)"
+ ),
+ dest="force",
+ default=False,
+ )
parser.set_defaults(reporter=None)
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
# Each action and its sub-options (if any)
- parser_init = subparsers.add_parser('init',
- help=('initializes cloud-init and'
- ' performs initial modules'))
- parser_init.add_argument("--local", '-l', action='store_true',
- help="start in local mode (default: %(default)s)",
- default=False)
+ parser_init = subparsers.add_parser(
+ "init", help="initializes cloud-init and performs initial modules"
+ )
+ parser_init.add_argument(
+ "--local",
+ "-l",
+ action="store_true",
+ help="start in local mode (default: %(default)s)",
+ default=False,
+ )
# This is used so that we can know which action is selected +
# the functor to use to run this subcommand
- parser_init.set_defaults(action=('init', main_init))
+ parser_init.set_defaults(action=("init", main_init))
# These settings are used for the 'config' and 'final' stages
- parser_mod = subparsers.add_parser('modules',
- help=('activates modules using '
- 'a given configuration key'))
- parser_mod.add_argument("--mode", '-m', action='store',
- help=("module configuration name "
- "to use (default: %(default)s)"),
- default='config',
- choices=('init', 'config', 'final'))
- parser_mod.set_defaults(action=('modules', main_modules))
+ parser_mod = subparsers.add_parser(
+ "modules", help="activates modules using a given configuration key"
+ )
+ parser_mod.add_argument(
+ "--mode",
+ "-m",
+ action="store",
+ help="module configuration name to use (default: %(default)s)",
+ default="config",
+ choices=("init", "config", "final"),
+ )
+ parser_mod.set_defaults(action=("modules", main_modules))
# This subcommand allows you to run a single module
- parser_single = subparsers.add_parser('single',
- help=('run a single module '))
- parser_single.add_argument("--name", '-n', action="store",
- help="module name to run",
- required=True)
- parser_single.add_argument("--frequency", action="store",
- help=("frequency of the module"),
- required=False,
- choices=list(FREQ_SHORT_NAMES.keys()))
- parser_single.add_argument("--report", action="store_true",
- help="enable reporting",
- required=False)
- parser_single.add_argument("module_args", nargs="*",
- metavar='argument',
- help=('any additional arguments to'
- ' pass to this module'))
- parser_single.set_defaults(action=('single', main_single))
+ parser_single = subparsers.add_parser(
+ "single", help="run a single module "
+ )
+ parser_single.add_argument(
+ "--name",
+ "-n",
+ action="store",
+ help="module name to run",
+ required=True,
+ )
+ parser_single.add_argument(
+ "--frequency",
+ action="store",
+ help="frequency of the module",
+ required=False,
+ choices=list(FREQ_SHORT_NAMES.keys()),
+ )
+ parser_single.add_argument(
+ "--report",
+ action="store_true",
+ help="enable reporting",
+ required=False,
+ )
+ parser_single.add_argument(
+ "module_args",
+ nargs="*",
+ metavar="argument",
+ help="any additional arguments to pass to this module",
+ )
+ parser_single.set_defaults(action=("single", main_single))
parser_query = subparsers.add_parser(
- 'query',
- help='Query standardized instance metadata from the command line.')
+ "query",
+ help="Query standardized instance metadata from the command line.",
+ )
parser_dhclient = subparsers.add_parser(
- dhclient_hook.NAME, help=dhclient_hook.__doc__)
+ dhclient_hook.NAME, help=dhclient_hook.__doc__
+ )
dhclient_hook.get_parser(parser_dhclient)
- parser_features = subparsers.add_parser('features',
- help=('list defined features'))
- parser_features.set_defaults(action=('features', main_features))
+ parser_features = subparsers.add_parser(
+ "features", help="list defined features"
+ )
+ parser_features.set_defaults(action=("features", main_features))
parser_analyze = subparsers.add_parser(
- 'analyze', help='Devel tool: Analyze cloud-init logs and data')
+ "analyze", help="Devel tool: Analyze cloud-init logs and data"
+ )
- parser_devel = subparsers.add_parser(
- 'devel', help='Run development tools')
+ parser_devel = subparsers.add_parser("devel", help="Run development tools")
parser_collect_logs = subparsers.add_parser(
- 'collect-logs', help='Collect and tar all cloud-init debug info')
+ "collect-logs", help="Collect and tar all cloud-init debug info"
+ )
parser_clean = subparsers.add_parser(
- 'clean', help='Remove logs and artifacts so cloud-init can re-run.')
+ "clean", help="Remove logs and artifacts so cloud-init can re-run."
+ )
parser_status = subparsers.add_parser(
- 'status', help='Report cloud-init status or wait on completion.')
+ "status", help="Report cloud-init status or wait on completion."
+ )
if sysv_args:
# Only load subparsers if subcommand is specified to avoid load cost
- if sysv_args[0] == 'analyze':
+ if sysv_args[0] == "analyze":
from cloudinit.analyze.__main__ import get_parser as analyze_parser
+
# Construct analyze subcommand parser
analyze_parser(parser_analyze)
- elif sysv_args[0] == 'devel':
+ elif sysv_args[0] == "devel":
from cloudinit.cmd.devel.parser import get_parser as devel_parser
+
# Construct devel subcommand parser
devel_parser(parser_devel)
- elif sysv_args[0] == 'collect-logs':
+ elif sysv_args[0] == "collect-logs":
from cloudinit.cmd.devel.logs import (
- get_parser as logs_parser, handle_collect_logs_args)
+ get_parser as logs_parser,
+ handle_collect_logs_args,
+ )
+
logs_parser(parser_collect_logs)
parser_collect_logs.set_defaults(
- action=('collect-logs', handle_collect_logs_args))
- elif sysv_args[0] == 'clean':
+ action=("collect-logs", handle_collect_logs_args)
+ )
+ elif sysv_args[0] == "clean":
from cloudinit.cmd.clean import (
- get_parser as clean_parser, handle_clean_args)
+ get_parser as clean_parser,
+ handle_clean_args,
+ )
+
clean_parser(parser_clean)
- parser_clean.set_defaults(
- action=('clean', handle_clean_args))
- elif sysv_args[0] == 'query':
+ parser_clean.set_defaults(action=("clean", handle_clean_args))
+ elif sysv_args[0] == "query":
from cloudinit.cmd.query import (
- get_parser as query_parser, handle_args as handle_query_args)
+ get_parser as query_parser,
+ handle_args as handle_query_args,
+ )
+
query_parser(parser_query)
- parser_query.set_defaults(
- action=('render', handle_query_args))
- elif sysv_args[0] == 'status':
+ parser_query.set_defaults(action=("render", handle_query_args))
+ elif sysv_args[0] == "status":
from cloudinit.cmd.status import (
- get_parser as status_parser, handle_status_args)
+ get_parser as status_parser,
+ handle_status_args,
+ )
+
status_parser(parser_status)
- parser_status.set_defaults(
- action=('status', handle_status_args))
+ parser_status.set_defaults(action=("status", handle_status_args))
args = parser.parse_args(args=sysv_args)
@@ -906,14 +1032,20 @@ def main(sysv_args=None):
if args.local:
rname, rdesc = ("init-local", "searching for local datasources")
else:
- rname, rdesc = ("init-network",
- "searching for network datasources")
+ rname, rdesc = (
+ "init-network",
+ "searching for network datasources",
+ )
elif name == "modules":
- rname, rdesc = ("modules-%s" % args.mode,
- "running modules for %s" % args.mode)
+ rname, rdesc = (
+ "modules-%s" % args.mode,
+ "running modules for %s" % args.mode,
+ )
elif name == "single":
- rname, rdesc = ("single/%s" % args.name,
- "running single module %s" % args.name)
+ rname, rdesc = (
+ "single/%s" % args.name,
+ "running single module %s" % args.name,
+ )
report_on = args.report
else:
rname = name
@@ -921,19 +1053,24 @@ def main(sysv_args=None):
report_on = False
args.reporter = events.ReportEventStack(
- rname, rdesc, reporting_enabled=report_on)
+ rname, rdesc, reporting_enabled=report_on
+ )
with args.reporter:
retval = util.log_time(
- logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
- get_uptime=True, func=functor, args=(name, args))
+ logfunc=LOG.debug,
+ msg="cloud-init mode '%s'" % name,
+ get_uptime=True,
+ func=functor,
+ args=(name, args),
+ )
reporting.flush_events()
return retval
-if __name__ == '__main__':
- if 'TZ' not in os.environ:
- os.environ['TZ'] = ":/etc/localtime"
+if __name__ == "__main__":
+ if "TZ" not in os.environ:
+ os.environ["TZ"] = ":/etc/localtime"
return_value = main(sys.argv)
if return_value:
sys.exit(return_value)
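
Note on the status_wrapper hunks above: they only touch formatting, so the on-disk contract is unchanged. status.json still carries a "v1" object with a "stage" string, a "datasource" string, and one dict per mode holding "start", "finished" and "errors". As a rough sketch (not part of the commit; the path and key names are taken from the diff), a consumer could read that document back like this:

import json


def summarize_status(path="/run/cloud-init/status.json"):
    """Collect the fields status_wrapper writes, assuming the v1 layout."""
    with open(path) as f:
        v1 = json.load(f)["v1"]
    errors = []
    for value in v1.values():
        if isinstance(value, dict):  # per-mode entries; skip plain strings
            errors.extend(value.get("errors") or [])
    return {
        "stage": v1.get("stage"),  # set back to None once the stage ends
        "datasource": v1.get("datasource"),
        "errors": errors,
    }
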
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index e53cd855..46f17699 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -14,22 +14,24 @@ output; if this fails, they are treated as binary.
"""
import argparse
-from errno import EACCES
import os
import sys
+from errno import EACCES
+from cloudinit import log, util
+from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
from cloudinit.handlers.jinja_template import (
convert_jinja_instance_data,
get_jinja_variable_alias,
- render_jinja_payload
+ render_jinja_payload,
)
-from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
-from cloudinit import log
from cloudinit.sources import (
- INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE)
-from cloudinit import util
+ INSTANCE_JSON_FILE,
+ INSTANCE_JSON_SENSITIVE_FILE,
+ REDACT_SENSITIVE_VALUE,
+)
-NAME = 'query'
+NAME = "query"
LOG = log.getLogger(NAME)
@@ -43,41 +45,79 @@ def get_parser(parser=None):
@returns: ArgumentParser with proper argument configuration.
"""
if not parser:
- parser = argparse.ArgumentParser(
- prog=NAME, description=__doc__)
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
- '-d', '--debug', action='store_true', default=False,
- help='Add verbose messages during template render')
+ "-d",
+ "--debug",
+ action="store_true",
+ default=False,
+ help="Add verbose messages during template render",
+ )
parser.add_argument(
- '-i', '--instance-data', type=str,
- help=('Path to instance-data.json file. Default is /run/cloud-init/%s'
- % INSTANCE_JSON_FILE))
+ "-i",
+ "--instance-data",
+ type=str,
+ help="Path to instance-data.json file. Default is /run/cloud-init/%s"
+ % INSTANCE_JSON_FILE,
+ )
parser.add_argument(
- '-l', '--list-keys', action='store_true', default=False,
- help=('List query keys available at the provided instance-data'
- ' <varname>.'))
+ "-l",
+ "--list-keys",
+ action="store_true",
+ default=False,
+ help=(
+ "List query keys available at the provided instance-data"
+ " <varname>."
+ ),
+ )
parser.add_argument(
- '-u', '--user-data', type=str,
- help=('Path to user-data file. Default is'
- ' /var/lib/cloud/instance/user-data.txt'))
+ "-u",
+ "--user-data",
+ type=str,
+ help=(
+ "Path to user-data file. Default is"
+ " /var/lib/cloud/instance/user-data.txt"
+ ),
+ )
parser.add_argument(
- '-v', '--vendor-data', type=str,
- help=('Path to vendor-data file. Default is'
- ' /var/lib/cloud/instance/vendor-data.txt'))
+ "-v",
+ "--vendor-data",
+ type=str,
+ help=(
+ "Path to vendor-data file. Default is"
+ " /var/lib/cloud/instance/vendor-data.txt"
+ ),
+ )
parser.add_argument(
- 'varname', type=str, nargs='?',
- help=('A dot-delimited specific variable to query from'
- ' instance-data. For example: v1.local_hostname. If the'
- ' value is not JSON serializable, it will be base64-encoded and'
- ' will contain the prefix "ci-b64:". '))
+ "varname",
+ type=str,
+ nargs="?",
+ help=(
+ "A dot-delimited specific variable to query from"
+ " instance-data. For example: v1.local_hostname. If the"
+ " value is not JSON serializable, it will be base64-encoded and"
+ ' will contain the prefix "ci-b64:". '
+ ),
+ )
parser.add_argument(
- '-a', '--all', action='store_true', default=False, dest='dump_all',
- help='Dump all available instance-data')
+ "-a",
+ "--all",
+ action="store_true",
+ default=False,
+ dest="dump_all",
+ help="Dump all available instance-data",
+ )
parser.add_argument(
- '-f', '--format', type=str, dest='format',
- help=('Optionally specify a custom output format string. Any'
- ' instance-data variable can be specified between double-curly'
- ' braces. For example -f "{{ v2.cloud_name }}"'))
+ "-f",
+ "--format",
+ type=str,
+ dest="format",
+ help=(
+ "Optionally specify a custom output format string. Any"
+ " instance-data variable can be specified between double-curly"
+ ' braces. For example -f "{{ v2.cloud_name }}"'
+ ),
+ )
return parser
@@ -91,7 +131,7 @@ def load_userdata(ud_file_path):
"""
bdata = util.load_file(ud_file_path, decode=False)
try:
- return bdata.decode('utf-8')
+ return bdata.decode("utf-8")
except UnicodeDecodeError:
return util.decomp_gzip(bdata, quiet=False, decode=True)
@@ -118,13 +158,15 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict:
redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
if uid == 0:
sensitive_data_fn = os.path.join(
- paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
if os.path.exists(sensitive_data_fn):
instance_data_fn = sensitive_data_fn
else:
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- sensitive_data_fn, redacted_data_fn
+ "Missing root-readable %s. Using redacted %s instead.",
+ sensitive_data_fn,
+ redacted_data_fn,
)
instance_data_fn = redacted_data_fn
else:
@@ -132,11 +174,11 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict:
if user_data:
user_data_fn = user_data
else:
- user_data_fn = os.path.join(paths.instance_link, 'user-data.txt')
+ user_data_fn = os.path.join(paths.instance_link, "user-data.txt")
if vendor_data:
vendor_data_fn = vendor_data
else:
- vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt')
+ vendor_data_fn = os.path.join(paths.instance_link, "vendor-data.txt")
try:
instance_json = util.load_file(instance_data_fn)
@@ -144,24 +186,30 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict:
if e.errno == EACCES:
LOG.error("No read permission on '%s'. Try sudo", instance_data_fn)
else:
- LOG.error('Missing instance-data file: %s', instance_data_fn)
+ LOG.error("Missing instance-data file: %s", instance_data_fn)
raise
instance_data = util.load_json(instance_json)
if uid != 0:
- instance_data['userdata'] = (
- '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn))
- instance_data['vendordata'] = (
- '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn))
+ instance_data["userdata"] = "<%s> file:%s" % (
+ REDACT_SENSITIVE_VALUE,
+ user_data_fn,
+ )
+ instance_data["vendordata"] = "<%s> file:%s" % (
+ REDACT_SENSITIVE_VALUE,
+ vendor_data_fn,
+ )
else:
- instance_data['userdata'] = load_userdata(user_data_fn)
- instance_data['vendordata'] = load_userdata(vendor_data_fn)
+ instance_data["userdata"] = load_userdata(user_data_fn)
+ instance_data["vendordata"] = load_userdata(vendor_data_fn)
return instance_data
def _find_instance_data_leaf_by_varname_path(
- jinja_vars_without_aliases: dict, jinja_vars_with_aliases: dict,
- varname: str, list_keys: bool
+ jinja_vars_without_aliases: dict,
+ jinja_vars_with_aliases: dict,
+ varname: str,
+ list_keys: bool,
):
"""Return the value of the dot-delimited varname path in instance-data
@@ -174,7 +222,7 @@ def _find_instance_data_leaf_by_varname_path(
"""
walked_key_path = ""
response = jinja_vars_without_aliases
- for key_path_part in varname.split('.'):
+ for key_path_part in varname.split("."):
try:
# Walk key path using complete aliases dict, yet response
# should only contain jinja_without_aliases
@@ -205,8 +253,9 @@ def handle_args(name, args):
addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
if not any([args.list_keys, args.varname, args.format, args.dump_all]):
LOG.error(
- 'Expected one of the options: --all, --format,'
- ' --list-keys or varname')
+ "Expected one of the options: --all, --format,"
+ " --list-keys or varname"
+ )
get_parser().print_help()
return 1
try:
@@ -216,11 +265,13 @@ def handle_args(name, args):
except (IOError, OSError):
return 1
if args.format:
- payload = '## template: jinja\n{fmt}'.format(fmt=args.format)
+ payload = "## template: jinja\n{fmt}".format(fmt=args.format)
rendered_payload = render_jinja_payload(
- payload=payload, payload_fn='query commandline',
+ payload=payload,
+ payload_fn="query commandline",
instance_data=instance_data,
- debug=True if args.debug else False)
+ debug=True if args.debug else False,
+ )
if rendered_payload:
print(rendered_payload)
return 0
@@ -240,7 +291,7 @@ def handle_args(name, args):
jinja_vars_without_aliases=response,
jinja_vars_with_aliases=jinja_vars_with_aliases,
varname=args.varname,
- list_keys=args.list_keys
+ list_keys=args.list_keys,
)
except (KeyError, ValueError) as e:
LOG.error(e)
@@ -248,11 +299,10 @@ def handle_args(name, args):
if args.list_keys:
if not isinstance(response, dict):
LOG.error(
- "--list-keys provided but '%s' is not a dict",
- args.varname
+ "--list-keys provided but '%s' is not a dict", args.varname
)
return 1
- response = '\n'.join(sorted(response.keys()))
+ response = "\n".join(sorted(response.keys()))
if not isinstance(response, str):
response = util.json_dumps(response)
print(response)
@@ -265,7 +315,7 @@ def main():
sys.exit(handle_args(NAME, parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
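
One behavior worth noting in the load_userdata hunk above: decoding is attempted as UTF-8 first, and a payload that fails to decode is assumed to be gzip-compressed text. A self-contained sketch of that fallback using only the standard library (the real code goes through util.load_file and util.decomp_gzip; gzip.decompress here is a stand-in):

import gzip


def read_user_data(path):
    """Return user-data as text, un-gzipping binary payloads."""
    with open(path, "rb") as f:
        bdata = f.read()
    try:
        return bdata.decode("utf-8")
    except UnicodeDecodeError:
        # Mirrors load_userdata: non-UTF-8 payloads are treated as
        # gzip-compressed UTF-8 text.
        return gzip.decompress(bdata).decode("utf-8")
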
diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py
index ea79a85b..cff16c34 100644
--- a/cloudinit/cmd/status.py
+++ b/cloudinit/cmd/status.py
@@ -7,20 +7,20 @@
import argparse
import os
import sys
-from time import gmtime, strftime, sleep
+from time import gmtime, sleep, strftime
from cloudinit.distros import uses_systemd
from cloudinit.stages import Init
from cloudinit.util import get_cmdline, load_file, load_json
-CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled'
+CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled"
# customer-visible status messages
-STATUS_ENABLED_NOT_RUN = 'not run'
-STATUS_RUNNING = 'running'
-STATUS_DONE = 'done'
-STATUS_ERROR = 'error'
-STATUS_DISABLED = 'disabled'
+STATUS_ENABLED_NOT_RUN = "not run"
+STATUS_RUNNING = "running"
+STATUS_DONE = "done"
+STATUS_ERROR = "error"
+STATUS_DISABLED = "disabled"
def get_parser(parser=None):
@@ -34,15 +34,25 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='status',
- description='Report run status of cloud init')
+ prog="status", description="Report run status of cloud init"
+ )
parser.add_argument(
- '-l', '--long', action='store_true', default=False,
- help=('Report long format of statuses including run stage name and'
- ' error messages'))
+ "-l",
+ "--long",
+ action="store_true",
+ default=False,
+ help=(
+ "Report long format of statuses including run stage name and"
+ " error messages"
+ ),
+ )
parser.add_argument(
- '-w', '--wait', action='store_true', default=False,
- help='Block waiting on cloud-init to complete')
+ "-w",
+ "--wait",
+ action="store_true",
+ default=False,
+ help="Block waiting on cloud-init to complete",
+ )
return parser
@@ -55,18 +65,18 @@ def handle_status_args(name, args):
status, status_detail, time = _get_status_details(init.paths)
if args.wait:
while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING):
- sys.stdout.write('.')
+ sys.stdout.write(".")
sys.stdout.flush()
status, status_detail, time = _get_status_details(init.paths)
sleep(0.25)
- sys.stdout.write('\n')
+ sys.stdout.write("\n")
if args.long:
- print('status: {0}'.format(status))
+ print("status: {0}".format(status))
if time:
- print('time: {0}'.format(time))
- print('detail:\n{0}'.format(status_detail))
+ print("time: {0}".format(time))
+ print("detail:\n{0}".format(status_detail))
else:
- print('status: {0}'.format(status))
+ print("status: {0}".format(status))
return 1 if status == STATUS_ERROR else 0
@@ -81,20 +91,20 @@ def _is_cloudinit_disabled(disable_file, paths):
is_disabled = False
cmdline_parts = get_cmdline().split()
if not uses_systemd():
- reason = 'Cloud-init enabled on sysvinit'
- elif 'cloud-init=enabled' in cmdline_parts:
- reason = 'Cloud-init enabled by kernel command line cloud-init=enabled'
+ reason = "Cloud-init enabled on sysvinit"
+ elif "cloud-init=enabled" in cmdline_parts:
+ reason = "Cloud-init enabled by kernel command line cloud-init=enabled"
elif os.path.exists(disable_file):
is_disabled = True
- reason = 'Cloud-init disabled by {0}'.format(disable_file)
- elif 'cloud-init=disabled' in cmdline_parts:
+ reason = "Cloud-init disabled by {0}".format(disable_file)
+ elif "cloud-init=disabled" in cmdline_parts:
is_disabled = True
- reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled'
- elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')):
+ reason = "Cloud-init disabled by kernel parameter cloud-init=disabled"
+ elif not os.path.exists(os.path.join(paths.run_dir, "enabled")):
is_disabled = True
- reason = 'Cloud-init disabled by cloud-init-generator'
+ reason = "Cloud-init disabled by cloud-init-generator"
else:
- reason = 'Cloud-init enabled by systemd cloud-init-generator'
+ reason = "Cloud-init enabled by systemd cloud-init-generator"
return (is_disabled, reason)
@@ -106,34 +116,35 @@ def _get_status_details(paths):
Values are obtained from parsing paths.run_dir/status.json.
"""
status = STATUS_ENABLED_NOT_RUN
- status_detail = ''
+ status_detail = ""
status_v1 = {}
- status_file = os.path.join(paths.run_dir, 'status.json')
- result_file = os.path.join(paths.run_dir, 'result.json')
+ status_file = os.path.join(paths.run_dir, "status.json")
+ result_file = os.path.join(paths.run_dir, "result.json")
(is_disabled, reason) = _is_cloudinit_disabled(
- CLOUDINIT_DISABLED_FILE, paths)
+ CLOUDINIT_DISABLED_FILE, paths
+ )
if is_disabled:
status = STATUS_DISABLED
status_detail = reason
if os.path.exists(status_file):
if not os.path.exists(result_file):
status = STATUS_RUNNING
- status_v1 = load_json(load_file(status_file)).get('v1', {})
+ status_v1 = load_json(load_file(status_file)).get("v1", {})
errors = []
latest_event = 0
for key, value in sorted(status_v1.items()):
- if key == 'stage':
+ if key == "stage":
if value:
status = STATUS_RUNNING
- status_detail = 'Running in stage: {0}'.format(value)
- elif key == 'datasource':
+ status_detail = "Running in stage: {0}".format(value)
+ elif key == "datasource":
status_detail = value
elif isinstance(value, dict):
- errors.extend(value.get('errors', []))
- start = value.get('start') or 0
- finished = value.get('finished') or 0
+ errors.extend(value.get("errors", []))
+ start = value.get("start") or 0
+ finished = value.get("finished") or 0
if finished == 0 and start != 0:
status = STATUS_RUNNING
event_time = max(start, finished)
@@ -141,23 +152,23 @@ def _get_status_details(paths):
latest_event = event_time
if errors:
status = STATUS_ERROR
- status_detail = '\n'.join(errors)
+ status_detail = "\n".join(errors)
elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0:
status = STATUS_DONE
if latest_event:
- time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event))
+ time = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event))
else:
- time = ''
+ time = ""
return status, status_detail, time
def main():
"""Tool to report status of cloud-init."""
parser = get_parser()
- sys.exit(handle_status_args('status', parser.parse_args()))
+ sys.exit(handle_status_args("status", parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
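
The _get_status_details hunks above change only the quoting, so the decision order survives intact: a mode with a recorded start but no finish means cloud-init is still running, accumulated errors take precedence and force the error state, and otherwise any recorded event time marks the run as done. A condensed sketch of that ordering (classify is a hypothetical helper; the strings mirror the STATUS_* constants at the top of the file):

def classify(status_v1):
    """Reduce a parsed status.json v1 dict to a single status string."""
    status = "not run"
    errors = []
    latest_event = 0
    for key, value in sorted(status_v1.items()):
        if key == "stage" and value:
            status = "running"
        elif isinstance(value, dict):  # per-mode start/finished/errors
            errors.extend(value.get("errors", []))
            start = value.get("start") or 0
            finished = value.get("finished") or 0
            if finished == 0 and start != 0:
                status = "running"
            latest_event = max(latest_event, start, finished)
    if errors:
        return "error"  # errors win over everything else
    if status == "not run" and latest_event > 0:
        return "done"
    return status
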