Diffstat (limited to 'cloudinit/analyze')
-rw-r--r--  cloudinit/analyze/__main__.py  269
-rw-r--r--  cloudinit/analyze/dump.py       71
-rw-r--r--  cloudinit/analyze/show.py      192
3 files changed, 302 insertions, 230 deletions
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 99e5c203..36a5be78 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -5,62 +5,111 @@
import argparse
import re
import sys
+from datetime import datetime
from cloudinit.util import json_dumps
-from datetime import datetime
-from . import dump
-from . import show
+
+from . import dump, show
def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
- prog='cloudinit-analyze',
- description='Devel tool: Analyze cloud-init logs and data')
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ prog="cloudinit-analyze",
+ description="Devel tool: Analyze cloud-init logs and data",
+ )
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
parser_blame = subparsers.add_parser(
- 'blame', help='Print list of executed stages ordered by time to init')
+ "blame", help="Print list of executed stages ordered by time to init"
+ )
parser_blame.add_argument(
- '-i', '--infile', action='store', dest='infile',
- default='/var/log/cloud-init.log',
- help='specify where to read input.')
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input.",
+ )
parser_blame.add_argument(
- '-o', '--outfile', action='store', dest='outfile', default='-',
- help='specify where to write output. ')
- parser_blame.set_defaults(action=('blame', analyze_blame))
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output. ",
+ )
+ parser_blame.set_defaults(action=("blame", analyze_blame))
parser_show = subparsers.add_parser(
- 'show', help='Print list of in-order events during execution')
- parser_show.add_argument('-f', '--format', action='store',
- dest='print_format', default='%I%D @%Es +%ds',
- help='specify formatting of output.')
- parser_show.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input.')
- parser_show.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output.')
- parser_show.set_defaults(action=('show', analyze_show))
+ "show", help="Print list of in-order events during execution"
+ )
+ parser_show.add_argument(
+ "-f",
+ "--format",
+ action="store",
+ dest="print_format",
+ default="%I%D @%Es +%ds",
+ help="specify formatting of output.",
+ )
+ parser_show.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input.",
+ )
+ parser_show.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output.",
+ )
+ parser_show.set_defaults(action=("show", analyze_show))
parser_dump = subparsers.add_parser(
- 'dump', help='Dump cloud-init events in JSON format')
- parser_dump.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input. ')
- parser_dump.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output. ')
- parser_dump.set_defaults(action=('dump', analyze_dump))
+ "dump", help="Dump cloud-init events in JSON format"
+ )
+ parser_dump.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input. ",
+ )
+ parser_dump.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output. ",
+ )
+ parser_dump.set_defaults(action=("dump", analyze_dump))
parser_boot = subparsers.add_parser(
- 'boot', help='Print list of boot times for kernel and cloud-init')
- parser_boot.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input. ')
- parser_boot.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output.')
- parser_boot.set_defaults(action=('boot', analyze_boot))
+ "boot", help="Print list of boot times for kernel and cloud-init"
+ )
+ parser_boot.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input. ",
+ )
+ parser_boot.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output.",
+ )
+ parser_boot.set_defaults(action=("boot", analyze_boot))
return parser
@@ -78,61 +127,68 @@ def analyze_boot(name, args):
"""
infh, outfh = configure_io(args)
kernel_info = show.dist_check_timestamp()
- status_code, kernel_start, kernel_end, ci_sysd_start = \
- kernel_info
+ status_code, kernel_start, kernel_end, ci_sysd_start = kernel_info
kernel_start_timestamp = datetime.utcfromtimestamp(kernel_start)
kernel_end_timestamp = datetime.utcfromtimestamp(kernel_end)
ci_sysd_start_timestamp = datetime.utcfromtimestamp(ci_sysd_start)
try:
- last_init_local = \
- [e for e in _get_events(infh) if e['name'] == 'init-local' and
- 'starting search' in e['description']][-1]
- ci_start = datetime.utcfromtimestamp(last_init_local['timestamp'])
+ last_init_local = [
+ e
+ for e in _get_events(infh)
+ if e["name"] == "init-local"
+ and "starting search" in e["description"]
+ ][-1]
+ ci_start = datetime.utcfromtimestamp(last_init_local["timestamp"])
except IndexError:
- ci_start = 'Could not find init-local log-line in cloud-init.log'
+ ci_start = "Could not find init-local log-line in cloud-init.log"
status_code = show.FAIL_CODE
- FAILURE_MSG = 'Your Linux distro or container does not support this ' \
- 'functionality.\n' \
- 'You must be running a Kernel Telemetry supported ' \
- 'distro.\nPlease check ' \
- 'https://cloudinit.readthedocs.io/en/latest' \
- '/topics/analyze.html for more ' \
- 'information on supported distros.\n'
-
- SUCCESS_MSG = '-- Most Recent Boot Record --\n' \
- ' Kernel Started at: {k_s_t}\n' \
- ' Kernel ended boot at: {k_e_t}\n' \
- ' Kernel time to boot (seconds): {k_r}\n' \
- ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \
- ' Time between Kernel end boot and Cloud-init ' \
- 'activation (seconds): {bt_r}\n' \
- ' Cloud-init start: {ci_start}\n'
-
- CONTAINER_MSG = '-- Most Recent Container Boot Record --\n' \
- ' Container started at: {k_s_t}\n' \
- ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \
- ' Cloud-init start: {ci_start}\n' \
-
+ FAILURE_MSG = (
+ "Your Linux distro or container does not support this "
+ "functionality.\n"
+ "You must be running a Kernel Telemetry supported "
+ "distro.\nPlease check "
+ "https://cloudinit.readthedocs.io/en/latest"
+ "/topics/analyze.html for more "
+ "information on supported distros.\n"
+ )
+
+ SUCCESS_MSG = (
+ "-- Most Recent Boot Record --\n"
+ " Kernel Started at: {k_s_t}\n"
+ " Kernel ended boot at: {k_e_t}\n"
+ " Kernel time to boot (seconds): {k_r}\n"
+ " Cloud-init activated by systemd at: {ci_sysd_t}\n"
+ " Time between Kernel end boot and Cloud-init "
+ "activation (seconds): {bt_r}\n"
+ " Cloud-init start: {ci_start}\n"
+ )
+
+ CONTAINER_MSG = (
+ "-- Most Recent Container Boot Record --\n"
+ " Container started at: {k_s_t}\n"
+ " Cloud-init activated by systemd at: {ci_sysd_t}\n"
+ " Cloud-init start: {ci_start}\n"
+ )
status_map = {
show.FAIL_CODE: FAILURE_MSG,
show.CONTAINER_CODE: CONTAINER_MSG,
- show.SUCCESS_CODE: SUCCESS_MSG
+ show.SUCCESS_CODE: SUCCESS_MSG,
}
kernel_runtime = kernel_end - kernel_start
between_process_runtime = ci_sysd_start - kernel_end
kwargs = {
- 'k_s_t': kernel_start_timestamp,
- 'k_e_t': kernel_end_timestamp,
- 'k_r': kernel_runtime,
- 'bt_r': between_process_runtime,
- 'k_e': kernel_end,
- 'k_s': kernel_start,
- 'ci_sysd': ci_sysd_start,
- 'ci_sysd_t': ci_sysd_start_timestamp,
- 'ci_start': ci_start
+ "k_s_t": kernel_start_timestamp,
+ "k_e_t": kernel_end_timestamp,
+ "k_r": kernel_runtime,
+ "bt_r": between_process_runtime,
+ "k_e": kernel_end,
+ "k_s": kernel_start,
+ "ci_sysd": ci_sysd_start,
+ "ci_sysd_t": ci_sysd_start_timestamp,
+ "ci_start": ci_start,
}
outfh.write(status_map[status_code].format(**kwargs))
@@ -152,15 +208,16 @@ def analyze_blame(name, args):
and sorting by record data ('delta')
"""
(infh, outfh) = configure_io(args)
- blame_format = ' %ds (%n)'
- r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
- for idx, record in enumerate(show.show_events(_get_events(infh),
- blame_format)):
+ blame_format = " %ds (%n)"
+ r = re.compile(r"(^\s+\d+\.\d+)", re.MULTILINE)
+ for idx, record in enumerate(
+ show.show_events(_get_events(infh), blame_format)
+ ):
srecs = sorted(filter(r.match, record), reverse=True)
- outfh.write('-- Boot Record %02d --\n' % (idx + 1))
- outfh.write('\n'.join(srecs) + '\n')
- outfh.write('\n')
- outfh.write('%d boot records analyzed\n' % (idx + 1))
+ outfh.write("-- Boot Record %02d --\n" % (idx + 1))
+ outfh.write("\n".join(srecs) + "\n")
+ outfh.write("\n")
+ outfh.write("%d boot records analyzed\n" % (idx + 1))
def analyze_show(name, args):
@@ -184,21 +241,25 @@ def analyze_show(name, args):
Finished stage: (modules-final) 0.NNN seconds
"""
(infh, outfh) = configure_io(args)
- for idx, record in enumerate(show.show_events(_get_events(infh),
- args.print_format)):
- outfh.write('-- Boot Record %02d --\n' % (idx + 1))
- outfh.write('The total time elapsed since completing an event is'
- ' printed after the "@" character.\n')
- outfh.write('The time the event takes is printed after the "+" '
- 'character.\n\n')
- outfh.write('\n'.join(record) + '\n')
- outfh.write('%d boot records analyzed\n' % (idx + 1))
+ for idx, record in enumerate(
+ show.show_events(_get_events(infh), args.print_format)
+ ):
+ outfh.write("-- Boot Record %02d --\n" % (idx + 1))
+ outfh.write(
+ "The total time elapsed since completing an event is"
+ ' printed after the "@" character.\n'
+ )
+ outfh.write(
+ 'The time the event takes is printed after the "+" character.\n\n'
+ )
+ outfh.write("\n".join(record) + "\n")
+ outfh.write("%d boot records analyzed\n" % (idx + 1))
def analyze_dump(name, args):
"""Dump cloud-init events in json format"""
(infh, outfh) = configure_io(args)
- outfh.write(json_dumps(_get_events(infh)) + '\n')
+ outfh.write(json_dumps(_get_events(infh)) + "\n")
def _get_events(infile):
@@ -211,28 +272,28 @@ def _get_events(infile):
def configure_io(args):
"""Common parsing and setup of input/output files"""
- if args.infile == '-':
+ if args.infile == "-":
infh = sys.stdin
else:
try:
- infh = open(args.infile, 'r')
+ infh = open(args.infile, "r")
except OSError:
- sys.stderr.write('Cannot open file %s\n' % args.infile)
+ sys.stderr.write("Cannot open file %s\n" % args.infile)
sys.exit(1)
- if args.outfile == '-':
+ if args.outfile == "-":
outfh = sys.stdout
else:
try:
- outfh = open(args.outfile, 'w')
+ outfh = open(args.outfile, "w")
except OSError:
- sys.stderr.write('Cannot open file %s\n' % args.outfile)
+ sys.stderr.write("Cannot open file %s\n" % args.outfile)
sys.exit(1)
return (infh, outfh)
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
(name, action_functor) = args.action
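The hunks above are a mechanical reformatting (string quoting and argument layout); each subparser still dispatches through the (name, function) tuple stored via set_defaults. A minimal sketch of driving the resulting parser programmatically, mirroring the __main__ block at the end of this file (the sample arguments are illustrative):

    # Sketch only, assuming the package layout shown in this diff
    # (cloudinit/analyze/__main__.py).
    from cloudinit.analyze.__main__ import get_parser

    parser = get_parser()
    # Equivalent to: cloudinit-analyze blame -i cloud-init.log -o -
    args = parser.parse_args(["blame", "-i", "cloud-init.log", "-o", "-"])
    name, action_functor = args.action  # set via set_defaults(action=(...))
    action_functor(name, args)          # dispatches to analyze_blame
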
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index 62ad51fe..8e6e3c6a 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -1,21 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
import calendar
-from datetime import datetime
import sys
+from datetime import datetime
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
stage_to_description = {
- 'finished': 'finished running cloud-init',
- 'init-local': 'starting search for local datasources',
- 'init-network': 'searching for network datasources',
- 'init': 'searching for network datasources',
- 'modules-config': 'running config modules',
- 'modules-final': 'finalizing modules',
- 'modules': 'running modules for',
- 'single': 'running single module ',
+ "finished": "finished running cloud-init",
+ "init-local": "starting search for local datasources",
+ "init-network": "searching for network datasources",
+ "init": "searching for network datasources",
+ "modules-config": "running config modules",
+ "modules-final": "finalizing modules",
+ "modules": "running modules for",
+ "single": "running single module ",
}
# logger's asctime format
@@ -34,11 +33,11 @@ def parse_timestamp(timestampstr):
if timestampstr.split()[0] in months:
# Aug 29 22:55:26
FMT = DEFAULT_FMT
- if '.' in timestampstr:
+ if "." in timestampstr:
FMT = CLOUD_INIT_JOURNALCTL_FMT
- dt = datetime.strptime(timestampstr + " " +
- str(datetime.now().year),
- FMT)
+ dt = datetime.strptime(
+ timestampstr + " " + str(datetime.now().year), FMT
+ )
timestamp = dt.strftime("%s.%f")
elif "," in timestampstr:
# 2016-09-12 14:39:20,839
@@ -52,7 +51,7 @@ def parse_timestamp(timestampstr):
def parse_timestamp_from_date(timestampstr):
- out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr])
+ out, _ = subp.subp(["date", "+%s.%3N", "-d", timestampstr])
timestamp = out.strip()
return float(timestamp)
@@ -79,8 +78,8 @@ def parse_ci_logline(line):
# Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \
# init-local/check-cache: attempting to read from cache [check]
- amazon_linux_2_sep = ' cloud-init['
- separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep]
+ amazon_linux_2_sep = " cloud-init["
+ separators = [" - ", " [CLOUDINIT] ", amazon_linux_2_sep]
found = False
for sep in separators:
if sep in line:
@@ -99,7 +98,7 @@ def parse_ci_logline(line):
if "," in timehost:
timestampstr, extra = timehost.split(",")
timestampstr += ",%s" % extra.split()[0]
- if ' ' in extra:
+ if " " in extra:
hostname = extra.split()[-1]
else:
hostname = timehost.split()[-1]
@@ -111,11 +110,11 @@ def parse_ci_logline(line):
eventstr = eventstr.split(maxsplit=1)[1]
else:
timestampstr = timehost.split(hostname)[0].strip()
- if 'Cloud-init v.' in eventstr:
- event_type = 'start'
- if 'running' in eventstr:
- stage_and_timestamp = eventstr.split('running')[1].lstrip()
- event_name, _ = stage_and_timestamp.split(' at ')
+ if "Cloud-init v." in eventstr:
+ event_type = "start"
+ if "running" in eventstr:
+ stage_and_timestamp = eventstr.split("running")[1].lstrip()
+ event_name, _ = stage_and_timestamp.split(" at ")
event_name = event_name.replace("'", "").replace(":", "-")
if event_name == "init":
event_name = "init-network"
@@ -128,17 +127,17 @@ def parse_ci_logline(line):
event_description = eventstr.split(event_name)[1].strip()
event = {
- 'name': event_name.rstrip(":"),
- 'description': event_description,
- 'timestamp': parse_timestamp(timestampstr),
- 'origin': 'cloudinit',
- 'event_type': event_type.rstrip(":"),
+ "name": event_name.rstrip(":"),
+ "description": event_description,
+ "timestamp": parse_timestamp(timestampstr),
+ "origin": "cloudinit",
+ "event_type": event_type.rstrip(":"),
}
- if event['event_type'] == "finish":
+ if event["event_type"] == "finish":
result = event_description.split(":")[0]
- desc = event_description.split(result)[1].lstrip(':').strip()
- event['result'] = result
- event['description'] = desc.strip()
+ desc = event_description.split(result)[1].lstrip(":").strip()
+ event["result"] = result
+ event["description"] = desc.strip()
return event
@@ -146,10 +145,10 @@ def parse_ci_logline(line):
def dump_events(cisource=None, rawdata=None):
events = []
event = None
- CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.']
+ CI_EVENT_MATCHES = ["start:", "finish:", "Cloud-init v."]
if not any([cisource, rawdata]):
- raise ValueError('Either cisource or rawdata parameters are required')
+ raise ValueError("Either cisource or rawdata parameters are required")
if rawdata:
data = rawdata.splitlines()
@@ -162,7 +161,7 @@ def dump_events(cisource=None, rawdata=None):
try:
event = parse_ci_logline(line)
except ValueError:
- sys.stderr.write('Skipping invalid entry\n')
+ sys.stderr.write("Skipping invalid entry\n")
if event:
events.append(event)
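In dump.py, parse_ci_logline() converts a single cloud-init.log line into an event dict, and dump_events() applies it across a whole source. A hedged example of the start-event path, using a fabricated log line that follows the " - " separator shape handled above (the exact timestamp float comes from parse_timestamp):

    # Illustrative only; the log line is made up but matches the
    # "TIMESTAMP - module[LEVEL]: start: name: description" shape.
    from cloudinit.analyze.dump import parse_ci_logline

    line = (
        "2016-09-12 14:39:20,839 - handlers.py[DEBUG]: "
        "start: init-network/check-cache: attempting to read from cache [trust]"
    )
    event = parse_ci_logline(line)
    # event == {"name": "init-network/check-cache", "event_type": "start",
    #           "origin": "cloudinit", "timestamp": <epoch float>,
    #           "description": "attempting to read from cache [trust]"}
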
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index 01a4d3e5..5fd9cdfd 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -8,11 +8,10 @@ import base64
import datetime
import json
import os
-import time
import sys
+import time
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
from cloudinit.distros import uses_systemd
# Example events:
@@ -35,24 +34,25 @@ from cloudinit.distros import uses_systemd
# }
format_key = {
- '%d': 'delta',
- '%D': 'description',
- '%E': 'elapsed',
- '%e': 'event_type',
- '%I': 'indent',
- '%l': 'level',
- '%n': 'name',
- '%o': 'origin',
- '%r': 'result',
- '%t': 'timestamp',
- '%T': 'total_time',
+ "%d": "delta",
+ "%D": "description",
+ "%E": "elapsed",
+ "%e": "event_type",
+ "%I": "indent",
+ "%l": "level",
+ "%n": "name",
+ "%o": "origin",
+ "%r": "result",
+ "%t": "timestamp",
+ "%T": "total_time",
}
-formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v)
- for k, v in format_key.items()])
-SUCCESS_CODE = 'successful'
-FAIL_CODE = 'failure'
-CONTAINER_CODE = 'container'
+formatting_help = " ".join(
+ ["{0}: {1}".format(k.replace("%", "%%"), v) for k, v in format_key.items()]
+)
+SUCCESS_CODE = "successful"
+FAIL_CODE = "failure"
+CONTAINER_CODE = "container"
TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1)
@@ -60,7 +60,7 @@ def format_record(msg, event):
for i, j in format_key.items():
if i in msg:
# ensure consistent formatting of time values
- if j in ['delta', 'elapsed', 'timestamp']:
+ if j in ["delta", "elapsed", "timestamp"]:
msg = msg.replace(i, "{%s:08.5f}" % j)
else:
msg = msg.replace(i, "{%s}" % j)
@@ -68,13 +68,13 @@ def format_record(msg, event):
def dump_event_files(event):
- content = dict((k, v) for k, v in event.items() if k not in ['content'])
- files = content['files']
+ content = dict((k, v) for k, v in event.items() if k not in ["content"])
+ files = content["files"]
saved = []
for f in files:
- fname = f['path']
+ fname = f["path"]
fn_local = os.path.basename(fname)
- fcontent = base64.b64decode(f['content']).decode('ascii')
+ fcontent = base64.b64decode(f["content"]).decode("ascii")
util.write_file(fn_local, fcontent)
saved.append(fn_local)
@@ -83,13 +83,13 @@ def dump_event_files(event):
def event_name(event):
if event:
- return event.get('name')
+ return event.get("name")
return None
def event_type(event):
if event:
- return event.get('event_type')
+ return event.get("event_type")
return None
@@ -100,7 +100,7 @@ def event_parent(event):
def event_timestamp(event):
- return float(event.get('timestamp'))
+ return float(event.get("timestamp"))
def event_datetime(event):
@@ -117,41 +117,44 @@ def event_duration(start, finish):
def event_record(start_time, start, finish):
record = finish.copy()
- record.update({
- 'delta': event_duration(start, finish),
- 'elapsed': delta_seconds(start_time, event_datetime(start)),
- 'indent': '|' + ' ' * (event_name(start).count('/') - 1) + '`->',
- })
+ record.update(
+ {
+ "delta": event_duration(start, finish),
+ "elapsed": delta_seconds(start_time, event_datetime(start)),
+ "indent": "|" + " " * (event_name(start).count("/") - 1) + "`->",
+ }
+ )
return record
def total_time_record(total_time):
- return 'Total Time: %3.5f seconds\n' % total_time
+ return "Total Time: %3.5f seconds\n" % total_time
class SystemctlReader(object):
- '''
+ """
Class for dealing with all systemctl subp calls in a consistent manner.
- '''
+ """
+
def __init__(self, property, parameter=None):
self.epoch = None
- self.args = ['/bin/systemctl', 'show']
+ self.args = ["/bin/systemctl", "show"]
if parameter:
self.args.append(parameter)
- self.args.extend(['-p', property])
+ self.args.extend(["-p", property])
# Don't want the init of our object to break. Instead of throwing
# an exception, set an error code that gets checked when data is
# requested from the object
self.failure = self.subp()
def subp(self):
- '''
+ """
Make a subp call based on set args and handle errors by setting
failure code
:return: whether the subp call failed or not
- '''
+ """
try:
value, err = subp.subp(self.args, capture=True)
if err:
@@ -162,41 +165,41 @@ class SystemctlReader(object):
return systemctl_fail
def parse_epoch_as_float(self):
- '''
+ """
If subp call succeeded, return the timestamp from subp as a float.
:return: timestamp as a float
- '''
+ """
# subp has 2 ways to fail: it either fails and throws an exception,
# or returns an error code. Raise an exception here in order to make
# sure both scenarios throw exceptions
if self.failure:
- raise RuntimeError('Subprocess call to systemctl has failed, '
- 'returning error code ({})'
- .format(self.failure))
+ raise RuntimeError(
+ "Subprocess call to systemctl has failed, "
+ "returning error code ({})".format(self.failure)
+ )
# Output from systemctl show has the format Property=Value.
# For example, UserspaceMonotonic=1929304
- timestamp = self.epoch.split('=')[1]
+ timestamp = self.epoch.split("=")[1]
# Timestamps reported by systemctl are in microseconds, converting
return float(timestamp) / 1000000
def dist_check_timestamp():
- '''
+ """
Determine which init system a particular linux distro is using.
Each init system (systemd, upstart, etc) has a different way of
providing timestamps.
:return: timestamps of kernelboot, kernelendboot, and cloud-initstart
or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved.
- '''
+ """
if uses_systemd():
return gather_timestamps_using_systemd()
# Use dmesg to get timestamps if the distro does not have systemd
- if util.is_FreeBSD() or 'gentoo' in \
- util.system_info()['system'].lower():
+ if util.is_FreeBSD() or "gentoo" in util.system_info()["system"].lower():
return gather_timestamps_using_dmesg()
# this distro doesn't fit anything that is supported by cloud-init. just
@@ -205,20 +208,20 @@ def dist_check_timestamp():
def gather_timestamps_using_dmesg():
- '''
+ """
Gather timestamps that corresponds to kernel begin initialization,
kernel finish initialization using dmesg as opposed to systemctl
:return: the two timestamps plus a dummy timestamp to keep consistency
with gather_timestamps_using_systemd
- '''
+ """
try:
- data, _ = subp.subp(['dmesg'], capture=True)
+ data, _ = subp.subp(["dmesg"], capture=True)
split_entries = data[0].splitlines()
for i in split_entries:
- if i.decode('UTF-8').find('user') != -1:
- splitup = i.decode('UTF-8').split()
- stripped = splitup[1].strip(']')
+ if i.decode("UTF-8").find("user") != -1:
+ splitup = i.decode("UTF-8").split()
+ stripped = splitup[1].strip("]")
# kernel timestamp from dmesg is equal to 0,
# with the userspace timestamp relative to it.
@@ -228,8 +231,7 @@ def gather_timestamps_using_dmesg():
# systemd wont start cloud-init in this case,
# so we cannot get that timestamp
- return SUCCESS_CODE, kernel_start, kernel_end, \
- kernel_end
+ return SUCCESS_CODE, kernel_start, kernel_end, kernel_end
except Exception:
pass
@@ -237,18 +239,20 @@ def gather_timestamps_using_dmesg():
def gather_timestamps_using_systemd():
- '''
+ """
Gather timestamps that corresponds to kernel begin initialization,
kernel finish initialization. and cloud-init systemd unit activation
:return: the three timestamps
- '''
+ """
kernel_start = float(time.time()) - float(util.uptime())
try:
- delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\
- .parse_epoch_as_float()
- delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic',
- 'cloud-init-local').parse_epoch_as_float()
+ delta_k_end = SystemctlReader(
+ "UserspaceTimestampMonotonic"
+ ).parse_epoch_as_float()
+ delta_ci_s = SystemctlReader(
+ "InactiveExitTimestampMonotonic", "cloud-init-local"
+ ).parse_epoch_as_float()
base_time = kernel_start
status = SUCCESS_CODE
# lxc based containers do not set their monotonic zero point to be when
@@ -262,12 +266,13 @@ def gather_timestamps_using_systemd():
# in containers when https://github.com/lxc/lxcfs/issues/292
# is fixed, util.uptime() should be used instead of stat on
try:
- file_stat = os.stat('/proc/1/cmdline')
+ file_stat = os.stat("/proc/1/cmdline")
kernel_start = file_stat.st_atime
except OSError as err:
- raise RuntimeError('Could not determine container boot '
- 'time from /proc/1/cmdline. ({})'
- .format(err)) from err
+ raise RuntimeError(
+ "Could not determine container boot "
+ "time from /proc/1/cmdline. ({})".format(err)
+ ) from err
status = CONTAINER_CODE
else:
status = FAIL_CODE
@@ -283,10 +288,14 @@ def gather_timestamps_using_systemd():
return status, kernel_start, kernel_end, cloudinit_sysd
-def generate_records(events, blame_sort=False,
- print_format="(%n) %d seconds in %I%D",
- dump_files=False, log_datafiles=False):
- '''
+def generate_records(
+ events,
+ blame_sort=False,
+ print_format="(%n) %d seconds in %I%D",
+ dump_files=False,
+ log_datafiles=False,
+):
+ """
Take in raw events and create parent-child dependencies between events
in order to order events in chronological order.
@@ -298,9 +307,9 @@ def generate_records(events, blame_sort=False,
:param log_datafiles: whether or not to log events generated
:return: boot records ordered chronologically
- '''
+ """
- sorted_events = sorted(events, key=lambda x: x['timestamp'])
+ sorted_events = sorted(events, key=lambda x: x["timestamp"])
records = []
start_time = None
total_time = 0.0
@@ -316,8 +325,8 @@ def generate_records(events, blame_sort=False,
except IndexError:
next_evt = None
- if event_type(event) == 'start':
- if event.get('name') in stages_seen:
+ if event_type(event) == "start":
+ if event.get("name") in stages_seen:
records.append(total_time_record(total_time))
boot_records.append(records)
records = []
@@ -331,25 +340,28 @@ def generate_records(events, blame_sort=False,
# see if we have a pair
if event_name(event) == event_name(next_evt):
- if event_type(next_evt) == 'finish':
- records.append(format_record(print_format,
- event_record(start_time,
- event,
- next_evt)))
+ if event_type(next_evt) == "finish":
+ records.append(
+ format_record(
+ print_format,
+ event_record(start_time, event, next_evt),
+ )
+ )
else:
# This is a parent event
- records.append("Starting stage: %s" % event.get('name'))
+ records.append("Starting stage: %s" % event.get("name"))
unprocessed.append(event)
- stages_seen.append(event.get('name'))
+ stages_seen.append(event.get("name"))
continue
else:
prev_evt = unprocessed.pop()
if event_name(event) == event_name(prev_evt):
record = event_record(start_time, prev_evt, event)
- records.append(format_record("Finished stage: "
- "(%n) %d seconds",
- record) + "\n")
- total_time += record.get('delta')
+ records.append(
+ format_record("Finished stage: (%n) %d seconds", record)
+ + "\n"
+ )
+ total_time += record.get("delta")
else:
# not a match, put it back
unprocessed.append(prev_evt)
@@ -360,7 +372,7 @@ def generate_records(events, blame_sort=False,
def show_events(events, print_format):
- '''
+ """
A passthrough method that makes it easier to call generate_records()
:param events: JSONs from dump that represents events taken from logs
@@ -368,18 +380,18 @@ def show_events(events, print_format):
and time taken by the event in one line
:return: boot records ordered chronologically
- '''
+ """
return generate_records(events, print_format=print_format)
def load_events_infile(infile):
- '''
+ """
Takes in a log file, read it, and convert to json.
:param infile: The Log file to be read
:return: json version of logfile, raw file
- '''
+ """
data = infile.read()
try:
return json.loads(data), data
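Taken together with format_key, format_record() turns the %-escapes accepted by the show subcommand's --format option into str.format fields, zero-padding the time values to eight characters. A sketch with a hand-built event dict (this assumes format_record ends by calling msg.format(**event), which the hunk above does not show):

    from cloudinit.analyze.show import format_record

    event = {
        "name": "init-local/check-cache",
        "description": "attempting to read from cache [check]",
        "indent": "|`->",
        "elapsed": 1.23456,  # seconds since the boot record started
        "delta": 0.00123,    # seconds this event took
    }
    # The default `show` format string from __main__.py: "%I%D @%Es +%ds"
    print(format_record("%I%D @%Es +%ds", event))
    # |`->attempting to read from cache [check] @01.23456s +00.00123s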