Diffstat (limited to 'cloudinit/analyze')
-rw-r--r--  cloudinit/analyze/__main__.py          269
-rw-r--r--  cloudinit/analyze/dump.py               71
-rw-r--r--  cloudinit/analyze/show.py              192
-rw-r--r--  cloudinit/analyze/tests/test_boot.py   170
-rw-r--r--  cloudinit/analyze/tests/test_dump.py   208
5 files changed, 302 insertions, 608 deletions
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 99e5c203..36a5be78 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -5,62 +5,111 @@
import argparse
import re
import sys
+from datetime import datetime
from cloudinit.util import json_dumps
-from datetime import datetime
-from . import dump
-from . import show
+
+from . import dump, show
def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
- prog='cloudinit-analyze',
- description='Devel tool: Analyze cloud-init logs and data')
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ prog="cloudinit-analyze",
+ description="Devel tool: Analyze cloud-init logs and data",
+ )
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
parser_blame = subparsers.add_parser(
- 'blame', help='Print list of executed stages ordered by time to init')
+ "blame", help="Print list of executed stages ordered by time to init"
+ )
parser_blame.add_argument(
- '-i', '--infile', action='store', dest='infile',
- default='/var/log/cloud-init.log',
- help='specify where to read input.')
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input.",
+ )
parser_blame.add_argument(
- '-o', '--outfile', action='store', dest='outfile', default='-',
- help='specify where to write output. ')
- parser_blame.set_defaults(action=('blame', analyze_blame))
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output. ",
+ )
+ parser_blame.set_defaults(action=("blame", analyze_blame))
parser_show = subparsers.add_parser(
- 'show', help='Print list of in-order events during execution')
- parser_show.add_argument('-f', '--format', action='store',
- dest='print_format', default='%I%D @%Es +%ds',
- help='specify formatting of output.')
- parser_show.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input.')
- parser_show.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output.')
- parser_show.set_defaults(action=('show', analyze_show))
+ "show", help="Print list of in-order events during execution"
+ )
+ parser_show.add_argument(
+ "-f",
+ "--format",
+ action="store",
+ dest="print_format",
+ default="%I%D @%Es +%ds",
+ help="specify formatting of output.",
+ )
+ parser_show.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input.",
+ )
+ parser_show.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output.",
+ )
+ parser_show.set_defaults(action=("show", analyze_show))
parser_dump = subparsers.add_parser(
- 'dump', help='Dump cloud-init events in JSON format')
- parser_dump.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input. ')
- parser_dump.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output. ')
- parser_dump.set_defaults(action=('dump', analyze_dump))
+ "dump", help="Dump cloud-init events in JSON format"
+ )
+ parser_dump.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input. ",
+ )
+ parser_dump.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output. ",
+ )
+ parser_dump.set_defaults(action=("dump", analyze_dump))
parser_boot = subparsers.add_parser(
- 'boot', help='Print list of boot times for kernel and cloud-init')
- parser_boot.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input. ')
- parser_boot.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output.')
- parser_boot.set_defaults(action=('boot', analyze_boot))
+ "boot", help="Print list of boot times for kernel and cloud-init"
+ )
+ parser_boot.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input. ",
+ )
+ parser_boot.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output.",
+ )
+ parser_boot.set_defaults(action=("boot", analyze_boot))
return parser
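
Note: each subparser above registers its handler as a (name, functor) tuple
through set_defaults(action=...), and the entry point unpacks that tuple to
dispatch. A minimal sketch of driving the parser programmatically, assuming
cloud-init is importable and a log exists at the default path:

    from cloudinit.analyze.__main__ import get_parser

    parser = get_parser()
    args = parser.parse_args(["blame", "-i", "/var/log/cloud-init.log"])
    name, action_functor = args.action  # set per-subcommand by set_defaults()
    action_functor(name, args)          # e.g. dispatches to analyze_blame()
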
@@ -78,61 +127,68 @@ def analyze_boot(name, args):
"""
infh, outfh = configure_io(args)
kernel_info = show.dist_check_timestamp()
- status_code, kernel_start, kernel_end, ci_sysd_start = \
- kernel_info
+ status_code, kernel_start, kernel_end, ci_sysd_start = kernel_info
kernel_start_timestamp = datetime.utcfromtimestamp(kernel_start)
kernel_end_timestamp = datetime.utcfromtimestamp(kernel_end)
ci_sysd_start_timestamp = datetime.utcfromtimestamp(ci_sysd_start)
try:
- last_init_local = \
- [e for e in _get_events(infh) if e['name'] == 'init-local' and
- 'starting search' in e['description']][-1]
- ci_start = datetime.utcfromtimestamp(last_init_local['timestamp'])
+ last_init_local = [
+ e
+ for e in _get_events(infh)
+ if e["name"] == "init-local"
+ and "starting search" in e["description"]
+ ][-1]
+ ci_start = datetime.utcfromtimestamp(last_init_local["timestamp"])
except IndexError:
- ci_start = 'Could not find init-local log-line in cloud-init.log'
+ ci_start = "Could not find init-local log-line in cloud-init.log"
status_code = show.FAIL_CODE
- FAILURE_MSG = 'Your Linux distro or container does not support this ' \
- 'functionality.\n' \
- 'You must be running a Kernel Telemetry supported ' \
- 'distro.\nPlease check ' \
- 'https://cloudinit.readthedocs.io/en/latest' \
- '/topics/analyze.html for more ' \
- 'information on supported distros.\n'
-
- SUCCESS_MSG = '-- Most Recent Boot Record --\n' \
- ' Kernel Started at: {k_s_t}\n' \
- ' Kernel ended boot at: {k_e_t}\n' \
- ' Kernel time to boot (seconds): {k_r}\n' \
- ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \
- ' Time between Kernel end boot and Cloud-init ' \
- 'activation (seconds): {bt_r}\n' \
- ' Cloud-init start: {ci_start}\n'
-
- CONTAINER_MSG = '-- Most Recent Container Boot Record --\n' \
- ' Container started at: {k_s_t}\n' \
- ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \
- ' Cloud-init start: {ci_start}\n' \
-
+ FAILURE_MSG = (
+ "Your Linux distro or container does not support this "
+ "functionality.\n"
+ "You must be running a Kernel Telemetry supported "
+ "distro.\nPlease check "
+ "https://cloudinit.readthedocs.io/en/latest"
+ "/topics/analyze.html for more "
+ "information on supported distros.\n"
+ )
+
+ SUCCESS_MSG = (
+ "-- Most Recent Boot Record --\n"
+ " Kernel Started at: {k_s_t}\n"
+ " Kernel ended boot at: {k_e_t}\n"
+ " Kernel time to boot (seconds): {k_r}\n"
+ " Cloud-init activated by systemd at: {ci_sysd_t}\n"
+ " Time between Kernel end boot and Cloud-init "
+ "activation (seconds): {bt_r}\n"
+ " Cloud-init start: {ci_start}\n"
+ )
+
+ CONTAINER_MSG = (
+ "-- Most Recent Container Boot Record --\n"
+ " Container started at: {k_s_t}\n"
+ " Cloud-init activated by systemd at: {ci_sysd_t}\n"
+ " Cloud-init start: {ci_start}\n"
+ )
status_map = {
show.FAIL_CODE: FAILURE_MSG,
show.CONTAINER_CODE: CONTAINER_MSG,
- show.SUCCESS_CODE: SUCCESS_MSG
+ show.SUCCESS_CODE: SUCCESS_MSG,
}
kernel_runtime = kernel_end - kernel_start
between_process_runtime = ci_sysd_start - kernel_end
kwargs = {
- 'k_s_t': kernel_start_timestamp,
- 'k_e_t': kernel_end_timestamp,
- 'k_r': kernel_runtime,
- 'bt_r': between_process_runtime,
- 'k_e': kernel_end,
- 'k_s': kernel_start,
- 'ci_sysd': ci_sysd_start,
- 'ci_sysd_t': ci_sysd_start_timestamp,
- 'ci_start': ci_start
+ "k_s_t": kernel_start_timestamp,
+ "k_e_t": kernel_end_timestamp,
+ "k_r": kernel_runtime,
+ "bt_r": between_process_runtime,
+ "k_e": kernel_end,
+ "k_s": kernel_start,
+ "ci_sysd": ci_sysd_start,
+ "ci_sysd_t": ci_sysd_start_timestamp,
+ "ci_start": ci_start,
}
outfh.write(status_map[status_code].format(**kwargs))
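
Note: status_map pairs each code returned by show.dist_check_timestamp() with
one of the three templates, so a single format(**kwargs) call fills whichever
template applies. A toy illustration of the pattern, with made-up stand-in
values rather than real boot data:

    TEMPLATE = (
        "-- Most Recent Container Boot Record --\n"
        "   Container started at: {k_s_t}\n"
        "   Cloud-init start: {ci_start}\n"
    )
    kwargs = {"k_s_t": "2019-07-08 17:40:49", "ci_start": "2019-07-08 17:41:08"}
    print(TEMPLATE.format(**kwargs))
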
@@ -152,15 +208,16 @@ def analyze_blame(name, args):
and sorting by record data ('delta')
"""
(infh, outfh) = configure_io(args)
- blame_format = ' %ds (%n)'
- r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
- for idx, record in enumerate(show.show_events(_get_events(infh),
- blame_format)):
+ blame_format = " %ds (%n)"
+ r = re.compile(r"(^\s+\d+\.\d+)", re.MULTILINE)
+ for idx, record in enumerate(
+ show.show_events(_get_events(infh), blame_format)
+ ):
srecs = sorted(filter(r.match, record), reverse=True)
- outfh.write('-- Boot Record %02d --\n' % (idx + 1))
- outfh.write('\n'.join(srecs) + '\n')
- outfh.write('\n')
- outfh.write('%d boot records analyzed\n' % (idx + 1))
+ outfh.write("-- Boot Record %02d --\n" % (idx + 1))
+ outfh.write("\n".join(srecs) + "\n")
+ outfh.write("\n")
+ outfh.write("%d boot records analyzed\n" % (idx + 1))
def analyze_show(name, args):
@@ -184,21 +241,25 @@ def analyze_show(name, args):
Finished stage: (modules-final) 0.NNN seconds
"""
(infh, outfh) = configure_io(args)
- for idx, record in enumerate(show.show_events(_get_events(infh),
- args.print_format)):
- outfh.write('-- Boot Record %02d --\n' % (idx + 1))
- outfh.write('The total time elapsed since completing an event is'
- ' printed after the "@" character.\n')
- outfh.write('The time the event takes is printed after the "+" '
- 'character.\n\n')
- outfh.write('\n'.join(record) + '\n')
- outfh.write('%d boot records analyzed\n' % (idx + 1))
+ for idx, record in enumerate(
+ show.show_events(_get_events(infh), args.print_format)
+ ):
+ outfh.write("-- Boot Record %02d --\n" % (idx + 1))
+ outfh.write(
+ "The total time elapsed since completing an event is"
+ ' printed after the "@" character.\n'
+ )
+ outfh.write(
+ 'The time the event takes is printed after the "+" character.\n\n'
+ )
+ outfh.write("\n".join(record) + "\n")
+ outfh.write("%d boot records analyzed\n" % (idx + 1))
def analyze_dump(name, args):
"""Dump cloud-init events in json format"""
(infh, outfh) = configure_io(args)
- outfh.write(json_dumps(_get_events(infh)) + '\n')
+ outfh.write(json_dumps(_get_events(infh)) + "\n")
def _get_events(infile):
@@ -211,28 +272,28 @@ def _get_events(infile):
def configure_io(args):
"""Common parsing and setup of input/output files"""
- if args.infile == '-':
+ if args.infile == "-":
infh = sys.stdin
else:
try:
- infh = open(args.infile, 'r')
+ infh = open(args.infile, "r")
except OSError:
- sys.stderr.write('Cannot open file %s\n' % args.infile)
+ sys.stderr.write("Cannot open file %s\n" % args.infile)
sys.exit(1)
- if args.outfile == '-':
+ if args.outfile == "-":
outfh = sys.stdout
else:
try:
- outfh = open(args.outfile, 'w')
+ outfh = open(args.outfile, "w")
except OSError:
- sys.stderr.write('Cannot open file %s\n' % args.outfile)
+ sys.stderr.write("Cannot open file %s\n" % args.outfile)
sys.exit(1)
return (infh, outfh)
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
(name, action_functor) = args.action
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index 62ad51fe..8e6e3c6a 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -1,21 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
import calendar
-from datetime import datetime
import sys
+from datetime import datetime
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
stage_to_description = {
- 'finished': 'finished running cloud-init',
- 'init-local': 'starting search for local datasources',
- 'init-network': 'searching for network datasources',
- 'init': 'searching for network datasources',
- 'modules-config': 'running config modules',
- 'modules-final': 'finalizing modules',
- 'modules': 'running modules for',
- 'single': 'running single module ',
+ "finished": "finished running cloud-init",
+ "init-local": "starting search for local datasources",
+ "init-network": "searching for network datasources",
+ "init": "searching for network datasources",
+ "modules-config": "running config modules",
+ "modules-final": "finalizing modules",
+ "modules": "running modules for",
+ "single": "running single module ",
}
# logger's asctime format
@@ -34,11 +33,11 @@ def parse_timestamp(timestampstr):
if timestampstr.split()[0] in months:
# Aug 29 22:55:26
FMT = DEFAULT_FMT
- if '.' in timestampstr:
+ if "." in timestampstr:
FMT = CLOUD_INIT_JOURNALCTL_FMT
- dt = datetime.strptime(timestampstr + " " +
- str(datetime.now().year),
- FMT)
+ dt = datetime.strptime(
+ timestampstr + " " + str(datetime.now().year), FMT
+ )
timestamp = dt.strftime("%s.%f")
elif "," in timestampstr:
# 2016-09-12 14:39:20,839
@@ -52,7 +51,7 @@ def parse_timestamp(timestampstr):
def parse_timestamp_from_date(timestampstr):
- out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr])
+ out, _ = subp.subp(["date", "+%s.%3N", "-d", timestampstr])
timestamp = out.strip()
return float(timestamp)
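
Note: parse_timestamp() recognizes three timestamp shapes directly and falls
back to parse_timestamp_from_date(), which shells out to date(1), for anything
else. A short sketch of the accepted inputs, assuming cloud-init is
importable; each call returns a float epoch:

    from cloudinit.analyze.dump import parse_timestamp

    parse_timestamp("Aug 29 22:55:26")          # syslog; current year assumed
    parse_timestamp("Aug 08 17:15:50.606811")   # journalctl, with microseconds
    parse_timestamp("2016-09-12 14:39:20,839")  # cloud-init's logger format
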
@@ -79,8 +78,8 @@ def parse_ci_logline(line):
# Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \
# init-local/check-cache: attempting to read from cache [check]
- amazon_linux_2_sep = ' cloud-init['
- separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep]
+ amazon_linux_2_sep = " cloud-init["
+ separators = [" - ", " [CLOUDINIT] ", amazon_linux_2_sep]
found = False
for sep in separators:
if sep in line:
@@ -99,7 +98,7 @@ def parse_ci_logline(line):
if "," in timehost:
timestampstr, extra = timehost.split(",")
timestampstr += ",%s" % extra.split()[0]
- if ' ' in extra:
+ if " " in extra:
hostname = extra.split()[-1]
else:
hostname = timehost.split()[-1]
@@ -111,11 +110,11 @@ def parse_ci_logline(line):
eventstr = eventstr.split(maxsplit=1)[1]
else:
timestampstr = timehost.split(hostname)[0].strip()
- if 'Cloud-init v.' in eventstr:
- event_type = 'start'
- if 'running' in eventstr:
- stage_and_timestamp = eventstr.split('running')[1].lstrip()
- event_name, _ = stage_and_timestamp.split(' at ')
+ if "Cloud-init v." in eventstr:
+ event_type = "start"
+ if "running" in eventstr:
+ stage_and_timestamp = eventstr.split("running")[1].lstrip()
+ event_name, _ = stage_and_timestamp.split(" at ")
event_name = event_name.replace("'", "").replace(":", "-")
if event_name == "init":
event_name = "init-network"
@@ -128,17 +127,17 @@ def parse_ci_logline(line):
event_description = eventstr.split(event_name)[1].strip()
event = {
- 'name': event_name.rstrip(":"),
- 'description': event_description,
- 'timestamp': parse_timestamp(timestampstr),
- 'origin': 'cloudinit',
- 'event_type': event_type.rstrip(":"),
+ "name": event_name.rstrip(":"),
+ "description": event_description,
+ "timestamp": parse_timestamp(timestampstr),
+ "origin": "cloudinit",
+ "event_type": event_type.rstrip(":"),
}
- if event['event_type'] == "finish":
+ if event["event_type"] == "finish":
result = event_description.split(":")[0]
- desc = event_description.split(result)[1].lstrip(':').strip()
- event['result'] = result
- event['description'] = desc.strip()
+ desc = event_description.split(result)[1].lstrip(":").strip()
+ event["result"] = result
+ event["description"] = desc.strip()
return event
@@ -146,10 +145,10 @@ def parse_ci_logline(line):
def dump_events(cisource=None, rawdata=None):
events = []
event = None
- CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.']
+ CI_EVENT_MATCHES = ["start:", "finish:", "Cloud-init v."]
if not any([cisource, rawdata]):
- raise ValueError('Either cisource or rawdata parameters are required')
+ raise ValueError("Either cisource or rawdata parameters are required")
if rawdata:
data = rawdata.splitlines()
@@ -162,7 +161,7 @@ def dump_events(cisource=None, rawdata=None):
try:
event = parse_ci_logline(line)
except ValueError:
- sys.stderr.write('Skipping invalid entry\n')
+ sys.stderr.write("Skipping invalid entry\n")
if event:
events.append(event)
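
Note: a short usage sketch for dump_events(), mirroring the behavior that the
tests removed later in this diff exercised; it assumes cloud-init is
importable:

    from cloudinit.analyze.dump import dump_events

    raw = (
        "2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9"
        " running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000."
        " Up 6.26 seconds.\n"
    )
    events, data = dump_events(rawdata=raw)
    # events[0]["name"] == "init-local"; events[0]["event_type"] == "start"
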
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index 01a4d3e5..5fd9cdfd 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -8,11 +8,10 @@ import base64
import datetime
import json
import os
-import time
import sys
+import time
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
from cloudinit.distros import uses_systemd
# Example events:
@@ -35,24 +34,25 @@ from cloudinit.distros import uses_systemd
# }
format_key = {
- '%d': 'delta',
- '%D': 'description',
- '%E': 'elapsed',
- '%e': 'event_type',
- '%I': 'indent',
- '%l': 'level',
- '%n': 'name',
- '%o': 'origin',
- '%r': 'result',
- '%t': 'timestamp',
- '%T': 'total_time',
+ "%d": "delta",
+ "%D": "description",
+ "%E": "elapsed",
+ "%e": "event_type",
+ "%I": "indent",
+ "%l": "level",
+ "%n": "name",
+ "%o": "origin",
+ "%r": "result",
+ "%t": "timestamp",
+ "%T": "total_time",
}
-formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v)
- for k, v in format_key.items()])
-SUCCESS_CODE = 'successful'
-FAIL_CODE = 'failure'
-CONTAINER_CODE = 'container'
+formatting_help = " ".join(
+ ["{0}: {1}".format(k.replace("%", "%%"), v) for k, v in format_key.items()]
+)
+SUCCESS_CODE = "successful"
+FAIL_CODE = "failure"
+CONTAINER_CODE = "container"
TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1)
@@ -60,7 +60,7 @@ def format_record(msg, event):
for i, j in format_key.items():
if i in msg:
# ensure consistent formatting of time values
- if j in ['delta', 'elapsed', 'timestamp']:
+ if j in ["delta", "elapsed", "timestamp"]:
msg = msg.replace(i, "{%s:08.5f}" % j)
else:
msg = msg.replace(i, "{%s}" % j)
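
Note: format_record() rewrites each %-code into a str.format() field, padding
the time-valued fields with {...:08.5f}, and then formats the event dict. A
tiny sketch with a made-up event:

    from cloudinit.analyze.show import format_record

    event = {"indent": "|`->", "name": "init-local", "delta": 1.27835}
    format_record("%I(%n) took %ds", event)
    # -> "|`->(init-local) took 01.27835s"
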
@@ -68,13 +68,13 @@ def format_record(msg, event):
def dump_event_files(event):
- content = dict((k, v) for k, v in event.items() if k not in ['content'])
- files = content['files']
+ content = dict((k, v) for k, v in event.items() if k not in ["content"])
+ files = content["files"]
saved = []
for f in files:
- fname = f['path']
+ fname = f["path"]
fn_local = os.path.basename(fname)
- fcontent = base64.b64decode(f['content']).decode('ascii')
+ fcontent = base64.b64decode(f["content"]).decode("ascii")
util.write_file(fn_local, fcontent)
saved.append(fn_local)
@@ -83,13 +83,13 @@ def dump_event_files(event):
def event_name(event):
if event:
- return event.get('name')
+ return event.get("name")
return None
def event_type(event):
if event:
- return event.get('event_type')
+ return event.get("event_type")
return None
@@ -100,7 +100,7 @@ def event_parent(event):
def event_timestamp(event):
- return float(event.get('timestamp'))
+ return float(event.get("timestamp"))
def event_datetime(event):
@@ -117,41 +117,44 @@ def event_duration(start, finish):
def event_record(start_time, start, finish):
record = finish.copy()
- record.update({
- 'delta': event_duration(start, finish),
- 'elapsed': delta_seconds(start_time, event_datetime(start)),
- 'indent': '|' + ' ' * (event_name(start).count('/') - 1) + '`->',
- })
+ record.update(
+ {
+ "delta": event_duration(start, finish),
+ "elapsed": delta_seconds(start_time, event_datetime(start)),
+ "indent": "|" + " " * (event_name(start).count("/") - 1) + "`->",
+ }
+ )
return record
def total_time_record(total_time):
- return 'Total Time: %3.5f seconds\n' % total_time
+ return "Total Time: %3.5f seconds\n" % total_time
class SystemctlReader(object):
- '''
+ """
Class for dealing with all systemctl subp calls in a consistent manner.
- '''
+ """
+
def __init__(self, property, parameter=None):
self.epoch = None
- self.args = ['/bin/systemctl', 'show']
+ self.args = ["/bin/systemctl", "show"]
if parameter:
self.args.append(parameter)
- self.args.extend(['-p', property])
+ self.args.extend(["-p", property])
# Don't want the init of our object to break. Instead of throwing
# an exception, set an error code that gets checked when data is
# requested from the object
self.failure = self.subp()
def subp(self):
- '''
+ """
Make a subp call based on set args and handle errors by setting
failure code
:return: whether the subp call failed or not
- '''
+ """
try:
value, err = subp.subp(self.args, capture=True)
if err:
@@ -162,41 +165,41 @@ class SystemctlReader(object):
return systemctl_fail
def parse_epoch_as_float(self):
- '''
+ """
If subp call succeeded, return the timestamp from subp as a float.
:return: timestamp as a float
- '''
+ """
# subp has 2 ways to fail: it either fails and throws an exception,
# or returns an error code. Raise an exception here in order to make
# sure both scenarios throw exceptions
if self.failure:
- raise RuntimeError('Subprocess call to systemctl has failed, '
- 'returning error code ({})'
- .format(self.failure))
+ raise RuntimeError(
+ "Subprocess call to systemctl has failed, "
+ "returning error code ({})".format(self.failure)
+ )
# Output from systemctl show has the format Property=Value.
# For example, UserspaceMonotonic=1929304
- timestamp = self.epoch.split('=')[1]
+ timestamp = self.epoch.split("=")[1]
# Timestamps reported by systemctl are in microseconds, converting
return float(timestamp) / 1000000
def dist_check_timestamp():
- '''
+ """
Determine which init system a particular linux distro is using.
Each init system (systemd, upstart, etc) has a different way of
providing timestamps.
:return: timestamps of kernelboot, kernelendboot, and cloud-initstart
or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved.
- '''
+ """
if uses_systemd():
return gather_timestamps_using_systemd()
# Use dmesg to get timestamps if the distro does not have systemd
- if util.is_FreeBSD() or 'gentoo' in \
- util.system_info()['system'].lower():
+ if util.is_FreeBSD() or "gentoo" in util.system_info()["system"].lower():
return gather_timestamps_using_dmesg()
# this distro doesn't fit anything that is supported by cloud-init. just
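
Note: SystemctlReader wraps a single "systemctl show -p <property>" call;
subp() stores any failure, and parse_epoch_as_float() splits the
Property=Value output and scales microseconds to seconds. A minimal sketch,
only meaningful on a systemd host:

    from cloudinit.analyze.show import SystemctlReader

    # runs: /bin/systemctl show -p UserspaceTimestampMonotonic
    reader = SystemctlReader("UserspaceTimestampMonotonic")
    seconds = reader.parse_epoch_as_float()  # RuntimeError if the call failed
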
@@ -205,20 +208,20 @@ def dist_check_timestamp():
def gather_timestamps_using_dmesg():
- '''
+ """
Gather timestamps that corresponds to kernel begin initialization,
kernel finish initialization using dmesg as opposed to systemctl
:return: the two timestamps plus a dummy timestamp to keep consistency
with gather_timestamps_using_systemd
- '''
+ """
try:
- data, _ = subp.subp(['dmesg'], capture=True)
+ data, _ = subp.subp(["dmesg"], capture=True)
split_entries = data[0].splitlines()
for i in split_entries:
- if i.decode('UTF-8').find('user') != -1:
- splitup = i.decode('UTF-8').split()
- stripped = splitup[1].strip(']')
+ if i.decode("UTF-8").find("user") != -1:
+ splitup = i.decode("UTF-8").split()
+ stripped = splitup[1].strip("]")
# kernel timestamp from dmesg is equal to 0,
# with the userspace timestamp relative to it.
@@ -228,8 +231,7 @@ def gather_timestamps_using_dmesg():
# systemd wont start cloud-init in this case,
# so we cannot get that timestamp
- return SUCCESS_CODE, kernel_start, kernel_end, \
- kernel_end
+ return SUCCESS_CODE, kernel_start, kernel_end, kernel_end
except Exception:
pass
@@ -237,18 +239,20 @@ def gather_timestamps_using_dmesg():
def gather_timestamps_using_systemd():
- '''
+ """
Gather timestamps that corresponds to kernel begin initialization,
kernel finish initialization. and cloud-init systemd unit activation
:return: the three timestamps
- '''
+ """
kernel_start = float(time.time()) - float(util.uptime())
try:
- delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\
- .parse_epoch_as_float()
- delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic',
- 'cloud-init-local').parse_epoch_as_float()
+ delta_k_end = SystemctlReader(
+ "UserspaceTimestampMonotonic"
+ ).parse_epoch_as_float()
+ delta_ci_s = SystemctlReader(
+ "InactiveExitTimestampMonotonic", "cloud-init-local"
+ ).parse_epoch_as_float()
base_time = kernel_start
status = SUCCESS_CODE
# lxc based containers do not set their monotonic zero point to be when
@@ -262,12 +266,13 @@ def gather_timestamps_using_systemd():
# in containers when https://github.com/lxc/lxcfs/issues/292
# is fixed, util.uptime() should be used instead of stat on
try:
- file_stat = os.stat('/proc/1/cmdline')
+ file_stat = os.stat("/proc/1/cmdline")
kernel_start = file_stat.st_atime
except OSError as err:
- raise RuntimeError('Could not determine container boot '
- 'time from /proc/1/cmdline. ({})'
- .format(err)) from err
+ raise RuntimeError(
+ "Could not determine container boot "
+ "time from /proc/1/cmdline. ({})".format(err)
+ ) from err
status = CONTAINER_CODE
else:
status = FAIL_CODE
@@ -283,10 +288,14 @@ def gather_timestamps_using_systemd():
return status, kernel_start, kernel_end, cloudinit_sysd
-def generate_records(events, blame_sort=False,
- print_format="(%n) %d seconds in %I%D",
- dump_files=False, log_datafiles=False):
- '''
+def generate_records(
+ events,
+ blame_sort=False,
+ print_format="(%n) %d seconds in %I%D",
+ dump_files=False,
+ log_datafiles=False,
+):
+ """
Take in raw events and create parent-child dependencies between events
in order to order events in chronological order.
@@ -298,9 +307,9 @@ def generate_records(events, blame_sort=False,
:param log_datafiles: whether or not to log events generated
:return: boot records ordered chronologically
- '''
+ """
- sorted_events = sorted(events, key=lambda x: x['timestamp'])
+ sorted_events = sorted(events, key=lambda x: x["timestamp"])
records = []
start_time = None
total_time = 0.0
@@ -316,8 +325,8 @@ def generate_records(events, blame_sort=False,
except IndexError:
next_evt = None
- if event_type(event) == 'start':
- if event.get('name') in stages_seen:
+ if event_type(event) == "start":
+ if event.get("name") in stages_seen:
records.append(total_time_record(total_time))
boot_records.append(records)
records = []
@@ -331,25 +340,28 @@ def generate_records(events, blame_sort=False,
# see if we have a pair
if event_name(event) == event_name(next_evt):
- if event_type(next_evt) == 'finish':
- records.append(format_record(print_format,
- event_record(start_time,
- event,
- next_evt)))
+ if event_type(next_evt) == "finish":
+ records.append(
+ format_record(
+ print_format,
+ event_record(start_time, event, next_evt),
+ )
+ )
else:
# This is a parent event
- records.append("Starting stage: %s" % event.get('name'))
+ records.append("Starting stage: %s" % event.get("name"))
unprocessed.append(event)
- stages_seen.append(event.get('name'))
+ stages_seen.append(event.get("name"))
continue
else:
prev_evt = unprocessed.pop()
if event_name(event) == event_name(prev_evt):
record = event_record(start_time, prev_evt, event)
- records.append(format_record("Finished stage: "
- "(%n) %d seconds",
- record) + "\n")
- total_time += record.get('delta')
+ records.append(
+ format_record("Finished stage: (%n) %d seconds", record)
+ + "\n"
+ )
+ total_time += record.get("delta")
else:
# not a match, put it back
unprocessed.append(prev_evt)
@@ -360,7 +372,7 @@ def generate_records(events, blame_sort=False,
def show_events(events, print_format):
- '''
+ """
A passthrough method that makes it easier to call generate_records()
:param events: JSONs from dump that represents events taken from logs
@@ -368,18 +380,18 @@ def show_events(events, print_format):
and time taken by the event in one line
:return: boot records ordered chronologically
- '''
+ """
return generate_records(events, print_format=print_format)
def load_events_infile(infile):
- '''
+ """
Takes in a log file, read it, and convert to json.
:param infile: The Log file to be read
:return: json version of logfile, raw file
- '''
+ """
data = infile.read()
try:
return json.loads(data), data
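
Note: putting dump and show together, a minimal end-to-end sketch that parses
a cloud-init log into events and prints boot records using the default show
format; it assumes a readable log at the default path:

    from cloudinit.analyze.dump import dump_events
    from cloudinit.analyze.show import show_events

    with open("/var/log/cloud-init.log") as logf:
        events, _ = dump_events(rawdata=logf.read())
    for idx, record in enumerate(show_events(events, "%I%D @%Es +%ds")):
        print("-- Boot Record %02d --" % (idx + 1))
        print("\n".join(record))
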
diff --git a/cloudinit/analyze/tests/test_boot.py b/cloudinit/analyze/tests/test_boot.py
deleted file mode 100644
index f69423c3..00000000
--- a/cloudinit/analyze/tests/test_boot.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import os
-from cloudinit.analyze.__main__ import (analyze_boot, get_parser)
-from cloudinit.tests.helpers import CiTestCase, mock
-from cloudinit.analyze.show import dist_check_timestamp, SystemctlReader, \
- FAIL_CODE, CONTAINER_CODE
-
-err_code = (FAIL_CODE, -1, -1, -1)
-
-
-class TestDistroChecker(CiTestCase):
-
- @mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '',
- ''),
- 'system': ''})
- @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', ''))
- @mock.patch('cloudinit.util.is_FreeBSD', return_value=False)
- def test_blank_distro(self, m_sys_info, m_get_linux_distro, m_free_bsd):
- self.assertEqual(err_code, dist_check_timestamp())
-
- @mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '',
- '')})
- @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', ''))
- @mock.patch('cloudinit.util.is_FreeBSD', return_value=True)
- def test_freebsd_gentoo_cant_find(self, m_sys_info,
- m_get_linux_distro, m_is_FreeBSD):
- self.assertEqual(err_code, dist_check_timestamp())
-
- @mock.patch('cloudinit.subp.subp', return_value=(0, 1))
- def test_subp_fails(self, m_subp):
- self.assertEqual(err_code, dist_check_timestamp())
-
-
-class TestSystemCtlReader(CiTestCase):
-
- def test_systemctl_invalid_property(self):
- reader = SystemctlReader('dummyProperty')
- with self.assertRaises(RuntimeError):
- reader.parse_epoch_as_float()
-
- def test_systemctl_invalid_parameter(self):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- with self.assertRaises(RuntimeError):
- reader.parse_epoch_as_float()
-
- @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
- def test_systemctl_works_correctly_threshold(self, m_subp):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- self.assertEqual(1.0, reader.parse_epoch_as_float())
- thresh = 1.0 - reader.parse_epoch_as_float()
- self.assertTrue(thresh < 1e-6)
- self.assertTrue(thresh > (-1 * 1e-6))
-
- @mock.patch('cloudinit.subp.subp', return_value=('U=0', None))
- def test_systemctl_succeed_zero(self, m_subp):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- self.assertEqual(0.0, reader.parse_epoch_as_float())
-
- @mock.patch('cloudinit.subp.subp', return_value=('U=1', None))
- def test_systemctl_succeed_distinct(self, m_subp):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- val1 = reader.parse_epoch_as_float()
- m_subp.return_value = ('U=2', None)
- reader2 = SystemctlReader('dummyProperty', 'dummyParameter')
- val2 = reader2.parse_epoch_as_float()
- self.assertNotEqual(val1, val2)
-
- @mock.patch('cloudinit.subp.subp', return_value=('100', None))
- def test_systemctl_epoch_not_splittable(self, m_subp):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- with self.assertRaises(IndexError):
- reader.parse_epoch_as_float()
-
- @mock.patch('cloudinit.subp.subp', return_value=('U=foobar', None))
- def test_systemctl_cannot_convert_epoch_to_float(self, m_subp):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- with self.assertRaises(ValueError):
- reader.parse_epoch_as_float()
-
-
-class TestAnalyzeBoot(CiTestCase):
-
- def set_up_dummy_file_ci(self, path, log_path):
- infh = open(path, 'w+')
- infh.write('2019-07-08 17:40:49,601 - util.py[DEBUG]: Cloud-init v. '
- '19.1-1-gbaa47854-0ubuntu1~18.04.1 running \'init-local\' '
- 'at Mon, 08 Jul 2019 17:40:49 +0000. Up 18.84 seconds.')
- infh.close()
- outfh = open(log_path, 'w+')
- outfh.close()
-
- def set_up_dummy_file(self, path, log_path):
- infh = open(path, 'w+')
- infh.write('dummy data')
- infh.close()
- outfh = open(log_path, 'w+')
- outfh.close()
-
- def remove_dummy_file(self, path, log_path):
- if os.path.isfile(path):
- os.remove(path)
- if os.path.isfile(log_path):
- os.remove(log_path)
-
- @mock.patch('cloudinit.analyze.show.dist_check_timestamp',
- return_value=err_code)
- def test_boot_invalid_distro(self, m_dist_check_timestamp):
-
- path = os.path.dirname(os.path.abspath(__file__))
- log_path = path + '/boot-test.log'
- path += '/dummy.log'
- self.set_up_dummy_file(path, log_path)
-
- parser = get_parser()
- args = parser.parse_args(args=['boot', '-i', path, '-o',
- log_path])
- name_default = ''
- analyze_boot(name_default, args)
- # now args have been tested, go into outfile and make sure error
- # message is in the outfile
- outfh = open(args.outfile, 'r')
- data = outfh.read()
- err_string = 'Your Linux distro or container does not support this ' \
- 'functionality.\nYou must be running a Kernel ' \
- 'Telemetry supported distro.\nPlease check ' \
- 'https://cloudinit.readthedocs.io/en/latest/topics' \
- '/analyze.html for more information on supported ' \
- 'distros.\n'
-
- self.remove_dummy_file(path, log_path)
- self.assertEqual(err_string, data)
-
- @mock.patch("cloudinit.util.is_container", return_value=True)
- @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
- def test_container_no_ci_log_line(self, m_is_container, m_subp):
- path = os.path.dirname(os.path.abspath(__file__))
- log_path = path + '/boot-test.log'
- path += '/dummy.log'
- self.set_up_dummy_file(path, log_path)
-
- parser = get_parser()
- args = parser.parse_args(args=['boot', '-i', path, '-o',
- log_path])
- name_default = ''
-
- finish_code = analyze_boot(name_default, args)
-
- self.remove_dummy_file(path, log_path)
- self.assertEqual(FAIL_CODE, finish_code)
-
- @mock.patch("cloudinit.util.is_container", return_value=True)
- @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
- @mock.patch('cloudinit.analyze.__main__._get_events', return_value=[{
- 'name': 'init-local', 'description': 'starting search', 'timestamp':
- 100000}])
- @mock.patch('cloudinit.analyze.show.dist_check_timestamp',
- return_value=(CONTAINER_CODE, 1, 1, 1))
- def test_container_ci_log_line(self, m_is_container, m_subp, m_get, m_g):
- path = os.path.dirname(os.path.abspath(__file__))
- log_path = path + '/boot-test.log'
- path += '/dummy.log'
- self.set_up_dummy_file_ci(path, log_path)
-
- parser = get_parser()
- args = parser.parse_args(args=['boot', '-i', path, '-o',
- log_path])
- name_default = ''
- finish_code = analyze_boot(name_default, args)
-
- self.remove_dummy_file(path, log_path)
- self.assertEqual(CONTAINER_CODE, finish_code)
diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
deleted file mode 100644
index dac1efb6..00000000
--- a/cloudinit/analyze/tests/test_dump.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from datetime import datetime
-from textwrap import dedent
-
-from cloudinit.analyze.dump import (
- dump_events, parse_ci_logline, parse_timestamp)
-from cloudinit.util import write_file
-from cloudinit.subp import which
-from cloudinit.tests.helpers import CiTestCase, mock, skipIf
-
-
-class TestParseTimestamp(CiTestCase):
-
- def test_parse_timestamp_handles_cloud_init_default_format(self):
- """Logs with cloud-init detailed formats will be properly parsed."""
- trusty_fmt = '%Y-%m-%d %H:%M:%S,%f'
- trusty_stamp = '2016-09-12 14:39:20,839'
- dt = datetime.strptime(trusty_stamp, trusty_fmt)
- self.assertEqual(
- float(dt.strftime('%s.%f')), parse_timestamp(trusty_stamp))
-
- def test_parse_timestamp_handles_syslog_adding_year(self):
- """Syslog timestamps lack a year. Add year and properly parse."""
- syslog_fmt = '%b %d %H:%M:%S %Y'
- syslog_stamp = 'Aug 08 15:12:51'
-
- # convert stamp ourselves by adding the missing year value
- year = datetime.now().year
- dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt)
- self.assertEqual(
- float(dt.strftime('%s.%f')),
- parse_timestamp(syslog_stamp))
-
- def test_parse_timestamp_handles_journalctl_format_adding_year(self):
- """Journalctl precise timestamps lack a year. Add year and parse."""
- journal_fmt = '%b %d %H:%M:%S.%f %Y'
- journal_stamp = 'Aug 08 17:15:50.606811'
-
- # convert stamp ourselves by adding the missing year value
- year = datetime.now().year
- dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt)
- self.assertEqual(
- float(dt.strftime('%s.%f')), parse_timestamp(journal_stamp))
-
- @skipIf(not which("date"), "'date' command not available.")
- def test_parse_unexpected_timestamp_format_with_date_command(self):
- """Dump sends unexpected timestamp formats to date for processing."""
- new_fmt = '%H:%M %m/%d %Y'
- new_stamp = '17:15 08/08'
- # convert stamp ourselves by adding the missing year value
- year = datetime.now().year
- dt = datetime.strptime(new_stamp + " " + str(year), new_fmt)
-
- # use date(1)
- with self.allow_subp(["date"]):
- self.assertEqual(
- float(dt.strftime('%s.%f')), parse_timestamp(new_stamp))
-
-
-class TestParseCILogLine(CiTestCase):
-
- def test_parse_logline_returns_none_without_separators(self):
- """When no separators are found, parse_ci_logline returns None."""
- expected_parse_ignores = [
- '', '-', 'adsf-asdf', '2017-05-22 18:02:01,088', 'CLOUDINIT']
- for parse_ignores in expected_parse_ignores:
- self.assertIsNone(parse_ci_logline(parse_ignores))
-
- def test_parse_logline_returns_event_for_cloud_init_logs(self):
- """parse_ci_logline returns an event parse from cloud-init format."""
- line = (
- "2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9"
- " running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000. Up"
- " 6.26 seconds.")
- dt = datetime.strptime(
- '2017-08-08 20:05:07,147', '%Y-%m-%d %H:%M:%S,%f')
- timestamp = float(dt.strftime('%s.%f'))
- expected = {
- 'description': 'starting search for local datasources',
- 'event_type': 'start',
- 'name': 'init-local',
- 'origin': 'cloudinit',
- 'timestamp': timestamp}
- self.assertEqual(expected, parse_ci_logline(line))
-
- def test_parse_logline_returns_event_for_journalctl_logs(self):
- """parse_ci_logline returns an event parse from journalctl format."""
- line = ("Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]"
- " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at"
- " Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds.")
- year = datetime.now().year
- dt = datetime.strptime(
- 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
- timestamp = float(dt.strftime('%s.%f'))
- expected = {
- 'description': 'starting search for local datasources',
- 'event_type': 'start',
- 'name': 'init-local',
- 'origin': 'cloudinit',
- 'timestamp': timestamp}
- self.assertEqual(expected, parse_ci_logline(line))
-
- @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
- def test_parse_logline_returns_event_for_finish_events(self,
- m_parse_from_date):
- """parse_ci_logline returns a finish event for a parsed log line."""
- line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]'
- ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running'
- ' modules for final')
- expected = {
- 'description': 'running modules for final',
- 'event_type': 'finish',
- 'name': 'modules-final',
- 'origin': 'cloudinit',
- 'result': 'SUCCESS',
- 'timestamp': 1472594005.972}
- m_parse_from_date.return_value = "1472594005.972"
- self.assertEqual(expected, parse_ci_logline(line))
- m_parse_from_date.assert_has_calls(
- [mock.call("2016-08-30 21:53:25.972325+00:00")])
-
- def test_parse_logline_returns_event_for_amazon_linux_2_line(self):
- line = (
- "Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:"
- " init-local/check-cache: attempting to read from cache [check]")
- # Generate the expected value using `datetime`, so that TZ
- # determination is consistent with the code under test.
- timestamp_dt = datetime.strptime(
- "Apr 30 19:39:11", "%b %d %H:%M:%S"
- ).replace(year=datetime.now().year)
- expected = {
- 'description': 'attempting to read from cache [check]',
- 'event_type': 'start',
- 'name': 'init-local/check-cache',
- 'origin': 'cloudinit',
- 'timestamp': timestamp_dt.timestamp()}
- self.assertEqual(expected, parse_ci_logline(line))
-
-
-SAMPLE_LOGS = dedent("""\
-Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
- Cloud-init v. 0.7.8 running 'init-local' at Thu, 03 Nov 2016\
- 06:51:06 +0000. Up 1.0 seconds.
-2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: finish:\
- modules-final: SUCCESS: running modules for final
-""")
-
-
-class TestDumpEvents(CiTestCase):
- maxDiff = None
-
- @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
- def test_dump_events_with_rawdata(self, m_parse_from_date):
- """Rawdata is split and parsed into a tuple of events and data"""
- m_parse_from_date.return_value = "1472594005.972"
- events, data = dump_events(rawdata=SAMPLE_LOGS)
- expected_data = SAMPLE_LOGS.splitlines()
- self.assertEqual(
- [mock.call("2016-08-30 21:53:25.972325+00:00")],
- m_parse_from_date.call_args_list)
- self.assertEqual(expected_data, data)
- year = datetime.now().year
- dt1 = datetime.strptime(
- 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
- timestamp1 = float(dt1.strftime('%s.%f'))
- expected_events = [{
- 'description': 'starting search for local datasources',
- 'event_type': 'start',
- 'name': 'init-local',
- 'origin': 'cloudinit',
- 'timestamp': timestamp1}, {
- 'description': 'running modules for final',
- 'event_type': 'finish',
- 'name': 'modules-final',
- 'origin': 'cloudinit',
- 'result': 'SUCCESS',
- 'timestamp': 1472594005.972}]
- self.assertEqual(expected_events, events)
-
- @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
- def test_dump_events_with_cisource(self, m_parse_from_date):
- """Cisource file is read and parsed into a tuple of events and data."""
- tmpfile = self.tmp_path('logfile')
- write_file(tmpfile, SAMPLE_LOGS)
- m_parse_from_date.return_value = 1472594005.972
-
- events, data = dump_events(cisource=open(tmpfile))
- year = datetime.now().year
- dt1 = datetime.strptime(
- 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
- timestamp1 = float(dt1.strftime('%s.%f'))
- expected_events = [{
- 'description': 'starting search for local datasources',
- 'event_type': 'start',
- 'name': 'init-local',
- 'origin': 'cloudinit',
- 'timestamp': timestamp1}, {
- 'description': 'running modules for final',
- 'event_type': 'finish',
- 'name': 'modules-final',
- 'origin': 'cloudinit',
- 'result': 'SUCCESS',
- 'timestamp': 1472594005.972}]
- self.assertEqual(expected_events, events)
- self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data])
- m_parse_from_date.assert_has_calls(
- [mock.call("2016-08-30 21:53:25.972325+00:00")])