summaryrefslogtreecommitdiff
path: root/cloudinit/reporting
diff options
context:
space:
mode:
Diffstat (limited to 'cloudinit/reporting')
-rw-r--r--cloudinit/reporting/events.py23
-rwxr-xr-xcloudinit/reporting/handlers.py39
2 files changed, 43 insertions, 19 deletions
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index e5dfab33..b8677c8b 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -12,7 +12,7 @@ import base64
import os.path
import time
-from . import instantiated_handler_registry
+from . import instantiated_handler_registry, available_handlers
FINISH_EVENT_TYPE = 'finish'
START_EVENT_TYPE = 'start'
@@ -81,17 +81,32 @@ class FinishReportingEvent(ReportingEvent):
return data
-def report_event(event):
- """Report an event to all registered event handlers.
+def report_event(event, excluded_handler_types=None):
+ """Report an event to all registered event handlers
+ except those whose type is in excluded_handler_types.
This should generally be called via one of the other functions in
the reporting module.
+ :param excluded_handler_types:
+        List of handler types to exclude from reporting the event to.
:param event_type:
The type of the event; this should be a constant from the
reporting module.
"""
- for _, handler in instantiated_handler_registry.registered_items.items():
+
+ if not excluded_handler_types:
+ excluded_handler_types = {}
+ excluded_handler_classes = {
+ hndl_cls
+ for hndl_type, hndl_cls in available_handlers.registered_items.items()
+ if hndl_type in excluded_handler_types
+ }
+
+ handlers = instantiated_handler_registry.registered_items.items()
+ for _, handler in handlers:
+ if type(handler) in excluded_handler_classes:
+ continue # skip this excluded handler
handler.publish_event(event)
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 946df7e0..0a8c7af3 100755
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -35,7 +35,6 @@ class ReportingHandler(metaclass=abc.ABCMeta):
def flush(self):
"""Ensure ReportingHandler has published all events"""
- pass
class LogHandler(ReportingHandler):
@@ -114,6 +113,8 @@ class HyperVKvpReportingHandler(ReportingHandler):
https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
"""
HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
+ # The maximum value size expected in Azure
+ HV_KVP_AZURE_MAX_VALUE_SIZE = 1024
HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
@@ -139,7 +140,8 @@ class HyperVKvpReportingHandler(ReportingHandler):
self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
self.incarnation_no)
self.publish_thread = threading.Thread(
- target=self._publish_event_routine)
+ target=self._publish_event_routine
+ )
self.publish_thread.daemon = True
self.publish_thread.start()
@@ -195,17 +197,23 @@ class HyperVKvpReportingHandler(ReportingHandler):
def _event_key(self, event):
"""
the event key format is:
- CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<time>
+ CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<uuid>
+ [|subevent_index]
"""
return u"{0}|{1}|{2}|{3}".format(self.event_key_prefix,
event.event_type, event.name,
uuid.uuid4())
def _encode_kvp_item(self, key, value):
- data = (struct.pack("%ds%ds" % (
+ data = struct.pack(
+ "%ds%ds"
+ % (
self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
- self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
- key.encode('utf-8'), value.encode('utf-8')))
+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
+ ),
+ key.encode("utf-8"),
+ value.encode("utf-8"),
+ )
return data
def _decode_kvp_item(self, record_data):
@@ -219,7 +227,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
v = (
record_data[
self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
- ].decode('utf-8').strip('\x00'))
+ ].decode('utf-8').strip('\x00'))
return {'key': k, 'value': v}
@@ -244,13 +252,14 @@ class HyperVKvpReportingHandler(ReportingHandler):
data_without_desc = json.dumps(meta_data,
separators=self.JSON_SEPARATORS)
room_for_desc = (
- self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
+ self.HV_KVP_AZURE_MAX_VALUE_SIZE -
len(data_without_desc) - 8)
value = data_without_desc.replace(
message_place_holder,
'"{key}":"{desc}"'.format(
key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
- result_array.append(self._encode_kvp_item(key, value))
+ subkey = "{}|{}".format(key, i)
+ result_array.append(self._encode_kvp_item(subkey, value))
i += 1
des_in_json = des_in_json[room_for_desc:]
if len(des_in_json) == 0:
@@ -265,11 +274,11 @@ class HyperVKvpReportingHandler(ReportingHandler):
"""
key = self._event_key(event)
meta_data = {
- "name": event.name,
- "type": event.event_type,
- "ts": (datetime.utcfromtimestamp(event.timestamp)
- .isoformat() + 'Z'),
- }
+ "name": event.name,
+ "type": event.event_type,
+ "ts": (datetime.utcfromtimestamp(event.timestamp)
+ .isoformat() + 'Z'),
+ }
if hasattr(event, self.RESULT_KEY):
meta_data[self.RESULT_KEY] = event.result
meta_data[self.MSG_KEY] = event.description
@@ -277,7 +286,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
# if it reaches the maximum length of kvp value,
# break it down to slices.
# this should be very corner case.
- if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
+ if len(value) > self.HV_KVP_AZURE_MAX_VALUE_SIZE:
return self._break_down(key, meta_data, event.description)
else:
data = self._encode_kvp_item(key, value)