From 3c551f6ebc12f7729a2755c89b19b9000e27cc88 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 8 Jun 2020 12:49:12 -0400 Subject: Move subp into its own module. (#416) This was painful, but it finishes a TODO from cloudinit/subp.py. It moves the following from util to subp: ProcessExecutionError subp which target_path I moved subp_blob_in_tempfile into cc_chef, which is its only caller. That saved us from having to deal with it using write_file and temp_utils from subp (which does not import any cloudinit things now). It is arguable that 'target_path' could be moved to a 'path_utils' or something, but in order to use it from subp and also from utils, we had to get it out of utils. --- tests/unittests/test_reporting_hyperv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tests/unittests/test_reporting_hyperv.py') diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py index b3e083c6..fa8f8859 100644 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -131,7 +131,7 @@ class TextKvpReporter(CiTestCase): self.assertEqual(0, len(kvps)) @mock.patch('cloudinit.distros.uses_systemd') - @mock.patch('cloudinit.util.subp') + @mock.patch('cloudinit.subp.subp') def test_get_boot_telemetry(self, m_subp, m_sysd): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) datetime_pattern = r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]" -- cgit v1.2.3 From f3bd42659efeed4b092ffcdfd5df7f24813f2d3e Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 10 Jun 2020 07:39:29 -0700 Subject: test: fix all flake8 E126 errors (#425) --- cloudinit/cmd/devel/render.py | 5 +- cloudinit/cmd/query.py | 5 +- cloudinit/distros/netbsd.py | 10 +- cloudinit/distros/openbsd.py | 7 +- cloudinit/net/__init__.py | 5 +- cloudinit/net/bsd.py | 5 +- cloudinit/net/netbsd.py | 5 +- cloudinit/net/openbsd.py | 5 +- cloudinit/net/sysconfig.py | 2 +- cloudinit/reporting/handlers.py | 14 +- cloudinit/sources/DataSourceAzure.py | 33 +++-- cloudinit/sources/helpers/tests/test_netlink.py | 165 +++++++++++++-------- cloudinit/sources/tests/test_init.py | 9 +- tests/cloud_tests/testcases/base.py | 2 +- tests/unittests/test_datasource/test_azure.py | 33 +++-- tests/unittests/test_datasource/test_scaleway.py | 42 ++++-- tests/unittests/test_distros/test_bsd_utils.py | 5 +- .../unittests/test_handler/test_handler_mounts.py | 7 +- tests/unittests/test_reporting_hyperv.py | 11 +- tests/unittests/test_sshutil.py | 18 ++- tox.ini | 3 +- 21 files changed, 238 insertions(+), 153 deletions(-) (limited to 'tests/unittests/test_reporting_hyperv.py') diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py index 1bc22406..1090aa16 100755 --- a/cloudinit/cmd/devel/render.py +++ b/cloudinit/cmd/devel/render.py @@ -57,8 +57,9 @@ def handle_args(name, args): paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) if not os.path.exists(instance_data_fn): LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - instance_data_fn, redacted_data_fn) + 'Missing root-readable %s. Using redacted %s instead.', + instance_data_fn, redacted_data_fn + ) instance_data_fn = redacted_data_fn else: instance_data_fn = redacted_data_fn diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index e3db8679..0fb48ebd 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -90,8 +90,9 @@ def handle_args(name, args): instance_data_fn = sensitive_data_fn else: LOG.warning( - 'Missing root-readable %s. 
Using redacted %s instead.', - sensitive_data_fn, redacted_data_fn) + 'Missing root-readable %s. Using redacted %s instead.', + sensitive_data_fn, redacted_data_fn + ) instance_data_fn = redacted_data_fn else: instance_data_fn = redacted_data_fn diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py index 066737a8..f1a9b182 100644 --- a/cloudinit/distros/netbsd.py +++ b/cloudinit/distros/netbsd.py @@ -100,8 +100,9 @@ class NetBSD(cloudinit.distros.bsd.BSD): else: method = crypt.METHOD_BLOWFISH # pylint: disable=E1101 hashed_pw = crypt.crypt( - passwd, - crypt.mksalt(method)) + passwd, + crypt.mksalt(method) + ) try: subp.subp(['usermod', '-p', hashed_pw, user]) @@ -143,8 +144,9 @@ class NetBSD(cloudinit.distros.bsd.BSD): os_arch = platform.machine() e = os.environ.copy() e['PKG_PATH'] = ( - 'http://cdn.netbsd.org/pub/pkgsrc/' - 'packages/NetBSD/%s/%s/All') % (os_arch, os_release) + 'http://cdn.netbsd.org/pub/pkgsrc/' + 'packages/NetBSD/%s/%s/All' + ) % (os_arch, os_release) return e def update_package_sources(self): diff --git a/cloudinit/distros/openbsd.py b/cloudinit/distros/openbsd.py index 07c76530..720c9cf3 100644 --- a/cloudinit/distros/openbsd.py +++ b/cloudinit/distros/openbsd.py @@ -42,9 +42,10 @@ class Distro(cloudinit.distros.netbsd.NetBSD): os_arch = platform.machine() e = os.environ.copy() e['PKG_PATH'] = ( - 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/' - 'packages/{os_arch}/').format( - os_arch=os_arch, os_release=os_release) + 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/' + 'packages/{os_arch}/').format( + os_arch=os_arch, os_release=os_release + ) return e diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index a57fea0a..b40cb154 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -849,8 +849,9 @@ def get_interfaces_by_mac_on_freebsd(): def get_interfaces_by_mac_on_netbsd(): ret = {} re_field_match = ( - r"(?P\w+).*address:\s" - r"(?P([\da-f]{2}[:-]){5}([\da-f]{2})).*") + r"(?P\w+).*address:\s" + r"(?P([\da-f]{2}[:-]){5}([\da-f]{2})).*" + ) (out, _) = subp.subp(['ifconfig', '-a']) if_lines = re.sub(r'\n\s+', ' ', out).splitlines() for line in if_lines: diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py index 1c355a98..e34e0454 100644 --- a/cloudinit/net/bsd.py +++ b/cloudinit/net/bsd.py @@ -66,8 +66,9 @@ class BSDRenderer(renderer.Renderer): if subnet.get('type') == 'static': if not subnet.get('netmask'): LOG.debug( - 'Skipping IP %s, because there is no netmask', - subnet.get('address')) + 'Skipping IP %s, because there is no netmask', + subnet.get('address') + ) continue LOG.debug('Configuring dev %s with %s / %s', device_name, subnet.get('address'), subnet.get('netmask')) diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py index 30437b5f..71b38ee6 100644 --- a/cloudinit/net/netbsd.py +++ b/cloudinit/net/netbsd.py @@ -17,8 +17,9 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): if self.dhcp_interfaces(): self.set_rc_config_value('dhcpcd', 'YES') self.set_rc_config_value( - 'dhcpcd_flags', - ' '.join(self.dhcp_interfaces())) + 'dhcpcd_flags', + ' '.join(self.dhcp_interfaces()) + ) for device_name, v in self.interface_configurations.items(): if isinstance(v, dict): self.set_rc_config_value( diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py index 489ea48b..166d77e6 100644 --- a/cloudinit/net/openbsd.py +++ b/cloudinit/net/openbsd.py @@ -19,8 +19,9 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): elif isinstance(v, dict): try: content = "inet {address} {netmask}\n".format( - 
address=v['address'], - netmask=v['netmask']) + address=v['address'], + netmask=v['netmask'] + ) except KeyError: LOG.error( "Invalid static configuration for %s", diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index f36c300f..0a5d481d 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -505,7 +505,7 @@ class Renderer(renderer.Renderer): iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr else: iface_cfg['IPV6ADDR_SECONDARIES'] += \ - " " + ipv6_cidr + " " + ipv6_cidr else: ipv4_index = ipv4_index + 1 suff = "" if ipv4_index == 0 else str(ipv4_index) diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index 00e8d2e5..6b9127b6 100755 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -139,7 +139,8 @@ class HyperVKvpReportingHandler(ReportingHandler): self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX, self.incarnation_no) self.publish_thread = threading.Thread( - target=self._publish_event_routine) + target=self._publish_event_routine + ) self.publish_thread.daemon = True self.publish_thread.start() @@ -202,10 +203,15 @@ class HyperVKvpReportingHandler(ReportingHandler): uuid.uuid4()) def _encode_kvp_item(self, key, value): - data = (struct.pack("%ds%ds" % ( + data = struct.pack( + "%ds%ds" + % ( self.HV_KVP_EXCHANGE_MAX_KEY_SIZE, - self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), - key.encode('utf-8'), value.encode('utf-8'))) + self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE, + ), + key.encode("utf-8"), + value.encode("utf-8"), + ) return data def _decode_kvp_item(self, record_data): diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 89312b9e..6d569057 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -523,8 +523,9 @@ class DataSourceAzure(sources.DataSource): try: crawled_data = util.log_time( - logfunc=LOG.debug, msg='Crawl of metadata service', - func=self.crawl_metadata) + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self.crawl_metadata + ) except sources.InvalidMetaDataException as e: LOG.warning('Could not crawl Azure metadata: %s', e) return False @@ -893,9 +894,10 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): (cand_part, cand_path, devpath)) with events.ReportEventStack( - name="mount-ntfs-and-count", - description="mount-ntfs-and-count", - parent=azure_ds_reporter) as evt: + name="mount-ntfs-and-count", + description="mount-ntfs-and-count", + parent=azure_ds_reporter + ) as evt: try: file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", update_env_for_mount={'LANG': 'C'}) @@ -924,9 +926,10 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, # wait for ephemeral disk to come up naplen = .2 with events.ReportEventStack( - name="wait-for-ephemeral-disk", - description="wait for ephemeral disk", - parent=azure_ds_reporter): + name="wait-for-ephemeral-disk", + description="wait for ephemeral disk", + parent=azure_ds_reporter + ): missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, @@ -1334,9 +1337,10 @@ def parse_network_config(imds_metadata): @return: Dictionary containing network version 2 standard configuration. 
""" with events.ReportEventStack( - name="parse_network_config", - description="", - parent=azure_ds_reporter) as evt: + name="parse_network_config", + description="", + parent=azure_ds_reporter + ) as evt: if imds_metadata != sources.UNSET and imds_metadata: netconfig = {'version': 2, 'ethernets': {}} LOG.debug('Azure: generating network configuration from IMDS') @@ -1480,9 +1484,10 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): def _is_platform_viable(seed_dir): with events.ReportEventStack( - name="check-platform-viability", - description="found azure asset tag", - parent=azure_ds_reporter) as evt: + name="check-platform-viability", + description="found azure asset tag", + parent=azure_ds_reporter + ) as evt: """Check platform environment to report if this datasource may run.""" asset_tag = util.read_dmi_data('chassis-asset-tag') diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py index 58c3adc6..10760bd6 100644 --- a/cloudinit/sources/helpers/tests/test_netlink.py +++ b/cloudinit/sources/helpers/tests/test_netlink.py @@ -180,17 +180,22 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): other_ifname = "eth1" expected_ifname = "eth0" data_op_down_eth1 = self._media_switch_data( - other_ifname, RTM_NEWLINK, OPER_DOWN) + other_ifname, RTM_NEWLINK, OPER_DOWN + ) data_op_up_eth1 = self._media_switch_data( - other_ifname, RTM_NEWLINK, OPER_UP) + other_ifname, RTM_NEWLINK, OPER_UP + ) data_op_down_eth0 = self._media_switch_data( - expected_ifname, RTM_NEWLINK, OPER_DOWN) + expected_ifname, RTM_NEWLINK, OPER_DOWN + ) data_op_up_eth0 = self._media_switch_data( - expected_ifname, RTM_NEWLINK, OPER_UP) - m_read_netlink_socket.side_effect = [data_op_down_eth1, - data_op_up_eth1, - data_op_down_eth0, - data_op_up_eth0] + expected_ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [ + data_op_down_eth1, + data_op_up_eth1, + data_op_down_eth0, + data_op_up_eth0 + ] wait_for_media_disconnect_connect(m_socket, expected_ifname) self.assertIn('Ignored netlink event on interface %s' % other_ifname, self.logs.getvalue()) @@ -207,17 +212,23 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): ''' ifname = "eth0" data_getlink_down = self._media_switch_data( - ifname, RTM_GETLINK, OPER_DOWN) + ifname, RTM_GETLINK, OPER_DOWN + ) data_getlink_up = self._media_switch_data( - ifname, RTM_GETLINK, OPER_UP) + ifname, RTM_GETLINK, OPER_UP + ) data_newlink_down = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_DOWN) + ifname, RTM_NEWLINK, OPER_DOWN + ) data_newlink_up = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UP) - m_read_netlink_socket.side_effect = [data_getlink_down, - data_getlink_up, - data_newlink_down, - data_newlink_up] + ifname, RTM_NEWLINK, OPER_UP + ) + m_read_netlink_socket.side_effect = [ + data_getlink_down, + data_getlink_up, + data_newlink_down, + data_newlink_up + ] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 4) @@ -233,19 +244,25 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): ''' ifname = "eth0" data_setlink_down = self._media_switch_data( - ifname, RTM_SETLINK, OPER_DOWN) + ifname, RTM_SETLINK, OPER_DOWN + ) data_setlink_up = self._media_switch_data( - ifname, RTM_SETLINK, OPER_UP) + ifname, RTM_SETLINK, OPER_UP + ) data_newlink_down = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_DOWN) + ifname, RTM_NEWLINK, OPER_DOWN + ) data_newlink_up = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UP) - 
m_read_netlink_socket.side_effect = [data_setlink_down, - data_setlink_up, - data_newlink_down, - data_newlink_up, - data_newlink_down, - data_newlink_up] + ifname, RTM_NEWLINK, OPER_UP + ) + m_read_netlink_socket.side_effect = [ + data_setlink_down, + data_setlink_up, + data_newlink_down, + data_newlink_up, + data_newlink_down, + data_newlink_up + ] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 4) @@ -255,23 +272,30 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): ifname = "eth0" data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK, - OPER_DORMANT) - data_op_notpresent = self._media_switch_data(ifname, RTM_NEWLINK, - OPER_NOTPRESENT) - data_op_lowerdown = self._media_switch_data(ifname, RTM_NEWLINK, - OPER_LOWERLAYERDOWN) - data_op_testing = self._media_switch_data(ifname, RTM_NEWLINK, - OPER_TESTING) - data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK, - OPER_UNKNOWN) - m_read_netlink_socket.side_effect = [data_op_up, data_op_up, - data_op_dormant, data_op_up, - data_op_notpresent, data_op_up, - data_op_lowerdown, data_op_up, - data_op_testing, data_op_up, - data_op_unknown, data_op_up, - data_op_down, data_op_up] + data_op_dormant = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_DORMANT + ) + data_op_notpresent = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_NOTPRESENT + ) + data_op_lowerdown = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN + ) + data_op_testing = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_TESTING + ) + data_op_unknown = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_UNKNOWN + ) + m_read_netlink_socket.side_effect = [ + data_op_up, data_op_up, + data_op_dormant, data_op_up, + data_op_notpresent, data_op_up, + data_op_lowerdown, data_op_up, + data_op_testing, data_op_up, + data_op_unknown, data_op_up, + data_op_down, data_op_up + ] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 14) @@ -281,12 +305,14 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): ifname = "eth0" data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK, - OPER_DORMANT) - data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK, - OPER_UNKNOWN) - m_read_netlink_socket.side_effect = [data_op_down, data_op_dormant, - data_op_unknown, data_op_up] + data_op_dormant = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_DORMANT) + data_op_unknown = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_UNKNOWN) + m_read_netlink_socket.side_effect = [ + data_op_down, data_op_dormant, + data_op_unknown, data_op_up + ] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 4) @@ -300,9 +326,11 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7) - m_read_netlink_socket.side_effect = [data_op_invalid, data_op_up, - data_op_down, data_op_invalid, - data_op_up] + m_read_netlink_socket.side_effect = [ + data_op_invalid, data_op_up, + data_op_down, data_op_invalid, + data_op_up + ] 
wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 5) @@ -333,8 +361,9 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None) data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - m_read_netlink_socket.side_effect = [data_invalid1, data_invalid2, - data_op_down, data_op_up] + m_read_netlink_socket.side_effect = [ + data_invalid1, data_invalid2, data_op_down, data_op_up + ] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 4) @@ -344,11 +373,15 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): bytes = ifname.encode("utf-8") data = bytearray(96) struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(OPER_DOWN)) + struct.pack_into( + "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(OPER_DOWN) + ) struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into("HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8, - 3, bytes, 5, 16, int_to_bytes(OPER_UP)) + struct.pack_into( + "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8, + 3, bytes, 5, 16, int_to_bytes(OPER_UP) + ) m_read_netlink_socket.return_value = data wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 1) @@ -360,14 +393,18 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): data1 = bytearray(112) data2 = bytearray(32) struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into("HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(OPER_DOWN)) + struct.pack_into( + "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(OPER_DOWN) + ) struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into("HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, - int_to_bytes(OPER_DOWN)) + struct.pack_into( + "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN) + ) struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into("HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, - int_to_bytes(OPER_UP)) + struct.pack_into( + "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP) + ) m_read_netlink_socket.side_effect = [data1, data2] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 2) diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 5b6f1b3f..1420a988 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -436,10 +436,11 @@ class TestDataSource(CiTestCase): expected = { 'base64_encoded_keys': [], 'merged_cfg': { - '_doc': ( - 'Merged cloud-init system config from ' - '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'), - 'datasource': {'_undef': {'key1': False}}}, + '_doc': ( + 'Merged cloud-init system config from ' + '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/' + ), + 'datasource': {'_undef': {'key1': False}}}, 'sensitive_keys': [ 'ds/meta_data/some/security-credentials', 'merged_cfg'], 'sys_info': sys_info, diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 68d59111..2e7c6686 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -321,7 +321,7 @@ class 
CloudTestCase(unittest.TestCase): "Unexpected sys_info dist value") self.assertEqual(self.os_name, v1_data['distro_release']) self.assertEqual( - str(self.os_cfg['version']), v1_data['distro_version']) + str(self.os_cfg['version']), v1_data['distro_version']) self.assertEqual('x86_64', v1_data['machine']) self.assertIsNotNone( re.match(r'3.\d\.\d', v1_data['python_version']), diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 05552a1e..a99cbd41 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -683,15 +683,17 @@ scbus-1 on xpt0 bus 0 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') def test_crawl_metadata_on_reprovision_reports_ready( - self, poll_imds_func, - report_ready_func, - m_write, m_dhcp): + self, poll_imds_func, report_ready_func, m_write, m_dhcp + ): """If reprovisioning, report ready at the end""" ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "True"}) + platform_settings={"PreprovisionedVm": "True"} + ) - data = {'ovfcontent': ovfenv, - 'sys_cfg': {}} + data = { + 'ovfcontent': ovfenv, + 'sys_cfg': {} + } dsrc = self._get_ds(data) poll_imds_func.return_value = ovfenv dsrc.crawl_metadata() @@ -706,15 +708,18 @@ scbus-1 on xpt0 bus 0 @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') @mock.patch('cloudinit.sources.DataSourceAzure.readurl') def test_crawl_metadata_on_reprovision_reports_ready_using_lease( - self, m_readurl, m_dhcp, - m_net, report_ready_func, - m_media_switch, m_write): + self, m_readurl, m_dhcp, m_net, report_ready_func, + m_media_switch, m_write + ): """If reprovisioning, report ready using the obtained lease""" ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "True"}) + platform_settings={"PreprovisionedVm": "True"} + ) - data = {'ovfcontent': ovfenv, - 'sys_cfg': {}} + data = { + 'ovfcontent': ovfenv, + 'sys_cfg': {} + } dsrc = self._get_ds(data) lease = { @@ -1955,8 +1960,8 @@ class TestPreprovisioningPollIMDS(CiTestCase): response = requests.Response() response.status_code = 404 if self.tries == 2 else 410 raise requests.exceptions.HTTPError( - "fake {}".format(response.status_code), - response=response) + "fake {}".format(response.status_code), response=response + ) # Third try should succeed and stop retries or redhcp return mock.MagicMock(status_code=200, text="good", content="good") diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index 15441454..9d82bda9 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -353,12 +353,16 @@ class TestDataSourceScaleway(HttprettyTestCase): self.datasource.metadata['ipv6'] = None netcfg = self.datasource.network_config - resp = {'version': 1, - 'config': [{ - 'type': 'physical', - 'name': 'ens2', - 'subnets': [{'type': 'dhcp4'}]}] + resp = { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}] } + ] + } self.assertEqual(netcfg, resp) @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') @@ -424,12 +428,16 @@ class TestDataSourceScaleway(HttprettyTestCase): self.datasource.metadata['ipv6'] = None self.datasource._network_config = sources.UNSET - resp = {'version': 1, - 'config': [{ - 'type': 'physical', - 'name': 'ens2', - 'subnets': 
[{'type': 'dhcp4'}]}] + resp = { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}] } + ] + } netcfg = self.datasource.network_config self.assertEqual(netcfg, resp) @@ -448,12 +456,16 @@ class TestDataSourceScaleway(HttprettyTestCase): self.datasource.metadata['ipv6'] = None self.datasource._network_config = None - resp = {'version': 1, - 'config': [{ - 'type': 'physical', - 'name': 'ens2', - 'subnets': [{'type': 'dhcp4'}]}] + resp = { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}] } + ] + } netcfg = self.datasource.network_config self.assertEqual(netcfg, resp) diff --git a/tests/unittests/test_distros/test_bsd_utils.py b/tests/unittests/test_distros/test_bsd_utils.py index b38e4af5..3a68f2a9 100644 --- a/tests/unittests/test_distros/test_bsd_utils.py +++ b/tests/unittests/test_distros/test_bsd_utils.py @@ -62,5 +62,6 @@ class TestBsdUtils(CiTestCase): self.load_file.return_value = RC_FILE.format(hostname='foo') bsd_utils.set_rc_config_value('hostname', 'bar') self.write_file.assert_called_with( - '/etc/rc.conf', - RC_FILE.format(hostname='bar')) + '/etc/rc.conf', + RC_FILE.format(hostname='bar') + ) diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py index 80c53c83..b643e3ae 100644 --- a/tests/unittests/test_handler/test_handler_mounts.py +++ b/tests/unittests/test_handler/test_handler_mounts.py @@ -260,8 +260,11 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n' ) fstab_expected_content = fstab_original_content - cc = {'mounts': [ - ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']]} + cc = { + 'mounts': [ + ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec'] + ] + } with open(cc_mounts.FSTAB_PATH, 'w') as fd: fd.write(fstab_original_content) with open(cc_mounts.FSTAB_PATH, 'r') as fd: diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py index fa8f8859..b60a66ab 100644 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -93,10 +93,15 @@ class TextKvpReporter(CiTestCase): def test_not_truncate_kvp_file_modified_after_boot(self): with open(self.tmp_file_path, "wb+") as f: kvp = {'key': 'key1', 'value': 'value1'} - data = (struct.pack("%ds%ds" % ( + data = struct.pack( + "%ds%ds" + % ( HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, - HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), - kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE, + ), + kvp["key"].encode("utf-8"), + kvp["value"].encode("utf-8"), + ) f.write(data) cur_time = time.time() os.utime(self.tmp_file_path, (cur_time, cur_time)) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index b4767f0c..d15fc60b 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -374,13 +374,13 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): sshd_config = self.tmp_path('sshd_config') util.write_file( - sshd_config, - "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)) + sshd_config, + "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) + ) (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw.pw_name, sshd_config) - content = ssh_util.update_authorized_keys( - auth_key_entries, []) + fpw.pw_name, sshd_config) + 
content = ssh_util.update_authorized_keys(auth_key_entries, []) self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) self.assertTrue(VALID_CONTENT['rsa'] in content) @@ -398,11 +398,13 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): sshd_config = self.tmp_path('sshd_config') util.write_file( - sshd_config, - "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)) + sshd_config, + "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) + ) (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw.pw_name, sshd_config) + fpw.pw_name, sshd_config + ) content = ssh_util.update_authorized_keys(auth_key_entries, []) self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) diff --git a/tox.ini b/tox.ini index e04c7791..ebcebc41 100644 --- a/tox.ini +++ b/tox.ini @@ -43,11 +43,10 @@ basepython = python2.7 deps = -r{toxinidir}/test-requirements.txt [flake8] -# E126: continuation line over-indented for hanging indent # E226: missing whitespace around arithmetic operator # W503: line break before binary operator # W504: line break after binary operator -ignore=E126,E226,W503,W504 +ignore=E226,W503,W504 exclude = .venv,.tox,dist,doc,*egg,.git,build,tools per-file-ignores = cloudinit/cmd/main.py:E402 -- cgit v1.2.3 From 3cec3881062490727c5fff1b16b53f0176f976f0 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 13 Jul 2020 12:00:32 -0400 Subject: cloudinit: remove global disable of pylint W0105 and fix errors (#480) This includes a fix to a test that had a string concatenation issue, and so was only testing a prefix of what was intended. --- .pylintrc | 3 +- cloudinit/analyze/show.py | 41 ++++++++++------------ cloudinit/config/cc_resizefs.py | 14 ++++---- cloudinit/distros/opensuse.py | 2 +- cloudinit/net/eni.py | 6 ++-- cloudinit/sources/DataSourceAzure.py | 14 ++++---- cloudinit/sources/DataSourceHetzner.py | 6 ++-- .../unittests/test_datasource/test_azure_helper.py | 7 ++-- tests/unittests/test_reporting_hyperv.py | 6 ++-- 9 files changed, 45 insertions(+), 54 deletions(-) (limited to 'tests/unittests/test_reporting_hyperv.py') diff --git a/.pylintrc b/.pylintrc index 4d5d066d..f04603b9 100644 --- a/.pylintrc +++ b/.pylintrc @@ -7,7 +7,6 @@ jobs=4 [MESSAGES CONTROL] # Errors and warings with some filtered: -# W0105(pointless-string-statement) # W0107(unnecessary-pass) # W0201(attribute-defined-outside-init) # W0212(protected-access) @@ -28,7 +27,7 @@ jobs=4 # W0703(broad-except) # W1401(anomalous-backslash-in-string) -disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401 +disable=C, F, I, R, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401 [REPORTS] diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py index cca1fa7f..0c825b23 100644 --- a/cloudinit/analyze/show.py +++ b/cloudinit/analyze/show.py @@ -15,28 +15,25 @@ from cloudinit import subp from cloudinit import util from cloudinit.distros import uses_systemd -# An event: -''' -{ - "description": "executing late commands", - "event_type": "start", - "level": "INFO", - "name": "cmd-install/stage-late" - "origin": "cloudinit", - "timestamp": 1461164249.1590767, -}, - - { - "description": "executing late commands", - "event_type": "finish", - "level": "INFO", - "name": "cmd-install/stage-late", - "origin": "cloudinit", - "result": "SUCCESS", - "timestamp": 1461164249.1590767 - } - -''' +# 
Example events: +# { +# "description": "executing late commands", +# "event_type": "start", +# "level": "INFO", +# "name": "cmd-install/stage-late" +# "origin": "cloudinit", +# "timestamp": 1461164249.1590767, +# } +# { +# "description": "executing late commands", +# "event_type": "finish", +# "level": "INFO", +# "name": "cmd-install/stage-late", +# "origin": "cloudinit", +# "result": "SUCCESS", +# "timestamp": 1461164249.1590767 +# } + format_key = { '%d': 'delta', '%D': 'description', diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 8de4db30..978d2ee0 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -118,14 +118,12 @@ def _can_skip_resize_ufs(mount_point, devpth): if o == "-f": frag_sz = int(a) # check the current partition size - """ - # gpart show /dev/da0 -=> 40 62914480 da0 GPT (30G) - 40 1024 1 freebsd-boot (512K) - 1064 58719232 2 freebsd-ufs (28G) - 58720296 3145728 3 freebsd-swap (1.5G) - 61866024 1048496 - free - (512M) - """ + # Example output from `gpart show /dev/da0`: + # => 40 62914480 da0 GPT (30G) + # 40 1024 1 freebsd-boot (512K) + # 1064 58719232 2 freebsd-ufs (28G) + # 58720296 3145728 3 freebsd-swap (1.5G) + # 61866024 1048496 - free - (512M) expect_sz = None m = re.search('^(/dev/.+)p([0-9])$', devpth) gpart_res = _get_gpart_output(m.group(1)) diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index ffb7d0e8..b8e557b8 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -185,7 +185,7 @@ class Distro(distros.Distro): def preferred_ntp_clients(self): """The preferred ntp client is dependent on the version.""" - """Allow distro to determine the preferred ntp client list""" + # Allow distro to determine the preferred ntp client list if not self._preferred_ntp_clients: distro_info = util.system_info()['dist'] name = distro_info[0] diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index b4c69457..13c041f3 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -483,10 +483,8 @@ class Renderer(renderer.Renderer): if searchdomains: lo['subnets'][0]["dns_search"] = (" ".join(searchdomains)) - ''' Apply a sort order to ensure that we write out - the physical interfaces first; this is critical for - bonding - ''' + # Apply a sort order to ensure that we write out the physical + # interfaces first; this is critical for bonding order = { 'loopback': 0, 'physical': 1, diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 41431a7a..5e25b956 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -166,12 +166,11 @@ def get_resource_disk_on_freebsd(port_id): port_id = port_id - 2 g1 = "000" + str(port_id) g0g1 = "{0}-{1}".format(g0, g1) - """ - search 'X' from - 'dev.storvsc.X.%pnpinfo: - classid=32412632-86cb-44a2-9b5c-50d1417354f5 - deviceid=00000000-0001-8899-0000-000000000000' - """ + + # search 'X' from + # 'dev.storvsc.X.%pnpinfo: + # classid=32412632-86cb-44a2-9b5c-50d1417354f5 + # deviceid=00000000-0001-8899-0000-000000000000' sysctl_out = get_dev_storvsc_sysctl() storvscid = find_storvscid_from_sysctl_pnpinfo(sysctl_out, g0g1) @@ -1485,13 +1484,12 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): def _is_platform_viable(seed_dir): + """Check platform environment to report if this datasource may run.""" with events.ReportEventStack( name="check-platform-viability", description="found azure asset tag", parent=azure_ds_reporter ) as evt: - - 
"""Check platform environment to report if this datasource may run.""" asset_tag = util.read_dmi_data('chassis-asset-tag') if asset_tag == AZURE_CHASSIS_ASSET_TAG: return True diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py index 70e4274c..a86035e0 100644 --- a/cloudinit/sources/DataSourceHetzner.py +++ b/cloudinit/sources/DataSourceHetzner.py @@ -69,9 +69,9 @@ class DataSourceHetzner(sources.DataSource): self.userdata_raw = hc_helper.maybe_b64decode(ud) self.metadata_full = md - """hostname is name provided by user at launch. The API enforces - it is a valid hostname, but it is not guaranteed to be resolvable - in dns or fully qualified.""" + # hostname is name provided by user at launch. The API enforces it is + # a valid hostname, but it is not guaranteed to be resolvable in dns or + # fully qualified. self.metadata['instance-id'] = md['instance-id'] self.metadata['local-hostname'] = md['hostname'] self.metadata['network-config'] = md.get('network-config', None) diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 71ef57f0..f314cd4a 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -404,11 +404,10 @@ class TestWALinuxAgentShim(CiTestCase): self.GoalState.call_args_list) def test_certificates_used_to_determine_public_keys(self): + # if register_with_azure_and_fetch_data() isn't passed some info about + # the user's public keys, there's no point in even trying to parse the + # certificates shim = wa_shim() - """if register_with_azure_and_fetch_data() isn't passed some info about - the user's public keys, there's no point in even trying to parse - the certificates - """ mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] certs = {'fp1': 'expected-key', diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py index b60a66ab..bacf5da9 100644 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -139,8 +139,10 @@ class TextKvpReporter(CiTestCase): @mock.patch('cloudinit.subp.subp') def test_get_boot_telemetry(self, m_subp, m_sysd): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) - datetime_pattern = r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]" - r"\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z)" + datetime_pattern = ( + r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]" + r"\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z)" + ) # get_boot_telemetry makes two subp calls to systemctl. We provide # a list of values that the subp calls should return -- cgit v1.2.3 From d941c7f7846e0216873384044d20f4b6723697c5 Mon Sep 17 00:00:00 2001 From: Moustafa Moustafa Date: Thu, 20 Aug 2020 12:04:03 -0700 Subject: Pushing cloud-init log to the KVP (#529) Push the cloud-init.log file (Up to 500KB at once) to the KVP before reporting ready to the Azure platform. Based on the analysis done on a large sample of cloud-init.log files, Here's the statistics collected on the log file size: P50 P90 P95 P99 P99.9 P99.99 137K 423K 537K 3.5MB 6MB 16MB This change limits the size of cloud-init.log file data that gets dumped to KVP to 500KB. So for ~95% of the cases, the whole log file will be dumped and for the remaining ~5%, we will get the last 500KB of the cloud-init.log file. 
To assess the performance of the 500KB limit, 250 VMs were deployed
with a 500KB cloud-init.log file and the time taken to compress, encode
and dump the entries to KVP was measured. Here are the percentiles of
that time, in milliseconds:
  P50     P99      P999
  75.705  232.701  1169.636

Another 250 VMs were deployed with this logic dumping their normal
cloud-init.log files to KVP, and the same timing was measured:
  P50   P99    P999
  1.88  5.277  6.992

Added excluded_handlers to the report_event function so callers can opt
out of reporting the compressed cloud-init.log events back to the
cloud-init.log file itself.

The KVP break_down logic had a bug: it reused the same key for all the
split chunks of a KVP, so the last chunk overwrote the earlier ones
when consumed by Hyper-V. I added the split chunk index as a
differentiator to the KVP key.

Hyper-V consumes the KVPs from the KVP file as chunks with a 512KB key
and a 2048KB value, but the Azure platform expects the value to be at
most 1024KB, so I introduced the Azure value limit.
---
 cloudinit/reporting/events.py            | 23 ++++++++--
 cloudinit/reporting/handlers.py          | 12 ++++--
 cloudinit/sources/DataSourceAzure.py     | 12 ++++--
 cloudinit/sources/helpers/azure.py       | 58 ++++++++++++++++++++++++-
 tests/unittests/test_reporting_hyperv.py | 74 ++++++++++++++++++++++++++++++--
 5 files changed, 162 insertions(+), 17 deletions(-)

(limited to 'tests/unittests/test_reporting_hyperv.py')

diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index e5dfab33..b8677c8b 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -12,7 +12,7 @@ import base64
 import os.path
 import time
 
-from . import instantiated_handler_registry
+from . import instantiated_handler_registry, available_handlers
 
 FINISH_EVENT_TYPE = 'finish'
 START_EVENT_TYPE = 'start'
@@ -81,17 +81,32 @@ class FinishReportingEvent(ReportingEvent):
 
         return data
 
 
-def report_event(event):
-    """Report an event to all registered event handlers.
+def report_event(event, excluded_handler_types=None):
+    """Report an event to all registered event handlers
+    except those whose type is in excluded_handler_types.
 
     This should generally be called via one of the other functions in
     the reporting module.
 
+    :param excluded_handler_types:
+        List of handler types to exclude from reporting the event to.
     :param event_type:
         The type of the event; this should be a constant from the
         reporting module.
""" - for _, handler in instantiated_handler_registry.registered_items.items(): + + if not excluded_handler_types: + excluded_handler_types = {} + excluded_handler_classes = { + hndl_cls + for hndl_type, hndl_cls in available_handlers.registered_items.items() + if hndl_type in excluded_handler_types + } + + handlers = instantiated_handler_registry.registered_items.items() + for _, handler in handlers: + if type(handler) in excluded_handler_classes: + continue # skip this excluded handler handler.publish_event(event) diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index 1986ebdd..0a8c7af3 100755 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -113,6 +113,8 @@ class HyperVKvpReportingHandler(ReportingHandler): https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests """ HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048 + # The maximum value size expected in Azure + HV_KVP_AZURE_MAX_VALUE_SIZE = 1024 HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512 HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE + HV_KVP_EXCHANGE_MAX_VALUE_SIZE) @@ -195,7 +197,8 @@ class HyperVKvpReportingHandler(ReportingHandler): def _event_key(self, event): """ the event key format is: - CLOUD_INIT||||