author    Joshua Powers <josh.powers@canonical.com>  2020-06-10 07:39:29 -0700
committer GitHub <noreply@github.com>                2020-06-10 10:39:29 -0400
commit    f3bd42659efeed4b092ffcdfd5df7f24813f2d3e (patch)
tree      09e39d069dd98e19781859e17a03773b7cefff7f
parent    f083050478adc199648c980991f2bcece79ed31b (diff)
test: fix all flake8 E126 errors (#425)
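flake8's E126 check ("continuation line over-indented for hanging indent", per the comment removed from tox.ini below) flags continuation lines indented deeper than one indent level when a call or literal is wrapped with a hanging indent. This commit reflows the affected call sites and drops E126 from the ignore list in tox.ini so the check is enforced going forward. A minimal illustrative sketch of the pattern, not taken from this diff (the helper name and values are made up):

def fetch_metadata(url, timeout=10):
    # Hypothetical helper, defined only so the snippet runs as-is.
    return {"url": url, "timeout": timeout}


# Over-indented hanging indent: pycodestyle reports E126 on the first
# continuation line below.
resp = fetch_metadata(
            "http://169.254.169.254/metadata",
            timeout=30)

# The layout this commit standardises on: a four-space hanging indent with
# the closing parenthesis dedented onto its own line.
resp = fetch_metadata(
    "http://169.254.169.254/metadata",
    timeout=30
)
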
-rwxr-xr-xcloudinit/cmd/devel/render.py5
-rw-r--r--cloudinit/cmd/query.py5
-rw-r--r--cloudinit/distros/netbsd.py10
-rw-r--r--cloudinit/distros/openbsd.py7
-rw-r--r--cloudinit/net/__init__.py5
-rw-r--r--cloudinit/net/bsd.py5
-rw-r--r--cloudinit/net/netbsd.py5
-rw-r--r--cloudinit/net/openbsd.py5
-rw-r--r--cloudinit/net/sysconfig.py2
-rwxr-xr-xcloudinit/reporting/handlers.py14
-rwxr-xr-xcloudinit/sources/DataSourceAzure.py33
-rw-r--r--cloudinit/sources/helpers/tests/test_netlink.py165
-rw-r--r--cloudinit/sources/tests/test_init.py9
-rw-r--r--tests/cloud_tests/testcases/base.py2
-rw-r--r--tests/unittests/test_datasource/test_azure.py33
-rw-r--r--tests/unittests/test_datasource/test_scaleway.py42
-rw-r--r--tests/unittests/test_distros/test_bsd_utils.py5
-rw-r--r--tests/unittests/test_handler/test_handler_mounts.py7
-rw-r--r--tests/unittests/test_reporting_hyperv.py11
-rw-r--r--tests/unittests/test_sshutil.py18
-rw-r--r--tox.ini3
21 files changed, 238 insertions, 153 deletions
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index 1bc22406..1090aa16 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -57,8 +57,9 @@ def handle_args(name, args):
paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
if not os.path.exists(instance_data_fn):
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- instance_data_fn, redacted_data_fn)
+ 'Missing root-readable %s. Using redacted %s instead.',
+ instance_data_fn, redacted_data_fn
+ )
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index e3db8679..0fb48ebd 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -90,8 +90,9 @@ def handle_args(name, args):
instance_data_fn = sensitive_data_fn
else:
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- sensitive_data_fn, redacted_data_fn)
+ 'Missing root-readable %s. Using redacted %s instead.',
+ sensitive_data_fn, redacted_data_fn
+ )
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
index 066737a8..f1a9b182 100644
--- a/cloudinit/distros/netbsd.py
+++ b/cloudinit/distros/netbsd.py
@@ -100,8 +100,9 @@ class NetBSD(cloudinit.distros.bsd.BSD):
else:
method = crypt.METHOD_BLOWFISH # pylint: disable=E1101
hashed_pw = crypt.crypt(
- passwd,
- crypt.mksalt(method))
+ passwd,
+ crypt.mksalt(method)
+ )
try:
subp.subp(['usermod', '-p', hashed_pw, user])
@@ -143,8 +144,9 @@ class NetBSD(cloudinit.distros.bsd.BSD):
os_arch = platform.machine()
e = os.environ.copy()
e['PKG_PATH'] = (
- 'http://cdn.netbsd.org/pub/pkgsrc/'
- 'packages/NetBSD/%s/%s/All') % (os_arch, os_release)
+ 'http://cdn.netbsd.org/pub/pkgsrc/'
+ 'packages/NetBSD/%s/%s/All'
+ ) % (os_arch, os_release)
return e
def update_package_sources(self):
diff --git a/cloudinit/distros/openbsd.py b/cloudinit/distros/openbsd.py
index 07c76530..720c9cf3 100644
--- a/cloudinit/distros/openbsd.py
+++ b/cloudinit/distros/openbsd.py
@@ -42,9 +42,10 @@ class Distro(cloudinit.distros.netbsd.NetBSD):
os_arch = platform.machine()
e = os.environ.copy()
e['PKG_PATH'] = (
- 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/'
- 'packages/{os_arch}/').format(
- os_arch=os_arch, os_release=os_release)
+ 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/'
+ 'packages/{os_arch}/').format(
+ os_arch=os_arch, os_release=os_release
+ )
return e
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index a57fea0a..b40cb154 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -849,8 +849,9 @@ def get_interfaces_by_mac_on_freebsd():
def get_interfaces_by_mac_on_netbsd():
ret = {}
re_field_match = (
- r"(?P<ifname>\w+).*address:\s"
- r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*")
+ r"(?P<ifname>\w+).*address:\s"
+ r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*"
+ )
(out, _) = subp.subp(['ifconfig', '-a'])
if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
for line in if_lines:
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
index 1c355a98..e34e0454 100644
--- a/cloudinit/net/bsd.py
+++ b/cloudinit/net/bsd.py
@@ -66,8 +66,9 @@ class BSDRenderer(renderer.Renderer):
if subnet.get('type') == 'static':
if not subnet.get('netmask'):
LOG.debug(
- 'Skipping IP %s, because there is no netmask',
- subnet.get('address'))
+ 'Skipping IP %s, because there is no netmask',
+ subnet.get('address')
+ )
continue
LOG.debug('Configuring dev %s with %s / %s', device_name,
subnet.get('address'), subnet.get('netmask'))
diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py
index 30437b5f..71b38ee6 100644
--- a/cloudinit/net/netbsd.py
+++ b/cloudinit/net/netbsd.py
@@ -17,8 +17,9 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
if self.dhcp_interfaces():
self.set_rc_config_value('dhcpcd', 'YES')
self.set_rc_config_value(
- 'dhcpcd_flags',
- ' '.join(self.dhcp_interfaces()))
+ 'dhcpcd_flags',
+ ' '.join(self.dhcp_interfaces())
+ )
for device_name, v in self.interface_configurations.items():
if isinstance(v, dict):
self.set_rc_config_value(
diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py
index 489ea48b..166d77e6 100644
--- a/cloudinit/net/openbsd.py
+++ b/cloudinit/net/openbsd.py
@@ -19,8 +19,9 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
elif isinstance(v, dict):
try:
content = "inet {address} {netmask}\n".format(
- address=v['address'],
- netmask=v['netmask'])
+ address=v['address'],
+ netmask=v['netmask']
+ )
except KeyError:
LOG.error(
"Invalid static configuration for %s",
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index f36c300f..0a5d481d 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -505,7 +505,7 @@ class Renderer(renderer.Renderer):
iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
else:
iface_cfg['IPV6ADDR_SECONDARIES'] += \
- " " + ipv6_cidr
+ " " + ipv6_cidr
else:
ipv4_index = ipv4_index + 1
suff = "" if ipv4_index == 0 else str(ipv4_index)
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 00e8d2e5..6b9127b6 100755
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -139,7 +139,8 @@ class HyperVKvpReportingHandler(ReportingHandler):
self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
self.incarnation_no)
self.publish_thread = threading.Thread(
- target=self._publish_event_routine)
+ target=self._publish_event_routine
+ )
self.publish_thread.daemon = True
self.publish_thread.start()
@@ -202,10 +203,15 @@ class HyperVKvpReportingHandler(ReportingHandler):
uuid.uuid4())
def _encode_kvp_item(self, key, value):
- data = (struct.pack("%ds%ds" % (
+ data = struct.pack(
+ "%ds%ds"
+ % (
self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
- self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
- key.encode('utf-8'), value.encode('utf-8')))
+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
+ ),
+ key.encode("utf-8"),
+ value.encode("utf-8"),
+ )
return data
def _decode_kvp_item(self, record_data):
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 89312b9e..6d569057 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -523,8 +523,9 @@ class DataSourceAzure(sources.DataSource):
try:
crawled_data = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self.crawl_metadata
+ )
except sources.InvalidMetaDataException as e:
LOG.warning('Could not crawl Azure metadata: %s', e)
return False
@@ -893,9 +894,10 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
(cand_part, cand_path, devpath))
with events.ReportEventStack(
- name="mount-ntfs-and-count",
- description="mount-ntfs-and-count",
- parent=azure_ds_reporter) as evt:
+ name="mount-ntfs-and-count",
+ description="mount-ntfs-and-count",
+ parent=azure_ds_reporter
+ ) as evt:
try:
file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
update_env_for_mount={'LANG': 'C'})
@@ -924,9 +926,10 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
# wait for ephemeral disk to come up
naplen = .2
with events.ReportEventStack(
- name="wait-for-ephemeral-disk",
- description="wait for ephemeral disk",
- parent=azure_ds_reporter):
+ name="wait-for-ephemeral-disk",
+ description="wait for ephemeral disk",
+ parent=azure_ds_reporter
+ ):
missing = util.wait_for_files([devpath],
maxwait=maxwait,
naplen=naplen,
@@ -1334,9 +1337,10 @@ def parse_network_config(imds_metadata):
@return: Dictionary containing network version 2 standard configuration.
"""
with events.ReportEventStack(
- name="parse_network_config",
- description="",
- parent=azure_ds_reporter) as evt:
+ name="parse_network_config",
+ description="",
+ parent=azure_ds_reporter
+ ) as evt:
if imds_metadata != sources.UNSET and imds_metadata:
netconfig = {'version': 2, 'ethernets': {}}
LOG.debug('Azure: generating network configuration from IMDS')
@@ -1480,9 +1484,10 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
def _is_platform_viable(seed_dir):
with events.ReportEventStack(
- name="check-platform-viability",
- description="found azure asset tag",
- parent=azure_ds_reporter) as evt:
+ name="check-platform-viability",
+ description="found azure asset tag",
+ parent=azure_ds_reporter
+ ) as evt:
"""Check platform environment to report if this datasource may run."""
asset_tag = util.read_dmi_data('chassis-asset-tag')
diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py
index 58c3adc6..10760bd6 100644
--- a/cloudinit/sources/helpers/tests/test_netlink.py
+++ b/cloudinit/sources/helpers/tests/test_netlink.py
@@ -180,17 +180,22 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
other_ifname = "eth1"
expected_ifname = "eth0"
data_op_down_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_DOWN)
+ other_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_UP)
+ other_ifname, RTM_NEWLINK, OPER_UP
+ )
data_op_down_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_DOWN)
+ expected_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_op_down_eth1,
- data_op_up_eth1,
- data_op_down_eth0,
- data_op_up_eth0]
+ expected_ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_op_down_eth1,
+ data_op_up_eth1,
+ data_op_down_eth0,
+ data_op_up_eth0
+ ]
wait_for_media_disconnect_connect(m_socket, expected_ifname)
self.assertIn('Ignored netlink event on interface %s' % other_ifname,
self.logs.getvalue())
@@ -207,17 +212,23 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_getlink_down = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_DOWN)
+ ifname, RTM_GETLINK, OPER_DOWN
+ )
data_getlink_up = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_UP)
+ ifname, RTM_GETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_getlink_down,
- data_getlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_getlink_down,
+ data_getlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -233,19 +244,25 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_setlink_down = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_DOWN)
+ ifname, RTM_SETLINK, OPER_DOWN
+ )
data_setlink_up = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_UP)
+ ifname, RTM_SETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_setlink_down,
- data_setlink_up,
- data_newlink_down,
- data_newlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_setlink_down,
+ data_setlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -255,23 +272,30 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_notpresent = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_NOTPRESENT)
- data_op_lowerdown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_LOWERLAYERDOWN)
- data_op_testing = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_TESTING)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_up, data_op_up,
- data_op_dormant, data_op_up,
- data_op_notpresent, data_op_up,
- data_op_lowerdown, data_op_up,
- data_op_testing, data_op_up,
- data_op_unknown, data_op_up,
- data_op_down, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
+ data_op_notpresent = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_NOTPRESENT
+ )
+ data_op_lowerdown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN
+ )
+ data_op_testing = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_TESTING
+ )
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_up, data_op_up,
+ data_op_dormant, data_op_up,
+ data_op_notpresent, data_op_up,
+ data_op_lowerdown, data_op_up,
+ data_op_testing, data_op_up,
+ data_op_unknown, data_op_up,
+ data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 14)
@@ -281,12 +305,14 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_down, data_op_dormant,
- data_op_unknown, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT)
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN)
+ m_read_netlink_socket.side_effect = [
+ data_op_down, data_op_dormant,
+ data_op_unknown, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -300,9 +326,11 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7)
- m_read_netlink_socket.side_effect = [data_op_invalid, data_op_up,
- data_op_down, data_op_invalid,
- data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_op_invalid, data_op_up,
+ data_op_down, data_op_invalid,
+ data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 5)
@@ -333,8 +361,9 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None)
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_invalid1, data_invalid2,
- data_op_down, data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_invalid1, data_invalid2, data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -344,11 +373,15 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
bytes = ifname.encode("utf-8")
data = bytearray(96)
struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
- 3, bytes, 5, 16, int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
+ 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.return_value = data
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 1)
@@ -360,14 +393,18 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data1 = bytearray(112)
data2 = bytearray(32)
struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, 80, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data2, 16, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.side_effect = [data1, data2]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 2)
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index 5b6f1b3f..1420a988 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -436,10 +436,11 @@ class TestDataSource(CiTestCase):
expected = {
'base64_encoded_keys': [],
'merged_cfg': {
- '_doc': (
- 'Merged cloud-init system config from '
- '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'),
- 'datasource': {'_undef': {'key1': False}}},
+ '_doc': (
+ 'Merged cloud-init system config from '
+ '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'
+ ),
+ 'datasource': {'_undef': {'key1': False}}},
'sensitive_keys': [
'ds/meta_data/some/security-credentials', 'merged_cfg'],
'sys_info': sys_info,
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index 68d59111..2e7c6686 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -321,7 +321,7 @@ class CloudTestCase(unittest.TestCase):
"Unexpected sys_info dist value")
self.assertEqual(self.os_name, v1_data['distro_release'])
self.assertEqual(
- str(self.os_cfg['version']), v1_data['distro_version'])
+ str(self.os_cfg['version']), v1_data['distro_version'])
self.assertEqual('x86_64', v1_data['machine'])
self.assertIsNotNone(
re.match(r'3.\d\.\d', v1_data['python_version']),
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 05552a1e..a99cbd41 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -683,15 +683,17 @@ scbus-1 on xpt0 bus 0
'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
@mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
def test_crawl_metadata_on_reprovision_reports_ready(
- self, poll_imds_func,
- report_ready_func,
- m_write, m_dhcp):
+ self, poll_imds_func, report_ready_func, m_write, m_dhcp
+ ):
"""If reprovisioning, report ready at the end"""
ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"})
+ platform_settings={"PreprovisionedVm": "True"}
+ )
- data = {'ovfcontent': ovfenv,
- 'sys_cfg': {}}
+ data = {
+ 'ovfcontent': ovfenv,
+ 'sys_cfg': {}
+ }
dsrc = self._get_ds(data)
poll_imds_func.return_value = ovfenv
dsrc.crawl_metadata()
@@ -706,15 +708,18 @@ scbus-1 on xpt0 bus 0
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
@mock.patch('cloudinit.sources.DataSourceAzure.readurl')
def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
- self, m_readurl, m_dhcp,
- m_net, report_ready_func,
- m_media_switch, m_write):
+ self, m_readurl, m_dhcp, m_net, report_ready_func,
+ m_media_switch, m_write
+ ):
"""If reprovisioning, report ready using the obtained lease"""
ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"})
+ platform_settings={"PreprovisionedVm": "True"}
+ )
- data = {'ovfcontent': ovfenv,
- 'sys_cfg': {}}
+ data = {
+ 'ovfcontent': ovfenv,
+ 'sys_cfg': {}
+ }
dsrc = self._get_ds(data)
lease = {
@@ -1955,8 +1960,8 @@ class TestPreprovisioningPollIMDS(CiTestCase):
response = requests.Response()
response.status_code = 404 if self.tries == 2 else 410
raise requests.exceptions.HTTPError(
- "fake {}".format(response.status_code),
- response=response)
+ "fake {}".format(response.status_code), response=response
+ )
# Third try should succeed and stop retries or redhcp
return mock.MagicMock(status_code=200, text="good", content="good")
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
index 15441454..9d82bda9 100644
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ b/tests/unittests/test_datasource/test_scaleway.py
@@ -353,12 +353,16 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.datasource.metadata['ipv6'] = None
netcfg = self.datasource.network_config
- resp = {'version': 1,
- 'config': [{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]}]
+ resp = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]
}
+ ]
+ }
self.assertEqual(netcfg, resp)
@mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
@@ -424,12 +428,16 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.datasource.metadata['ipv6'] = None
self.datasource._network_config = sources.UNSET
- resp = {'version': 1,
- 'config': [{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]}]
+ resp = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]
}
+ ]
+ }
netcfg = self.datasource.network_config
self.assertEqual(netcfg, resp)
@@ -448,12 +456,16 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.datasource.metadata['ipv6'] = None
self.datasource._network_config = None
- resp = {'version': 1,
- 'config': [{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]}]
+ resp = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]
}
+ ]
+ }
netcfg = self.datasource.network_config
self.assertEqual(netcfg, resp)
diff --git a/tests/unittests/test_distros/test_bsd_utils.py b/tests/unittests/test_distros/test_bsd_utils.py
index b38e4af5..3a68f2a9 100644
--- a/tests/unittests/test_distros/test_bsd_utils.py
+++ b/tests/unittests/test_distros/test_bsd_utils.py
@@ -62,5 +62,6 @@ class TestBsdUtils(CiTestCase):
self.load_file.return_value = RC_FILE.format(hostname='foo')
bsd_utils.set_rc_config_value('hostname', 'bar')
self.write_file.assert_called_with(
- '/etc/rc.conf',
- RC_FILE.format(hostname='bar'))
+ '/etc/rc.conf',
+ RC_FILE.format(hostname='bar')
+ )
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
index 80c53c83..b643e3ae 100644
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ b/tests/unittests/test_handler/test_handler_mounts.py
@@ -260,8 +260,11 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
'/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n'
)
fstab_expected_content = fstab_original_content
- cc = {'mounts': [
- ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']]}
+ cc = {
+ 'mounts': [
+ ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']
+ ]
+ }
with open(cc_mounts.FSTAB_PATH, 'w') as fd:
fd.write(fstab_original_content)
with open(cc_mounts.FSTAB_PATH, 'r') as fd:
diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
index fa8f8859..b60a66ab 100644
--- a/tests/unittests/test_reporting_hyperv.py
+++ b/tests/unittests/test_reporting_hyperv.py
@@ -93,10 +93,15 @@ class TextKvpReporter(CiTestCase):
def test_not_truncate_kvp_file_modified_after_boot(self):
with open(self.tmp_file_path, "wb+") as f:
kvp = {'key': 'key1', 'value': 'value1'}
- data = (struct.pack("%ds%ds" % (
+ data = struct.pack(
+ "%ds%ds"
+ % (
HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
- HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
- kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8')))
+ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
+ ),
+ kvp["key"].encode("utf-8"),
+ kvp["value"].encode("utf-8"),
+ )
f.write(data)
cur_time = time.time()
os.utime(self.tmp_file_path, (cur_time, cur_time))
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index b4767f0c..d15fc60b 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -374,13 +374,13 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
sshd_config = self.tmp_path('sshd_config')
util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys))
+ sshd_config,
+ "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ )
(auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(
- auth_key_entries, [])
+ fpw.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
self.assertTrue(VALID_CONTENT['rsa'] in content)
@@ -398,11 +398,13 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
sshd_config = self.tmp_path('sshd_config')
util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys))
+ sshd_config,
+ "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ )
(auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
+ fpw.pw_name, sshd_config
+ )
content = ssh_util.update_authorized_keys(auth_key_entries, [])
self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
diff --git a/tox.ini b/tox.ini
index e04c7791..ebcebc41 100644
--- a/tox.ini
+++ b/tox.ini
@@ -43,11 +43,10 @@ basepython = python2.7
deps = -r{toxinidir}/test-requirements.txt
[flake8]
-# E126: continuation line over-indented for hanging indent
# E226: missing whitespace around arithmetic operator
# W503: line break before binary operator
# W504: line break after binary operator
-ignore=E126,E226,W503,W504
+ignore=E226,W503,W504
exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
per-file-ignores =
cloudinit/cmd/main.py:E402