summaryrefslogtreecommitdiff
path: root/tests/unittests/sources
diff options
context:
space:
mode:
Diffstat (limited to 'tests/unittests/sources')
-rw-r--r--tests/unittests/sources/helpers/test_netlink.py357
-rw-r--r--tests/unittests/sources/helpers/test_openstack.py51
-rw-r--r--tests/unittests/sources/test_aliyun.py217
-rw-r--r--tests/unittests/sources/test_altcloud.py311
-rw-r--r--tests/unittests/sources/test_azure.py3174
-rw-r--r--tests/unittests/sources/test_azure_helper.py1138
-rw-r--r--tests/unittests/sources/test_cloudsigma.py72
-rw-r--r--tests/unittests/sources/test_cloudstack.py121
-rw-r--r--tests/unittests/sources/test_common.py86
-rw-r--r--tests/unittests/sources/test_configdrive.py1100
-rw-r--r--tests/unittests/sources/test_digitalocean.py283
-rw-r--r--tests/unittests/sources/test_ec2.py851
-rw-r--r--tests/unittests/sources/test_exoscale.py248
-rw-r--r--tests/unittests/sources/test_gce.py304
-rw-r--r--tests/unittests/sources/test_hetzner.py85
-rw-r--r--tests/unittests/sources/test_ibmcloud.py299
-rw-r--r--tests/unittests/sources/test_init.py879
-rw-r--r--tests/unittests/sources/test_lxd.py134
-rw-r--r--tests/unittests/sources/test_maas.py147
-rw-r--r--tests/unittests/sources/test_nocloud.py320
-rw-r--r--tests/unittests/sources/test_opennebula.py888
-rw-r--r--tests/unittests/sources/test_openstack.py652
-rw-r--r--tests/unittests/sources/test_oracle.py412
-rw-r--r--tests/unittests/sources/test_ovf.py1053
-rw-r--r--tests/unittests/sources/test_rbx.py215
-rw-r--r--tests/unittests/sources/test_scaleway.py481
-rw-r--r--tests/unittests/sources/test_smartos.py956
-rw-r--r--tests/unittests/sources/test_upcloud.py161
-rw-r--r--tests/unittests/sources/test_vmware.py12
-rw-r--r--tests/unittests/sources/test_vultr.py375
-rw-r--r--tests/unittests/sources/vmware/test_custom_script.py61
-rw-r--r--tests/unittests/sources/vmware/test_guestcust_util.py79
-rw-r--r--tests/unittests/sources/vmware/test_vmware_config_file.py430
33 files changed, 9248 insertions, 6704 deletions
diff --git a/tests/unittests/sources/helpers/test_netlink.py b/tests/unittests/sources/helpers/test_netlink.py
index 478ce375..5eabf104 100644
--- a/tests/unittests/sources/helpers/test_netlink.py
+++ b/tests/unittests/sources/helpers/test_netlink.py
@@ -2,48 +2,64 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from tests.unittests.helpers import CiTestCase, mock
+import codecs
import socket
import struct
-import codecs
+
from cloudinit.sources.helpers.netlink import (
- NetlinkCreateSocketError, create_bound_netlink_socket, read_netlink_socket,
- read_rta_oper_state, unpack_rta_attr, wait_for_media_disconnect_connect,
- wait_for_nic_attach_event, wait_for_nic_detach_event,
- OPER_DOWN, OPER_UP, OPER_DORMANT, OPER_LOWERLAYERDOWN, OPER_NOTPRESENT,
- OPER_TESTING, OPER_UNKNOWN, RTATTR_START_OFFSET, RTM_NEWLINK, RTM_DELLINK,
- RTM_SETLINK, RTM_GETLINK, MAX_SIZE)
+ MAX_SIZE,
+ OPER_DORMANT,
+ OPER_DOWN,
+ OPER_LOWERLAYERDOWN,
+ OPER_NOTPRESENT,
+ OPER_TESTING,
+ OPER_UNKNOWN,
+ OPER_UP,
+ RTATTR_START_OFFSET,
+ RTM_DELLINK,
+ RTM_GETLINK,
+ RTM_NEWLINK,
+ RTM_SETLINK,
+ NetlinkCreateSocketError,
+ create_bound_netlink_socket,
+ read_netlink_socket,
+ read_rta_oper_state,
+ unpack_rta_attr,
+ wait_for_media_disconnect_connect,
+ wait_for_nic_attach_event,
+ wait_for_nic_detach_event,
+)
+from tests.unittests.helpers import CiTestCase, mock
def int_to_bytes(i):
- '''convert integer to binary: eg: 1 to \x01'''
- hex_value = '{0:x}'.format(i)
- hex_value = '0' * (len(hex_value) % 2) + hex_value
- return codecs.decode(hex_value, 'hex_codec')
+ """convert integer to binary: eg: 1 to \x01"""
+ hex_value = "{0:x}".format(i)
+ hex_value = "0" * (len(hex_value) % 2) + hex_value
+ return codecs.decode(hex_value, "hex_codec")
class TestCreateBoundNetlinkSocket(CiTestCase):
-
- @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
def test_socket_error_on_create(self, m_socket):
- '''create_bound_netlink_socket catches socket creation exception'''
+ """create_bound_netlink_socket catches socket creation exception"""
"""NetlinkCreateSocketError is raised when socket creation errors."""
m_socket.side_effect = socket.error("Fake socket failure")
with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr:
create_bound_netlink_socket()
self.assertEqual(
- 'Exception during netlink socket create: Fake socket failure',
- str(ctx_mgr.exception))
+ "Exception during netlink socket create: Fake socket failure",
+ str(ctx_mgr.exception),
+ )
class TestReadNetlinkSocket(CiTestCase):
-
- @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
- @mock.patch('cloudinit.sources.helpers.netlink.select.select')
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+ @mock.patch("cloudinit.sources.helpers.netlink.select.select")
def test_read_netlink_socket(self, m_select, m_socket):
- '''read_netlink_socket able to receive data'''
- data = 'netlinktest'
+ """read_netlink_socket able to receive data"""
+ data = "netlinktest"
m_select.return_value = [m_socket], None, None
m_socket.recv.return_value = data
recv_data = read_netlink_socket(m_socket, 2)
@@ -52,10 +68,10 @@ class TestReadNetlinkSocket(CiTestCase):
self.assertIsNotNone(recv_data)
self.assertEqual(recv_data, data)
- @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
- @mock.patch('cloudinit.sources.helpers.netlink.select.select')
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+ @mock.patch("cloudinit.sources.helpers.netlink.select.select")
def test_netlink_read_timeout(self, m_select, m_socket):
- '''read_netlink_socket should timeout if nothing to read'''
+ """read_netlink_socket should timeout if nothing to read"""
m_select.return_value = [], None, None
data = read_netlink_socket(m_socket, 1)
m_select.assert_called_with([m_socket], [], [], 1)
@@ -63,35 +79,43 @@ class TestReadNetlinkSocket(CiTestCase):
self.assertIsNone(data)
def test_read_invalid_socket(self):
- '''read_netlink_socket raises assert error if socket is invalid'''
+ """read_netlink_socket raises assert error if socket is invalid"""
socket = None
with self.assertRaises(AssertionError) as context:
read_netlink_socket(socket, 1)
- self.assertTrue('netlink socket is none' in str(context.exception))
+ self.assertTrue("netlink socket is none" in str(context.exception))
class TestParseNetlinkMessage(CiTestCase):
-
def test_read_rta_oper_state(self):
- '''read_rta_oper_state could parse netlink message and extract data'''
+ """read_rta_oper_state could parse netlink message and extract data"""
ifname = "eth0"
bytes = ifname.encode("utf-8")
buf = bytearray(48)
- struct.pack_into("HH4sHHc", buf, RTATTR_START_OFFSET, 8, 3, bytes, 5,
- 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc",
+ buf,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
+ )
interface_state = read_rta_oper_state(buf)
self.assertEqual(interface_state.ifname, ifname)
self.assertEqual(interface_state.operstate, OPER_DOWN)
def test_read_none_data(self):
- '''read_rta_oper_state raises assert error if data is none'''
+ """read_rta_oper_state raises assert error if data is none"""
data = None
with self.assertRaises(AssertionError) as context:
read_rta_oper_state(data)
- self.assertEqual('data is none', str(context.exception))
+ self.assertEqual("data is none", str(context.exception))
def test_read_invalid_rta_operstate_none(self):
- '''read_rta_oper_state returns none if operstate is none'''
+ """read_rta_oper_state returns none if operstate is none"""
ifname = "eth0"
buf = bytearray(40)
bytes = ifname.encode("utf-8")
@@ -100,65 +124,84 @@ class TestParseNetlinkMessage(CiTestCase):
self.assertIsNone(interface_state)
def test_read_invalid_rta_ifname_none(self):
- '''read_rta_oper_state returns none if ifname is none'''
+ """read_rta_oper_state returns none if ifname is none"""
buf = bytearray(40)
- struct.pack_into("HHc", buf, RTATTR_START_OFFSET, 5, 16,
- int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HHc", buf, RTATTR_START_OFFSET, 5, 16, int_to_bytes(OPER_DOWN)
+ )
interface_state = read_rta_oper_state(buf)
self.assertIsNone(interface_state)
def test_read_invalid_data_len(self):
- '''raise assert error if data size is smaller than required size'''
+ """raise assert error if data size is smaller than required size"""
buf = bytearray(32)
with self.assertRaises(AssertionError) as context:
read_rta_oper_state(buf)
- self.assertTrue('length of data is smaller than RTATTR_START_OFFSET' in
- str(context.exception))
+ self.assertTrue(
+ "length of data is smaller than RTATTR_START_OFFSET"
+ in str(context.exception)
+ )
def test_unpack_rta_attr_none_data(self):
- '''unpack_rta_attr raises assert error if data is none'''
+ """unpack_rta_attr raises assert error if data is none"""
data = None
with self.assertRaises(AssertionError) as context:
unpack_rta_attr(data, RTATTR_START_OFFSET)
- self.assertTrue('data is none' in str(context.exception))
+ self.assertTrue("data is none" in str(context.exception))
def test_unpack_rta_attr_invalid_offset(self):
- '''unpack_rta_attr raises assert error if offset is invalid'''
+ """unpack_rta_attr raises assert error if offset is invalid"""
data = bytearray(48)
with self.assertRaises(AssertionError) as context:
unpack_rta_attr(data, "offset")
- self.assertTrue('offset is not integer' in str(context.exception))
+ self.assertTrue("offset is not integer" in str(context.exception))
with self.assertRaises(AssertionError) as context:
unpack_rta_attr(data, 31)
- self.assertTrue('rta offset is less than expected length' in
- str(context.exception))
+ self.assertTrue(
+ "rta offset is less than expected length" in str(context.exception)
+ )
-@mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
-@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket')
+@mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+@mock.patch("cloudinit.sources.helpers.netlink.read_netlink_socket")
class TestNicAttachDetach(CiTestCase):
with_logs = True
def _media_switch_data(self, ifname, msg_type, operstate):
- '''construct netlink data with specified fields'''
+ """construct netlink data with specified fields"""
if ifname and operstate is not None:
data = bytearray(48)
bytes = ifname.encode("utf-8")
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(operstate))
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
elif ifname:
data = bytearray(40)
bytes = ifname.encode("utf-8")
struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes)
elif operstate:
data = bytearray(40)
- struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16,
- int_to_bytes(operstate))
+ struct.pack_into(
+ "HHc",
+ data,
+ RTATTR_START_OFFSET,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0)
return data
def test_nic_attached_oper_down(self, m_read_netlink_socket, m_socket):
- '''Test for a new nic attached'''
+ """Test for a new nic attached"""
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
m_read_netlink_socket.side_effect = [data_op_down]
@@ -167,7 +210,7 @@ class TestNicAttachDetach(CiTestCase):
self.assertEqual(ifname, ifread)
def test_nic_attached_oper_up(self, m_read_netlink_socket, m_socket):
- '''Test for a new nic attached'''
+ """Test for a new nic attached"""
ifname = "eth0"
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
m_read_netlink_socket.side_effect = [data_op_up]
@@ -176,7 +219,7 @@ class TestNicAttachDetach(CiTestCase):
self.assertEqual(ifname, ifread)
def test_nic_attach_ignore_existing(self, m_read_netlink_socket, m_socket):
- '''Test that we read only the interfaces we are interested in.'''
+ """Test that we read only the interfaces we are interested in."""
data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN)
data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN)
m_read_netlink_socket.side_effect = [data_eth0, data_eth1]
@@ -185,7 +228,7 @@ class TestNicAttachDetach(CiTestCase):
self.assertEqual("eth1", ifread)
def test_nic_attach_read_first(self, m_read_netlink_socket, m_socket):
- '''Test that we read only the interfaces we are interested in.'''
+ """Test that we read only the interfaces we are interested in."""
data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN)
data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN)
m_read_netlink_socket.side_effect = [data_eth0, data_eth1]
@@ -194,7 +237,7 @@ class TestNicAttachDetach(CiTestCase):
self.assertEqual("eth0", ifread)
def test_nic_detached(self, m_read_netlink_socket, m_socket):
- '''Test for an existing nic detached'''
+ """Test for an existing nic detached"""
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_DELLINK, OPER_DOWN)
m_read_netlink_socket.side_effect = [data_op_down]
@@ -203,32 +246,46 @@ class TestNicAttachDetach(CiTestCase):
self.assertEqual(ifname, ifread)
-@mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
-@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket')
+@mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+@mock.patch("cloudinit.sources.helpers.netlink.read_netlink_socket")
class TestWaitForMediaDisconnectConnect(CiTestCase):
with_logs = True
def _media_switch_data(self, ifname, msg_type, operstate):
- '''construct netlink data with specified fields'''
+ """construct netlink data with specified fields"""
if ifname and operstate is not None:
data = bytearray(48)
bytes = ifname.encode("utf-8")
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(operstate))
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
elif ifname:
data = bytearray(40)
bytes = ifname.encode("utf-8")
struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes)
elif operstate:
data = bytearray(40)
- struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16,
- int_to_bytes(operstate))
+ struct.pack_into(
+ "HHc",
+ data,
+ RTATTR_START_OFFSET,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0)
return data
- def test_media_down_up_scenario(self, m_read_netlink_socket,
- m_socket):
- '''Test for media down up sequence for required interface name'''
+ def test_media_down_up_scenario(self, m_read_netlink_socket, m_socket):
+ """Test for media down up sequence for required interface name"""
ifname = "eth0"
# construct data for Oper State down
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
@@ -238,15 +295,16 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 2)
- def test_wait_for_media_switch_diff_interface(self, m_read_netlink_socket,
- m_socket):
- '''wait_for_media_disconnect_connect ignores unexpected interfaces.
+ def test_wait_for_media_switch_diff_interface(
+ self, m_read_netlink_socket, m_socket
+ ):
+ """wait_for_media_disconnect_connect ignores unexpected interfaces.
The first two messages are for other interfaces and last two are for
expected interface. So the function exit only after receiving last
2 messages and therefore the call count for m_read_netlink_socket
has to be 4
- '''
+ """
other_ifname = "eth1"
expected_ifname = "eth0"
data_op_down_eth1 = self._media_switch_data(
@@ -259,51 +317,50 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
expected_ifname, RTM_NEWLINK, OPER_DOWN
)
data_op_up_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_UP)
+ expected_ifname, RTM_NEWLINK, OPER_UP
+ )
m_read_netlink_socket.side_effect = [
data_op_down_eth1,
data_op_up_eth1,
data_op_down_eth0,
- data_op_up_eth0
+ data_op_up_eth0,
]
wait_for_media_disconnect_connect(m_socket, expected_ifname)
- self.assertIn('Ignored netlink event on interface %s' % other_ifname,
- self.logs.getvalue())
+ self.assertIn(
+ "Ignored netlink event on interface %s" % other_ifname,
+ self.logs.getvalue(),
+ )
self.assertEqual(m_read_netlink_socket.call_count, 4)
def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect ignores GETLINK events.
+ """wait_for_media_disconnect_connect ignores GETLINK events.
The first two messages are for oper down and up for RTM_GETLINK type
which netlink module will ignore. The last 2 messages are RTM_NEWLINK
with oper state down and up messages. Therefore the call count for
m_read_netlink_socket has to be 4 ignoring first 2 messages
of RTM_GETLINK
- '''
+ """
ifname = "eth0"
data_getlink_down = self._media_switch_data(
ifname, RTM_GETLINK, OPER_DOWN
)
- data_getlink_up = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_UP
- )
+ data_getlink_up = self._media_switch_data(ifname, RTM_GETLINK, OPER_UP)
data_newlink_down = self._media_switch_data(
ifname, RTM_NEWLINK, OPER_DOWN
)
- data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP
- )
+ data_newlink_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
m_read_netlink_socket.side_effect = [
data_getlink_down,
data_getlink_up,
data_newlink_down,
- data_newlink_up
+ data_newlink_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect ignores SETLINK events.
+ """wait_for_media_disconnect_connect ignores SETLINK events.
The first two messages are for oper down and up for RTM_GETLINK type
which it will ignore. 3rd and 4th messages are RTM_NEWLINK with down
@@ -311,34 +368,31 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
sees down->up scenario. So the call count for m_read_netlink_socket
has to be 4 ignoring first 2 messages of RTM_GETLINK and
last 2 messages of RTM_NEWLINK
- '''
+ """
ifname = "eth0"
data_setlink_down = self._media_switch_data(
ifname, RTM_SETLINK, OPER_DOWN
)
- data_setlink_up = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_UP
- )
+ data_setlink_up = self._media_switch_data(ifname, RTM_SETLINK, OPER_UP)
data_newlink_down = self._media_switch_data(
ifname, RTM_NEWLINK, OPER_DOWN
)
- data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP
- )
+ data_newlink_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
m_read_netlink_socket.side_effect = [
data_setlink_down,
data_setlink_up,
data_newlink_down,
data_newlink_up,
data_newlink_down,
- data_newlink_up
+ data_newlink_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
- def test_netlink_invalid_switch_scenario(self, m_read_netlink_socket,
- m_socket):
- '''returns only if it receives UP event after a DOWN event'''
+ def test_netlink_invalid_switch_scenario(
+ self, m_read_netlink_socket, m_socket
+ ):
+ """returns only if it receives UP event after a DOWN event"""
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
@@ -358,114 +412,153 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname, RTM_NEWLINK, OPER_UNKNOWN
)
m_read_netlink_socket.side_effect = [
- data_op_up, data_op_up,
- data_op_dormant, data_op_up,
- data_op_notpresent, data_op_up,
- data_op_lowerdown, data_op_up,
- data_op_testing, data_op_up,
- data_op_unknown, data_op_up,
- data_op_down, data_op_up
+ data_op_up,
+ data_op_up,
+ data_op_dormant,
+ data_op_up,
+ data_op_notpresent,
+ data_op_up,
+ data_op_lowerdown,
+ data_op_up,
+ data_op_testing,
+ data_op_up,
+ data_op_unknown,
+ data_op_up,
+ data_op_down,
+ data_op_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 14)
- def test_netlink_valid_inbetween_transitions(self, m_read_netlink_socket,
- m_socket):
- '''wait_for_media_disconnect_connect handles in between transitions'''
+ def test_netlink_valid_inbetween_transitions(
+ self, m_read_netlink_socket, m_socket
+ ):
+ """wait_for_media_disconnect_connect handles in between transitions"""
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
data_op_dormant = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DORMANT)
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
data_op_unknown = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UNKNOWN)
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
m_read_netlink_socket.side_effect = [
- data_op_down, data_op_dormant,
- data_op_unknown, data_op_up
+ data_op_down,
+ data_op_dormant,
+ data_op_unknown,
+ data_op_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect should handle invalid operstates.
+ """wait_for_media_disconnect_connect should handle invalid operstates.
The function should not fail and return even if it receives invalid
operstates. It always should wait for down up sequence.
- '''
+ """
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7)
m_read_netlink_socket.side_effect = [
- data_op_invalid, data_op_up,
- data_op_down, data_op_invalid,
- data_op_up
+ data_op_invalid,
+ data_op_up,
+ data_op_down,
+ data_op_invalid,
+ data_op_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 5)
def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect handle none netlink socket.'''
+ """wait_for_media_disconnect_connect handle none netlink socket."""
socket = None
ifname = "eth0"
with self.assertRaises(AssertionError) as context:
wait_for_media_disconnect_connect(socket, ifname)
- self.assertTrue('netlink socket is none' in str(context.exception))
+ self.assertTrue("netlink socket is none" in str(context.exception))
def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect handle none interface name'''
+ """wait_for_media_disconnect_connect handle none interface name"""
ifname = None
with self.assertRaises(AssertionError) as context:
wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertTrue('interface name is none' in str(context.exception))
+ self.assertTrue("interface name is none" in str(context.exception))
ifname = ""
with self.assertRaises(AssertionError) as context:
wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertTrue('interface name cannot be empty' in
- str(context.exception))
+ self.assertTrue(
+ "interface name cannot be empty" in str(context.exception)
+ )
def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket):
- ''' wait_for_media_disconnect_connect handles invalid rta data'''
+ """wait_for_media_disconnect_connect handles invalid rta data"""
ifname = "eth0"
data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN)
data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None)
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
m_read_netlink_socket.side_effect = [
- data_invalid1, data_invalid2, data_op_down, data_op_up
+ data_invalid1,
+ data_invalid2,
+ data_op_down,
+ data_op_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket):
- '''Read multiple messages in single receive call'''
+ """Read multiple messages in single receive call"""
ifname = "eth0"
bytes = ifname.encode("utf-8")
data = bytearray(96)
struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
struct.pack_into(
- "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
)
struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0)
struct.pack_into(
- "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
- 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ "HH4sHHc",
+ data,
+ 48 + RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_UP),
)
m_read_netlink_socket.return_value = data
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 1)
def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket):
- '''Read partial messages in receive call'''
+ """Read partial messages in receive call"""
ifname = "eth0"
bytes = ifname.encode("utf-8")
data1 = bytearray(112)
data2 = bytearray(32)
struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
struct.pack_into(
- "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ "HH4sHHc",
+ data1,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
)
struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
struct.pack_into(
diff --git a/tests/unittests/sources/helpers/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py
index 74743e7c..eb87b1ce 100644
--- a/tests/unittests/sources/helpers/test_openstack.py
+++ b/tests/unittests/sources/helpers/test_openstack.py
@@ -8,10 +8,9 @@ from tests.unittests import helpers as test_helpers
@mock.patch(
"cloudinit.net.is_openvswitch_internal_interface",
- mock.Mock(return_value=False)
+ mock.Mock(return_value=False),
)
class TestConvertNetJson(test_helpers.CiTestCase):
-
def test_phy_types(self):
"""Verify the different known physical types are handled."""
# network_data.json example from
@@ -19,31 +18,45 @@ class TestConvertNetJson(test_helpers.CiTestCase):
mac0 = "fa:16:3e:9c:bf:3d"
net_json = {
"links": [
- {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a",
- "mtu": None, "type": "bridge",
- "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"}
+ {
+ "ethernet_mac_address": mac0,
+ "id": "tapcd9f6d46-4a",
+ "mtu": None,
+ "type": "bridge",
+ "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc",
+ }
],
"networks": [
- {"id": "network0", "link": "tapcd9f6d46-4a",
- "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
- "type": "ipv4_dhcp"}
+ {
+ "id": "network0",
+ "link": "tapcd9f6d46-4a",
+ "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
+ "type": "ipv4_dhcp",
+ }
],
- "services": [{"address": "8.8.8.8", "type": "dns"}]
+ "services": [{"address": "8.8.8.8", "type": "dns"}],
}
- macs = {mac0: 'eth0'}
+ macs = {mac0: "eth0"}
expected = {
- 'version': 1,
- 'config': [
- {'mac_address': 'fa:16:3e:9c:bf:3d',
- 'mtu': None, 'name': 'eth0',
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical'},
- {'address': '8.8.8.8', 'type': 'nameserver'}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "fa:16:3e:9c:bf:3d",
+ "mtu": None,
+ "name": "eth0",
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ },
+ {"address": "8.8.8.8", "type": "nameserver"},
+ ],
+ }
for t in openstack.KNOWN_PHYSICAL_TYPES:
net_json["links"][0]["type"] = t
self.assertEqual(
expected,
- openstack.convert_net_json(network_json=net_json,
- known_macs=macs))
+ openstack.convert_net_json(
+ network_json=net_json, known_macs=macs
+ ),
+ )
diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py
index 00209913..8a61d5ee 100644
--- a/tests/unittests/sources/test_aliyun.py
+++ b/tests/unittests/sources/test_aliyun.py
@@ -1,38 +1,46 @@
# This file is part of cloud-init. See LICENSE file for license information.
import functools
-import httpretty
import os
from unittest import mock
+import httpretty
+
from cloudinit import helpers
from cloudinit.sources import DataSourceAliYun as ay
from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config
from tests.unittests import helpers as test_helpers
DEFAULT_METADATA = {
- 'instance-id': 'aliyun-test-vm-00',
- 'eipv4': '10.0.0.1',
- 'hostname': 'test-hostname',
- 'image-id': 'm-test',
- 'launch-index': '0',
- 'mac': '00:16:3e:00:00:00',
- 'network-type': 'vpc',
- 'private-ipv4': '192.168.0.1',
- 'serial-number': 'test-string',
- 'vpc-cidr-block': '192.168.0.0/16',
- 'vpc-id': 'test-vpc',
- 'vswitch-id': 'test-vpc',
- 'vswitch-cidr-block': '192.168.0.0/16',
- 'zone-id': 'test-zone-1',
- 'ntp-conf': {'ntp_servers': [
- 'ntp1.aliyun.com',
- 'ntp2.aliyun.com',
- 'ntp3.aliyun.com']},
- 'source-address': ['http://mirrors.aliyun.com',
- 'http://mirrors.aliyuncs.com'],
- 'public-keys': {'key-pair-1': {'openssh-key': 'ssh-rsa AAAAB3...'},
- 'key-pair-2': {'openssh-key': 'ssh-rsa AAAAB3...'}}
+ "instance-id": "aliyun-test-vm-00",
+ "eipv4": "10.0.0.1",
+ "hostname": "test-hostname",
+ "image-id": "m-test",
+ "launch-index": "0",
+ "mac": "00:16:3e:00:00:00",
+ "network-type": "vpc",
+ "private-ipv4": "192.168.0.1",
+ "serial-number": "test-string",
+ "vpc-cidr-block": "192.168.0.0/16",
+ "vpc-id": "test-vpc",
+ "vswitch-id": "test-vpc",
+ "vswitch-cidr-block": "192.168.0.0/16",
+ "zone-id": "test-zone-1",
+ "ntp-conf": {
+ "ntp_servers": [
+ "ntp1.aliyun.com",
+ "ntp2.aliyun.com",
+ "ntp3.aliyun.com",
+ ]
+ },
+ "source-address": [
+ "http://mirrors.aliyun.com",
+ "http://mirrors.aliyuncs.com",
+ ],
+ "public-keys": {
+ "key-pair-1": {"openssh-key": "ssh-rsa AAAAB3..."},
+ "key-pair-2": {"openssh-key": "ssh-rsa AAAAB3..."},
+ },
}
DEFAULT_USERDATA = """\
@@ -46,21 +54,22 @@ def register_mock_metaserver(base_url, data):
if isinstance(body, str):
register(base_url, body)
elif isinstance(body, list):
- register(base_url.rstrip('/'), '\n'.join(body) + '\n')
+ register(base_url.rstrip("/"), "\n".join(body) + "\n")
elif isinstance(body, dict):
if not body:
- register(base_url.rstrip('/') + '/', 'not found',
- status_code=404)
+ register(
+ base_url.rstrip("/") + "/", "not found", status_code=404
+ )
vals = []
for k, v in body.items():
if isinstance(v, (str, list)):
- suffix = k.rstrip('/')
+ suffix = k.rstrip("/")
else:
- suffix = k.rstrip('/') + '/'
+ suffix = k.rstrip("/") + "/"
vals.append(suffix)
- url = base_url.rstrip('/') + '/' + suffix
+ url = base_url.rstrip("/") + "/" + suffix
register_helper(register, url, v)
- register(base_url, '\n'.join(vals) + '\n')
+ register(base_url, "\n".join(vals) + "\n")
register = functools.partial(httpretty.register_uri, httpretty.GET)
register_helper(register, base_url, data)
@@ -69,9 +78,9 @@ def register_mock_metaserver(base_url, data):
class TestAliYunDatasource(test_helpers.HttprettyTestCase):
def setUp(self):
super(TestAliYunDatasource, self).setUp()
- cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}}
+ cfg = {"datasource": {"AliYun": {"timeout": "1", "max_wait": "1"}}}
distro = {}
- paths = helpers.Paths({'run_dir': self.tmp_dir()})
+ paths = helpers.Paths({"run_dir": self.tmp_dir()})
self.ds = ay.DataSourceAliYun(cfg, distro, paths)
self.metadata_address = self.ds.metadata_urls[0]
@@ -85,15 +94,20 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
@property
def metadata_url(self):
- return os.path.join(
- self.metadata_address,
- self.ds.min_metadata_version, 'meta-data') + '/'
+ return (
+ os.path.join(
+ self.metadata_address,
+ self.ds.min_metadata_version,
+ "meta-data",
+ )
+ + "/"
+ )
@property
def userdata_url(self):
return os.path.join(
- self.metadata_address,
- self.ds.min_metadata_version, 'user-data')
+ self.metadata_address, self.ds.min_metadata_version, "user-data"
+ )
# EC2 provides an instance-identity document which must return 404 here
# for this test to pass.
@@ -103,9 +117,12 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
@property
def identity_url(self):
- return os.path.join(self.metadata_address,
- self.ds.min_metadata_version,
- 'dynamic', 'instance-identity')
+ return os.path.join(
+ self.metadata_address,
+ self.ds.min_metadata_version,
+ "dynamic",
+ "instance-identity",
+ )
def regist_default_server(self):
register_mock_metaserver(self.metadata_url, self.default_metadata)
@@ -114,21 +131,26 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
def _test_get_data(self):
self.assertEqual(self.ds.metadata, self.default_metadata)
- self.assertEqual(self.ds.userdata_raw,
- self.default_userdata.encode('utf8'))
+ self.assertEqual(
+ self.ds.userdata_raw, self.default_userdata.encode("utf8")
+ )
def _test_get_sshkey(self):
- pub_keys = [v['openssh-key'] for (_, v) in
- self.default_metadata['public-keys'].items()]
+ pub_keys = [
+ v["openssh-key"]
+ for (_, v) in self.default_metadata["public-keys"].items()
+ ]
self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys)
def _test_get_iid(self):
- self.assertEqual(self.default_metadata['instance-id'],
- self.ds.get_instance_id())
+ self.assertEqual(
+ self.default_metadata["instance-id"], self.ds.get_instance_id()
+ )
def _test_host_name(self):
- self.assertEqual(self.default_metadata['hostname'],
- self.ds.get_hostname())
+ self.assertEqual(
+ self.default_metadata["hostname"], self.ds.get_hostname()
+ )
@mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
def test_with_mock_server(self, m_is_aliyun):
@@ -141,10 +163,11 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
self._test_get_sshkey()
self._test_get_iid()
self._test_host_name()
- self.assertEqual('aliyun', self.ds.cloud_name)
- self.assertEqual('ec2', self.ds.platform)
+ self.assertEqual("aliyun", self.ds.cloud_name)
+ self.assertEqual("ec2", self.ds.platform)
self.assertEqual(
- 'metadata (http://100.100.100.200)', self.ds.subplatform)
+ "metadata (http://100.100.100.200)", self.ds.subplatform
+ )
@mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
def test_returns_false_when_not_on_aliyun(self, m_is_aliyun):
@@ -159,30 +182,38 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
public_keys = {}
self.assertEqual(ay.parse_public_keys(public_keys), [])
- public_keys = {'key-pair-0': 'ssh-key-0'}
- self.assertEqual(ay.parse_public_keys(public_keys),
- [public_keys['key-pair-0']])
+ public_keys = {"key-pair-0": "ssh-key-0"}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys), [public_keys["key-pair-0"]]
+ )
- public_keys = {'key-pair-0': 'ssh-key-0', 'key-pair-1': 'ssh-key-1'}
- self.assertEqual(set(ay.parse_public_keys(public_keys)),
- set([public_keys['key-pair-0'],
- public_keys['key-pair-1']]))
+ public_keys = {"key-pair-0": "ssh-key-0", "key-pair-1": "ssh-key-1"}
+ self.assertEqual(
+ set(ay.parse_public_keys(public_keys)),
+ set([public_keys["key-pair-0"], public_keys["key-pair-1"]]),
+ )
- public_keys = {'key-pair-0': ['ssh-key-0', 'ssh-key-1']}
- self.assertEqual(ay.parse_public_keys(public_keys),
- public_keys['key-pair-0'])
+ public_keys = {"key-pair-0": ["ssh-key-0", "ssh-key-1"]}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys), public_keys["key-pair-0"]
+ )
- public_keys = {'key-pair-0': {'openssh-key': []}}
+ public_keys = {"key-pair-0": {"openssh-key": []}}
self.assertEqual(ay.parse_public_keys(public_keys), [])
- public_keys = {'key-pair-0': {'openssh-key': 'ssh-key-0'}}
- self.assertEqual(ay.parse_public_keys(public_keys),
- [public_keys['key-pair-0']['openssh-key']])
+ public_keys = {"key-pair-0": {"openssh-key": "ssh-key-0"}}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys),
+ [public_keys["key-pair-0"]["openssh-key"]],
+ )
- public_keys = {'key-pair-0': {'openssh-key': ['ssh-key-0',
- 'ssh-key-1']}}
- self.assertEqual(ay.parse_public_keys(public_keys),
- public_keys['key-pair-0']['openssh-key'])
+ public_keys = {
+ "key-pair-0": {"openssh-key": ["ssh-key-0", "ssh-key-1"]}
+ }
+ self.assertEqual(
+ ay.parse_public_keys(public_keys),
+ public_keys["key-pair-0"]["openssh-key"],
+ )
def test_route_metric_calculated_without_device_number(self):
"""Test that route-metric code works without `device-number`
@@ -193,38 +224,43 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
LP: #1917875
"""
netcfg = convert_ec2_metadata_network_config(
- {"interfaces": {"macs": {
- "06:17:04:d7:26:09": {
- "interface-id": "eni-e44ef49e",
- },
- "06:17:04:d7:26:08": {
- "interface-id": "eni-e44ef49f",
+ {
+ "interfaces": {
+ "macs": {
+ "06:17:04:d7:26:09": {
+ "interface-id": "eni-e44ef49e",
+ },
+ "06:17:04:d7:26:08": {
+ "interface-id": "eni-e44ef49f",
+ },
+ }
}
- }}},
+ },
macs_to_nics={
- '06:17:04:d7:26:09': 'eth0',
- '06:17:04:d7:26:08': 'eth1',
- }
+ "06:17:04:d7:26:09": "eth0",
+ "06:17:04:d7:26:08": "eth1",
+ },
)
- met0 = netcfg['ethernets']['eth0']['dhcp4-overrides']['route-metric']
- met1 = netcfg['ethernets']['eth1']['dhcp4-overrides']['route-metric']
+ met0 = netcfg["ethernets"]["eth0"]["dhcp4-overrides"]["route-metric"]
+ met1 = netcfg["ethernets"]["eth1"]["dhcp4-overrides"]["route-metric"]
# route-metric numbers should be 100 apart
assert 100 == abs(met0 - met1)
class TestIsAliYun(test_helpers.CiTestCase):
- ALIYUN_PRODUCT = 'Alibaba Cloud ECS'
- read_dmi_data_expected = [mock.call('system-product-name')]
+ ALIYUN_PRODUCT = "Alibaba Cloud ECS"
+ read_dmi_data_expected = [mock.call("system-product-name")]
@mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
def test_true_on_aliyun_product(self, m_read_dmi_data):
"""Should return true if the dmi product data has expected value."""
m_read_dmi_data.return_value = self.ALIYUN_PRODUCT
ret = ay._is_aliyun()
- self.assertEqual(self.read_dmi_data_expected,
- m_read_dmi_data.call_args_list)
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
self.assertEqual(True, ret)
@mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
@@ -232,8 +268,9 @@ class TestIsAliYun(test_helpers.CiTestCase):
"""Should return false on empty value returned."""
m_read_dmi_data.return_value = ""
ret = ay._is_aliyun()
- self.assertEqual(self.read_dmi_data_expected,
- m_read_dmi_data.call_args_list)
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
self.assertEqual(False, ret)
@mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
@@ -241,8 +278,10 @@ class TestIsAliYun(test_helpers.CiTestCase):
"""Should return false on an unrelated string."""
m_read_dmi_data.return_value = "cubs win"
ret = ay._is_aliyun()
- self.assertEqual(self.read_dmi_data_expected,
- m_read_dmi_data.call_args_list)
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
self.assertEqual(False, ret)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_altcloud.py b/tests/unittests/sources/test_altcloud.py
index 7384c104..44dfafd9 100644
--- a/tests/unittests/sources/test_altcloud.py
+++ b/tests/unittests/sources/test_altcloud.py
@@ -6,54 +6,47 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''
+"""
This test file exercises the code in sources DataSourceAltCloud.py
-'''
+"""
import os
import shutil
import tempfile
-from cloudinit import dmi
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
-from tests.unittests.helpers import CiTestCase, mock
-
import cloudinit.sources.DataSourceAltCloud as dsac
+from cloudinit import dmi, helpers, subp, util
+from tests.unittests.helpers import CiTestCase, mock
-OS_UNAME_ORIG = getattr(os, 'uname')
+OS_UNAME_ORIG = getattr(os, "uname")
def _write_user_data_files(mount_dir, value):
- '''
+ """
    Populate the deltacloud_user_data_file and the user_data_file,
    which would be populated with user data.
- '''
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
+ """
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
- udfile = open(deltacloud_user_data_file, 'w')
+ udfile = open(deltacloud_user_data_file, "w")
udfile.write(value)
udfile.close()
os.chmod(deltacloud_user_data_file, 0o664)
- udfile = open(user_data_file, 'w')
+ udfile = open(user_data_file, "w")
udfile.write(value)
udfile.close()
os.chmod(user_data_file, 0o664)
-def _remove_user_data_files(mount_dir,
- dc_file=True,
- non_dc_file=True):
- '''
+def _remove_user_data_files(mount_dir, dc_file=True, non_dc_file=True):
+ """
Remove the test files: deltacloud_user_data_file and
user_data_file
- '''
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
+ """
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
    # Ignore any failures removing files that are already gone.
if dc_file:
@@ -70,9 +63,10 @@ def _remove_user_data_files(mount_dir,
def _dmi_data(expected):
- '''
+ """
Spoof the data received over DMI
- '''
+ """
+
def _data(key):
return expected
@@ -80,19 +74,19 @@ def _dmi_data(expected):
class TestGetCloudType(CiTestCase):
- '''Test to exercise method: DataSourceAltCloud.get_cloud_type()'''
+ """Test to exercise method: DataSourceAltCloud.get_cloud_type()"""
with_logs = True
def setUp(self):
- '''Set up.'''
+ """Set up."""
super(TestGetCloudType, self).setUp()
self.tmp = self.tmp_dir()
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
self.dmi_data = dmi.read_dmi_data
# We have a different code path for arm to deal with LP1243287
# We have to switch arch to x86_64 to avoid test failure
- force_arch('x86_64')
+ force_arch("x86_64")
def tearDown(self):
# Reset
@@ -101,216 +95,226 @@ class TestGetCloudType(CiTestCase):
def test_cloud_info_file_ioerror(self):
"""Return UNKNOWN when /etc/sysconfig/cloud-info exists but errors."""
- self.assertEqual('/etc/sysconfig/cloud-info', dsac.CLOUD_INFO_FILE)
+ self.assertEqual("/etc/sysconfig/cloud-info", dsac.CLOUD_INFO_FILE)
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
# Attempting to read the directory generates IOError
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.tmp):
- self.assertEqual('UNKNOWN', dsrc.get_cloud_type())
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.tmp):
+ self.assertEqual("UNKNOWN", dsrc.get_cloud_type())
self.assertIn(
- "[Errno 21] Is a directory: '%s'" % self.tmp,
- self.logs.getvalue())
+ "[Errno 21] Is a directory: '%s'" % self.tmp, self.logs.getvalue()
+ )
def test_cloud_info_file(self):
"""Return uppercase stripped content from /etc/sysconfig/cloud-info."""
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- cloud_info = self.tmp_path('cloud-info', dir=self.tmp)
- util.write_file(cloud_info, ' OverRiDdeN CloudType ')
+ cloud_info = self.tmp_path("cloud-info", dir=self.tmp)
+ util.write_file(cloud_info, " OverRiDdeN CloudType ")
# Attempting to read the directory generates IOError
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', cloud_info):
- self.assertEqual('OVERRIDDEN CLOUDTYPE', dsrc.get_cloud_type())
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", cloud_info):
+ self.assertEqual("OVERRIDDEN CLOUDTYPE", dsrc.get_cloud_type())
def test_rhev(self):
- '''
+ """
Test method get_cloud_type() for RHEVm systems.
Forcing read_dmi_data return to match a RHEVm system: RHEV Hypervisor
- '''
- dmi.read_dmi_data = _dmi_data('RHEV')
+ """
+ dmi.read_dmi_data = _dmi_data("RHEV")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('RHEV', dsrc.get_cloud_type())
+ self.assertEqual("RHEV", dsrc.get_cloud_type())
def test_vsphere(self):
- '''
+ """
Test method get_cloud_type() for vSphere systems.
    Forcing read_dmi_data return to match a vSphere system:
    VMware Virtual Platform
- '''
- dmi.read_dmi_data = _dmi_data('VMware Virtual Platform')
+ """
+ dmi.read_dmi_data = _dmi_data("VMware Virtual Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('VSPHERE', dsrc.get_cloud_type())
+ self.assertEqual("VSPHERE", dsrc.get_cloud_type())
def test_unknown(self):
- '''
+ """
Test method get_cloud_type() for unknown systems.
Forcing read_dmi_data return to match an unrecognized return.
- '''
- dmi.read_dmi_data = _dmi_data('Unrecognized Platform')
+ """
+ dmi.read_dmi_data = _dmi_data("Unrecognized Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('UNKNOWN', dsrc.get_cloud_type())
+ self.assertEqual("UNKNOWN", dsrc.get_cloud_type())
class TestGetDataCloudInfoFile(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.get_data()
With a contrived CLOUD_INFO_FILE
- '''
+ """
+
def setUp(self):
- '''Set up.'''
+ """Set up."""
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
- self.cloud_info_file = self.tmp_path('cloud-info', dir=self.tmp)
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+ self.cloud_info_file = self.tmp_path("cloud-info", dir=self.tmp)
def test_rhev(self):
- '''Success Test module get_data() forcing RHEV.'''
+ """Success Test module get_data() forcing RHEV."""
- util.write_file(self.cloud_info_file, 'RHEV')
+ util.write_file(self.cloud_info_file, "RHEV")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: True
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(True, dsrc.get_data())
- self.assertEqual('altcloud', dsrc.cloud_name)
- self.assertEqual('altcloud', dsrc.platform_type)
- self.assertEqual('rhev (/dev/fd0)', dsrc.subplatform)
+ self.assertEqual("altcloud", dsrc.cloud_name)
+ self.assertEqual("altcloud", dsrc.platform_type)
+ self.assertEqual("rhev (/dev/fd0)", dsrc.subplatform)
def test_vsphere(self):
- '''Success Test module get_data() forcing VSPHERE.'''
+ """Success Test module get_data() forcing VSPHERE."""
- util.write_file(self.cloud_info_file, 'VSPHERE')
+ util.write_file(self.cloud_info_file, "VSPHERE")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: True
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(True, dsrc.get_data())
- self.assertEqual('altcloud', dsrc.cloud_name)
- self.assertEqual('altcloud', dsrc.platform_type)
- self.assertEqual('vsphere (unknown)', dsrc.subplatform)
+ self.assertEqual("altcloud", dsrc.cloud_name)
+ self.assertEqual("altcloud", dsrc.platform_type)
+ self.assertEqual("vsphere (unknown)", dsrc.subplatform)
def test_fail_rhev(self):
- '''Failure Test module get_data() forcing RHEV.'''
+ """Failure Test module get_data() forcing RHEV."""
- util.write_file(self.cloud_info_file, 'RHEV')
+ util.write_file(self.cloud_info_file, "RHEV")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: False
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(False, dsrc.get_data())
def test_fail_vsphere(self):
- '''Failure Test module get_data() forcing VSPHERE.'''
+ """Failure Test module get_data() forcing VSPHERE."""
- util.write_file(self.cloud_info_file, 'VSPHERE')
+ util.write_file(self.cloud_info_file, "VSPHERE")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: False
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(False, dsrc.get_data())
def test_unrecognized(self):
- '''Failure Test module get_data() forcing unrecognized.'''
+ """Failure Test module get_data() forcing unrecognized."""
- util.write_file(self.cloud_info_file, 'unrecognized')
+ util.write_file(self.cloud_info_file, "unrecognized")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(False, dsrc.get_data())
class TestGetDataNoCloudInfoFile(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.get_data()
Without a CLOUD_INFO_FILE
- '''
+ """
+
def setUp(self):
- '''Set up.'''
+ """Set up."""
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
self.dmi_data = dmi.read_dmi_data
- dsac.CLOUD_INFO_FILE = \
- 'no such file'
+ dsac.CLOUD_INFO_FILE = "no such file"
# We have a different code path for arm to deal with LP1243287
# We have to switch arch to x86_64 to avoid test failure
- force_arch('x86_64')
+ force_arch("x86_64")
def tearDown(self):
# Reset
- dsac.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
+ dsac.CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
dmi.read_dmi_data = self.dmi_data
# Return back to original arch
force_arch()
def test_rhev_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing RHEV.'''
+ """Test No cloud info file module get_data() forcing RHEV."""
- dmi.read_dmi_data = _dmi_data('RHEV Hypervisor')
+ dmi.read_dmi_data = _dmi_data("RHEV Hypervisor")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: True
self.assertEqual(True, dsrc.get_data())
def test_vsphere_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing VSPHERE.'''
+ """Test No cloud info file module get_data() forcing VSPHERE."""
- dmi.read_dmi_data = _dmi_data('VMware Virtual Platform')
+ dmi.read_dmi_data = _dmi_data("VMware Virtual Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: True
self.assertEqual(True, dsrc.get_data())
def test_failure_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing unrecognized.'''
+ """Test No cloud info file module get_data() forcing unrecognized."""
- dmi.read_dmi_data = _dmi_data('Unrecognized Platform')
+ dmi.read_dmi_data = _dmi_data("Unrecognized Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.get_data())
class TestUserDataRhevm(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.user_data_rhevm()
- '''
+ """
+
def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ """Set up."""
+ self.paths = helpers.Paths({"cloud_dir": "/tmp"})
self.mount_dir = self.tmp_dir()
- _write_user_data_files(self.mount_dir, 'test user data')
+ _write_user_data_files(self.mount_dir, "test user data")
self.add_patch(
- 'cloudinit.sources.DataSourceAltCloud.modprobe_floppy',
- 'm_modprobe_floppy', return_value=None)
+ "cloudinit.sources.DataSourceAltCloud.modprobe_floppy",
+ "m_modprobe_floppy",
+ return_value=None,
+ )
self.add_patch(
- 'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle',
- 'm_udevadm_settle', return_value=('', ''))
+ "cloudinit.sources.DataSourceAltCloud.util.udevadm_settle",
+ "m_udevadm_settle",
+ return_value=("", ""),
+ )
self.add_patch(
- 'cloudinit.sources.DataSourceAltCloud.util.mount_cb',
- 'm_mount_cb')
+ "cloudinit.sources.DataSourceAltCloud.util.mount_cb", "m_mount_cb"
+ )
def test_mount_cb_fails(self):
- '''Test user_data_rhevm() where mount_cb fails.'''
+ """Test user_data_rhevm() where mount_cb fails."""
self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_modprobe_fails(self):
- '''Test user_data_rhevm() where modprobe fails.'''
+ """Test user_data_rhevm() where modprobe fails."""
self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
- "Failed modprobe")
+ "Failed modprobe"
+ )
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_no_modprobe_cmd(self):
- '''Test user_data_rhevm() with no modprobe command.'''
+ """Test user_data_rhevm() with no modprobe command."""
self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
- "No such file or dir")
+ "No such file or dir"
+ )
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_udevadm_fails(self):
- '''Test user_data_rhevm() where udevadm fails.'''
+ """Test user_data_rhevm() where udevadm fails."""
self.m_udevadm_settle.side_effect = subp.ProcessExecutionError(
- "Failed settle.")
+ "Failed settle."
+ )
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_no_udevadm_cmd(self):
- '''Test user_data_rhevm() with no udevadm command.'''
+ """Test user_data_rhevm() with no udevadm command."""
self.m_udevadm_settle.side_effect = OSError("No such file or dir")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
@@ -318,16 +322,17 @@ class TestUserDataRhevm(CiTestCase):
class TestUserDataVsphere(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.user_data_vsphere()
- '''
+ """
+
def setUp(self):
- '''Set up.'''
+ """Set up."""
self.tmp = self.tmp_dir()
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
self.mount_dir = tempfile.mkdtemp()
- _write_user_data_files(self.mount_dir, 'test user data')
+ _write_user_data_files(self.mount_dir, "test user data")
def tearDown(self):
# Reset
@@ -340,13 +345,12 @@ class TestUserDataVsphere(CiTestCase):
except OSError:
pass
- dsac.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
+ dsac.CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
def test_user_data_vsphere_no_cdrom(self, m_mount_cb, m_find_devs_with):
- '''Test user_data_vsphere() where mount_cb fails.'''
+ """Test user_data_vsphere() where mount_cb fails."""
m_mount_cb.return_value = []
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
@@ -356,7 +360,7 @@ class TestUserDataVsphere(CiTestCase):
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
def test_user_data_vsphere_mcb_fail(self, m_mount_cb, m_find_devs_with):
- '''Test user_data_vsphere() where mount_cb fails.'''
+ """Test user_data_vsphere() where mount_cb fails."""
m_find_devs_with.return_value = ["/dev/mock/cdrom"]
m_mount_cb.side_effect = util.MountFailedError("Unable To mount")
@@ -370,28 +374,30 @@ class TestUserDataVsphere(CiTestCase):
def test_user_data_vsphere_success(self, m_mount_cb, m_find_devs_with):
"""Test user_data_vsphere() where successful."""
m_find_devs_with.return_value = ["/dev/mock/cdrom"]
- m_mount_cb.return_value = 'raw userdata from cdrom'
+ m_mount_cb.return_value = "raw userdata from cdrom"
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- cloud_info = self.tmp_path('cloud-info', dir=self.tmp)
- util.write_file(cloud_info, 'VSPHERE')
+ cloud_info = self.tmp_path("cloud-info", dir=self.tmp)
+ util.write_file(cloud_info, "VSPHERE")
self.assertEqual(True, dsrc.user_data_vsphere())
- m_find_devs_with.assert_called_once_with('LABEL=CDROM')
+ m_find_devs_with.assert_called_once_with("LABEL=CDROM")
m_mount_cb.assert_called_once_with(
- '/dev/mock/cdrom', dsac.read_user_data_callback)
- with mock.patch.object(dsrc, 'get_cloud_type', return_value='VSPHERE'):
- self.assertEqual('vsphere (/dev/mock/cdrom)', dsrc.subplatform)
+ "/dev/mock/cdrom", dsac.read_user_data_callback
+ )
+ with mock.patch.object(dsrc, "get_cloud_type", return_value="VSPHERE"):
+ self.assertEqual("vsphere (/dev/mock/cdrom)", dsrc.subplatform)
class TestReadUserDataCallback(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.read_user_data_callback()
- '''
+ """
+
def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ """Set up."""
+ self.paths = helpers.Paths({"cloud_dir": "/tmp"})
self.mount_dir = tempfile.mkdtemp()
- _write_user_data_files(self.mount_dir, 'test user data')
+ _write_user_data_files(self.mount_dir, "test user data")
def tearDown(self):
# Reset
@@ -405,46 +411,49 @@ class TestReadUserDataCallback(CiTestCase):
pass
def test_callback_both(self):
- '''Test read_user_data_callback() with both files.'''
+ """Test read_user_data_callback() with both files."""
- self.assertEqual('test user data',
- dsac.read_user_data_callback(self.mount_dir))
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
def test_callback_dc(self):
- '''Test read_user_data_callback() with only DC file.'''
+ """Test read_user_data_callback() with only DC file."""
- _remove_user_data_files(self.mount_dir,
- dc_file=False,
- non_dc_file=True)
+ _remove_user_data_files(
+ self.mount_dir, dc_file=False, non_dc_file=True
+ )
- self.assertEqual('test user data',
- dsac.read_user_data_callback(self.mount_dir))
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
def test_callback_non_dc(self):
- '''Test read_user_data_callback() with only non-DC file.'''
+ """Test read_user_data_callback() with only non-DC file."""
- _remove_user_data_files(self.mount_dir,
- dc_file=True,
- non_dc_file=False)
+ _remove_user_data_files(
+ self.mount_dir, dc_file=True, non_dc_file=False
+ )
- self.assertEqual('test user data',
- dsac.read_user_data_callback(self.mount_dir))
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
def test_callback_none(self):
- '''Test read_user_data_callback() no files are found.'''
+ """Test read_user_data_callback() no files are found."""
_remove_user_data_files(self.mount_dir)
self.assertIsNone(dsac.read_user_data_callback(self.mount_dir))
def force_arch(arch=None):
-
def _os_uname():
- return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', arch)
+ return ("LINUX", "NODENAME", "RELEASE", "VERSION", arch)
if arch:
- setattr(os, 'uname', _os_uname)
+ setattr(os, "uname", _os_uname)
elif arch is None:
- setattr(os, 'uname', OS_UNAME_ORIG)
+ setattr(os, "uname", OS_UNAME_ORIG)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
index ad8be04b..8b0762b7 100644
--- a/tests/unittests/sources/test_azure.py
+++ b/tests/unittests/sources/test_azure.py
@@ -1,33 +1,47 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import url_helper
-from cloudinit.sources import (
- UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
-from cloudinit.util import (b64e, decode_binary, load_file, write_file,
- MountFailedError, json_dumps, load_json)
-from cloudinit.version import version_string as vs
-from tests.unittests.helpers import (
- HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
- ExitStack, resourceLocation)
-from cloudinit.sources.helpers import netlink
-
import copy
import crypt
-import httpretty
import json
import os
-import requests
import stat
import xml.etree.ElementTree as ET
-import yaml
+import httpretty
+import requests
+import yaml
-def construct_valid_ovf_env(data=None, pubkeys=None,
- userdata=None, platform_settings=None):
+from cloudinit import distros, helpers, url_helper
+from cloudinit.sources import UNSET
+from cloudinit.sources import DataSourceAzure as dsaz
+from cloudinit.sources import InvalidMetaDataException
+from cloudinit.sources.helpers import netlink
+from cloudinit.util import (
+ MountFailedError,
+ b64e,
+ decode_binary,
+ json_dumps,
+ load_file,
+ load_json,
+ write_file,
+)
+from cloudinit.version import version_string as vs
+from tests.unittests.helpers import (
+ CiTestCase,
+ ExitStack,
+ HttprettyTestCase,
+ mock,
+ populate_dir,
+ resourceLocation,
+ wrap_and_call,
+)
+
+
+def construct_valid_ovf_env(
+ data=None, pubkeys=None, userdata=None, platform_settings=None
+):
if data is None:
- data = {'HostName': 'FOOHOST'}
+ data = {"HostName": "FOOHOST"}
if pubkeys is None:
pubkeys = {}
@@ -45,9 +59,14 @@ def construct_valid_ovf_env(data=None, pubkeys=None,
"""
for key, dval in data.items():
if isinstance(dval, dict):
- val = dict(dval).get('text')
- attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v
- in dict(dval).items() if k != 'text'])
+ val = dict(dval).get("text")
+ attrs = " " + " ".join(
+ [
+ "%s='%s'" % (k, v)
+ for k, v in dict(dval).items()
+ if k != "text"
+ ]
+ )
else:
val = dval
attrs = ""
@@ -61,8 +80,10 @@ def construct_valid_ovf_env(data=None, pubkeys=None,
for fp, path, value in pubkeys:
content += " <PublicKey>"
if fp and path:
- content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" %
- (fp, path))
+ content += "<Fingerprint>%s</Fingerprint><Path>%s</Path>" % (
+ fp,
+ path,
+ )
if value:
content += "<Value>%s</Value>" % value
content += "</PublicKey>\n"
@@ -106,300 +127,331 @@ NETWORK_METADATA = {
"vmScaleSetName": "",
"vmSize": "Standard_DS1_v2",
"zone": "",
- "publicKeys": [
- {
- "keyData": "ssh-rsa key1",
- "path": "path1"
- }
- ]
+ "publicKeys": [{"keyData": "ssh-rsa key1", "path": "path1"}],
},
"network": {
"interface": [
{
"macAddress": "000D3A047598",
- "ipv6": {
- "ipAddress": []
- },
+ "ipv6": {"ipAddress": []},
"ipv4": {
- "subnet": [
- {
- "prefix": "24",
- "address": "10.0.0.0"
- }
- ],
+ "subnet": [{"prefix": "24", "address": "10.0.0.0"}],
"ipAddress": [
{
"privateIpAddress": "10.0.0.4",
- "publicIpAddress": "104.46.124.81"
+ "publicIpAddress": "104.46.124.81",
}
- ]
- }
+ ],
+ },
}
]
- }
+ },
}
SECONDARY_INTERFACE = {
"macAddress": "220D3A047598",
- "ipv6": {
- "ipAddress": []
- },
+ "ipv6": {"ipAddress": []},
"ipv4": {
- "subnet": [
- {
- "prefix": "24",
- "address": "10.0.1.0"
- }
- ],
+ "subnet": [{"prefix": "24", "address": "10.0.1.0"}],
"ipAddress": [
{
"privateIpAddress": "10.0.1.5",
}
- ]
- }
+ ],
+ },
}
SECONDARY_INTERFACE_NO_IP = {
"macAddress": "220D3A047598",
- "ipv6": {
- "ipAddress": []
- },
+ "ipv6": {"ipAddress": []},
"ipv4": {
- "subnet": [
- {
- "prefix": "24",
- "address": "10.0.1.0"
- }
- ],
- "ipAddress": []
- }
+ "subnet": [{"prefix": "24", "address": "10.0.1.0"}],
+ "ipAddress": [],
+ },
}
IMDS_NETWORK_METADATA = {
"interface": [
{
"macAddress": "000D3A047598",
- "ipv6": {
- "ipAddress": []
- },
+ "ipv6": {"ipAddress": []},
"ipv4": {
- "subnet": [
- {
- "prefix": "24",
- "address": "10.0.0.0"
- }
- ],
+ "subnet": [{"prefix": "24", "address": "10.0.0.0"}],
"ipAddress": [
{
"privateIpAddress": "10.0.0.4",
- "publicIpAddress": "104.46.124.81"
+ "publicIpAddress": "104.46.124.81",
}
- ]
- }
+ ],
+ },
}
]
}
-MOCKPATH = 'cloudinit.sources.DataSourceAzure.'
-EXAMPLE_UUID = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8'
+MOCKPATH = "cloudinit.sources.DataSourceAzure."
+EXAMPLE_UUID = "d0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8"
class TestParseNetworkConfig(CiTestCase):
maxDiff = None
fallback_config = {
- 'version': 1,
- 'config': [{
- 'type': 'physical', 'name': 'eth0',
- 'mac_address': '00:11:22:33:44:55',
- 'params': {'driver': 'hv_netsvc'},
- 'subnets': [{'type': 'dhcp'}],
- }]
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
}
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
def test_single_ipv4_nic_configuration(self, m_driver):
"""parse_network_config emits dhcp on single nic with ipv4"""
- expected = {'ethernets': {
- 'eth0': {'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}}, 'version': 2}
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
def test_increases_route_metric_for_non_primary_nics(self, m_driver):
"""parse_network_config increases route-metric for each nic"""
- expected = {'ethernets': {
- 'eth0': {'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'},
- 'eth1': {'set-name': 'eth1',
- 'match': {'macaddress': '22:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 200}},
- 'eth2': {'set-name': 'eth2',
- 'match': {'macaddress': '33:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2}
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
imds_data = copy.deepcopy(NETWORK_METADATA)
- imds_data['network']['interface'].append(SECONDARY_INTERFACE)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
third_intf = copy.deepcopy(SECONDARY_INTERFACE)
- third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33')
- third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0'
- third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6'
- imds_data['network']['interface'].append(third_intf)
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ imds_data["network"]["interface"].append(third_intf)
self.assertEqual(expected, dsaz.parse_network_config(imds_data))
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver):
"""parse_network_config emits matching ipv4 and ipv6 route-metrics."""
- expected = {'ethernets': {
- 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'],
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 100},
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'},
- 'eth1': {'set-name': 'eth1',
- 'match': {'macaddress': '22:0d:3a:04:75:98'},
- 'dhcp4': True,
- 'dhcp6': False,
- 'dhcp4-overrides': {'route-metric': 200}},
- 'eth2': {'set-name': 'eth2',
- 'match': {'macaddress': '33:0d:3a:04:75:98'},
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 300},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2}
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24", "2001:dead:beef::2/128"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp4": True,
+ "dhcp6": False,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data['network']['interface'][0]
- nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'})
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
- nic1['ipv6'] = {
+ nic1["ipv6"] = {
"subnet": [{"address": "2001:dead:beef::16"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"},
- {"privateIpAddress": "2001:dead:beef::2"}]
+ "ipAddress": [
+ {"privateIpAddress": "2001:dead:beef::1"},
+ {"privateIpAddress": "2001:dead:beef::2"},
+ ],
}
- imds_data['network']['interface'].append(SECONDARY_INTERFACE)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
third_intf = copy.deepcopy(SECONDARY_INTERFACE)
- third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33')
- third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0'
- third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6'
- third_intf['ipv6'] = {
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ third_intf["ipv6"] = {
"subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}]
+ "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}],
}
- imds_data['network']['interface'].append(third_intf)
+ imds_data["network"]["interface"].append(third_intf)
self.assertEqual(expected, dsaz.parse_network_config(imds_data))
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver):
"""parse_network_config emits primary ipv4 as dhcp others are static"""
- expected = {'ethernets': {
- 'eth0': {'addresses': ['10.0.0.5/24'],
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 100},
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}}, 'version': 2}
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data['network']['interface'][0]
- nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'})
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
- nic1['ipv6'] = {
+ nic1["ipv6"] = {
"subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}]
+ "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}],
}
self.assertEqual(expected, dsaz.parse_network_config(imds_data))
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver):
"""parse_network_config emits primary ipv6 as dhcp others are static"""
- expected = {'ethernets': {
- 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'],
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 100},
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}}, 'version': 2}
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24", "2001:dead:beef::2/10"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data['network']['interface'][0]
- nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'})
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
# Secondary ipv6 addresses currently ignored/unconfigured
- nic1['ipv6'] = {
+ nic1["ipv6"] = {
"subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"},
- {"privateIpAddress": "2001:dead:beef::2"}]
+ "ipAddress": [
+ {"privateIpAddress": "2001:dead:beef::1"},
+ {"privateIpAddress": "2001:dead:beef::2"},
+ ],
}
self.assertEqual(expected, dsaz.parse_network_config(imds_data))
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value='hv_netvsc')
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver",
+ return_value="hv_netvsc",
+ )
def test_match_driver_for_netvsc(self, m_driver):
"""parse_network_config emits driver when using netvsc."""
- expected = {'ethernets': {
- 'eth0': {
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {
- 'macaddress': '00:0d:3a:04:75:98',
- 'driver': 'hv_netvsc',
- },
- 'set-name': 'eth0'
- }}, 'version': 2}
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {
+ "macaddress": "00:0d:3a:04:75:98",
+ "driver": "hv_netvsc",
+ },
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- @mock.patch('cloudinit.net.generate_fallback_config')
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata(
- self, m_fallback_config, m_driver):
+ self, m_fallback_config, m_driver
+ ):
"""parse_network_config generates fallback network config when the
IMDS instance metadata is corrupted/invalid, such as when
network metadata is not present.
"""
imds_metadata_missing_network_metadata = copy.deepcopy(
- NETWORK_METADATA)
- del imds_metadata_missing_network_metadata['network']
+ NETWORK_METADATA
+ )
+ del imds_metadata_missing_network_metadata["network"]
m_fallback_config.return_value = self.fallback_config
self.assertEqual(
self.fallback_config,
- dsaz.parse_network_config(
- imds_metadata_missing_network_metadata))
+ dsaz.parse_network_config(imds_metadata_missing_network_metadata),
+ )
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- @mock.patch('cloudinit.net.generate_fallback_config')
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata(
- self, m_fallback_config, m_driver):
+ self, m_fallback_config, m_driver
+ ):
"""parse_network_config generates fallback network config when the
IMDS instance metadata is corrupted/invalid, such as when
network interface metadata is not present.
"""
imds_metadata_missing_interface_metadata = copy.deepcopy(
- NETWORK_METADATA)
- del imds_metadata_missing_interface_metadata['network']['interface']
+ NETWORK_METADATA
+ )
+ del imds_metadata_missing_interface_metadata["network"]["interface"]
m_fallback_config.return_value = self.fallback_config
self.assertEqual(
self.fallback_config,
dsaz.parse_network_config(
- imds_metadata_missing_interface_metadata))
+ imds_metadata_missing_interface_metadata
+ ),
+ )
class TestGetMetadataFromIMDS(HttprettyTestCase):
@@ -412,175 +464,218 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
dsaz.IMDS_URL
)
- @mock.patch(MOCKPATH + 'readurl')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True)
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
+ @mock.patch(MOCKPATH + "readurl")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ @mock.patch(MOCKPATH + "net.is_up", autospec=True)
def test_get_metadata_does_not_dhcp_if_network_is_up(
- self, m_net_is_up, m_dhcp, m_readurl):
+ self, m_net_is_up, m_dhcp, m_readurl
+ ):
"""Do not perform DHCP setup when nic is already up."""
m_net_is_up.return_value = True
m_readurl.return_value = url_helper.StringResponse(
- json.dumps(NETWORK_METADATA).encode('utf-8'))
+ json.dumps(NETWORK_METADATA).encode("utf-8")
+ )
self.assertEqual(
- NETWORK_METADATA,
- dsaz.get_metadata_from_imds('eth9', retries=3))
+ NETWORK_METADATA, dsaz.get_metadata_from_imds("eth9", retries=3)
+ )
- m_net_is_up.assert_called_with('eth9')
+ m_net_is_up.assert_called_with("eth9")
m_dhcp.assert_not_called()
self.assertIn(
"Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'net.is_up')
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ @mock.patch(MOCKPATH + "net.is_up")
def test_get_metadata_uses_instance_url(
- self, m_net_is_up, m_dhcp, m_readurl):
+ self, m_net_is_up, m_dhcp, m_readurl
+ ):
"""Make sure readurl is called with the correct url when accessing
metadata"""
m_net_is_up.return_value = True
m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
dsaz.get_metadata_from_imds(
- 'eth0', retries=3, md_type=dsaz.metadata_type.all)
+ "eth0", retries=3, md_type=dsaz.metadata_type.all
+ )
m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance?api-version="
- "2019-06-01", exception_cb=mock.ANY,
- headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY, infinite=False)
+ "http://169.254.169.254/metadata/instance?api-version=2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'net.is_up')
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ @mock.patch(MOCKPATH + "net.is_up")
def test_get_network_metadata_uses_network_url(
- self, m_net_is_up, m_dhcp, m_readurl):
+ self, m_net_is_up, m_dhcp, m_readurl
+ ):
"""Make sure readurl is called with the correct url when accessing
network metadata"""
m_net_is_up.return_value = True
m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
dsaz.get_metadata_from_imds(
- 'eth0', retries=3, md_type=dsaz.metadata_type.network)
+ "eth0", retries=3, md_type=dsaz.metadata_type.network
+ )
m_readurl.assert_called_with(
"http://169.254.169.254/metadata/instance/network?api-version="
- "2019-06-01", exception_cb=mock.ANY,
- headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY, infinite=False)
+ "2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'net.is_up')
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ @mock.patch(MOCKPATH + "net.is_up")
def test_get_default_metadata_uses_instance_url(
- self, m_net_is_up, m_dhcp, m_readurl):
+ self, m_net_is_up, m_dhcp, m_readurl
+ ):
"""Make sure readurl is called with the correct url when accessing
metadata"""
m_net_is_up.return_value = True
m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
- dsaz.get_metadata_from_imds(
- 'eth0', retries=3)
+ dsaz.get_metadata_from_imds("eth0", retries=3)
m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance?api-version="
- "2019-06-01", exception_cb=mock.ANY,
- headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY, infinite=False)
+ "http://169.254.169.254/metadata/instance?api-version=2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'net.is_up')
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ @mock.patch(MOCKPATH + "net.is_up")
def test_get_metadata_uses_extended_url(
- self, m_net_is_up, m_dhcp, m_readurl):
+ self, m_net_is_up, m_dhcp, m_readurl
+ ):
"""Make sure readurl is called with the correct url when accessing
metadata"""
m_net_is_up.return_value = True
m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
dsaz.get_metadata_from_imds(
- 'eth0', retries=3, md_type=dsaz.metadata_type.all,
- api_version="2021-08-01")
+ "eth0",
+ retries=3,
+ md_type=dsaz.metadata_type.all,
+ api_version="2021-08-01",
+ )
m_readurl.assert_called_with(
"http://169.254.169.254/metadata/instance?api-version="
- "2021-08-01&extended=true", exception_cb=mock.ANY,
- headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY, infinite=False)
+ "2021-08-01&extended=true",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True)
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4WithReporting", autospec=True)
+ @mock.patch(MOCKPATH + "net.is_up", autospec=True)
def test_get_metadata_performs_dhcp_when_network_is_down(
- self, m_net_is_up, m_dhcp, m_readurl):
+ self, m_net_is_up, m_dhcp, m_readurl
+ ):
"""Perform DHCP setup when nic is not up."""
m_net_is_up.return_value = False
m_readurl.return_value = url_helper.StringResponse(
- json.dumps(NETWORK_METADATA).encode('utf-8'))
+ json.dumps(NETWORK_METADATA).encode("utf-8")
+ )
self.assertEqual(
- NETWORK_METADATA,
- dsaz.get_metadata_from_imds('eth9', retries=2))
+ NETWORK_METADATA, dsaz.get_metadata_from_imds("eth9", retries=2)
+ )
- m_net_is_up.assert_called_with('eth9')
- m_dhcp.assert_called_with(mock.ANY, 'eth9')
+ m_net_is_up.assert_called_with("eth9")
+ m_dhcp.assert_called_with(mock.ANY, "eth9")
self.assertIn(
"Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
m_readurl.assert_called_with(
- self.network_md_url, exception_cb=mock.ANY,
- headers={'Metadata': 'true'}, retries=2,
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False)
+ self.network_md_url,
+ exception_cb=mock.ANY,
+ headers={"Metadata": "true"},
+ retries=2,
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ infinite=False,
+ )
- @mock.patch('cloudinit.url_helper.time.sleep')
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
+ @mock.patch("cloudinit.url_helper.time.sleep")
+ @mock.patch(MOCKPATH + "net.is_up", autospec=True)
def test_get_metadata_from_imds_empty_when_no_imds_present(
- self, m_net_is_up, m_sleep):
+ self, m_net_is_up, m_sleep
+ ):
"""Return empty dict when IMDS network metadata is absent."""
httpretty.register_uri(
httpretty.GET,
- dsaz.IMDS_URL + '/instance?api-version=2017-12-01',
- body={}, status=404)
+ dsaz.IMDS_URL + "/instance?api-version=2017-12-01",
+ body={},
+ status=404,
+ )
m_net_is_up.return_value = True # skips dhcp
- self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2))
+ self.assertEqual({}, dsaz.get_metadata_from_imds("eth9", retries=2))
- m_net_is_up.assert_called_with('eth9')
+ m_net_is_up.assert_called_with("eth9")
self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list)
self.assertIn(
"Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('requests.Session.request')
- @mock.patch('cloudinit.url_helper.time.sleep')
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
+ @mock.patch("requests.Session.request")
+ @mock.patch("cloudinit.url_helper.time.sleep")
+ @mock.patch(MOCKPATH + "net.is_up", autospec=True)
def test_get_metadata_from_imds_retries_on_timeout(
- self, m_net_is_up, m_sleep, m_request):
+ self, m_net_is_up, m_sleep, m_request
+ ):
"""Retry IMDS network metadata on timeout errors."""
self.attempt = 0
- m_request.side_effect = requests.Timeout('Fake Connection Timeout')
+ m_request.side_effect = requests.Timeout("Fake Connection Timeout")
def retry_callback(request, uri, headers):
self.attempt += 1
- raise requests.Timeout('Fake connection timeout')
+ raise requests.Timeout("Fake connection timeout")
httpretty.register_uri(
httpretty.GET,
- dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
- body=retry_callback)
+ dsaz.IMDS_URL + "instance?api-version=2017-12-01",
+ body=retry_callback,
+ )
m_net_is_up.return_value = True # skips dhcp
- self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3))
+ self.assertEqual({}, dsaz.get_metadata_from_imds("eth9", retries=3))
- m_net_is_up.assert_called_with('eth9')
- self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list)
+ m_net_is_up.assert_called_with("eth9")
+ self.assertEqual([mock.call(1)] * 3, m_sleep.call_args_list)
self.assertIn(
"Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
class TestAzureDataSource(CiTestCase):
@@ -593,25 +688,35 @@ class TestAzureDataSource(CiTestCase):
# patch cloud_dir, so our 'seed_dir' is guaranteed empty
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
- self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+ self.waagent_d = os.path.join(self.tmp, "var", "lib", "waagent")
self.patches = ExitStack()
self.addCleanup(self.patches.close)
- self.patches.enter_context(mock.patch.object(
- dsaz, '_get_random_seed', return_value='wild'))
+ self.patches.enter_context(
+ mock.patch.object(dsaz, "_get_random_seed", return_value="wild")
+ )
self.m_get_metadata_from_imds = self.patches.enter_context(
mock.patch.object(
- dsaz, 'get_metadata_from_imds',
- mock.MagicMock(return_value=NETWORK_METADATA)))
+ dsaz,
+ "get_metadata_from_imds",
+ mock.MagicMock(return_value=NETWORK_METADATA),
+ )
+ )
self.m_fallback_nic = self.patches.enter_context(
- mock.patch('cloudinit.sources.net.find_fallback_nic',
- return_value='eth9'))
+ mock.patch(
+ "cloudinit.sources.net.find_fallback_nic", return_value="eth9"
+ )
+ )
self.m_remove_ubuntu_network_scripts = self.patches.enter_context(
mock.patch.object(
- dsaz, 'maybe_remove_ubuntu_network_config_scripts',
- mock.MagicMock()))
+ dsaz,
+ "maybe_remove_ubuntu_network_config_scripts",
+ mock.MagicMock(),
+ )
+ )
super(TestAzureDataSource, self).setUp()
def apply_patches(self, patches):
@@ -619,15 +724,21 @@ class TestAzureDataSource(CiTestCase):
self.patches.enter_context(mock.patch.object(module, name, new))
def _get_mockds(self):
- sysctl_out = "dev.storvsc.3.%pnpinfo: "\
- "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\
- "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n"
- sysctl_out += "dev.storvsc.2.%pnpinfo: "\
- "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\
- "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n"
- sysctl_out += "dev.storvsc.1.%pnpinfo: "\
- "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "\
- "deviceid=00000000-0001-8899-0000-000000000000\n"
+ sysctl_out = (
+ "dev.storvsc.3.%pnpinfo: "
+ "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "
+ "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n"
+ )
+ sysctl_out += (
+ "dev.storvsc.2.%pnpinfo: "
+ "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "
+ "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n"
+ )
+ sysctl_out += (
+ "dev.storvsc.1.%pnpinfo: "
+ "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "
+ "deviceid=00000000-0001-8899-0000-000000000000\n"
+ )
camctl_devbus = """
scbus0 on ata0 bus 0
scbus1 on ata1 bus 0
@@ -642,45 +753,57 @@ scbus-1 on xpt0 bus 0
<Msft Virtual Disk 1.0> at scbus2 target 0 lun 0 (da0,pass1)
<Msft Virtual Disk 1.0> at scbus3 target 1 lun 0 (da1,pass2)
"""
- self.apply_patches([
- (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock(
- return_value=sysctl_out)),
- (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock(
- return_value=camctl_devbus)),
- (dsaz, 'get_camcontrol_dev', mock.MagicMock(
- return_value=camctl_dev))
- ])
+ self.apply_patches(
+ [
+ (
+ dsaz,
+ "get_dev_storvsc_sysctl",
+ mock.MagicMock(return_value=sysctl_out),
+ ),
+ (
+ dsaz,
+ "get_camcontrol_dev_bus",
+ mock.MagicMock(return_value=camctl_devbus),
+ ),
+ (
+ dsaz,
+ "get_camcontrol_dev",
+ mock.MagicMock(return_value=camctl_dev),
+ ),
+ ]
+ )
return dsaz
- def _get_ds(self, data, distro='ubuntu',
- apply_network=None, instance_id=None):
-
+ def _get_ds(
+ self, data, distro="ubuntu", apply_network=None, instance_id=None
+ ):
def _wait_for_files(flist, _maxwait=None, _naplen=None):
- data['waited'] = flist
+ data["waited"] = flist
return []
def _load_possible_azure_ds(seed_dir, cache_dir):
yield seed_dir
yield dsaz.DEFAULT_PROVISIONING_ISO_DEV
- yield from data.get('dsdevs', [])
+ yield from data.get("dsdevs", [])
if cache_dir:
yield cache_dir
seed_dir = os.path.join(self.paths.seed_dir, "azure")
- if data.get('ovfcontent') is not None:
- populate_dir(seed_dir,
- {'ovf-env.xml': data['ovfcontent']})
+ if data.get("ovfcontent") is not None:
+ populate_dir(seed_dir, {"ovf-env.xml": data["ovfcontent"]})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
self.m_is_platform_viable = mock.MagicMock(autospec=True)
self.m_get_metadata_from_fabric = mock.MagicMock(
- return_value={'public-keys': []})
+ return_value={"public-keys": []}
+ )
self.m_report_failure_to_fabric = mock.MagicMock(autospec=True)
self.m_ephemeral_dhcpv4 = mock.MagicMock()
self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock()
self.m_list_possible_azure_ds = mock.MagicMock(
- side_effect=_load_possible_azure_ds)
+ side_effect=_load_possible_azure_ds
+ )
if instance_id:
self.instance_id = instance_id
@@ -688,39 +811,59 @@ scbus-1 on xpt0 bus 0
self.instance_id = EXAMPLE_UUID
def _dmi_mocks(key):
- if key == 'system-uuid':
+ if key == "system-uuid":
return self.instance_id
- elif key == 'chassis-asset-tag':
- return '7783-7084-3265-9085-8269-3286-77'
-
- self.apply_patches([
- (dsaz, 'list_possible_azure_ds',
- self.m_list_possible_azure_ds),
- (dsaz, '_is_platform_viable',
- self.m_is_platform_viable),
- (dsaz, 'get_metadata_from_fabric',
- self.m_get_metadata_from_fabric),
- (dsaz, 'report_failure_to_fabric',
- self.m_report_failure_to_fabric),
- (dsaz, 'EphemeralDHCPv4', self.m_ephemeral_dhcpv4),
- (dsaz, 'EphemeralDHCPv4WithReporting',
- self.m_ephemeral_dhcpv4_with_reporting),
- (dsaz, 'get_boot_telemetry', mock.MagicMock()),
- (dsaz, 'get_system_info', mock.MagicMock()),
- (dsaz.subp, 'which', lambda x: True),
- (dsaz.dmi, 'read_dmi_data', mock.MagicMock(
- side_effect=_dmi_mocks)),
- (dsaz.util, 'wait_for_files', mock.MagicMock(
- side_effect=_wait_for_files)),
- ])
+ elif key == "chassis-asset-tag":
+ return "7783-7084-3265-9085-8269-3286-77"
+
+ self.apply_patches(
+ [
+ (
+ dsaz,
+ "list_possible_azure_ds",
+ self.m_list_possible_azure_ds,
+ ),
+ (dsaz, "_is_platform_viable", self.m_is_platform_viable),
+ (
+ dsaz,
+ "get_metadata_from_fabric",
+ self.m_get_metadata_from_fabric,
+ ),
+ (
+ dsaz,
+ "report_failure_to_fabric",
+ self.m_report_failure_to_fabric,
+ ),
+ (dsaz, "EphemeralDHCPv4", self.m_ephemeral_dhcpv4),
+ (
+ dsaz,
+ "EphemeralDHCPv4WithReporting",
+ self.m_ephemeral_dhcpv4_with_reporting,
+ ),
+ (dsaz, "get_boot_telemetry", mock.MagicMock()),
+ (dsaz, "get_system_info", mock.MagicMock()),
+ (dsaz.subp, "which", lambda x: True),
+ (
+ dsaz.dmi,
+ "read_dmi_data",
+ mock.MagicMock(side_effect=_dmi_mocks),
+ ),
+ (
+ dsaz.util,
+ "wait_for_files",
+ mock.MagicMock(side_effect=_wait_for_files),
+ ),
+ ]
+ )
if isinstance(distro, str):
distro_cls = distros.fetch(distro)
- distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths)
+ distro = distro_cls(distro, data.get("sys_cfg", {}), self.paths)
dsrc = dsaz.DataSourceAzure(
- data.get('sys_cfg', {}), distro=distro, paths=self.paths)
+ data.get("sys_cfg", {}), distro=distro, paths=self.paths
+ )
if apply_network is not None:
- dsrc.ds_cfg['apply_network_config'] = apply_network
+ dsrc.ds_cfg["apply_network_config"] = apply_network
return dsrc
@@ -774,19 +917,18 @@ scbus-1 on xpt0 bus 0
data = {}
dsrc = self._get_ds(data)
self.m_is_platform_viable.return_value = False
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_report_failure') as m_report_failure:
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_report_failure"
+ ) as m_report_failure:
ret = dsrc.get_data()
self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
self.assertFalse(ret)
# Assert that for non viable platforms,
# there is no communication with the Azure datasource.
- self.assertEqual(
- 0,
- m_crawl_metadata.call_count)
- self.assertEqual(
- 0,
- m_report_failure.call_count)
+ self.assertEqual(0, m_crawl_metadata.call_count)
+ self.assertEqual(0, m_report_failure.call_count)
def test_platform_viable_but_no_devs_should_return_no_datasource(self):
"""For platforms where the Azure platform is viable
@@ -797,170 +939,190 @@ scbus-1 on xpt0 bus 0
"""
data = {}
dsrc = self._get_ds(data)
- with mock.patch.object(dsrc, '_report_failure') as m_report_failure:
+ with mock.patch.object(dsrc, "_report_failure") as m_report_failure:
self.m_is_platform_viable.return_value = True
ret = dsrc.get_data()
self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
self.assertFalse(ret)
- self.assertEqual(
- 1,
- m_report_failure.call_count)
+ self.assertEqual(1, m_report_failure.call_count)
def test_crawl_metadata_exception_returns_no_datasource(self):
data = {}
dsrc = self._get_ds(data)
self.m_is_platform_viable.return_value = True
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
m_crawl_metadata.side_effect = Exception
ret = dsrc.get_data()
self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
- self.assertEqual(
- 1,
- m_crawl_metadata.call_count)
+ self.assertEqual(1, m_crawl_metadata.call_count)
self.assertFalse(ret)
def test_crawl_metadata_exception_should_report_failure_with_msg(self):
data = {}
dsrc = self._get_ds(data)
self.m_is_platform_viable.return_value = True
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_report_failure') as m_report_failure:
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_report_failure"
+ ) as m_report_failure:
m_crawl_metadata.side_effect = Exception
dsrc.get_data()
- self.assertEqual(
- 1,
- m_crawl_metadata.call_count)
+ self.assertEqual(1, m_crawl_metadata.call_count)
m_report_failure.assert_called_once_with(
- description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+ description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self):
data = {}
dsrc = self._get_ds(data)
self.m_is_platform_viable.return_value = True
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
m_crawl_metadata.side_effect = Exception
dsrc.get_data()
- self.assertEqual(
- 1,
- m_crawl_metadata.call_count)
+ self.assertEqual(1, m_crawl_metadata.call_count)
self.assertIn(
- "Could not crawl Azure metadata",
- self.logs.getvalue())
+ "Could not crawl Azure metadata", self.logs.getvalue()
+ )
def test_basic_seed_dir(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(dsrc.userdata_raw, "")
- self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName'])
- self.assertTrue(os.path.isfile(
- os.path.join(self.waagent_d, 'ovf-env.xml')))
- self.assertEqual('azure', dsrc.cloud_name)
- self.assertEqual('azure', dsrc.platform_type)
+ self.assertEqual(dsrc.metadata["local-hostname"], odata["HostName"])
+ self.assertTrue(
+ os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
+ )
+ self.assertEqual("azure", dsrc.cloud_name)
+ self.assertEqual("azure", dsrc.platform_type)
self.assertEqual(
- 'seed-dir (%s/seed/azure)' % self.tmp, dsrc.subplatform)
+ "seed-dir (%s/seed/azure)" % self.tmp, dsrc.subplatform
+ )
def test_basic_dev_file(self):
"""When a device path is used, present that in subplatform."""
- data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']}
+ data = {"sys_cfg": {}, "dsdevs": ["/dev/cd0"]}
dsrc = self._get_ds(data)
# DSAzure will attempt to mount /dev/sr0 first, which should
# fail with mount error since the list of devices doesn't have
# /dev/sr0
- with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb:
+ with mock.patch(MOCKPATH + "util.mount_cb") as m_mount_cb:
m_mount_cb.side_effect = [
MountFailedError("fail"),
- ({'local-hostname': 'me'}, 'ud', {'cfg': ''}, {})
+ ({"local-hostname": "me"}, "ud", {"cfg": ""}, {}),
]
self.assertTrue(dsrc.get_data())
- self.assertEqual(dsrc.userdata_raw, 'ud')
- self.assertEqual(dsrc.metadata['local-hostname'], 'me')
- self.assertEqual('azure', dsrc.cloud_name)
- self.assertEqual('azure', dsrc.platform_type)
- self.assertEqual('config-disk (/dev/cd0)', dsrc.subplatform)
+ self.assertEqual(dsrc.userdata_raw, "ud")
+ self.assertEqual(dsrc.metadata["local-hostname"], "me")
+ self.assertEqual("azure", dsrc.cloud_name)
+ self.assertEqual("azure", dsrc.platform_type)
+ self.assertEqual("config-disk (/dev/cd0)", dsrc.subplatform)
def test_get_data_non_ubuntu_will_not_remove_network_scripts(self):
"""get_data on non-Ubuntu will not remove ubuntu net scripts."""
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
- dsrc = self._get_ds(data, distro='debian')
+ dsrc = self._get_ds(data, distro="debian")
dsrc.get_data()
self.m_remove_ubuntu_network_scripts.assert_not_called()
def test_get_data_on_ubuntu_will_remove_network_scripts(self):
"""get_data will remove ubuntu net scripts on Ubuntu distro."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
- dsrc = self._get_ds(data, distro='ubuntu')
+ dsrc = self._get_ds(data, distro="ubuntu")
dsrc.get_data()
self.m_remove_ubuntu_network_scripts.assert_called_once_with()
def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self):
"""When apply_network_config false, do not remove scripts on Ubuntu."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": False}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
- dsrc = self._get_ds(data, distro='ubuntu')
+ dsrc = self._get_ds(data, distro="ubuntu")
dsrc.get_data()
self.m_remove_ubuntu_network_scripts.assert_not_called()
def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
"""Return all structured metadata and cache no class attributes."""
yaml_cfg = ""
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'},
- 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserData": {"text": "FOOBAR", "encoding": "plain"},
+ "dscfg": {"text": yaml_cfg, "encoding": "plain"},
+ }
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
dsrc = self._get_ds(data)
expected_cfg = {
- 'PreprovisionedVMType': None,
- 'PreprovisionedVm': False,
- 'datasource': {'Azure': {}},
- 'system_info': {'default_user': {'name': 'myuser'}}}
+ "PreprovisionedVMType": None,
+ "PreprovisionedVm": False,
+ "datasource": {"Azure": {}},
+ "system_info": {"default_user": {"name": "myuser"}},
+ }
expected_metadata = {
- 'azure_data': {
- 'configurationsettype': 'LinuxProvisioningConfiguration'},
- 'imds': NETWORK_METADATA,
- 'instance-id': EXAMPLE_UUID,
- 'local-hostname': 'myhost',
- 'random_seed': 'wild'}
+ "azure_data": {
+ "configurationsettype": "LinuxProvisioningConfiguration"
+ },
+ "imds": NETWORK_METADATA,
+ "instance-id": EXAMPLE_UUID,
+ "local-hostname": "myhost",
+ "random_seed": "wild",
+ }
crawled_metadata = dsrc.crawl_metadata()
self.assertCountEqual(
crawled_metadata.keys(),
- ['cfg', 'files', 'metadata', 'userdata_raw'])
- self.assertEqual(crawled_metadata['cfg'], expected_cfg)
+ ["cfg", "files", "metadata", "userdata_raw"],
+ )
+ self.assertEqual(crawled_metadata["cfg"], expected_cfg)
self.assertEqual(
- list(crawled_metadata['files'].keys()), ['ovf-env.xml'])
+ list(crawled_metadata["files"].keys()), ["ovf-env.xml"]
+ )
self.assertIn(
- b'<HostName>myhost</HostName>',
- crawled_metadata['files']['ovf-env.xml'])
- self.assertEqual(crawled_metadata['metadata'], expected_metadata)
- self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR')
+ b"<HostName>myhost</HostName>",
+ crawled_metadata["files"]["ovf-env.xml"],
+ )
+ self.assertEqual(crawled_metadata["metadata"], expected_metadata)
+ self.assertEqual(crawled_metadata["userdata_raw"], "FOOBAR")
self.assertEqual(dsrc.userdata_raw, None)
self.assertEqual(dsrc.metadata, {})
self.assertEqual(dsrc._metadata_imds, UNSET)
- self.assertFalse(os.path.isfile(
- os.path.join(self.waagent_d, 'ovf-env.xml')))
+ self.assertFalse(
+ os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
+ )
def test_crawl_metadata_raises_invalid_metadata_on_error(self):
"""crawl_metadata raises an exception on invalid ovf-env.xml."""
- data = {'ovfcontent': "BOGUS", 'sys_cfg': {}}
+ data = {"ovfcontent": "BOGUS", "sys_cfg": {}}
dsrc = self._get_ds(data)
- error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:'
- ' syntax error: line 1, column 0')
+ error_msg = (
+ "BrokenAzureDataSource: Invalid ovf-env.xml:"
+ " syntax error: line 1, column 0"
+ )
with self.assertRaises(InvalidMetaDataException) as cm:
dsrc.crawl_metadata()
self.assertEqual(str(cm.exception), error_msg)
@@ -971,20 +1133,19 @@ scbus-1 on xpt0 bus 0
platform_settings={"PreprovisionedVm": "False"}
)
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
dsrc = self._get_ds(data)
dsrc.crawl_metadata()
self.assertEqual(1, self.m_get_metadata_from_imds.call_count)
@mock.patch(
- 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+ "cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
@mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
def test_crawl_metadata_call_imds_twice_with_reprovision(
self, poll_imds_func, m_report_ready, m_write, m_dhcp
):
@@ -993,21 +1154,20 @@ scbus-1 on xpt0 bus 0
platform_settings={"PreprovisionedVm": "True"}
)
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
dsrc = self._get_ds(data)
poll_imds_func.return_value = ovfenv
dsrc.crawl_metadata()
self.assertEqual(2, self.m_get_metadata_from_imds.call_count)
@mock.patch(
- 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+ "cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
@mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
def test_crawl_metadata_on_reprovision_reports_ready(
self, poll_imds_func, m_report_ready, m_write, m_dhcp
):
@@ -1016,37 +1176,36 @@ scbus-1 on xpt0 bus 0
platform_settings={"PreprovisionedVm": "True"}
)
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
dsrc = self._get_ds(data)
poll_imds_func.return_value = ovfenv
dsrc.crawl_metadata()
self.assertEqual(1, m_report_ready.call_count)
@mock.patch(
- 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+ "cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
@mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
@mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
- '_wait_for_all_nics_ready')
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure."
+ "_wait_for_all_nics_ready"
+ )
def test_crawl_metadata_waits_for_nic_on_savable_vms(
self, detect_nics, poll_imds_func, report_ready_func, m_write, m_dhcp
):
"""If reprovisioning, report ready at the end"""
ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVMType": "Savable",
- "PreprovisionedVm": "True"}
+ platform_settings={
+ "PreprovisionedVMType": "Savable",
+ "PreprovisionedVm": "True",
+ }
)
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
dsrc = self._get_ds(data)
poll_imds_func.return_value = ovfenv
dsrc.crawl_metadata()
@@ -1054,18 +1213,27 @@ scbus-1 on xpt0 bus 0
self.assertEqual(1, detect_nics.call_count)
@mock.patch(
- 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+ "cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
@mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
@mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
- '_wait_for_all_nics_ready')
- @mock.patch('os.path.isfile')
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure."
+ "_wait_for_all_nics_ready"
+ )
+ @mock.patch("os.path.isfile")
def test_detect_nics_when_marker_present(
- self, is_file, detect_nics, poll_imds_func, report_ready_func, m_write,
- m_dhcp):
+ self,
+ is_file,
+ detect_nics,
+ poll_imds_func,
+ report_ready_func,
+ m_write,
+ m_dhcp,
+ ):
"""If reprovisioning, wait for nic attach if marker present"""
def is_file_ret(key):
@@ -1074,10 +1242,7 @@ scbus-1 on xpt0 bus 0
is_file.side_effect = is_file_ret
ovfenv = construct_valid_ovf_env()
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
dsrc = self._get_ds(data)
poll_imds_func.return_value = ovfenv
@@ -1085,29 +1250,28 @@ scbus-1 on xpt0 bus 0
self.assertEqual(1, report_ready_func.call_count)
self.assertEqual(1, detect_nics.call_count)
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
- @mock.patch('cloudinit.sources.helpers.netlink.'
- 'wait_for_media_disconnect_connect')
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
@mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.readurl')
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+ )
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.readurl")
def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
- self, m_readurl, m_report_ready,
- m_media_switch, m_write
+ self, m_readurl, m_report_ready, m_media_switch, m_write
):
"""If reprovisioning, report ready using the obtained lease"""
ovfenv = construct_valid_ovf_env(
platform_settings={"PreprovisionedVm": "True"}
)
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
dsrc = self._get_ds(data)
- with mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
+ with mock.patch.object(
+ dsrc.distro.networking, "is_up"
+ ) as m_dsrc_distro_networking_is_up:
# For this mock, net should not be up,
# so that cached ephemeral won't be used.
@@ -1116,16 +1280,21 @@ scbus-1 on xpt0 bus 0
m_dsrc_distro_networking_is_up.return_value = False
lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
- self.m_ephemeral_dhcpv4_with_reporting.return_value \
- .__enter__.return_value = lease
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ self.m_ephemeral_dhcpv4_with_reporting.return_value.__enter__.return_value = ( # noqa: E501
+ lease
+ )
m_media_switch.return_value = None
reprovision_ovfenv = construct_valid_ovf_env()
m_readurl.return_value = url_helper.StringResponse(
- reprovision_ovfenv.encode('utf-8'))
+ reprovision_ovfenv.encode("utf-8")
+ )
dsrc.crawl_metadata()
self.assertEqual(2, m_report_ready.call_count)
@@ -1133,91 +1302,118 @@ scbus-1 on xpt0 bus 0
def test_waagent_d_has_0700_perms(self):
# we expect /var/lib/waagent to be created 0700
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertTrue(os.path.isdir(self.waagent_d))
self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
def test_network_config_set_from_imds(self, m_driver):
"""Datasource.network_config returns IMDS network data."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
expected_network_config = {
- 'ethernets': {
- 'eth0': {'set-name': 'eth0',
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100}}},
- 'version': 2}
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ }
+ },
+ "version": 2,
+ }
dsrc = self._get_ds(data)
dsrc.get_data()
self.assertEqual(expected_network_config, dsrc.network_config)
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
def test_network_config_set_from_imds_route_metric_for_secondary_nic(
- self, m_driver):
+ self, m_driver
+ ):
"""Datasource.network_config adds route-metric to secondary nics."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
expected_network_config = {
- 'ethernets': {
- 'eth0': {'set-name': 'eth0',
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100}},
- 'eth1': {'set-name': 'eth1',
- 'match': {'macaddress': '22:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 200}},
- 'eth2': {'set-name': 'eth2',
- 'match': {'macaddress': '33:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 300}}},
- 'version': 2}
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
imds_data = copy.deepcopy(NETWORK_METADATA)
- imds_data['network']['interface'].append(SECONDARY_INTERFACE)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
third_intf = copy.deepcopy(SECONDARY_INTERFACE)
- third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33')
- third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0'
- third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6'
- imds_data['network']['interface'].append(third_intf)
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ imds_data["network"]["interface"].append(third_intf)
self.m_get_metadata_from_imds.return_value = imds_data
dsrc = self._get_ds(data)
dsrc.get_data()
self.assertEqual(expected_network_config, dsrc.network_config)
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
def test_network_config_set_from_imds_for_secondary_nic_no_ip(
- self, m_driver):
+ self, m_driver
+ ):
"""If an IP address is empty then there should no config for it."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
expected_network_config = {
- 'ethernets': {
- 'eth0': {'set-name': 'eth0',
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100}}},
- 'version': 2}
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ }
+ },
+ "version": 2,
+ }
imds_data = copy.deepcopy(NETWORK_METADATA)
- imds_data['network']['interface'].append(SECONDARY_INTERFACE_NO_IP)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE_NO_IP)
self.m_get_metadata_from_imds.return_value = imds_data
dsrc = self._get_ds(data)
dsrc.get_data()
@@ -1225,91 +1421,110 @@ scbus-1 on xpt0 bus 0
def test_availability_zone_set_from_imds(self):
"""Datasource.availability returns IMDS platformFaultDomain."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
dsrc = self._get_ds(data)
dsrc.get_data()
- self.assertEqual('0', dsrc.availability_zone)
+ self.assertEqual("0", dsrc.availability_zone)
def test_region_set_from_imds(self):
"""Datasource.region returns IMDS region location."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
dsrc = self._get_ds(data)
dsrc.get_data()
- self.assertEqual('eastus2', dsrc.region)
+ self.assertEqual("eastus2", dsrc.region)
def test_sys_cfg_set_never_destroy_ntfs(self):
- sys_cfg = {'datasource': {'Azure': {
- 'never_destroy_ntfs': 'user-supplied-value'}}}
- data = {'ovfcontent': construct_valid_ovf_env(data={}),
- 'sys_cfg': sys_cfg}
+ sys_cfg = {
+ "datasource": {
+ "Azure": {"never_destroy_ntfs": "user-supplied-value"}
+ }
+ }
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data={}),
+ "sys_cfg": sys_cfg,
+ }
dsrc = self._get_ds(data)
ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
- self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
- 'user-supplied-value')
+ self.assertEqual(
+ dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
+ "user-supplied-value",
+ )
def test_username_used(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(dsrc.cfg['system_info']['default_user']['name'],
- "myuser")
+ self.assertEqual(
+ dsrc.cfg["system_info"]["default_user"]["name"], "myuser"
+ )
def test_password_given(self):
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserPassword': "mypass"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": "mypass",
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertIn('default_user', dsrc.cfg['system_info'])
- defuser = dsrc.cfg['system_info']['default_user']
+ self.assertIn("default_user", dsrc.cfg["system_info"])
+ defuser = dsrc.cfg["system_info"]["default_user"]
# default user should be updated username and should not be locked.
- self.assertEqual(defuser['name'], odata['UserName'])
- self.assertFalse(defuser['lock_passwd'])
+ self.assertEqual(defuser["name"], odata["UserName"])
+ self.assertFalse(defuser["lock_passwd"])
# passwd is crypt formated string $id$salt$encrypted
# encrypting plaintext with salt value of everything up to final '$'
# should equal that after the '$'
- pos = defuser['passwd'].rfind("$") + 1
- self.assertEqual(defuser['passwd'],
- crypt.crypt(odata['UserPassword'],
- defuser['passwd'][0:pos]))
+ pos = defuser["passwd"].rfind("$") + 1
+ self.assertEqual(
+ defuser["passwd"],
+ crypt.crypt(odata["UserPassword"], defuser["passwd"][0:pos]),
+ )
# the same hashed value should also be present in cfg['password']
- self.assertEqual(defuser['passwd'], dsrc.cfg['password'])
+ self.assertEqual(defuser["passwd"], dsrc.cfg["password"])
def test_user_not_locked_if_password_redacted(self):
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserPassword': dsaz.DEF_PASSWD_REDACTION}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": dsaz.DEF_PASSWD_REDACTION,
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertIn('default_user', dsrc.cfg['system_info'])
- defuser = dsrc.cfg['system_info']['default_user']
+ self.assertIn("default_user", dsrc.cfg["system_info"])
+ defuser = dsrc.cfg["system_info"]["default_user"]
# default user should be updated username and should not be locked.
- self.assertEqual(defuser['name'], odata['UserName'])
- self.assertIn('lock_passwd', defuser)
- self.assertFalse(defuser['lock_passwd'])
+ self.assertEqual(defuser["name"], odata["UserName"])
+ self.assertIn("lock_passwd", defuser)
+ self.assertFalse(defuser["lock_passwd"])
def test_userdata_plain(self):
mydata = "FOOBAR"
- odata = {'UserData': {'text': mydata, 'encoding': 'plain'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+ odata = {"UserData": {"text": mydata, "encoding": "plain"}}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
@@ -1318,72 +1533,86 @@ scbus-1 on xpt0 bus 0
def test_userdata_found(self):
mydata = "FOOBAR"
- odata = {'UserData': {'text': b64e(mydata), 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+ odata = {"UserData": {"text": b64e(mydata), "encoding": "base64"}}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))
+ self.assertEqual(dsrc.userdata_raw, mydata.encode("utf-8"))
def test_default_ephemeral_configs_ephemeral_exists(self):
# make sure the ephemeral configs are correct if disk present
odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
orig_exists = dsaz.os.path.exists
def changed_exists(path):
- return True if path == dsaz.RESOURCE_DISK_PATH else orig_exists(
- path)
+ return (
+ True if path == dsaz.RESOURCE_DISK_PATH else orig_exists(path)
+ )
- with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
+ with mock.patch(MOCKPATH + "os.path.exists", new=changed_exists):
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
cfg = dsrc.get_config_obj()
- self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
- dsaz.RESOURCE_DISK_PATH)
- assert 'disk_setup' in cfg
- assert 'fs_setup' in cfg
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
+ self.assertEqual(
+ dsrc.device_name_to_device("ephemeral0"),
+ dsaz.RESOURCE_DISK_PATH,
+ )
+ assert "disk_setup" in cfg
+ assert "fs_setup" in cfg
+ self.assertIsInstance(cfg["disk_setup"], dict)
+ self.assertIsInstance(cfg["fs_setup"], list)
def test_default_ephemeral_configs_ephemeral_does_not_exist(self):
# make sure the ephemeral configs are correct if disk not present
odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
orig_exists = dsaz.os.path.exists
def changed_exists(path):
- return False if path == dsaz.RESOURCE_DISK_PATH else orig_exists(
- path)
+ return (
+ False if path == dsaz.RESOURCE_DISK_PATH else orig_exists(path)
+ )
- with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
+ with mock.patch(MOCKPATH + "os.path.exists", new=changed_exists):
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
cfg = dsrc.get_config_obj()
- assert 'disk_setup' not in cfg
- assert 'fs_setup' not in cfg
+ assert "disk_setup" not in cfg
+ assert "fs_setup" not in cfg
def test_provide_disk_aliases(self):
# Make sure that user can affect disk aliases
- dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': b64e(yaml.dump(dscfg)),
- 'encoding': 'base64'}}
- usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'},
- 'ephemeral0': False}}
- userdata = '#cloud-config' + yaml.dump(usercfg) + "\n"
+ dscfg = {"disk_aliases": {"ephemeral0": "/dev/sdc"}}
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "dscfg": {"text": b64e(yaml.dump(dscfg)), "encoding": "base64"},
+ }
+ usercfg = {
+ "disk_setup": {
+ "/dev/sdc": {"something": "..."},
+ "ephemeral0": False,
+ }
+ }
+ userdata = "#cloud-config" + yaml.dump(usercfg) + "\n"
ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata)
- data = {'ovfcontent': ovfcontent, 'sys_cfg': {}}
+ data = {"ovfcontent": ovfcontent, "sys_cfg": {}}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
@@ -1394,92 +1623,95 @@ scbus-1 on xpt0 bus 0
def test_userdata_arrives(self):
userdata = "This is my user-data"
xml = construct_valid_ovf_env(data={}, userdata=userdata)
- data = {'ovfcontent': xml}
+ data = {"ovfcontent": xml}
dsrc = self._get_ds(data)
dsrc.get_data()
- self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw)
+ self.assertEqual(userdata.encode("us-ascii"), dsrc.userdata_raw)
def test_password_redacted_in_ovf(self):
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserPassword': "mypass"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": "mypass",
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
- ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
+ ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml")
# The XML should not be same since the user password is redacted
on_disk_ovf = load_file(ovf_env_path)
- self.xml_notequals(data['ovfcontent'], on_disk_ovf)
+ self.xml_notequals(data["ovfcontent"], on_disk_ovf)
# Make sure that the redacted password on disk is not used by CI
- self.assertNotEqual(dsrc.cfg.get('password'),
- dsaz.DEF_PASSWD_REDACTION)
+ self.assertNotEqual(
+ dsrc.cfg.get("password"), dsaz.DEF_PASSWD_REDACTION
+ )
# Make sure that the password was really encrypted
et = ET.fromstring(on_disk_ovf)
for elem in et.iter():
- if 'UserPassword' in elem.tag:
+ if "UserPassword" in elem.tag:
self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text)
def test_ovf_env_arrives_in_waagent_dir(self):
xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
- dsrc = self._get_ds({'ovfcontent': xml})
+ dsrc = self._get_ds({"ovfcontent": xml})
dsrc.get_data()
# 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir)
# we expect that the ovf-env.xml file is copied there.
- ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
+ ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml")
self.assertTrue(os.path.exists(ovf_env_path))
self.xml_equals(xml, load_file(ovf_env_path))
def test_ovf_can_include_unicode(self):
xml = construct_valid_ovf_env(data={})
- xml = '\ufeff{0}'.format(xml)
- dsrc = self._get_ds({'ovfcontent': xml})
+ xml = "\ufeff{0}".format(xml)
+ dsrc = self._get_ds({"ovfcontent": xml})
dsrc.get_data()
- def test_dsaz_report_ready_returns_true_when_report_succeeds(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ def test_dsaz_report_ready_returns_true_when_report_succeeds(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
self.assertTrue(dsrc._report_ready(lease=mock.MagicMock()))
- def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
self.m_get_metadata_from_fabric.side_effect = Exception
self.assertFalse(dsrc._report_ready(lease=mock.MagicMock()))
def test_dsaz_report_failure_returns_true_when_report_succeeds(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
# mock crawl metadata failure to cause report failure
m_crawl_metadata.side_effect = Exception
self.assertTrue(dsrc._report_failure())
- self.assertEqual(
- 1,
- self.m_report_failure_to_fabric.call_count)
+ self.assertEqual(1, self.m_report_failure_to_fabric.call_count)
def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \
- as m_ephemeral_dhcp_ctx, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
+ self,
+ ):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_ephemeral_dhcp_ctx"
+ ) as m_ephemeral_dhcp_ctx, mock.patch.object(
+ dsrc.distro.networking, "is_up"
+ ) as m_dsrc_distro_networking_is_up:
# mock crawl metadata failure to cause report failure
m_crawl_metadata.side_effect = Exception
# setup mocks to allow using cached ephemeral dhcp lease
m_dsrc_distro_networking_is_up.return_value = True
- test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
- test_lease = {'unknown-245': test_lease_dhcp_option_245}
+ test_lease_dhcp_option_245 = "test_lease_dhcp_option_245"
+ test_lease = {"unknown-245": test_lease_dhcp_option_245}
m_ephemeral_dhcp_ctx.lease = test_lease
# We expect 3 calls to report_failure_to_fabric,
@@ -1490,91 +1722,97 @@ scbus-1 on xpt0 bus 0
# 3. Using fallback lease to report failure to Azure
self.m_report_failure_to_fabric.side_effect = Exception
self.assertFalse(dsrc._report_failure())
- self.assertEqual(
- 3,
- self.m_report_failure_to_fabric.call_count)
+ self.assertEqual(3, self.m_report_failure_to_fabric.call_count)
def test_dsaz_report_failure_description_msg(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
# mock crawl metadata failure to cause report failure
m_crawl_metadata.side_effect = Exception
- test_msg = 'Test report failure description message'
+ test_msg = "Test report failure description message"
self.assertTrue(dsrc._report_failure(description=test_msg))
self.m_report_failure_to_fabric.assert_called_once_with(
- dhcp_opts=mock.ANY, description=test_msg)
+ dhcp_opts=mock.ANY, description=test_msg
+ )
def test_dsaz_report_failure_no_description_msg(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
m_crawl_metadata.side_effect = Exception
self.assertTrue(dsrc._report_failure()) # no description msg
self.m_report_failure_to_fabric.assert_called_once_with(
- dhcp_opts=mock.ANY, description=None)
+ dhcp_opts=mock.ANY, description=None
+ )
def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \
- as m_ephemeral_dhcp_ctx, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_ephemeral_dhcp_ctx"
+ ) as m_ephemeral_dhcp_ctx, mock.patch.object(
+ dsrc.distro.networking, "is_up"
+ ) as m_dsrc_distro_networking_is_up:
# mock crawl metadata failure to cause report failure
m_crawl_metadata.side_effect = Exception
# setup mocks to allow using cached ephemeral dhcp lease
m_dsrc_distro_networking_is_up.return_value = True
- test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
- test_lease = {'unknown-245': test_lease_dhcp_option_245}
+ test_lease_dhcp_option_245 = "test_lease_dhcp_option_245"
+ test_lease = {"unknown-245": test_lease_dhcp_option_245}
m_ephemeral_dhcp_ctx.lease = test_lease
self.assertTrue(dsrc._report_failure())
# ensure called with cached ephemeral dhcp lease option 245
self.m_report_failure_to_fabric.assert_called_once_with(
- description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245)
+ description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245
+ )
# ensure cached ephemeral is cleaned
- self.assertEqual(
- 1,
- m_ephemeral_dhcp_ctx.clean_network.call_count)
+ self.assertEqual(1, m_ephemeral_dhcp_ctx.clean_network.call_count)
def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc.distro.networking, "is_up"
+ ) as m_dsrc_distro_networking_is_up:
# mock crawl metadata failure to cause report failure
m_crawl_metadata.side_effect = Exception
# net is not up and cannot use cached ephemeral dhcp
m_dsrc_distro_networking_is_up.return_value = False
# setup ephemeral dhcp lease discovery mock
- test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
- test_lease = {'unknown-245': test_lease_dhcp_option_245}
- self.m_ephemeral_dhcpv4_with_reporting.return_value \
- .__enter__.return_value = test_lease
+ test_lease_dhcp_option_245 = "test_lease_dhcp_option_245"
+ test_lease = {"unknown-245": test_lease_dhcp_option_245}
+ self.m_ephemeral_dhcpv4_with_reporting.return_value.__enter__.return_value = ( # noqa: E501
+ test_lease
+ )
self.assertTrue(dsrc._report_failure())
# ensure called with the newly discovered
# ephemeral dhcp lease option 245
self.m_report_failure_to_fabric.assert_called_once_with(
- description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245)
+ description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245
+ )
- def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc.distro.networking, "is_up"
+ ) as m_dsrc_distro_networking_is_up:
# mock crawl metadata failure to cause report failure
m_crawl_metadata.side_effect = Exception
@@ -1582,29 +1820,31 @@ scbus-1 on xpt0 bus 0
m_dsrc_distro_networking_is_up.return_value = False
# ephemeral dhcp discovery failure,
# so cannot use a new ephemeral dhcp
- self.m_ephemeral_dhcpv4_with_reporting.return_value \
- .__enter__.side_effect = Exception
+ self.m_ephemeral_dhcpv4_with_reporting.return_value.__enter__.side_effect = ( # noqa: E501
+ Exception
+ )
self.assertTrue(dsrc._report_failure())
# ensure called with fallback lease
self.m_report_failure_to_fabric.assert_called_once_with(
description=mock.ANY,
- fallback_lease_file=dsrc.dhclient_lease_file)
+ fallback_lease_file=dsrc.dhclient_lease_file,
+ )
def test_exception_fetching_fabric_data_doesnt_propagate(self):
"""Errors communicating with fabric should warn, but return True."""
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
self.m_get_metadata_from_fabric.side_effect = Exception
ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
def test_fabric_data_included_in_metadata(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- self.m_get_metadata_from_fabric.return_value = {'test': 'value'}
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ self.m_get_metadata_from_fabric.return_value = {"test": "value"}
ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
- self.assertEqual('value', dsrc.metadata['test'])
+ self.assertEqual("value", dsrc.metadata["test"])
def test_instance_id_case_insensitive(self):
"""Return the previous iid when current is a case-insensitive match."""
@@ -1612,152 +1852,180 @@ scbus-1 on xpt0 bus 0
upper_iid = EXAMPLE_UUID.upper()
# lowercase current UUID
ds = self._get_ds(
- {'ovfcontent': construct_valid_ovf_env()}, instance_id=lower_iid
+ {"ovfcontent": construct_valid_ovf_env()}, instance_id=lower_iid
)
# UPPERCASE previous
write_file(
- os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
- upper_iid)
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ upper_iid,
+ )
ds.get_data()
- self.assertEqual(upper_iid, ds.metadata['instance-id'])
+ self.assertEqual(upper_iid, ds.metadata["instance-id"])
# UPPERCASE current UUID
ds = self._get_ds(
- {'ovfcontent': construct_valid_ovf_env()}, instance_id=upper_iid
+ {"ovfcontent": construct_valid_ovf_env()}, instance_id=upper_iid
)
# lowercase previous
write_file(
- os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
- lower_iid)
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ lower_iid,
+ )
ds.get_data()
- self.assertEqual(lower_iid, ds.metadata['instance-id'])
+ self.assertEqual(lower_iid, ds.metadata["instance-id"])
def test_instance_id_endianness(self):
"""Return the previous iid when dmi uuid is the byteswapped iid."""
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
# byte-swapped previous
write_file(
- os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
- '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8",
+ )
ds.get_data()
self.assertEqual(
- '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id'])
+ "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8", ds.metadata["instance-id"]
+ )
# not byte-swapped previous
write_file(
- os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
- '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ "644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8",
+ )
ds.get_data()
- self.assertEqual(self.instance_id, ds.metadata['instance-id'])
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
def test_instance_id_from_dmidecode_used(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
ds.get_data()
- self.assertEqual(self.instance_id, ds.metadata['instance-id'])
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
def test_instance_id_from_dmidecode_used_for_builtin(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
ds.get_data()
- self.assertEqual(self.instance_id, ds.metadata['instance-id'])
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
- @mock.patch(MOCKPATH + 'util.is_FreeBSD')
- @mock.patch(MOCKPATH + '_check_freebsd_cdrom')
- def test_list_possible_azure_ds(self, m_check_fbsd_cdrom,
- m_is_FreeBSD):
+ @mock.patch(MOCKPATH + "util.is_FreeBSD")
+ @mock.patch(MOCKPATH + "_check_freebsd_cdrom")
+ def test_list_possible_azure_ds(self, m_check_fbsd_cdrom, m_is_FreeBSD):
"""On FreeBSD, possible devs should show /dev/cd0."""
m_is_FreeBSD.return_value = True
m_check_fbsd_cdrom.return_value = True
possible_ds = []
- for src in dsaz.list_possible_azure_ds(
- "seed_dir", "cache_dir"):
+ for src in dsaz.list_possible_azure_ds("seed_dir", "cache_dir"):
possible_ds.append(src)
- self.assertEqual(possible_ds, ["seed_dir",
- dsaz.DEFAULT_PROVISIONING_ISO_DEV,
- "/dev/cd0",
- "cache_dir"])
self.assertEqual(
- [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list)
+ possible_ds,
+ [
+ "seed_dir",
+ dsaz.DEFAULT_PROVISIONING_ISO_DEV,
+ "/dev/cd0",
+ "cache_dir",
+ ],
+ )
+ self.assertEqual(
+ [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list
+ )
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- @mock.patch('cloudinit.net.generate_fallback_config')
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
def test_imds_network_config(self, mock_fallback, m_driver):
"""Network config is generated from IMDS network data when present."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
expected_cfg = {
- 'ethernets': {
- 'eth0': {'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}},
- 'version': 2}
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
self.assertEqual(expected_cfg, dsrc.network_config)
mock_fallback.assert_not_called()
- @mock.patch('cloudinit.net.get_interface_mac')
- @mock.patch('cloudinit.net.get_devicelist')
- @mock.patch('cloudinit.net.device_driver')
- @mock.patch('cloudinit.net.generate_fallback_config')
+ @mock.patch("cloudinit.net.get_interface_mac")
+ @mock.patch("cloudinit.net.get_devicelist")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net.generate_fallback_config")
def test_imds_network_ignored_when_apply_network_config_false(
- self, mock_fallback, mock_dd, mock_devlist, mock_get_mac):
+ self, mock_fallback, mock_dd, mock_devlist, mock_get_mac
+ ):
"""When apply_network_config is False, use fallback instead of IMDS."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": False}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
fallback_config = {
- 'version': 1,
- 'config': [{
- 'type': 'physical', 'name': 'eth0',
- 'mac_address': '00:11:22:33:44:55',
- 'params': {'driver': 'hv_netsvc'},
- 'subnets': [{'type': 'dhcp'}],
- }]
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
}
mock_fallback.return_value = fallback_config
- mock_devlist.return_value = ['eth0']
- mock_dd.return_value = ['hv_netsvc']
- mock_get_mac.return_value = '00:11:22:33:44:55'
+ mock_devlist.return_value = ["eth0"]
+ mock_dd.return_value = ["hv_netsvc"]
+ mock_get_mac.return_value = "00:11:22:33:44:55"
dsrc = self._get_ds(data)
self.assertTrue(dsrc.get_data())
self.assertEqual(dsrc.network_config, fallback_config)
- @mock.patch('cloudinit.net.get_interface_mac')
- @mock.patch('cloudinit.net.get_devicelist')
- @mock.patch('cloudinit.net.device_driver')
- @mock.patch('cloudinit.net.generate_fallback_config', autospec=True)
- def test_fallback_network_config(self, mock_fallback, mock_dd,
- mock_devlist, mock_get_mac):
+ @mock.patch("cloudinit.net.get_interface_mac")
+ @mock.patch("cloudinit.net.get_devicelist")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net.generate_fallback_config", autospec=True)
+ def test_fallback_network_config(
+ self, mock_fallback, mock_dd, mock_devlist, mock_get_mac
+ ):
"""On absent IMDS network data, generate network fallback config."""
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
fallback_config = {
- 'version': 1,
- 'config': [{
- 'type': 'physical', 'name': 'eth0',
- 'mac_address': '00:11:22:33:44:55',
- 'params': {'driver': 'hv_netsvc'},
- 'subnets': [{'type': 'dhcp'}],
- }]
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
}
mock_fallback.return_value = fallback_config
- mock_devlist.return_value = ['eth0']
- mock_dd.return_value = ['hv_netsvc']
- mock_get_mac.return_value = '00:11:22:33:44:55'
+ mock_devlist.return_value = ["eth0"]
+ mock_dd.return_value = ["hv_netsvc"]
+ mock_get_mac.return_value = "00:11:22:33:44:55"
dsrc = self._get_ds(data)
# Represent empty response from network imds
@@ -1768,37 +2036,41 @@ scbus-1 on xpt0 bus 0
netconfig = dsrc.network_config
self.assertEqual(netconfig, fallback_config)
mock_fallback.assert_called_with(
- blacklist_drivers=['mlx4_core', 'mlx5_core'],
- config_driver=True)
+ blacklist_drivers=["mlx4_core", "mlx5_core"], config_driver=True
+ )
- @mock.patch(MOCKPATH + 'net.get_interfaces', autospec=True)
- def test_blacklist_through_distro(
- self, m_net_get_interfaces):
+ @mock.patch(MOCKPATH + "net.get_interfaces", autospec=True)
+ def test_blacklist_through_distro(self, m_net_get_interfaces):
"""Verify Azure DS updates blacklist drivers in the distro's
- networking object."""
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
+ networking object."""
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
dsrc = self._get_ds(data, distro=distro)
dsrc.get_data()
- self.assertEqual(distro.networking.blacklist_drivers,
- dsaz.BLACKLIST_DRIVERS)
+ self.assertEqual(
+ distro.networking.blacklist_drivers, dsaz.BLACKLIST_DRIVERS
+ )
distro.networking.get_interfaces_by_mac()
m_net_get_interfaces.assert_called_with(
- blacklist_drivers=dsaz.BLACKLIST_DRIVERS)
+ blacklist_drivers=dsaz.BLACKLIST_DRIVERS
+ )
@mock.patch(
- 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates')
+ "cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates"
+ )
def test_get_public_ssh_keys_with_imds(self, m_parse_certificates):
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
dsrc.get_data()
@@ -1808,32 +2080,32 @@ scbus-1 on xpt0 bus 0
self.assertEqual(m_parse_certificates.call_count, 0)
def test_key_without_crlf_valid(self):
- test_key = 'ssh-rsa somerandomkeystuff some comment'
+ test_key = "ssh-rsa somerandomkeystuff some comment"
assert True is dsaz._key_is_openssh_formatted(test_key)
def test_key_with_crlf_invalid(self):
- test_key = 'ssh-rsa someran\r\ndomkeystuff some comment'
+ test_key = "ssh-rsa someran\r\ndomkeystuff some comment"
assert False is dsaz._key_is_openssh_formatted(test_key)
def test_key_endswith_crlf_valid(self):
- test_key = 'ssh-rsa somerandomkeystuff some comment\r\n'
+ test_key = "ssh-rsa somerandomkeystuff some comment\r\n"
assert True is dsaz._key_is_openssh_formatted(test_key)
@mock.patch(
- 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates')
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ "cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates"
+ )
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_get_public_ssh_keys_with_no_openssh_format(
- self,
- m_get_metadata_from_imds,
- m_parse_certificates):
+ self, m_get_metadata_from_imds, m_parse_certificates
+ ):
imds_data = copy.deepcopy(NETWORK_METADATA)
- imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format'
+ imds_data["compute"]["publicKeys"][0]["keyData"] = "no-openssh-format"
m_get_metadata_from_imds.return_value = imds_data
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
dsrc.get_data()
@@ -1842,38 +2114,37 @@ scbus-1 on xpt0 bus 0
self.assertEqual(ssh_keys, [])
self.assertEqual(m_parse_certificates.call_count, 0)
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
- def test_get_public_ssh_keys_without_imds(
- self,
- m_get_metadata_from_imds):
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_get_public_ssh_keys_without_imds(self, m_get_metadata_from_imds):
m_get_metadata_from_imds.return_value = dict()
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
- dsaz.get_metadata_from_fabric.return_value = {'public-keys': ['key2']}
+ dsaz.get_metadata_from_fabric.return_value = {"public-keys": ["key2"]}
dsrc.get_data()
dsrc.setup(True)
ssh_keys = dsrc.get_public_ssh_keys()
- self.assertEqual(ssh_keys, ['key2'])
+ self.assertEqual(ssh_keys, ["key2"])
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_imds_api_version_wanted_nonexistent(
- self,
- m_get_metadata_from_imds):
+ self, m_get_metadata_from_imds
+ ):
def get_metadata_from_imds_side_eff(*args, **kwargs):
- if kwargs['api_version'] == dsaz.IMDS_VER_WANT:
+ if kwargs["api_version"] == dsaz.IMDS_VER_WANT:
raise url_helper.UrlError("No IMDS version", code=400)
return NETWORK_METADATA
+
m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
dsrc.get_data()
@@ -1881,86 +2152,86 @@ scbus-1 on xpt0 bus 0
self.assertTrue(dsrc.failed_desired_api_version)
@mock.patch(
- MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA)
+ MOCKPATH + "get_metadata_from_imds", return_value=NETWORK_METADATA
+ )
def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds):
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
dsrc.get_data()
self.assertIsNotNone(dsrc.metadata)
self.assertFalse(dsrc.failed_desired_api_version)
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_hostname_from_imds(self, m_get_metadata_from_imds):
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
}
imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
imds_data_with_os_profile["compute"]["osProfile"] = dict(
adminUsername="username1",
computerName="hostname1",
- disablePasswordAuthentication="true"
+ disablePasswordAuthentication="true",
)
m_get_metadata_from_imds.return_value = imds_data_with_os_profile
dsrc = self._get_ds(data)
dsrc.get_data()
self.assertEqual(dsrc.metadata["local-hostname"], "hostname1")
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_username_from_imds(self, m_get_metadata_from_imds):
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
}
imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
imds_data_with_os_profile["compute"]["osProfile"] = dict(
adminUsername="username1",
computerName="hostname1",
- disablePasswordAuthentication="true"
+ disablePasswordAuthentication="true",
)
m_get_metadata_from_imds.return_value = imds_data_with_os_profile
dsrc = self._get_ds(data)
dsrc.get_data()
self.assertEqual(
- dsrc.cfg["system_info"]["default_user"]["name"],
- "username1"
+ dsrc.cfg["system_info"]["default_user"]["name"], "username1"
)
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_disable_password_from_imds(self, m_get_metadata_from_imds):
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
}
imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
imds_data_with_os_profile["compute"]["osProfile"] = dict(
adminUsername="username1",
computerName="hostname1",
- disablePasswordAuthentication="true"
+ disablePasswordAuthentication="true",
)
m_get_metadata_from_imds.return_value = imds_data_with_os_profile
dsrc = self._get_ds(data)
dsrc.get_data()
self.assertTrue(dsrc.metadata["disable_password"])
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_userdata_from_imds(self, m_get_metadata_from_imds):
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
}
userdata = "userdataImds"
imds_data = copy.deepcopy(NETWORK_METADATA)
@@ -1974,20 +2245,22 @@ scbus-1 on xpt0 bus 0
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8'))
+ self.assertEqual(dsrc.userdata_raw, userdata.encode("utf-8"))
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_userdata_from_imds_with_customdata_from_OVF(
- self, m_get_metadata_from_imds):
+ self, m_get_metadata_from_imds
+ ):
userdataOVF = "userdataOVF"
odata = {
- 'HostName': "myhost", 'UserName': "myuser",
- 'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'}
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserData": {"text": b64e(userdataOVF), "encoding": "base64"},
}
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
}
userdataImds = "userdataImds"
@@ -2002,7 +2275,7 @@ scbus-1 on xpt0 bus 0
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8'))
+ self.assertEqual(dsrc.userdata_raw, userdataOVF.encode("utf-8"))
class TestLoadAzureDsDir(CiTestCase):
@@ -2017,39 +2290,40 @@ class TestLoadAzureDsDir(CiTestCase):
with self.assertRaises(dsaz.NonAzureDataSource) as context_manager:
dsaz.load_azure_ds_dir(self.source_dir)
self.assertEqual(
- 'No ovf-env file found',
- str(context_manager.exception))
+ "No ovf-env file found", str(context_manager.exception)
+ )
def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self):
"""load_azure_ds_dir calls read_azure_ovf to parse the xml."""
- ovf_path = os.path.join(self.source_dir, 'ovf-env.xml')
- with open(ovf_path, 'wb') as stream:
- stream.write(b'invalid xml')
+ ovf_path = os.path.join(self.source_dir, "ovf-env.xml")
+ with open(ovf_path, "wb") as stream:
+ stream.write(b"invalid xml")
with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager:
dsaz.load_azure_ds_dir(self.source_dir)
self.assertEqual(
- 'Invalid ovf-env.xml: syntax error: line 1, column 0',
- str(context_manager.exception))
+ "Invalid ovf-env.xml: syntax error: line 1, column 0",
+ str(context_manager.exception),
+ )
class TestReadAzureOvf(CiTestCase):
-
def test_invalid_xml_raises_non_azure_ds(self):
invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
- self.assertRaises(dsaz.BrokenAzureDataSource,
- dsaz.read_azure_ovf, invalid_xml)
+ self.assertRaises(
+ dsaz.BrokenAzureDataSource, dsaz.read_azure_ovf, invalid_xml
+ )
def test_load_with_pubkeys(self):
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
+ mypklist = [{"fingerprint": "fp1", "path": "path1", "value": ""}]
+ pubkeys = [(x["fingerprint"], x["path"], x["value"]) for x in mypklist]
content = construct_valid_ovf_env(pubkeys=pubkeys)
(_md, _ud, cfg) = dsaz.read_azure_ovf(content)
for mypk in mypklist:
- self.assertIn(mypk, cfg['_pubkeys'])
+ self.assertIn(mypk, cfg["_pubkeys"])
class TestCanDevBeReformatted(CiTestCase):
- warning_file = 'dataloss_warning_readme.txt'
+ warning_file = "dataloss_warning_readme.txt"
def _domock(self, mockpath, sattr=None):
patcher = mock.patch(mockpath)
@@ -2060,42 +2334,42 @@ class TestCanDevBeReformatted(CiTestCase):
bypath = {}
for path, data in devs.items():
bypath[path] = data
- if 'realpath' in data:
- bypath[data['realpath']] = data
- for ppath, pdata in data.get('partitions', {}).items():
+ if "realpath" in data:
+ bypath[data["realpath"]] = data
+ for ppath, pdata in data.get("partitions", {}).items():
bypath[ppath] = pdata
- if 'realpath' in data:
- bypath[pdata['realpath']] = pdata
+ if "realpath" in data:
+ bypath[pdata["realpath"]] = pdata
def realpath(d):
- return bypath[d].get('realpath', d)
+ return bypath[d].get("realpath", d)
def partitions_on_device(devpath):
- parts = bypath.get(devpath, {}).get('partitions', {})
+ parts = bypath.get(devpath, {}).get("partitions", {})
ret = []
for path, data in parts.items():
- ret.append((data.get('num'), realpath(path)))
+ ret.append((data.get("num"), realpath(path)))
# return sorted by partition number
return sorted(ret, key=lambda d: d[0])
def mount_cb(device, callback, mtype, update_env_for_mount):
- self.assertEqual('ntfs', mtype)
- self.assertEqual('C', update_env_for_mount.get('LANG'))
+ self.assertEqual("ntfs", mtype)
+ self.assertEqual("C", update_env_for_mount.get("LANG"))
p = self.tmp_dir()
- for f in bypath.get(device).get('files', []):
+ for f in bypath.get(device).get("files", []):
write_file(os.path.join(p, f), content=f)
return callback(p)
def has_ntfs_fs(device):
- return bypath.get(device, {}).get('fs') == 'ntfs'
+ return bypath.get(device, {}).get("fs") == "ntfs"
p = MOCKPATH
- self._domock(p + "_partitions_on_device", 'm_partitions_on_device')
- self._domock(p + "_has_ntfs_filesystem", 'm_has_ntfs_filesystem')
- self._domock(p + "util.mount_cb", 'm_mount_cb')
- self._domock(p + "os.path.realpath", 'm_realpath')
- self._domock(p + "os.path.exists", 'm_exists')
- self._domock(p + "util.SeLinuxGuard", 'm_selguard')
+ self._domock(p + "_partitions_on_device", "m_partitions_on_device")
+ self._domock(p + "_has_ntfs_filesystem", "m_has_ntfs_filesystem")
+ self._domock(p + "util.mount_cb", "m_mount_cb")
+ self._domock(p + "os.path.realpath", "m_realpath")
+ self._domock(p + "os.path.exists", "m_exists")
+ self._domock(p + "util.SeLinuxGuard", "m_selguard")
self.m_exists.side_effect = lambda p: p in bypath
self.m_realpath.side_effect = realpath
@@ -2107,330 +2381,433 @@ class TestCanDevBeReformatted(CiTestCase):
def test_three_partitions_is_false(self):
"""A disk with 3 partitions can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2},
- '/dev/sda3': {'num': 3},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2},
+ "/dev/sda3": {"num": 3},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
self.assertFalse(value)
self.assertIn("3 or more", msg.lower())
def test_no_partitions_is_false(self):
"""A disk with no partitions can not be formatted."""
- self.patchup({'/dev/sda': {}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
+ self.patchup({"/dev/sda": {}})
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
self.assertFalse(value)
self.assertIn("not partitioned", msg.lower())
def test_two_partitions_not_ntfs_false(self):
"""2 partitions and 2nd not ntfs can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2, "fs": "ext4", "files": []},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
self.assertFalse(value)
self.assertIn("not ntfs", msg.lower())
def test_two_partitions_ntfs_populated_false(self):
"""2 partitions and populated ntfs fs on 2nd can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2, 'fs': 'ntfs',
- 'files': ['secret.txt']},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {
+ "num": 2,
+ "fs": "ntfs",
+ "files": ["secret.txt"],
+ },
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
self.assertFalse(value)
self.assertIn("files on it", msg.lower())
def test_two_partitions_ntfs_empty_is_true(self):
"""2 partitions and empty ntfs fs on 2nd can be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2, "fs": "ntfs", "files": []},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
self.assertTrue(value)
self.assertIn("safe for", msg.lower())
def test_one_partition_not_ntfs_false(self):
"""1 partition witih fs other than ntfs can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'zfs'},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "zfs"},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
self.assertFalse(value)
self.assertIn("not ntfs", msg.lower())
def test_one_partition_ntfs_populated_false(self):
"""1 mountable ntfs partition with many files can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs',
- 'files': ['file1.txt', 'file2.exe']},
- }}})
- with mock.patch.object(dsaz.LOG, 'warning') as warning:
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["file1.txt", "file2.exe"],
+ },
+ }
+ }
+ }
+ )
+ with mock.patch.object(dsaz.LOG, "warning") as warning:
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
wmsg = warning.call_args[0][0]
- self.assertIn("looks like you're using NTFS on the ephemeral disk",
- wmsg)
+ self.assertIn(
+ "looks like you're using NTFS on the ephemeral disk", wmsg
+ )
self.assertFalse(value)
self.assertIn("files on it", msg.lower())
def test_one_partition_ntfs_empty_is_true(self):
"""1 mountable ntfs partition and no files can be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "ntfs", "files": []}
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
self.assertTrue(value)
self.assertIn("safe for", msg.lower())
def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self):
"""1 mountable ntfs partition and only warn file can be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs',
- 'files': ['dataloss_warning_readme.txt']}
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["dataloss_warning_readme.txt"],
+ }
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
self.assertTrue(value)
self.assertIn("safe for", msg.lower())
def test_one_partition_through_realpath_is_true(self):
"""A symlink to a device with 1 ntfs partition can be formatted."""
- epath = '/dev/disk/cloud/azure_resource'
- self.patchup({
- epath: {
- 'realpath': '/dev/sdb',
- 'partitions': {
- epath + '-part1': {
- 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file],
- 'realpath': '/dev/sdb1'}
- }}})
- value, msg = dsaz.can_dev_be_reformatted(epath,
- preserve_ntfs=False)
+ epath = "/dev/disk/cloud/azure_resource"
+ self.patchup(
+ {
+ epath: {
+ "realpath": "/dev/sdb",
+ "partitions": {
+ epath
+ + "-part1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": [self.warning_file],
+ "realpath": "/dev/sdb1",
+ }
+ },
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(epath, preserve_ntfs=False)
self.assertTrue(value)
self.assertIn("safe for", msg.lower())
def test_three_partition_through_realpath_is_false(self):
"""A symlink to a device with 3 partitions can not be formatted."""
- epath = '/dev/disk/cloud/azure_resource'
- self.patchup({
- epath: {
- 'realpath': '/dev/sdb',
- 'partitions': {
- epath + '-part1': {
- 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file],
- 'realpath': '/dev/sdb1'},
- epath + '-part2': {'num': 2, 'fs': 'ext3',
- 'realpath': '/dev/sdb2'},
- epath + '-part3': {'num': 3, 'fs': 'ext',
- 'realpath': '/dev/sdb3'}
- }}})
- value, msg = dsaz.can_dev_be_reformatted(epath,
- preserve_ntfs=False)
+ epath = "/dev/disk/cloud/azure_resource"
+ self.patchup(
+ {
+ epath: {
+ "realpath": "/dev/sdb",
+ "partitions": {
+ epath
+ + "-part1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": [self.warning_file],
+ "realpath": "/dev/sdb1",
+ },
+ epath
+ + "-part2": {
+ "num": 2,
+ "fs": "ext3",
+ "realpath": "/dev/sdb2",
+ },
+ epath
+ + "-part3": {
+ "num": 3,
+ "fs": "ext",
+ "realpath": "/dev/sdb3",
+ },
+ },
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(epath, preserve_ntfs=False)
self.assertFalse(value)
self.assertIn("3 or more", msg.lower())
def test_ntfs_mount_errors_true(self):
"""can_dev_be_reformatted does not fail if NTFS is unknown fstype."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
- }}})
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "ntfs", "files": []}
+ }
+ }
+ }
+ )
error_msgs = [
"Stderr: mount: unknown filesystem type 'ntfs'", # RHEL
- "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES
+ "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'", # SLES
]
for err_msg in error_msgs:
self.m_mount_cb.side_effect = MountFailedError(
- "Failed mounting %s to %s due to: \nUnexpected.\n%s" %
- ('/dev/sda', '/fake-tmp/dir', err_msg))
+ "Failed mounting %s to %s due to: \nUnexpected.\n%s"
+ % ("/dev/sda", "/fake-tmp/dir", err_msg)
+ )
- value, msg = dsaz.can_dev_be_reformatted('/dev/sda',
- preserve_ntfs=False)
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
self.assertTrue(value)
- self.assertIn('cannot mount NTFS, assuming', msg)
+ self.assertIn("cannot mount NTFS, assuming", msg)
def test_never_destroy_ntfs_config_false(self):
"""Normally formattable situation with never_destroy_ntfs set."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs',
- 'files': ['dataloss_warning_readme.txt']}
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=True)
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["dataloss_warning_readme.txt"],
+ }
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=True
+ )
self.assertFalse(value)
- self.assertIn("config says to never destroy NTFS "
- "(datasource.Azure.never_destroy_ntfs)", msg)
+ self.assertIn(
+ "config says to never destroy NTFS "
+ "(datasource.Azure.never_destroy_ntfs)",
+ msg,
+ )
class TestClearCachedData(CiTestCase):
-
def test_clear_cached_attrs_clears_imds(self):
"""All class attributes are reset to defaults, including imds data."""
tmp = self.tmp_dir()
- paths = helpers.Paths(
- {'cloud_dir': tmp, 'run_dir': tmp})
+ paths = helpers.Paths({"cloud_dir": tmp, "run_dir": tmp})
dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths)
clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds]
- dsrc.metadata = 'md'
- dsrc.userdata = 'ud'
- dsrc._metadata_imds = 'imds'
+ dsrc.metadata = "md"
+ dsrc.userdata = "ud"
+ dsrc._metadata_imds = "imds"
dsrc._dirty_cache = True
dsrc.clear_cached_attrs()
self.assertEqual(
- [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds],
- clean_values)
+ [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], clean_values
+ )
class TestAzureNetExists(CiTestCase):
-
def test_azure_net_must_exist_for_legacy_objpkl(self):
"""DataSourceAzureNet must exist for old obj.pkl files
- that reference it."""
+ that reference it."""
self.assertTrue(hasattr(dsaz, "DataSourceAzureNet"))
class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
-
def test_read_azure_ovf_with_true_flag(self):
"""The read_azure_ovf method should set the PreprovisionedVM
- cfg flag if the proper setting is present."""
+ cfg flag if the proper setting is present."""
content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"})
+ platform_settings={"PreprovisionedVm": "True"}
+ )
ret = dsaz.read_azure_ovf(content)
cfg = ret[2]
- self.assertTrue(cfg['PreprovisionedVm'])
+ self.assertTrue(cfg["PreprovisionedVm"])
def test_read_azure_ovf_with_false_flag(self):
"""The read_azure_ovf method should set the PreprovisionedVM
- cfg flag to false if the proper setting is false."""
+ cfg flag to false if the proper setting is false."""
content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "False"})
+ platform_settings={"PreprovisionedVm": "False"}
+ )
ret = dsaz.read_azure_ovf(content)
cfg = ret[2]
- self.assertFalse(cfg['PreprovisionedVm'])
+ self.assertFalse(cfg["PreprovisionedVm"])
def test_read_azure_ovf_without_flag(self):
"""The read_azure_ovf method should not set the
- PreprovisionedVM cfg flag."""
+ PreprovisionedVM cfg flag."""
content = construct_valid_ovf_env()
ret = dsaz.read_azure_ovf(content)
cfg = ret[2]
- self.assertFalse(cfg['PreprovisionedVm'])
+ self.assertFalse(cfg["PreprovisionedVm"])
self.assertEqual(None, cfg["PreprovisionedVMType"])
def test_read_azure_ovf_with_running_type(self):
"""The read_azure_ovf method should set PreprovisionedVMType
- cfg flag to Running."""
+ cfg flag to Running."""
content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVMType": "Running",
- "PreprovisionedVm": "True"})
+ platform_settings={
+ "PreprovisionedVMType": "Running",
+ "PreprovisionedVm": "True",
+ }
+ )
ret = dsaz.read_azure_ovf(content)
cfg = ret[2]
- self.assertTrue(cfg['PreprovisionedVm'])
- self.assertEqual("Running", cfg['PreprovisionedVMType'])
+ self.assertTrue(cfg["PreprovisionedVm"])
+ self.assertEqual("Running", cfg["PreprovisionedVMType"])
def test_read_azure_ovf_with_savable_type(self):
"""The read_azure_ovf method should set PreprovisionedVMType
- cfg flag to Savable."""
+ cfg flag to Savable."""
content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVMType": "Savable",
- "PreprovisionedVm": "True"})
+ platform_settings={
+ "PreprovisionedVMType": "Savable",
+ "PreprovisionedVm": "True",
+ }
+ )
ret = dsaz.read_azure_ovf(content)
cfg = ret[2]
- self.assertTrue(cfg['PreprovisionedVm'])
- self.assertEqual("Savable", cfg['PreprovisionedVMType'])
+ self.assertTrue(cfg["PreprovisionedVm"])
+ self.assertEqual("Savable", cfg["PreprovisionedVMType"])
-@mock.patch('os.path.isfile')
+@mock.patch("os.path.isfile")
class TestPreprovisioningShouldReprovision(CiTestCase):
-
def setUp(self):
super(TestPreprovisioningShouldReprovision, self).setUp()
tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
- self.paths = helpers.Paths({'cloud_dir': tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+ self.waagent_d = self.tmp_path("/var/lib/waagent", tmp)
+ self.paths = helpers.Paths({"cloud_dir": tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
- @mock.patch(MOCKPATH + 'util.write_file')
+ @mock.patch(MOCKPATH + "util.write_file")
def test__should_reprovision_with_true_cfg(self, isfile, write_f):
"""The _should_reprovision method should return true with config
- flag present."""
+ flag present."""
isfile.return_value = False
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- self.assertTrue(dsa._should_reprovision(
- (None, None, {'PreprovisionedVm': True}, None)))
+ self.assertTrue(
+ dsa._should_reprovision(
+ (None, None, {"PreprovisionedVm": True}, None)
+ )
+ )
def test__should_reprovision_with_file_existing(self, isfile):
"""The _should_reprovision method should return True if the sentinal
- exists."""
+ exists."""
isfile.return_value = True
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- self.assertTrue(dsa._should_reprovision(
- (None, None, {'preprovisionedvm': False}, None)))
+ self.assertTrue(
+ dsa._should_reprovision(
+ (None, None, {"preprovisionedvm": False}, None)
+ )
+ )
def test__should_reprovision_returns_false(self, isfile):
"""The _should_reprovision method should return False
- if config and sentinal are not present."""
+ if config and sentinal are not present."""
isfile.return_value = False
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
- @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
+ @mock.patch(MOCKPATH + "util.write_file", autospec=True)
def test__should_reprovision_uses_imds_md(self, write_file, isfile):
"""The _should_reprovision method should be able to
retrieve the preprovisioning VM type from imds metadata"""
isfile.return_value = False
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- self.assertTrue(dsa._should_reprovision(
- (None, None, {}, None),
- {'extended': {'compute': {'ppsType': 'Running'}}}))
- self.assertFalse(dsa._should_reprovision(
- (None, None, {}, None),
- {}))
- self.assertFalse(dsa._should_reprovision(
- (None, None, {}, None),
- {'extended': {'compute': {"hasCustomData": False}}}))
-
- @mock.patch(MOCKPATH + 'DataSourceAzure._poll_imds')
+ self.assertTrue(
+ dsa._should_reprovision(
+ (None, None, {}, None),
+ {"extended": {"compute": {"ppsType": "Running"}}},
+ )
+ )
+ self.assertFalse(dsa._should_reprovision((None, None, {}, None), {}))
+ self.assertFalse(
+ dsa._should_reprovision(
+ (None, None, {}, None),
+ {"extended": {"compute": {"hasCustomData": False}}},
+ )
+ )
+
+ @mock.patch(MOCKPATH + "DataSourceAzure._poll_imds")
def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
"""_reprovision will poll IMDS."""
isfile.return_value = False
hostname = "myhost"
username = "myuser"
- odata = {'HostName': hostname, 'UserName': username}
+ odata = {"HostName": hostname, "UserName": username}
_poll_imds.return_value = construct_valid_ovf_env(data=odata)
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
dsa._reprovision()
@@ -2438,18 +2815,19 @@ class TestPreprovisioningShouldReprovision(CiTestCase):
class TestPreprovisioningHotAttachNics(CiTestCase):
-
def setUp(self):
super(TestPreprovisioningHotAttachNics, self).setUp()
self.tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
-
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_detach_event',
- autospec=True)
- @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
+ self.waagent_d = self.tmp_path("/var/lib/waagent", self.tmp)
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_nic_detach_event",
+ autospec=True,
+ )
+ @mock.patch(MOCKPATH + "util.write_file", autospec=True)
def test_nic_detach_writes_marker(self, m_writefile, m_detach):
"""When we detect that a nic gets detached, we write a marker for it"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
@@ -2458,16 +2836,17 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
m_detach.assert_called_with(nl_sock)
self.assertEqual(1, m_detach.call_count)
m_writefile.assert_called_with(
- dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY)
+ dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY
+ )
- @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+ @mock.patch(MOCKPATH + "util.write_file", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4WithReporting")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
def test_detect_nic_attach_reports_ready_and_waits_for_detach(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if,
- m_writefile):
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_writefile
+ ):
"""Report ready first and then wait for nic detach"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
dsa._wait_for_all_nics_ready()
@@ -2476,16 +2855,18 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(1, m_detach.call_count)
self.assertEqual(1, m_writefile.call_count)
self.assertEqual(1, m_dhcp.call_count)
- m_writefile.assert_called_with(dsaz.REPORTED_READY_MARKER_FILE,
- mock.ANY)
-
- @mock.patch('os.path.isfile')
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+ m_writefile.assert_called_with(
+ dsaz.REPORTED_READY_MARKER_FILE, mock.ANY
+ )
+
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4WithReporting")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
def test_detect_nic_attach_skips_report_ready_when_marker_present(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile
+ ):
"""Skip reporting ready if we already have a marker file."""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
@@ -2499,13 +2880,14 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(0, m_dhcp.call_count)
self.assertEqual(1, m_detach.call_count)
- @mock.patch('os.path.isfile')
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4WithReporting")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
def test_detect_nic_attach_skips_nic_detach_when_marker_present(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile
+ ):
"""Skip wait for nic detach if it already happened."""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
@@ -2516,22 +2898,32 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(0, m_dhcp.call_count)
self.assertEqual(0, m_detach.call_count)
- @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up', autospec=True)
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- @mock.patch('os.path.isfile')
+ @mock.patch(MOCKPATH + "DataSourceAzure.wait_for_link_up", autospec=True)
+ @mock.patch("cloudinit.sources.helpers.netlink.wait_for_nic_attach_event")
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ @mock.patch("os.path.isfile")
def test_wait_for_nic_attach_if_no_fallback_interface(
- self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
- m_attach, m_link_up):
+ self,
+ m_isfile,
+ m_detach,
+ m_dhcpv4,
+ m_imds,
+ m_fallback_if,
+ m_attach,
+ m_link_up,
+ ):
"""Wait for nic attach if we do not have a fallback interface"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
m_isfile.return_value = True
m_attach.return_value = "eth0"
@@ -2550,22 +2942,32 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(1, m_link_up.call_count)
m_link_up.assert_called_with(mock.ANY, "eth0")
- @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up')
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- @mock.patch('os.path.isfile')
+ @mock.patch(MOCKPATH + "DataSourceAzure.wait_for_link_up")
+ @mock.patch("cloudinit.sources.helpers.netlink.wait_for_nic_attach_event")
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ @mock.patch(MOCKPATH + "DataSourceAzure.get_imds_data_with_api_fallback")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ @mock.patch("os.path.isfile")
def test_wait_for_nic_attach_multinic_attach(
- self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
- m_attach, m_link_up):
+ self,
+ m_isfile,
+ m_detach,
+ m_dhcpv4,
+ m_imds,
+ m_fallback_if,
+ m_attach,
+ m_link_up,
+ ):
"""Wait for nic attach if we do not have a fallback interface"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
m_attach_call_count = 0
def nic_attach_ret(nl_sock, nics_found):
@@ -2580,15 +2982,15 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
# Simulate two NICs by adding the same one twice.
md = {
"interface": [
- IMDS_NETWORK_METADATA['interface'][0],
- IMDS_NETWORK_METADATA['interface'][0]
+ IMDS_NETWORK_METADATA["interface"][0],
+ IMDS_NETWORK_METADATA["interface"][0],
]
}
def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
if ifname == "eth0":
return md
- raise requests.Timeout('Fake connection timeout')
+ raise requests.Timeout("Fake connection timeout")
m_isfile.return_value = True
m_attach.side_effect = nic_attach_ret
@@ -2607,25 +3009,29 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(1, m_imds.call_count)
self.assertEqual(2, m_link_up.call_count)
- @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+ @mock.patch(MOCKPATH + "DataSourceAzure.get_imds_data_with_api_fallback")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
def test_check_if_nic_is_primary_retries_on_failures(
- self, m_dhcpv4, m_imds):
+ self, m_dhcpv4, m_imds
+ ):
"""Retry polling for network metadata on all failures except timeout
and network unreachable errors"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
eth0Retries = []
eth1Retries = []
# Simulate two NICs by adding the same one twice.
md = {
"interface": [
- IMDS_NETWORK_METADATA['interface'][0],
- IMDS_NETWORK_METADATA['interface'][0]
+ IMDS_NETWORK_METADATA["interface"][0],
+ IMDS_NETWORK_METADATA["interface"][0],
]
}
@@ -2645,9 +3051,9 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
# We are expected to retry for a certain period for both
# timeout errors and network unreachable errors.
if _ < 5:
- cause = requests.Timeout('Fake connection timeout')
+ cause = requests.Timeout("Fake connection timeout")
else:
- cause = requests.ConnectionError('Network Unreachable')
+ cause = requests.ConnectionError("Network Unreachable")
error = url_helper.UrlError(cause=cause)
eth1Retries.append(exc_cb("Connection timeout", error))
# Should stop retrying after 10 retries
@@ -2679,31 +3085,31 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertTrue(eth1Retries[i])
self.assertFalse(eth1Retries[10])
- @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
- def test_wait_for_link_up_returns_if_already_up(
- self, m_is_link_up):
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
+ def test_wait_for_link_up_returns_if_already_up(self, m_is_link_up):
"""Waiting for link to be up should return immediately if the link is
- already up."""
+ already up."""
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
m_is_link_up.return_value = True
dsa.wait_for_link_up("eth0")
self.assertEqual(1, m_is_link_up.call_count)
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
- @mock.patch(MOCKPATH + 'util.write_file')
- @mock.patch('cloudinit.net.read_sys_net')
- @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+ @mock.patch(MOCKPATH + "net.is_up", autospec=True)
+ @mock.patch(MOCKPATH + "util.write_file")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
def test_wait_for_link_up_checks_link_after_sleep(
- self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up):
+ self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up
+ ):
"""Waiting for link to be up should return immediately if the link is
- already up."""
+ already up."""
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
m_try_set_link_up.return_value = False
@@ -2718,21 +3124,22 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
m_is_up.side_effect = is_up_mock
- with mock.patch('cloudinit.sources.DataSourceAzure.sleep'):
+ with mock.patch("cloudinit.sources.DataSourceAzure.sleep"):
dsa.wait_for_link_up("eth0")
self.assertEqual(2, m_try_set_link_up.call_count)
self.assertEqual(2, m_is_up.call_count)
- @mock.patch(MOCKPATH + 'util.write_file')
- @mock.patch('cloudinit.net.read_sys_net')
- @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+ @mock.patch(MOCKPATH + "util.write_file")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
def test_wait_for_link_up_writes_to_device_file(
- self, m_is_link_up, m_read_sys_net, m_writefile):
+ self, m_is_link_up, m_read_sys_net, m_writefile
+ ):
"""Waiting for link to be up should return immediately if the link is
- already up."""
+ already up."""
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
callcount = 0
@@ -2751,48 +3158,59 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(1, m_read_sys_net.call_count)
self.assertEqual(2, m_writefile.call_count)
- @mock.patch('cloudinit.sources.helpers.netlink.'
- 'create_bound_netlink_socket')
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.create_bound_netlink_socket"
+ )
def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
"""Waiting for all nics should raise exception if netlink socket
- creation fails."""
+ creation fails."""
m_socket.side_effect = netlink.NetlinkCreateSocketError
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
- self.assertRaises(netlink.NetlinkCreateSocketError,
- dsa._wait_for_all_nics_ready)
+ self.assertRaises(
+ netlink.NetlinkCreateSocketError, dsa._wait_for_all_nics_ready
+ )
# dsa._wait_for_all_nics_ready()
-@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
-@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-@mock.patch('cloudinit.sources.helpers.netlink.'
- 'wait_for_media_disconnect_connect')
-@mock.patch('requests.Session.request')
-@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
+@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+)
+@mock.patch("requests.Session.request")
+@mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
class TestPreprovisioningPollIMDS(CiTestCase):
-
def setUp(self):
super(TestPreprovisioningPollIMDS, self).setUp()
self.tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- @mock.patch('time.sleep', mock.MagicMock())
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, m_report_ready,
- m_request, m_media_switch, m_dhcp,
- m_net):
+ self.waagent_d = self.tmp_path("/var/lib/waagent", self.tmp)
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ @mock.patch("time.sleep", mock.MagicMock())
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ def test_poll_imds_re_dhcp_on_timeout(
+ self,
+ m_dhcpv4,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ ):
"""The poll_imds will retry DHCP on IMDS timeout."""
- report_file = self.tmp_path('report_marker', self.tmp)
+ report_file = self.tmp_path("report_marker", self.tmp)
lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
m_dhcp.return_value = [lease]
m_media_switch.return_value = None
dhcp_ctx = mock.MagicMock(lease=lease)
@@ -2804,7 +3222,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
def fake_timeout_once(**kwargs):
self.tries += 1
if self.tries == 1:
- raise requests.Timeout('Fake connection timeout')
+ raise requests.Timeout("Fake connection timeout")
elif self.tries in (2, 3):
response = requests.Response()
response.status_code = 404 if self.tries == 2 else 410
@@ -2817,41 +3235,54 @@ class TestPreprovisioningPollIMDS(CiTestCase):
m_request.side_effect = fake_timeout_once
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
dsa._poll_imds()
self.assertEqual(m_report_ready.call_count, 1)
m_report_ready.assert_called_with(lease=lease)
- self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls')
- self.assertEqual(4, self.tries, 'Expected 4 total reads from IMDS')
+ self.assertEqual(3, m_dhcpv4.call_count, "Expected 3 DHCP calls")
+ self.assertEqual(4, self.tries, "Expected 4 total reads from IMDS")
- @mock.patch('os.path.isfile')
+ @mock.patch("os.path.isfile")
def test_poll_imds_skips_dhcp_if_ctx_present(
- self, m_isfile, report_ready_func, fake_resp, m_media_switch,
- m_dhcp, m_net):
+ self,
+ m_isfile,
+ report_ready_func,
+ fake_resp,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ ):
"""The poll_imds function should reuse the dhcp ctx if it is already
- present. This happens when we wait for nic to be hot-attached before
- polling for reprovisiondata. Note that if this ctx is set when
- _poll_imds is called, then it is not expected to be waiting for
- media_disconnect_connect either."""
- report_file = self.tmp_path('report_marker', self.tmp)
+ present. This happens when we wait for nic to be hot-attached before
+ polling for reprovisiondata. Note that if this ctx is set when
+ _poll_imds is called, then it is not expected to be waiting for
+ media_disconnect_connect either."""
+ report_file = self.tmp_path("report_marker", self.tmp)
m_isfile.return_value = True
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
dsa._ephemeral_dhcp_ctx = "Dummy dhcp ctx"
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
dsa._poll_imds()
self.assertEqual(0, m_dhcp.call_count)
self.assertEqual(0, m_media_switch.call_count)
- @mock.patch('os.path.isfile')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
def test_poll_imds_does_dhcp_on_retries_if_ctx_present(
- self, m_ephemeral_dhcpv4, m_isfile, report_ready_func, m_request,
- m_media_switch, m_dhcp, m_net):
+ self,
+ m_ephemeral_dhcpv4,
+ m_isfile,
+ report_ready_func,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ ):
"""The poll_imds function should reuse the dhcp ctx if it is already
- present. This happens when we wait for nic to be hot-attached before
- polling for reprovisiondata. Note that if this ctx is set when
- _poll_imds is called, then it is not expected to be waiting for
- media_disconnect_connect either."""
+ present. This happens when we wait for nic to be hot-attached before
+ polling for reprovisiondata. Note that if this ctx is set when
+ _poll_imds is called, then it is not expected to be waiting for
+ media_disconnect_connect either."""
tries = 0
@@ -2859,15 +3290,16 @@ class TestPreprovisioningPollIMDS(CiTestCase):
nonlocal tries
tries += 1
if tries == 1:
- raise requests.Timeout('Fake connection timeout')
+ raise requests.Timeout("Fake connection timeout")
return mock.MagicMock(status_code=200, text="good", content="good")
m_request.side_effect = fake_timeout_once
- report_file = self.tmp_path('report_marker', self.tmp)
+ report_file = self.tmp_path("report_marker", self.tmp)
m_isfile.return_value = True
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file),\
- mock.patch.object(dsa, '_ephemeral_dhcp_ctx') as m_dhcp_ctx:
+ with mock.patch(
+ MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file
+ ), mock.patch.object(dsa, "_ephemeral_dhcp_ctx") as m_dhcp_ctx:
m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
dsa._poll_imds()
@@ -2877,145 +3309,189 @@ class TestPreprovisioningPollIMDS(CiTestCase):
self.assertEqual(2, m_request.call_count)
def test_does_not_poll_imds_report_ready_when_marker_file_exists(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+ self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net
+ ):
"""poll_imds should not call report ready when the reported ready
marker file exists"""
- report_file = self.tmp_path('report_marker', self.tmp)
- write_file(report_file, content='dont run report_ready :)')
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
+ report_file = self.tmp_path("report_marker", self.tmp)
+ write_file(report_file, content="dont run report_ready :)")
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
m_media_switch.return_value = None
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
dsa._poll_imds()
self.assertEqual(m_report_ready.call_count, 0)
def test_poll_imds_report_ready_success_writes_marker_file(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+ self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net
+ ):
"""poll_imds should write the report_ready marker file if
reporting ready succeeds"""
- report_file = self.tmp_path('report_marker', self.tmp)
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
m_media_switch.return_value = None
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
dsa._poll_imds()
self.assertEqual(m_report_ready.call_count, 1)
self.assertTrue(os.path.exists(report_file))
def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+ self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net
+ ):
"""poll_imds should write the report_ready marker file if
reporting ready succeeds"""
- report_file = self.tmp_path('report_marker', self.tmp)
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
m_media_switch.return_value = None
m_report_ready.return_value = False
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- self.assertRaises(
- InvalidMetaDataException,
- dsa._poll_imds)
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ self.assertRaises(InvalidMetaDataException, dsa._poll_imds)
self.assertEqual(m_report_ready.call_count, 1)
self.assertFalse(os.path.exists(report_file))
-@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready', mock.MagicMock())
-@mock.patch(MOCKPATH + 'subp.subp', mock.MagicMock())
-@mock.patch(MOCKPATH + 'util.write_file', mock.MagicMock())
-@mock.patch('cloudinit.sources.helpers.netlink.'
- 'wait_for_media_disconnect_connect')
-@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network', autospec=True)
-@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-@mock.patch('requests.Session.request')
+@mock.patch(MOCKPATH + "DataSourceAzure._report_ready", mock.MagicMock())
+@mock.patch(MOCKPATH + "subp.subp", mock.MagicMock())
+@mock.patch(MOCKPATH + "util.write_file", mock.MagicMock())
+@mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+)
+@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network", autospec=True)
+@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch("requests.Session.request")
class TestAzureDataSourcePreprovisioning(CiTestCase):
-
def setUp(self):
super(TestAzureDataSourcePreprovisioning, self).setUp()
tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
- self.paths = helpers.Paths({'cloud_dir': tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+ self.waagent_d = self.tmp_path("/var/lib/waagent", tmp)
+ self.paths = helpers.Paths({"cloud_dir": tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
- def test_poll_imds_returns_ovf_env(self, m_request,
- m_dhcp, m_net,
- m_media_switch):
+ def test_poll_imds_returns_ovf_env(
+ self, m_request, m_dhcp, m_net, m_media_switch
+ ):
"""The _poll_imds method should return the ovf_env.xml."""
m_media_switch.return_value = None
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}]
- url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01'
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ }
+ ]
+ url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
host = "169.254.169.254"
full_url = url.format(host)
- m_request.return_value = mock.MagicMock(status_code=200, text="ovf",
- content="ovf")
+ m_request.return_value = mock.MagicMock(
+ status_code=200, text="ovf", content="ovf"
+ )
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
self.assertTrue(len(dsa._poll_imds()) > 0)
- self.assertEqual(m_request.call_args_list,
- [mock.call(allow_redirects=True,
- headers={'Metadata': 'true',
- 'User-Agent':
- 'Cloud-Init/%s' % vs()
- }, method='GET',
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- url=full_url)])
+ self.assertEqual(
+ m_request.call_args_list,
+ [
+ mock.call(
+ allow_redirects=True,
+ headers={
+ "Metadata": "true",
+ "User-Agent": "Cloud-Init/%s" % vs(),
+ },
+ method="GET",
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ url=full_url,
+ )
+ ],
+ )
self.assertEqual(m_dhcp.call_count, 2)
m_net.assert_any_call(
- broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
- prefix_or_mask='255.255.255.0', router='192.168.2.1',
- static_routes=None)
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
self.assertEqual(m_net.call_count, 2)
- def test__reprovision_calls__poll_imds(self, m_request,
- m_dhcp, m_net,
- m_media_switch):
+ def test__reprovision_calls__poll_imds(
+ self, m_request, m_dhcp, m_net, m_media_switch
+ ):
"""The _reprovision method should call poll IMDS."""
m_media_switch.return_value = None
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
- url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01'
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
host = "169.254.169.254"
full_url = url.format(host)
hostname = "myhost"
username = "myuser"
- odata = {'HostName': hostname, 'UserName': username}
+ odata = {"HostName": hostname, "UserName": username}
content = construct_valid_ovf_env(data=odata)
- m_request.return_value = mock.MagicMock(status_code=200, text=content,
- content=content)
+ m_request.return_value = mock.MagicMock(
+ status_code=200, text=content, content=content
+ )
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
md, _ud, cfg, _d = dsa._reprovision()
- self.assertEqual(md['local-hostname'], hostname)
- self.assertEqual(cfg['system_info']['default_user']['name'], username)
+ self.assertEqual(md["local-hostname"], hostname)
+ self.assertEqual(cfg["system_info"]["default_user"]["name"], username)
self.assertIn(
mock.call(
allow_redirects=True,
headers={
- 'Metadata': 'true',
- 'User-Agent': 'Cloud-Init/%s' % vs()
+ "Metadata": "true",
+ "User-Agent": "Cloud-Init/%s" % vs(),
},
- method='GET',
+ method="GET",
timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- url=full_url
+ url=full_url,
),
- m_request.call_args_list)
+ m_request.call_args_list,
+ )
self.assertEqual(m_dhcp.call_count, 2)
m_net.assert_any_call(
- broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
- prefix_or_mask='255.255.255.0', router='192.168.2.1',
- static_routes=None)
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
self.assertEqual(m_net.call_count, 2)
@@ -3029,36 +3505,42 @@ class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
def test_remove_network_scripts_removes_both_files_and_directories(self):
"""Any files or directories in paths are removed when present."""
- file1 = self.tmp_path('file1', dir=self.tmp)
- subdir = self.tmp_path('sub1', dir=self.tmp)
- subfile = self.tmp_path('leaf1', dir=subdir)
- write_file(file1, 'file1content')
- write_file(subfile, 'leafcontent')
+ file1 = self.tmp_path("file1", dir=self.tmp)
+ subdir = self.tmp_path("sub1", dir=self.tmp)
+ subfile = self.tmp_path("leaf1", dir=subdir)
+ write_file(file1, "file1content")
+ write_file(subfile, "leafcontent")
dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1])
for path in (file1, subdir, subfile):
- self.assertFalse(os.path.exists(path),
- 'Found unremoved: %s' % path)
+ self.assertFalse(
+ os.path.exists(path), "Found unremoved: %s" % path
+ )
expected_logs = [
- 'INFO: Removing Ubuntu extended network scripts because cloud-init'
- ' updates Azure network configuration on the following events:'
+ "INFO: Removing Ubuntu extended network scripts because cloud-init"
+ " updates Azure network configuration on the following events:"
" ['boot', 'boot-legacy']",
- 'Recursively deleting %s' % subdir,
- 'Attempting to remove %s' % file1]
+ "Recursively deleting %s" % subdir,
+ "Attempting to remove %s" % file1,
+ ]
for log in expected_logs:
self.assertIn(log, self.logs.getvalue())
def test_remove_network_scripts_only_attempts_removal_if_path_exists(self):
"""Any files or directories absent are skipped without error."""
- dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[
- self.tmp_path('nodirhere/', dir=self.tmp),
- self.tmp_path('notfilehere', dir=self.tmp)])
- self.assertNotIn('/not/a', self.logs.getvalue()) # No delete logs
-
- @mock.patch(MOCKPATH + 'os.path.exists')
- def test_remove_network_scripts_default_removes_stock_scripts(self,
- m_exists):
+ dsaz.maybe_remove_ubuntu_network_config_scripts(
+ paths=[
+ self.tmp_path("nodirhere/", dir=self.tmp),
+ self.tmp_path("notfilehere", dir=self.tmp),
+ ]
+ )
+ self.assertNotIn("/not/a", self.logs.getvalue()) # No delete logs
+
+ @mock.patch(MOCKPATH + "os.path.exists")
+ def test_remove_network_scripts_default_removes_stock_scripts(
+ self, m_exists
+ ):
"""Azure's stock ubuntu image scripts and artifacts are removed."""
# Report path absent on all to avoid delete operation
m_exists.return_value = False
@@ -3070,24 +3552,25 @@ class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
class TestWBIsPlatformViable(CiTestCase):
"""White box tests for _is_platform_viable."""
+
with_logs = True
- @mock.patch(MOCKPATH + 'dmi.read_dmi_data')
+ @mock.patch(MOCKPATH + "dmi.read_dmi_data")
def test_true_on_non_azure_chassis(self, m_read_dmi_data):
"""Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG."""
m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG
- self.assertTrue(dsaz._is_platform_viable('doesnotmatter'))
+ self.assertTrue(dsaz._is_platform_viable("doesnotmatter"))
- @mock.patch(MOCKPATH + 'os.path.exists')
- @mock.patch(MOCKPATH + 'dmi.read_dmi_data')
+ @mock.patch(MOCKPATH + "os.path.exists")
+ @mock.patch(MOCKPATH + "dmi.read_dmi_data")
def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist):
"""Return True if ovf-env.xml exists in known seed dirs."""
# Non-matching Azure chassis-asset-tag
- m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'
+ m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + "X"
m_exist.return_value = True
- self.assertTrue(dsaz._is_platform_viable('/some/seed/dir'))
- m_exist.called_once_with('/other/seed/dir')
+ self.assertTrue(dsaz._is_platform_viable("/some/seed/dir"))
+ m_exist.called_once_with("/other/seed/dir")
def test_false_on_no_matching_azure_criteria(self):
"""Report non-azure on unmatched asset tag, ovf-env absent and no dev.
@@ -3096,17 +3579,25 @@ class TestWBIsPlatformViable(CiTestCase):
AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs
and no devices have a label starting with prefix 'rd_rdfe_'.
"""
- self.assertFalse(wrap_and_call(
- MOCKPATH,
- {'os.path.exists': False,
- # Non-matching Azure chassis-asset-tag
- 'dmi.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X',
- 'subp.which': None},
- dsaz._is_platform_viable, 'doesnotmatter'))
+ self.assertFalse(
+ wrap_and_call(
+ MOCKPATH,
+ {
+ "os.path.exists": False,
+ # Non-matching Azure chassis-asset-tag
+ "dmi.read_dmi_data": dsaz.AZURE_CHASSIS_ASSET_TAG + "X",
+ "subp.which": None,
+ },
+ dsaz._is_platform_viable,
+ "doesnotmatter",
+ )
+ )
self.assertIn(
"DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
- dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'),
- self.logs.getvalue())
+ dsaz.AZURE_CHASSIS_ASSET_TAG + "X"
+ ),
+ self.logs.getvalue(),
+ )
class TestRandomSeed(CiTestCase):
@@ -3120,13 +3611,14 @@ class TestRandomSeed(CiTestCase):
path = resourceLocation("azure/non_unicode_random_string")
result = dsaz._get_random_seed(path)
- obj = {'seed': result}
+ obj = {"seed": result}
try:
serialized = json_dumps(obj)
deserialized = load_json(serialized)
except UnicodeDecodeError:
self.fail("Non-serializable random seed returned")
- self.assertEqual(deserialized['seed'], result)
+ self.assertEqual(deserialized["seed"], result)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py
index 24c582c2..6f7f2890 100644
--- a/tests/unittests/sources/test_azure_helper.py
+++ b/tests/unittests/sources/test_azure_helper.py
@@ -9,10 +9,9 @@ from xml.etree import ElementTree
from xml.sax.saxutils import escape, unescape
from cloudinit.sources.helpers import azure as azure_helper
-from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
-
-from cloudinit.util import load_file
from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim
+from cloudinit.util import load_file
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
GOAL_STATE_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
@@ -52,7 +51,7 @@ GOAL_STATE_TEMPLATE = """\
</GoalState>
"""
-HEALTH_REPORT_XML_TEMPLATE = '''\
+HEALTH_REPORT_XML_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
@@ -70,14 +69,16 @@ HEALTH_REPORT_XML_TEMPLATE = '''\
</RoleInstanceList>
</Container>
</Health>
-'''
+"""
-HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent('''\
+HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent(
+ """\
<Details>
<SubStatus>{health_substatus}</SubStatus>
<Description>{health_description}</Description>
</Details>
- ''')
+ """
+)
HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
@@ -87,24 +88,27 @@ class SentinelException(Exception):
class TestFindEndpoint(CiTestCase):
-
def setUp(self):
super(TestFindEndpoint, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.load_file = patches.enter_context(
- mock.patch.object(azure_helper.util, 'load_file'))
+ mock.patch.object(azure_helper.util, "load_file")
+ )
self.dhcp_options = patches.enter_context(
- mock.patch.object(wa_shim, '_load_dhclient_json'))
+ mock.patch.object(wa_shim, "_load_dhclient_json")
+ )
self.networkd_leases = patches.enter_context(
- mock.patch.object(wa_shim, '_networkd_get_value_from_leases'))
+ mock.patch.object(wa_shim, "_networkd_get_value_from_leases")
+ )
self.networkd_leases.return_value = None
def test_missing_file(self):
- """wa_shim find_endpoint uses default endpoint if leasefile not found
+ """wa_shim find_endpoint uses default endpoint if
+ leasefile not found
"""
self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
@@ -112,80 +116,93 @@ class TestFindEndpoint(CiTestCase):
"""wa_shim find_endpoint uses default endpoint if leasefile is found
but does not contain DHCP Option 245 (whose value is the endpoint)
"""
- self.load_file.return_value = ''
- self.dhcp_options.return_value = {'eth0': {'key': 'value'}}
+ self.load_file.return_value = ""
+ self.dhcp_options.return_value = {"eth0": {"key": "value"}}
self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
@staticmethod
def _build_lease_content(encoded_address):
endpoint = azure_helper._get_dhcp_endpoint_option_name()
- return '\n'.join([
- 'lease {',
- ' interface "eth0";',
- ' option {0} {1};'.format(endpoint, encoded_address),
- '}'])
+ return "\n".join(
+ [
+ "lease {",
+ ' interface "eth0";',
+ " option {0} {1};".format(endpoint, encoded_address),
+ "}",
+ ]
+ )
def test_from_dhcp_client(self):
self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}
- self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None))
+ self.assertEqual("5.4.3.2", wa_shim.find_endpoint(None))
def test_latest_lease_used(self):
- encoded_addresses = ['5:4:3:2', '4:3:2:1']
- file_content = '\n'.join([self._build_lease_content(encoded_address)
- for encoded_address in encoded_addresses])
+ encoded_addresses = ["5:4:3:2", "4:3:2:1"]
+ file_content = "\n".join(
+ [
+ self._build_lease_content(encoded_address)
+ for encoded_address in encoded_addresses
+ ]
+ )
self.load_file.return_value = file_content
- self.assertEqual(encoded_addresses[-1].replace(':', '.'),
- wa_shim.find_endpoint("foobar"))
+ self.assertEqual(
+ encoded_addresses[-1].replace(":", "."),
+ wa_shim.find_endpoint("foobar"),
+ )
class TestExtractIpAddressFromLeaseValue(CiTestCase):
-
def test_hex_string(self):
- ip_address, encoded_address = '98.76.54.32', '62:4c:36:20'
+ ip_address, encoded_address = "98.76.54.32", "62:4c:36:20"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_hex_string_with_single_character_part(self):
- ip_address, encoded_address = '4.3.2.1', '4:3:2:1'
+ ip_address, encoded_address = "4.3.2.1", "4:3:2:1"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_packed_string(self):
- ip_address, encoded_address = '98.76.54.32', 'bL6 '
+ ip_address, encoded_address = "98.76.54.32", "bL6 "
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_packed_string_with_escaped_quote(self):
- ip_address, encoded_address = '100.72.34.108', 'dH\\"l'
+ ip_address, encoded_address = "100.72.34.108", 'dH\\"l'
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_packed_string_containing_a_colon(self):
- ip_address, encoded_address = '100.72.58.108', 'dH:l'
+ ip_address, encoded_address = "100.72.58.108", "dH:l"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
class TestGoalStateParsing(CiTestCase):
default_parameters = {
- 'incarnation': 1,
- 'container_id': 'MyContainerId',
- 'instance_id': 'MyInstanceId',
- 'certificates_url': 'MyCertificatesUrl',
+ "incarnation": 1,
+ "container_id": "MyContainerId",
+ "instance_id": "MyInstanceId",
+ "certificates_url": "MyCertificatesUrl",
}
def _get_formatted_goal_state_xml_string(self, **kwargs):
parameters = self.default_parameters.copy()
parameters.update(kwargs)
xml = GOAL_STATE_TEMPLATE.format(**parameters)
- if parameters['certificates_url'] is None:
+ if parameters["certificates_url"] is None:
new_xml_lines = []
for line in xml.splitlines():
- if 'Certificates' in line:
+ if "Certificates" in line:
continue
new_xml_lines.append(line)
- xml = '\n'.join(new_xml_lines)
+ xml = "\n".join(new_xml_lines)
return xml
def _get_goal_state(self, m_azure_endpoint_client=None, **kwargs):
@@ -195,17 +212,17 @@ class TestGoalStateParsing(CiTestCase):
return azure_helper.GoalState(xml, m_azure_endpoint_client)
def test_incarnation_parsed_correctly(self):
- incarnation = '123'
+ incarnation = "123"
goal_state = self._get_goal_state(incarnation=incarnation)
self.assertEqual(incarnation, goal_state.incarnation)
def test_container_id_parsed_correctly(self):
- container_id = 'TestContainerId'
+ container_id = "TestContainerId"
goal_state = self._get_goal_state(container_id=container_id)
self.assertEqual(container_id, goal_state.container_id)
def test_instance_id_parsed_correctly(self):
- instance_id = 'TestInstanceId'
+ instance_id = "TestInstanceId"
goal_state = self._get_goal_state(instance_id=instance_id)
self.assertEqual(instance_id, goal_state.instance_id)
@@ -214,67 +231,72 @@ class TestGoalStateParsing(CiTestCase):
previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8"
self.assertTrue(
- azure_helper.is_byte_swapped(previous_iid, current_iid))
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
def test_instance_id_no_byte_swap_same_instance_id(self):
previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
self.assertFalse(
- azure_helper.is_byte_swapped(previous_iid, current_iid))
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
def test_instance_id_no_byte_swap_diff_instance_id(self):
previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
self.assertFalse(
- azure_helper.is_byte_swapped(previous_iid, current_iid))
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
def test_certificates_xml_parsed_and_fetched_correctly(self):
m_azure_endpoint_client = mock.MagicMock()
- certificates_url = 'TestCertificatesUrl'
+ certificates_url = "TestCertificatesUrl"
goal_state = self._get_goal_state(
m_azure_endpoint_client=m_azure_endpoint_client,
- certificates_url=certificates_url)
+ certificates_url=certificates_url,
+ )
certificates_xml = goal_state.certificates_xml
self.assertEqual(1, m_azure_endpoint_client.get.call_count)
self.assertEqual(
- certificates_url,
- m_azure_endpoint_client.get.call_args[0][0])
+ certificates_url, m_azure_endpoint_client.get.call_args[0][0]
+ )
self.assertTrue(
- m_azure_endpoint_client.get.call_args[1].get(
- 'secure', False))
+ m_azure_endpoint_client.get.call_args[1].get("secure", False)
+ )
self.assertEqual(
- m_azure_endpoint_client.get.return_value.contents,
- certificates_xml)
+ m_azure_endpoint_client.get.return_value.contents, certificates_xml
+ )
def test_missing_certificates_skips_http_get(self):
m_azure_endpoint_client = mock.MagicMock()
goal_state = self._get_goal_state(
m_azure_endpoint_client=m_azure_endpoint_client,
- certificates_url=None)
+ certificates_url=None,
+ )
certificates_xml = goal_state.certificates_xml
self.assertEqual(0, m_azure_endpoint_client.get.call_count)
self.assertIsNone(certificates_xml)
def test_invalid_goal_state_xml_raises_parse_error(self):
- xml = 'random non-xml data'
+ xml = "random non-xml data"
with self.assertRaises(ElementTree.ParseError):
azure_helper.GoalState(xml, mock.MagicMock())
def test_missing_container_id_in_goal_state_xml_raises_exc(self):
xml = self._get_formatted_goal_state_xml_string()
- xml = re.sub('<ContainerId>.*</ContainerId>', '', xml)
+ xml = re.sub("<ContainerId>.*</ContainerId>", "", xml)
with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
azure_helper.GoalState(xml, mock.MagicMock())
def test_missing_instance_id_in_goal_state_xml_raises_exc(self):
xml = self._get_formatted_goal_state_xml_string()
- xml = re.sub('<InstanceId>.*</InstanceId>', '', xml)
+ xml = re.sub("<InstanceId>.*</InstanceId>", "", xml)
with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
azure_helper.GoalState(xml, mock.MagicMock())
def test_missing_incarnation_in_goal_state_xml_raises_exc(self):
xml = self._get_formatted_goal_state_xml_string()
- xml = re.sub('<Incarnation>.*</Incarnation>', '', xml)
+ xml = re.sub("<Incarnation>.*</Incarnation>", "", xml)
with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
azure_helper.GoalState(xml, mock.MagicMock())
@@ -282,8 +304,8 @@ class TestGoalStateParsing(CiTestCase):
class TestAzureEndpointHttpClient(CiTestCase):
regular_headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
+ "x-ms-agent-name": "WALinuxAgent",
+ "x-ms-version": "2012-11-30",
}
def setUp(self):
@@ -291,43 +313,48 @@ class TestAzureEndpointHttpClient(CiTestCase):
patches = ExitStack()
self.addCleanup(patches.close)
self.m_http_with_retries = patches.enter_context(
- mock.patch.object(azure_helper, 'http_with_retries'))
+ mock.patch.object(azure_helper, "http_with_retries")
+ )
def test_non_secure_get(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- url = 'MyTestUrl'
+ url = "MyTestUrl"
response = client.get(url, secure=False)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, headers=self.regular_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_non_secure_get_raises_exception(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- url = 'MyTestUrl'
+ url = "MyTestUrl"
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.get, url, secure=False)
self.assertEqual(1, self.m_http_with_retries.call_count)
def test_secure_get(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
m_certificate = mock.MagicMock()
expected_headers = self.regular_headers.copy()
- expected_headers.update({
- "x-ms-cipher-name": "DES_EDE3_CBC",
- "x-ms-guest-agent-public-x509-cert": m_certificate,
- })
+ expected_headers.update(
+ {
+ "x-ms-cipher-name": "DES_EDE3_CBC",
+ "x-ms-guest-agent-public-x509-cert": m_certificate,
+ }
+ )
client = azure_helper.AzureEndpointHttpClient(m_certificate)
response = client.get(url, secure=True)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, headers=expected_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_secure_get_raises_exception(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.get, url, secure=True)
@@ -335,44 +362,50 @@ class TestAzureEndpointHttpClient(CiTestCase):
def test_post(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
response = client.post(url, data=m_data)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, data=m_data, headers=self.regular_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_post_raises_exception(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.post, url, data=m_data)
self.assertEqual(1, self.m_http_with_retries.call_count)
def test_post_with_extra_headers(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- extra_headers = {'test': 'header'}
+ extra_headers = {"test": "header"}
client.post(url, extra_headers=extra_headers)
expected_headers = self.regular_headers.copy()
expected_headers.update(extra_headers)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(
mock.call(url, data=mock.ANY, headers=expected_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_post_with_sleep_with_extra_headers_raises_exception(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
- extra_headers = {'test': 'header'}
+ url = "MyTestUrl"
+ extra_headers = {"test": "header"}
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(
- SentinelException, client.post,
- url, data=m_data, extra_headers=extra_headers)
+ SentinelException,
+ client.post,
+ url,
+ data=m_data,
+ extra_headers=extra_headers,
+ )
self.assertEqual(1, self.m_http_with_retries.call_count)
@@ -392,128 +425,139 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
self.m_readurl = patches.enter_context(
mock.patch.object(
- azure_helper.url_helper, 'readurl', mock.MagicMock()))
+ azure_helper.url_helper, "readurl", mock.MagicMock()
+ )
+ )
self.m_sleep = patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', autospec=True))
+ mock.patch.object(azure_helper.time, "sleep", autospec=True)
+ )
def test_http_with_retries(self):
- self.m_readurl.return_value = 'TestResp'
+ self.m_readurl.return_value = "TestResp"
self.assertEqual(
- azure_helper.http_with_retries('testurl'),
- self.m_readurl.return_value)
+ azure_helper.http_with_retries("testurl"),
+ self.m_readurl.return_value,
+ )
self.assertEqual(self.m_readurl.call_count, 1)
- def test_http_with_retries_propagates_readurl_exc_and_logs_exc(
- self):
+ def test_http_with_retries_propagates_readurl_exc_and_logs_exc(self):
self.m_readurl.side_effect = SentinelException
self.assertRaises(
- SentinelException, azure_helper.http_with_retries, 'testurl')
+ SentinelException, azure_helper.http_with_retries, "testurl"
+ )
self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts)
self.assertIsNotNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
def test_http_with_retries_delayed_success_due_to_temporary_readurl_exc(
- self):
- self.m_readurl.side_effect = \
- [SentinelException] * self.periodic_logging_attempts + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
-
- response = azure_helper.http_with_retries('testurl')
- self.assertEqual(
- response,
- self.m_readurl.return_value)
+ self,
+ ):
+ self.m_readurl.side_effect = [
+ SentinelException
+ ] * self.periodic_logging_attempts + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
+
+ response = azure_helper.http_with_retries("testurl")
+ self.assertEqual(response, self.m_readurl.return_value)
self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts + 1)
+ self.m_readurl.call_count, self.periodic_logging_attempts + 1
+ )
# Ensure that cloud-init did sleep between each failed request
self.assertEqual(
- self.m_sleep.call_count,
- self.periodic_logging_attempts)
+ self.m_sleep.call_count, self.periodic_logging_attempts
+ )
self.m_sleep.assert_called_with(self.sleep_duration_between_retries)
def test_http_with_retries_long_delay_logs_periodic_failure_msg(self):
- self.m_readurl.side_effect = \
- [SentinelException] * self.periodic_logging_attempts + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
+ self.m_readurl.side_effect = [
+ SentinelException
+ ] * self.periodic_logging_attempts + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
- azure_helper.http_with_retries('testurl')
+ azure_helper.http_with_retries("testurl")
self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts + 1)
+ self.m_readurl.call_count, self.periodic_logging_attempts + 1
+ )
self.assertIsNotNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNotNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
def test_http_with_retries_short_delay_does_not_log_periodic_failure_msg(
- self):
- self.m_readurl.side_effect = \
- [SentinelException] * \
- (self.periodic_logging_attempts - 1) + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
-
- azure_helper.http_with_retries('testurl')
+ self,
+ ):
+ self.m_readurl.side_effect = [SentinelException] * (
+ self.periodic_logging_attempts - 1
+ ) + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
+
+ azure_helper.http_with_retries("testurl")
self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts)
+ self.m_readurl.call_count, self.periodic_logging_attempts
+ )
self.assertIsNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNotNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
def test_http_with_retries_calls_url_helper_readurl_with_args_kwargs(self):
testurl = mock.MagicMock()
kwargs = {
- 'headers': mock.MagicMock(),
- 'data': mock.MagicMock(),
+ "headers": mock.MagicMock(),
+ "data": mock.MagicMock(),
# timeout kwarg should not be modified or deleted if present
- 'timeout': mock.MagicMock()
+ "timeout": mock.MagicMock(),
}
azure_helper.http_with_retries(testurl, **kwargs)
self.m_readurl.assert_called_once_with(testurl, **kwargs)
def test_http_with_retries_adds_timeout_kwarg_if_not_present(self):
testurl = mock.MagicMock()
- kwargs = {
- 'headers': mock.MagicMock(),
- 'data': mock.MagicMock()
- }
+ kwargs = {"headers": mock.MagicMock(), "data": mock.MagicMock()}
expected_kwargs = copy.deepcopy(kwargs)
- expected_kwargs['timeout'] = self.default_readurl_timeout
+ expected_kwargs["timeout"] = self.default_readurl_timeout
azure_helper.http_with_retries(testurl, **kwargs)
self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
- def test_http_with_retries_deletes_retries_kwargs_passed_in(
- self):
+ def test_http_with_retries_deletes_retries_kwargs_passed_in(self):
"""http_with_retries already implements retry logic,
so url_helper.readurl should not have retries.
http_with_retries should delete kwargs that
@@ -521,44 +565,44 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
"""
testurl = mock.MagicMock()
kwargs = {
- 'headers': mock.MagicMock(),
- 'data': mock.MagicMock(),
- 'timeout': mock.MagicMock(),
- 'retries': mock.MagicMock(),
- 'infinite': mock.MagicMock()
+ "headers": mock.MagicMock(),
+ "data": mock.MagicMock(),
+ "timeout": mock.MagicMock(),
+ "retries": mock.MagicMock(),
+ "infinite": mock.MagicMock(),
}
expected_kwargs = copy.deepcopy(kwargs)
- expected_kwargs.pop('retries', None)
- expected_kwargs.pop('infinite', None)
+ expected_kwargs.pop("retries", None)
+ expected_kwargs.pop("infinite", None)
azure_helper.http_with_retries(testurl, **kwargs)
self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
self.assertIn(
- 'retries kwarg passed in for communication with Azure endpoint.',
- self.logs.getvalue())
+ "retries kwarg passed in for communication with Azure endpoint.",
+ self.logs.getvalue(),
+ )
self.assertIn(
- 'infinite kwarg passed in for communication with Azure endpoint.',
- self.logs.getvalue())
+ "infinite kwarg passed in for communication with Azure endpoint.",
+ self.logs.getvalue(),
+ )
class TestOpenSSLManager(CiTestCase):
-
def setUp(self):
super(TestOpenSSLManager, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.subp = patches.enter_context(
- mock.patch.object(azure_helper.subp, 'subp'))
+ mock.patch.object(azure_helper.subp, "subp")
+ )
try:
- self.open = patches.enter_context(
- mock.patch('__builtin__.open'))
+ self.open = patches.enter_context(mock.patch("__builtin__.open"))
except ImportError:
- self.open = patches.enter_context(
- mock.patch('builtins.open'))
+ self.open = patches.enter_context(mock.patch("builtins.open"))
- @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
- @mock.patch.object(azure_helper.temp_utils, 'mkdtemp')
+ @mock.patch.object(azure_helper, "cd", mock.MagicMock())
+ @mock.patch.object(azure_helper.temp_utils, "mkdtemp")
def test_openssl_manager_creates_a_tmpdir(self, mkdtemp):
manager = azure_helper.OpenSSLManager()
self.assertEqual(mkdtemp.return_value, manager.tmpdir)
@@ -567,16 +611,16 @@ class TestOpenSSLManager(CiTestCase):
subp_directory = {}
def capture_directory(*args, **kwargs):
- subp_directory['path'] = os.getcwd()
+ subp_directory["path"] = os.getcwd()
self.subp.side_effect = capture_directory
manager = azure_helper.OpenSSLManager()
- self.assertEqual(manager.tmpdir, subp_directory['path'])
+ self.assertEqual(manager.tmpdir, subp_directory["path"])
manager.clean_up()
- @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
- @mock.patch.object(azure_helper.temp_utils, 'mkdtemp', mock.MagicMock())
- @mock.patch.object(azure_helper.util, 'del_dir')
+ @mock.patch.object(azure_helper, "cd", mock.MagicMock())
+ @mock.patch.object(azure_helper.temp_utils, "mkdtemp", mock.MagicMock())
+ @mock.patch.object(azure_helper.util, "del_dir")
def test_clean_up(self, del_dir):
manager = azure_helper.OpenSSLManager()
manager.clean_up()
@@ -584,43 +628,42 @@ class TestOpenSSLManager(CiTestCase):
class TestOpenSSLManagerActions(CiTestCase):
-
def setUp(self):
super(TestOpenSSLManagerActions, self).setUp()
self.allowed_subp = True
def _data_file(self, name):
- path = 'tests/data/azure'
+ path = "tests/data/azure"
return os.path.join(path, name)
@unittest.skip("todo move to cloud_test")
def test_pubkey_extract(self):
- cert = load_file(self._data_file('pubkey_extract_cert'))
- good_key = load_file(self._data_file('pubkey_extract_ssh_key'))
+ cert = load_file(self._data_file("pubkey_extract_cert"))
+ good_key = load_file(self._data_file("pubkey_extract_ssh_key"))
sslmgr = azure_helper.OpenSSLManager()
key = sslmgr._get_ssh_key_from_cert(cert)
self.assertEqual(good_key, key)
- good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
+ good_fingerprint = "073E19D14D1C799224C6A0FD8DDAB6A8BF27D473"
fingerprint = sslmgr._get_fingerprint_from_cert(cert)
self.assertEqual(good_fingerprint, fingerprint)
@unittest.skip("todo move to cloud_test")
- @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml')
+ @mock.patch.object(azure_helper.OpenSSLManager, "_decrypt_certs_from_xml")
def test_parse_certificates(self, mock_decrypt_certs):
"""Azure control plane puts private keys as well as certificates
- into the Certificates XML object. Make sure only the public keys
- from certs are extracted and that fingerprints are converted to
- the form specified in the ovf-env.xml file.
+ into the Certificates XML object. Make sure only the public keys
+ from certs are extracted and that fingerprints are converted to
+ the form specified in the ovf-env.xml file.
"""
- cert_contents = load_file(self._data_file('parse_certificates_pem'))
- fingerprints = load_file(self._data_file(
- 'parse_certificates_fingerprints')
+ cert_contents = load_file(self._data_file("parse_certificates_pem"))
+ fingerprints = load_file(
+ self._data_file("parse_certificates_fingerprints")
).splitlines()
mock_decrypt_certs.return_value = cert_contents
sslmgr = azure_helper.OpenSSLManager()
- keys_by_fp = sslmgr.parse_certificates('')
+ keys_by_fp = sslmgr.parse_certificates("")
for fp in keys_by_fp.keys():
self.assertIn(fp, fingerprints)
for fp in fingerprints:
@@ -632,21 +675,23 @@ class TestGoalStateHealthReporter(CiTestCase):
maxDiff = None
default_parameters = {
- 'incarnation': 1634,
- 'container_id': 'MyContainerId',
- 'instance_id': 'MyInstanceId'
+ "incarnation": 1634,
+ "container_id": "MyContainerId",
+ "instance_id": "MyInstanceId",
}
- test_azure_endpoint = 'TestEndpoint'
- test_health_report_url = 'http://{0}/machine?comp=health'.format(
- test_azure_endpoint)
- test_default_headers = {'Content-Type': 'text/xml; charset=utf-8'}
+ test_azure_endpoint = "TestEndpoint"
+ test_health_report_url = "http://{0}/machine?comp=health".format(
+ test_azure_endpoint
+ )
+ test_default_headers = {"Content-Type": "text/xml; charset=utf-8"}
- provisioning_success_status = 'Ready'
- provisioning_not_ready_status = 'NotReady'
- provisioning_failure_substatus = 'ProvisioningFailed'
+ provisioning_success_status = "Ready"
+ provisioning_not_ready_status = "NotReady"
+ provisioning_failure_substatus = "ProvisioningFailed"
provisioning_failure_err_description = (
- 'Test error message containing provisioning failure details')
+ "Test error message containing provisioning failure details"
+ )
def setUp(self):
super(TestGoalStateHealthReporter, self).setUp()
@@ -654,22 +699,28 @@ class TestGoalStateHealthReporter(CiTestCase):
self.addCleanup(patches.close)
patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+ mock.patch.object(azure_helper.time, "sleep", mock.MagicMock())
+ )
self.read_file_or_url = patches.enter_context(
- mock.patch.object(azure_helper.url_helper, 'read_file_or_url'))
+ mock.patch.object(azure_helper.url_helper, "read_file_or_url")
+ )
self.post = patches.enter_context(
- mock.patch.object(azure_helper.AzureEndpointHttpClient,
- 'post'))
+ mock.patch.object(azure_helper.AzureEndpointHttpClient, "post")
+ )
self.GoalState = patches.enter_context(
- mock.patch.object(azure_helper, 'GoalState'))
- self.GoalState.return_value.container_id = \
- self.default_parameters['container_id']
- self.GoalState.return_value.instance_id = \
- self.default_parameters['instance_id']
- self.GoalState.return_value.incarnation = \
- self.default_parameters['incarnation']
+ mock.patch.object(azure_helper, "GoalState")
+ )
+ self.GoalState.return_value.container_id = self.default_parameters[
+ "container_id"
+ ]
+ self.GoalState.return_value.instance_id = self.default_parameters[
+ "instance_id"
+ ]
+ self.GoalState.return_value.incarnation = self.default_parameters[
+ "incarnation"
+ ]
def _text_from_xpath_in_xroot(self, xroot, xpath):
element = xroot.find(xpath)
@@ -685,34 +736,41 @@ class TestGoalStateHealthReporter(CiTestCase):
def _get_report_ready_health_document(self):
return self._get_formatted_health_report_xml_string(
- incarnation=escape(str(self.default_parameters['incarnation'])),
- container_id=escape(self.default_parameters['container_id']),
- instance_id=escape(self.default_parameters['instance_id']),
+ incarnation=escape(str(self.default_parameters["incarnation"])),
+ container_id=escape(self.default_parameters["container_id"]),
+ instance_id=escape(self.default_parameters["instance_id"]),
health_status=escape(self.provisioning_success_status),
- health_detail_subsection='')
+ health_detail_subsection="",
+ )
def _get_report_failure_health_document(self):
- health_detail_subsection = \
+ health_detail_subsection = (
self._get_formatted_health_detail_subsection_xml_string(
health_substatus=escape(self.provisioning_failure_substatus),
health_description=escape(
- self.provisioning_failure_err_description))
+ self.provisioning_failure_err_description
+ ),
+ )
+ )
return self._get_formatted_health_report_xml_string(
- incarnation=escape(str(self.default_parameters['incarnation'])),
- container_id=escape(self.default_parameters['container_id']),
- instance_id=escape(self.default_parameters['instance_id']),
+ incarnation=escape(str(self.default_parameters["incarnation"])),
+ container_id=escape(self.default_parameters["container_id"]),
+ instance_id=escape(self.default_parameters["instance_id"]),
health_status=escape(self.provisioning_not_ready_status),
- health_detail_subsection=health_detail_subsection)
+ health_detail_subsection=health_detail_subsection,
+ )
def test_send_ready_signal_sends_post_request(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter,
- 'build_report') as m_build_report:
+ azure_helper.GoalStateHealthReporter, "build_report"
+ ) as m_build_report:
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
- client, self.test_azure_endpoint)
+ client,
+ self.test_azure_endpoint,
+ )
reporter.send_ready_signal()
self.assertEqual(1, self.post.call_count)
@@ -720,73 +778,94 @@ class TestGoalStateHealthReporter(CiTestCase):
mock.call(
self.test_health_report_url,
data=m_build_report.return_value,
- extra_headers=self.test_default_headers),
- self.post.call_args)
+ extra_headers=self.test_default_headers,
+ ),
+ self.post.call_args,
+ )
def test_send_failure_signal_sends_post_request(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter,
- 'build_report') as m_build_report:
+ azure_helper.GoalStateHealthReporter, "build_report"
+ ) as m_build_report:
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
- client, self.test_azure_endpoint)
+ client,
+ self.test_azure_endpoint,
+ )
reporter.send_failure_signal(
- description=self.provisioning_failure_err_description)
+ description=self.provisioning_failure_err_description
+ )
self.assertEqual(1, self.post.call_count)
self.assertEqual(
mock.call(
self.test_health_report_url,
data=m_build_report.return_value,
- extra_headers=self.test_default_headers),
- self.post.call_args)
+ extra_headers=self.test_default_headers,
+ ),
+ self.post.call_args,
+ )
def test_build_report_for_ready_signal_health_document(self):
health_document = self._get_report_ready_health_document()
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
- status=self.provisioning_success_status)
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_success_status,
+ )
self.assertEqual(health_document, generated_health_document)
generated_xroot = ElementTree.fromstring(generated_health_document)
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './GoalStateIncarnation'),
- str(self.default_parameters['incarnation']))
+ generated_xroot, "./GoalStateIncarnation"
+ ),
+ str(self.default_parameters["incarnation"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './Container/ContainerId'),
- str(self.default_parameters['container_id']))
+ generated_xroot, "./Container/ContainerId"
+ ),
+ str(self.default_parameters["container_id"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot,
- './Container/RoleInstanceList/Role/InstanceId'),
- str(self.default_parameters['instance_id']))
+ generated_xroot, "./Container/RoleInstanceList/Role/InstanceId"
+ ),
+ str(self.default_parameters["instance_id"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/State'),
- escape(self.provisioning_success_status))
+ "./Container/RoleInstanceList/Role/Health/State",
+ ),
+ escape(self.provisioning_success_status),
+ )
self.assertIsNone(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details'))
+ "./Container/RoleInstanceList/Role/Health/Details",
+ )
+ )
self.assertIsNone(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/SubStatus'))
+ "./Container/RoleInstanceList/Role/Health/Details/SubStatus",
+ )
+ )
self.assertIsNone(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/Description')
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
)
def test_build_report_for_failure_signal_health_document(self):
@@ -794,120 +873,143 @@ class TestGoalStateHealthReporter(CiTestCase):
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=self.provisioning_failure_err_description)
+ description=self.provisioning_failure_err_description,
+ )
self.assertEqual(health_document, generated_health_document)
generated_xroot = ElementTree.fromstring(generated_health_document)
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './GoalStateIncarnation'),
- str(self.default_parameters['incarnation']))
+ generated_xroot, "./GoalStateIncarnation"
+ ),
+ str(self.default_parameters["incarnation"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './Container/ContainerId'),
- self.default_parameters['container_id'])
+ generated_xroot, "./Container/ContainerId"
+ ),
+ self.default_parameters["container_id"],
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot,
- './Container/RoleInstanceList/Role/InstanceId'),
- self.default_parameters['instance_id'])
+ generated_xroot, "./Container/RoleInstanceList/Role/InstanceId"
+ ),
+ self.default_parameters["instance_id"],
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/State'),
- escape(self.provisioning_not_ready_status))
+ "./Container/RoleInstanceList/Role/Health/State",
+ ),
+ escape(self.provisioning_not_ready_status),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/'
- 'SubStatus'),
- escape(self.provisioning_failure_substatus))
+ "./Container/RoleInstanceList/Role/Health/Details/SubStatus",
+ ),
+ escape(self.provisioning_failure_substatus),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/'
- 'Description'),
- escape(self.provisioning_failure_err_description))
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ ),
+ escape(self.provisioning_failure_err_description),
+ )
def test_send_ready_signal_calls_build_report(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter, 'build_report'
+ azure_helper.GoalStateHealthReporter, "build_report"
) as m_build_report:
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
reporter.send_ready_signal()
self.assertEqual(1, m_build_report.call_count)
self.assertEqual(
mock.call(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
- status=self.provisioning_success_status),
- m_build_report.call_args)
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_success_status,
+ ),
+ m_build_report.call_args,
+ )
def test_send_failure_signal_calls_build_report(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter, 'build_report'
+ azure_helper.GoalStateHealthReporter, "build_report"
) as m_build_report:
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
reporter.send_failure_signal(
- description=self.provisioning_failure_err_description)
+ description=self.provisioning_failure_err_description
+ )
self.assertEqual(1, m_build_report.call_count)
self.assertEqual(
mock.call(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=self.provisioning_failure_err_description),
- m_build_report.call_args)
+ description=self.provisioning_failure_err_description,
+ ),
+ m_build_report.call_args,
+ )
def test_build_report_escapes_chars(self):
- incarnation = 'jd8\'9*&^<\'A><A[p&o+\"SD()*&&&LKAJSD23'
- container_id = '&&<\"><><ds8\'9+7&d9a86!@($09asdl;<>'
- instance_id = 'Opo>>>jas\'&d;[p&fp\"a<<!!@&&'
- health_status = '&<897\"6&>&aa\'sd!@&!)((*<&>'
- health_substatus = '&as\"d<<a&s>d<\'^@!5&6<7'
- health_description = '&&&>!#$\"&&<as\'1!@$d&>><>&\"sd<67<]>>'
-
- health_detail_subsection = \
+ incarnation = "jd8'9*&^<'A><A[p&o+\"SD()*&&&LKAJSD23"
+ container_id = "&&<\"><><ds8'9+7&d9a86!@($09asdl;<>"
+ instance_id = "Opo>>>jas'&d;[p&fp\"a<<!!@&&"
+ health_status = "&<897\"6&>&aa'sd!@&!)((*<&>"
+ health_substatus = "&as\"d<<a&s>d<'^@!5&6<7"
+ health_description = '&&&>!#$"&&<as\'1!@$d&>><>&"sd<67<]>>'
+
+ health_detail_subsection = (
self._get_formatted_health_detail_subsection_xml_string(
health_substatus=escape(health_substatus),
- health_description=escape(health_description))
+ health_description=escape(health_description),
+ )
+ )
health_document = self._get_formatted_health_report_xml_string(
incarnation=escape(incarnation),
container_id=escape(container_id),
instance_id=escape(instance_id),
health_status=escape(health_status),
- health_detail_subsection=health_detail_subsection)
+ health_detail_subsection=health_detail_subsection,
+ )
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
generated_health_document = reporter.build_report(
incarnation=incarnation,
container_id=container_id,
instance_id=instance_id,
status=health_status,
substatus=health_substatus,
- description=health_description)
+ description=health_description,
+ )
self.assertEqual(health_document, generated_health_document)
@@ -915,26 +1017,31 @@ class TestGoalStateHealthReporter(CiTestCase):
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
- long_err_msg = 'a9&ea8>>>e as1< d\"q2*&(^%\'a=5<' * 100
+ self.test_azure_endpoint,
+ )
+ long_err_msg = "a9&ea8>>>e as1< d\"q2*&(^%'a=5<" * 100
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=long_err_msg)
+ description=long_err_msg,
+ )
generated_xroot = ElementTree.fromstring(generated_health_document)
generated_health_report_description = self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/Description')
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
self.assertEqual(
len(unescape(generated_health_report_description)),
- HEALTH_REPORT_DESCRIPTION_TRIM_LEN)
+ HEALTH_REPORT_DESCRIPTION_TRIM_LEN,
+ )
def test_trim_description_then_escape_conforms_to_len_limits_worst_case(
- self):
+ self,
+ ):
"""When unescaped characters are XML-escaped, the length increases.
Char Escape String
< &lt;
@@ -963,46 +1070,53 @@ class TestGoalStateHealthReporter(CiTestCase):
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
- long_err_msg = '\'\"' * 10000
+ self.test_azure_endpoint,
+ )
+ long_err_msg = "'\"" * 10000
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=long_err_msg)
+ description=long_err_msg,
+ )
generated_xroot = ElementTree.fromstring(generated_health_document)
generated_health_report_description = self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/Description')
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
# The escaped description string should be less than
# the Azure platform limit for the escaped description string.
self.assertLessEqual(len(generated_health_report_description), 4096)
class TestWALinuxAgentShim(CiTestCase):
-
def setUp(self):
super(TestWALinuxAgentShim, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.AzureEndpointHttpClient = patches.enter_context(
- mock.patch.object(azure_helper, 'AzureEndpointHttpClient'))
+ mock.patch.object(azure_helper, "AzureEndpointHttpClient")
+ )
self.find_endpoint = patches.enter_context(
- mock.patch.object(wa_shim, 'find_endpoint'))
+ mock.patch.object(wa_shim, "find_endpoint")
+ )
self.GoalState = patches.enter_context(
- mock.patch.object(azure_helper, 'GoalState'))
+ mock.patch.object(azure_helper, "GoalState")
+ )
self.OpenSSLManager = patches.enter_context(
- mock.patch.object(azure_helper, 'OpenSSLManager', autospec=True))
+ mock.patch.object(azure_helper, "OpenSSLManager", autospec=True)
+ )
patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+ mock.patch.object(azure_helper.time, "sleep", mock.MagicMock())
+ )
- self.test_incarnation = 'TestIncarnation'
- self.test_container_id = 'TestContainerId'
- self.test_instance_id = 'TestInstanceId'
+ self.test_incarnation = "TestIncarnation"
+ self.test_container_id = "TestContainerId"
+ self.test_instance_id = "TestInstanceId"
self.GoalState.return_value.incarnation = self.test_incarnation
self.GoalState.return_value.container_id = self.test_container_id
self.GoalState.return_value.instance_id = self.test_instance_id
@@ -1010,7 +1124,7 @@ class TestWALinuxAgentShim(CiTestCase):
def test_eject_iso_is_called(self):
shim = wa_shim()
with mock.patch.object(
- shim, 'eject_iso', autospec=True
+ shim, "eject_iso", autospec=True
) as m_eject_iso:
shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0")
m_eject_iso.assert_called_once_with("/dev/sr0")
@@ -1019,102 +1133,113 @@ class TestWALinuxAgentShim(CiTestCase):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
self.assertEqual(
- [mock.call(None)],
- self.AzureEndpointHttpClient.call_args_list)
+ [mock.call(None)], self.AzureEndpointHttpClient.call_args_list
+ )
def test_http_client_does_not_use_certificate_for_report_failure(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
self.assertEqual(
- [mock.call(None)],
- self.AzureEndpointHttpClient.call_args_list)
+ [mock.call(None)], self.AzureEndpointHttpClient.call_args_list
+ )
def test_correct_url_used_for_goalstate_during_report_ready(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
m_get = self.AzureEndpointHttpClient.return_value.get
self.assertEqual(
- [mock.call('http://test_endpoint/machine/?comp=goalstate')],
- m_get.call_args_list)
+ [mock.call("http://test_endpoint/machine/?comp=goalstate")],
+ m_get.call_args_list,
+ )
self.assertEqual(
- [mock.call(
- m_get.return_value.contents,
- self.AzureEndpointHttpClient.return_value,
- False
- )],
- self.GoalState.call_args_list)
+ [
+ mock.call(
+ m_get.return_value.contents,
+ self.AzureEndpointHttpClient.return_value,
+ False,
+ )
+ ],
+ self.GoalState.call_args_list,
+ )
def test_correct_url_used_for_goalstate_during_report_failure(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
m_get = self.AzureEndpointHttpClient.return_value.get
self.assertEqual(
- [mock.call('http://test_endpoint/machine/?comp=goalstate')],
- m_get.call_args_list)
+ [mock.call("http://test_endpoint/machine/?comp=goalstate")],
+ m_get.call_args_list,
+ )
self.assertEqual(
- [mock.call(
- m_get.return_value.contents,
- self.AzureEndpointHttpClient.return_value,
- False
- )],
- self.GoalState.call_args_list)
+ [
+ mock.call(
+ m_get.return_value.contents,
+ self.AzureEndpointHttpClient.return_value,
+ False,
+ )
+ ],
+ self.GoalState.call_args_list,
+ )
def test_certificates_used_to_determine_public_keys(self):
# if register_with_azure_and_fetch_data() isn't passed some info about
# the user's public keys, there's no point in even trying to parse the
# certificates
shim = wa_shim()
- mypk = [{'fingerprint': 'fp1', 'path': 'path1'},
- {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}]
- certs = {'fp1': 'expected-key',
- 'fp2': 'should-not-be-found',
- 'fp3': 'expected-no-value-key',
- }
+ mypk = [
+ {"fingerprint": "fp1", "path": "path1"},
+ {"fingerprint": "fp3", "path": "path3", "value": ""},
+ ]
+ certs = {
+ "fp1": "expected-key",
+ "fp2": "should-not-be-found",
+ "fp3": "expected-no-value-key",
+ }
sslmgr = self.OpenSSLManager.return_value
sslmgr.parse_certificates.return_value = certs
data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
self.assertEqual(
[mock.call(self.GoalState.return_value.certificates_xml)],
- sslmgr.parse_certificates.call_args_list)
- self.assertIn('expected-key', data['public-keys'])
- self.assertIn('expected-no-value-key', data['public-keys'])
- self.assertNotIn('should-not-be-found', data['public-keys'])
+ sslmgr.parse_certificates.call_args_list,
+ )
+ self.assertIn("expected-key", data["public-keys"])
+ self.assertIn("expected-no-value-key", data["public-keys"])
+ self.assertNotIn("should-not-be-found", data["public-keys"])
def test_absent_certificates_produces_empty_public_keys(self):
- mypk = [{'fingerprint': 'fp1', 'path': 'path1'}]
+ mypk = [{"fingerprint": "fp1", "path": "path1"}]
self.GoalState.return_value.certificates_xml = None
shim = wa_shim()
data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
- self.assertEqual([], data['public-keys'])
+ self.assertEqual([], data["public-keys"])
def test_correct_url_used_for_report_ready(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
- expected_url = 'http://test_endpoint/machine?comp=health'
+ expected_url = "http://test_endpoint/machine?comp=health"
self.assertEqual(
[mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
- self.AzureEndpointHttpClient.return_value.post
- .call_args_list)
+ self.AzureEndpointHttpClient.return_value.post.call_args_list,
+ )
def test_correct_url_used_for_report_failure(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
- expected_url = 'http://test_endpoint/machine?comp=health'
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ expected_url = "http://test_endpoint/machine?comp=health"
self.assertEqual(
[mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
- self.AzureEndpointHttpClient.return_value.post
- .call_args_list)
+ self.AzureEndpointHttpClient.return_value.post.call_args_list,
+ )
def test_goal_state_values_used_for_report_ready(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data']
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
)
self.assertIn(self.test_incarnation, posted_document)
self.assertIn(self.test_container_id, posted_document)
@@ -1122,10 +1247,9 @@ class TestWALinuxAgentShim(CiTestCase):
def test_goal_state_values_used_for_report_failure(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data']
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
)
self.assertIn(self.test_incarnation, posted_document)
self.assertIn(self.test_container_id, posted_document)
@@ -1138,57 +1262,66 @@ class TestWALinuxAgentShim(CiTestCase):
incarnation=escape(self.test_incarnation),
container_id=escape(self.test_container_id),
instance_id=escape(self.test_instance_id),
- health_status=escape('Ready'),
- health_detail_subsection='')
+ health_status=escape("Ready"),
+ health_detail_subsection="",
+ )
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data'])
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
+ )
self.assertEqual(health_document, posted_document)
def test_xml_elems_in_report_failure_post(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
health_document = HEALTH_REPORT_XML_TEMPLATE.format(
incarnation=escape(self.test_incarnation),
container_id=escape(self.test_container_id),
instance_id=escape(self.test_instance_id),
- health_status=escape('NotReady'),
- health_detail_subsection=HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE
- .format(
- health_substatus=escape('ProvisioningFailed'),
- health_description=escape('TestDesc')))
+ health_status=escape("NotReady"),
+ health_detail_subsection=(
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
+ health_substatus=escape("ProvisioningFailed"),
+ health_description=escape("TestDesc"),
+ )
+ ),
+ )
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data'])
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
+ )
self.assertEqual(health_document, posted_document)
- @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True)
+ @mock.patch.object(azure_helper, "GoalStateHealthReporter", autospec=True)
def test_register_with_azure_and_fetch_data_calls_send_ready_signal(
- self, m_goal_state_health_reporter):
+ self, m_goal_state_health_reporter
+ ):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
self.assertEqual(
1,
- m_goal_state_health_reporter.return_value.send_ready_signal
- .call_count)
+ m_goal_state_health_reporter.return_value.send_ready_signal.call_count, # noqa: E501
+ )
- @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True)
+ @mock.patch.object(azure_helper, "GoalStateHealthReporter", autospec=True)
def test_register_with_azure_and_report_failure_calls_send_failure_signal(
- self, m_goal_state_health_reporter):
+ self, m_goal_state_health_reporter
+ ):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
- m_goal_state_health_reporter.return_value.send_failure_signal \
- .assert_called_once_with(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ m_goal_state_health_reporter.return_value.send_failure_signal.assert_called_once_with( # noqa: E501
+ description="TestDesc"
+ )
def test_register_with_azure_and_report_failure_does_not_need_certificates(
- self):
+ self,
+ ):
shim = wa_shim()
with mock.patch.object(
- shim, '_fetch_goal_state_from_azure', autospec=True
+ shim, "_fetch_goal_state_from_azure", autospec=True
) as m_fetch_goal_state_from_azure:
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
m_fetch_goal_state_from_azure.assert_called_once_with(
- need_certificate=False)
+ need_certificate=False
+ )
def test_clean_up_can_be_called_at_any_time(self):
shim = wa_shim()
@@ -1197,7 +1330,7 @@ class TestWALinuxAgentShim(CiTestCase):
def test_openssl_manager_not_instantiated_by_shim_report_status(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
shim.clean_up()
self.OpenSSLManager.assert_not_called()
@@ -1209,178 +1342,204 @@ class TestWALinuxAgentShim(CiTestCase):
def test_clean_up_after_report_failure(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
shim.clean_up()
self.OpenSSLManager.return_value.clean_up.assert_not_called()
def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self):
- self.AzureEndpointHttpClient.return_value.get \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.get.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_fetch_data)
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self):
- self.AzureEndpointHttpClient.return_value.get \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.get.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_report_failure,
- description='TestDesc')
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )
def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self):
self.GoalState.side_effect = SentinelException
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_fetch_data)
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc(
- self):
+ self,
+ ):
self.GoalState.side_effect = SentinelException
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_report_failure,
- description='TestDesc')
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )
def test_failure_to_send_report_ready_health_doc_bubbles_up(self):
- self.AzureEndpointHttpClient.return_value.post \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.post.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_fetch_data)
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
def test_failure_to_send_report_failure_health_doc_bubbles_up(self):
- self.AzureEndpointHttpClient.return_value.post \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.post.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_report_failure,
- description='TestDesc')
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )
class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
-
def setUp(self):
super(TestGetMetadataGoalStateXMLAndReportReadyToFabric, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.m_shim = patches.enter_context(
- mock.patch.object(azure_helper, 'WALinuxAgentShim'))
+ mock.patch.object(azure_helper, "WALinuxAgentShim")
+ )
def test_data_from_shim_returned(self):
ret = azure_helper.get_metadata_from_fabric()
self.assertEqual(
- self.m_shim.return_value.register_with_azure_and_fetch_data
- .return_value,
- ret)
+ self.m_shim.return_value.register_with_azure_and_fetch_data.return_value, # noqa: E501
+ ret,
+ )
def test_success_calls_clean_up(self):
azure_helper.get_metadata_from_fabric()
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
- def test_failure_in_registration_propagates_exc_and_calls_clean_up(
- self):
- self.m_shim.return_value.register_with_azure_and_fetch_data \
- .side_effect = SentinelException
- self.assertRaises(SentinelException,
- azure_helper.get_metadata_from_fabric)
+ def test_failure_in_registration_propagates_exc_and_calls_clean_up(self):
+ self.m_shim.return_value.register_with_azure_and_fetch_data.side_effect = ( # noqa: E501
+ SentinelException
+ )
+ self.assertRaises(
+ SentinelException, azure_helper.get_metadata_from_fabric
+ )
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_calls_shim_register_with_azure_and_fetch_data(self):
m_pubkey_info = mock.MagicMock()
azure_helper.get_metadata_from_fabric(
- pubkey_info=m_pubkey_info, iso_dev="/dev/sr0")
+ pubkey_info=m_pubkey_info, iso_dev="/dev/sr0"
+ )
self.assertEqual(
1,
- self.m_shim.return_value
- .register_with_azure_and_fetch_data.call_count)
+ self.m_shim.return_value.register_with_azure_and_fetch_data.call_count, # noqa: E501
+ )
self.assertEqual(
mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info),
- self.m_shim.return_value
- .register_with_azure_and_fetch_data.call_args)
+ self.m_shim.return_value.register_with_azure_and_fetch_data.call_args, # noqa: E501
+ )
def test_instantiates_shim_with_kwargs(self):
m_fallback_lease_file = mock.MagicMock()
m_dhcp_options = mock.MagicMock()
azure_helper.get_metadata_from_fabric(
- fallback_lease_file=m_fallback_lease_file,
- dhcp_opts=m_dhcp_options)
+ fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options
+ )
self.assertEqual(1, self.m_shim.call_count)
self.assertEqual(
mock.call(
fallback_lease_file=m_fallback_lease_file,
- dhcp_options=m_dhcp_options),
- self.m_shim.call_args)
+ dhcp_options=m_dhcp_options,
+ ),
+ self.m_shim.call_args,
+ )
class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
-
def setUp(self):
super(
- TestGetMetadataGoalStateXMLAndReportFailureToFabric, self).setUp()
+ TestGetMetadataGoalStateXMLAndReportFailureToFabric, self
+ ).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.m_shim = patches.enter_context(
- mock.patch.object(azure_helper, 'WALinuxAgentShim'))
+ mock.patch.object(azure_helper, "WALinuxAgentShim")
+ )
def test_success_calls_clean_up(self):
azure_helper.report_failure_to_fabric()
- self.assertEqual(
- 1,
- self.m_shim.return_value.clean_up.call_count)
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_failure_in_shim_report_failure_propagates_exc_and_calls_clean_up(
- self):
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .side_effect = SentinelException
- self.assertRaises(SentinelException,
- azure_helper.report_failure_to_fabric)
- self.assertEqual(
- 1,
- self.m_shim.return_value.clean_up.call_count)
+ self,
+ ):
+ self.m_shim.return_value.register_with_azure_and_report_failure.side_effect = ( # noqa: E501
+ SentinelException
+ )
+ self.assertRaises(
+ SentinelException, azure_helper.report_failure_to_fabric
+ )
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_report_failure_to_fabric_with_desc_calls_shim_report_failure(
- self):
- azure_helper.report_failure_to_fabric(description='TestDesc')
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .assert_called_once_with(description='TestDesc')
+ self,
+ ):
+ azure_helper.report_failure_to_fabric(description="TestDesc")
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description="TestDesc"
+ )
def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure(
- self):
+ self,
+ ):
azure_helper.report_failure_to_fabric()
# default err message description should be shown to the user
# if no description is passed in
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .assert_called_once_with(
- description=azure_helper
- .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description=(
+ azure_helper.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
+ )
def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure(
- self):
- azure_helper.report_failure_to_fabric(description='')
+ self,
+ ):
+ azure_helper.report_failure_to_fabric(description="")
# default err message description should be shown to the user
# if an empty description is passed in
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .assert_called_once_with(
- description=azure_helper
- .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description=(
+ azure_helper.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
+ )
def test_instantiates_shim_with_kwargs(self):
m_fallback_lease_file = mock.MagicMock()
m_dhcp_options = mock.MagicMock()
azure_helper.report_failure_to_fabric(
- fallback_lease_file=m_fallback_lease_file,
- dhcp_opts=m_dhcp_options)
+ fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options
+ )
self.m_shim.assert_called_once_with(
fallback_lease_file=m_fallback_lease_file,
- dhcp_options=m_dhcp_options)
+ dhcp_options=m_dhcp_options,
+ )
class TestExtractIpAddressFromNetworkd(CiTestCase):
- azure_lease = dedent("""\
+ azure_lease = dedent(
+ """\
# This is private data. Do not parse.
ADDRESS=10.132.0.5
NETMASK=255.255.255.255
@@ -1399,7 +1558,8 @@ class TestExtractIpAddressFromNetworkd(CiTestCase):
ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
CLIENTID=ff405663a200020000ab11332859494d7a8b4c
OPTION_245=624c3620
- """)
+ """
+ )
def setUp(self):
super(TestExtractIpAddressFromNetworkd, self).setUp()
@@ -1408,21 +1568,25 @@ class TestExtractIpAddressFromNetworkd(CiTestCase):
def test_no_valid_leases_is_none(self):
"""No valid leases should return None."""
self.assertIsNone(
- wa_shim._networkd_get_value_from_leases(self.lease_d))
+ wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
def test_option_245_is_found_in_single(self):
"""A single valid lease with 245 option should return it."""
- populate_dir(self.lease_d, {'9': self.azure_lease})
+ populate_dir(self.lease_d, {"9": self.azure_lease})
self.assertEqual(
- '624c3620', wa_shim._networkd_get_value_from_leases(self.lease_d))
+ "624c3620", wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
def test_option_245_not_found_returns_None(self):
"""A valid lease, but no option 245 should return None."""
populate_dir(
self.lease_d,
- {'9': self.azure_lease.replace("OPTION_245", "OPTION_999")})
+ {"9": self.azure_lease.replace("OPTION_245", "OPTION_999")},
+ )
self.assertIsNone(
- wa_shim._networkd_get_value_from_leases(self.lease_d))
+ wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
def test_multiple_returns_first(self):
"""Somewhat arbitrarily return the first address when multiple.
@@ -1432,10 +1596,14 @@ class TestExtractIpAddressFromNetworkd(CiTestCase):
myval = "624c3601"
populate_dir(
self.lease_d,
- {'9': self.azure_lease,
- '2': self.azure_lease.replace("624c3620", myval)})
+ {
+ "9": self.azure_lease,
+ "2": self.azure_lease.replace("624c3620", myval),
+ },
+ )
self.assertEqual(
- myval, wa_shim._networkd_get_value_from_leases(self.lease_d))
+ myval, wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py
index 2eae16ee..a2f26245 100644
--- a/tests/unittests/sources/test_cloudsigma.py
+++ b/tests/unittests/sources/test_cloudsigma.py
@@ -2,12 +2,9 @@
import copy
+from cloudinit import distros, helpers, sources
from cloudinit.cs_utils import Cepko
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import sources
from cloudinit.sources import DataSourceCloudSigma
-
from tests.unittests import helpers as test_helpers
SERVER_CONTEXT = {
@@ -28,10 +25,10 @@ SERVER_CONTEXT = {
"vendor_data": {
"location": "zrh",
"cloudinit": "#cloud-config\n\n...",
- }
+ },
}
-DS_PATH = 'cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma'
+DS_PATH = "cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma"
class CepkoMock(Cepko):
@@ -45,41 +42,48 @@ class CepkoMock(Cepko):
class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
def setUp(self):
super(DataSourceCloudSigmaTest, self).setUp()
- self.paths = helpers.Paths({'run_dir': self.tmp_dir()})
- self.add_patch(DS_PATH + '.is_running_in_cloudsigma',
- "m_is_container", return_value=True)
+ self.paths = helpers.Paths({"run_dir": self.tmp_dir()})
+ self.add_patch(
+ DS_PATH + ".is_running_in_cloudsigma",
+ "m_is_container",
+ return_value=True,
+ )
distro_cls = distros.fetch("ubuntu")
distro = distro_cls("ubuntu", cfg={}, paths=self.paths)
self.datasource = DataSourceCloudSigma.DataSourceCloudSigma(
- sys_cfg={}, distro=distro, paths=self.paths)
+ sys_cfg={}, distro=distro, paths=self.paths
+ )
self.datasource.cepko = CepkoMock(SERVER_CONTEXT)
def test_get_hostname(self):
self.datasource.get_data()
self.assertEqual("test_server", self.datasource.get_hostname())
- self.datasource.metadata['name'] = ''
+ self.datasource.metadata["name"] = ""
self.assertEqual("65b2fb23", self.datasource.get_hostname())
- utf8_hostname = b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82'.decode('utf-8')
- self.datasource.metadata['name'] = utf8_hostname
+ utf8_hostname = b"\xd1\x82\xd0\xb5\xd1\x81\xd1\x82".decode("utf-8")
+ self.datasource.metadata["name"] = utf8_hostname
self.assertEqual("65b2fb23", self.datasource.get_hostname())
def test_get_public_ssh_keys(self):
self.datasource.get_data()
- self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']],
- self.datasource.get_public_ssh_keys())
+ self.assertEqual(
+ [SERVER_CONTEXT["meta"]["ssh_public_key"]],
+ self.datasource.get_public_ssh_keys(),
+ )
def test_get_instance_id(self):
self.datasource.get_data()
- self.assertEqual(SERVER_CONTEXT['uuid'],
- self.datasource.get_instance_id())
+ self.assertEqual(
+ SERVER_CONTEXT["uuid"], self.datasource.get_instance_id()
+ )
def test_platform(self):
"""All platform-related attributes are set."""
self.datasource.get_data()
- self.assertEqual(self.datasource.cloud_name, 'cloudsigma')
- self.assertEqual(self.datasource.platform_type, 'cloudsigma')
- self.assertEqual(self.datasource.subplatform, 'cepko (/dev/ttyS1)')
+ self.assertEqual(self.datasource.cloud_name, "cloudsigma")
+ self.assertEqual(self.datasource.platform_type, "cloudsigma")
+ self.assertEqual(self.datasource.subplatform, "cepko (/dev/ttyS1)")
def test_metadata(self):
self.datasource.get_data()
@@ -87,22 +91,26 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
def test_user_data(self):
self.datasource.get_data()
- self.assertEqual(self.datasource.userdata_raw,
- SERVER_CONTEXT['meta']['cloudinit-user-data'])
+ self.assertEqual(
+ self.datasource.userdata_raw,
+ SERVER_CONTEXT["meta"]["cloudinit-user-data"],
+ )
def test_encoded_user_data(self):
encoded_context = copy.deepcopy(SERVER_CONTEXT)
- encoded_context['meta']['base64_fields'] = 'cloudinit-user-data'
- encoded_context['meta']['cloudinit-user-data'] = 'aGkgd29ybGQK'
+ encoded_context["meta"]["base64_fields"] = "cloudinit-user-data"
+ encoded_context["meta"]["cloudinit-user-data"] = "aGkgd29ybGQK"
self.datasource.cepko = CepkoMock(encoded_context)
self.datasource.get_data()
- self.assertEqual(self.datasource.userdata_raw, b'hi world\n')
+ self.assertEqual(self.datasource.userdata_raw, b"hi world\n")
def test_vendor_data(self):
self.datasource.get_data()
- self.assertEqual(self.datasource.vendordata_raw,
- SERVER_CONTEXT['vendor_data']['cloudinit'])
+ self.assertEqual(
+ self.datasource.vendordata_raw,
+ SERVER_CONTEXT["vendor_data"]["cloudinit"],
+ )
def test_lack_of_vendor_data(self):
stripped_context = copy.deepcopy(SERVER_CONTEXT)
@@ -125,13 +133,13 @@ class DsLoads(test_helpers.TestCase):
def test_get_datasource_list_returns_in_local(self):
deps = (sources.DEP_FILESYSTEM,)
ds_list = DataSourceCloudSigma.get_datasource_list(deps)
- self.assertEqual(ds_list,
- [DataSourceCloudSigma.DataSourceCloudSigma])
+ self.assertEqual(ds_list, [DataSourceCloudSigma.DataSourceCloudSigma])
def test_list_sources_finds_ds(self):
found = sources.list_sources(
- ['CloudSigma'], (sources.DEP_FILESYSTEM,), ['cloudinit.sources'])
- self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma],
- found)
+ ["CloudSigma"], (sources.DEP_FILESYSTEM,), ["cloudinit.sources"]
+ )
+ self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], found)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_cloudstack.py b/tests/unittests/sources/test_cloudstack.py
index 2b1a1b70..f7c69f91 100644
--- a/tests/unittests/sources/test_cloudstack.py
+++ b/tests/unittests/sources/test_cloudstack.py
@@ -1,80 +1,90 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import helpers
-from cloudinit import util
-from cloudinit.sources.DataSourceCloudStack import (
- DataSourceCloudStack, get_latest_lease)
-
-from tests.unittests.helpers import CiTestCase, ExitStack, mock
-
import os
import time
-MOD_PATH = 'cloudinit.sources.DataSourceCloudStack'
-DS_PATH = MOD_PATH + '.DataSourceCloudStack'
+from cloudinit import helpers, util
+from cloudinit.sources.DataSourceCloudStack import (
+ DataSourceCloudStack,
+ get_latest_lease,
+)
+from tests.unittests.helpers import CiTestCase, ExitStack, mock
+
+MOD_PATH = "cloudinit.sources.DataSourceCloudStack"
+DS_PATH = MOD_PATH + ".DataSourceCloudStack"
class TestCloudStackPasswordFetching(CiTestCase):
-
def setUp(self):
super(TestCloudStackPasswordFetching, self).setUp()
self.patches = ExitStack()
self.addCleanup(self.patches.close)
mod_name = MOD_PATH
- self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name)))
- self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name)))
+ self.patches.enter_context(mock.patch("{0}.ec2".format(mod_name)))
+ self.patches.enter_context(mock.patch("{0}.uhelp".format(mod_name)))
default_gw = "192.201.20.0"
get_latest_lease = mock.MagicMock(return_value=None)
- self.patches.enter_context(mock.patch(
- mod_name + '.get_latest_lease', get_latest_lease))
+ self.patches.enter_context(
+ mock.patch(mod_name + ".get_latest_lease", get_latest_lease)
+ )
get_default_gw = mock.MagicMock(return_value=default_gw)
- self.patches.enter_context(mock.patch(
- mod_name + '.get_default_gateway', get_default_gw))
+ self.patches.enter_context(
+ mock.patch(mod_name + ".get_default_gateway", get_default_gw)
+ )
get_networkd_server_address = mock.MagicMock(return_value=None)
- self.patches.enter_context(mock.patch(
- mod_name + '.dhcp.networkd_get_option_from_leases',
- get_networkd_server_address))
+ self.patches.enter_context(
+ mock.patch(
+ mod_name + ".dhcp.networkd_get_option_from_leases",
+ get_networkd_server_address,
+ )
+ )
self.tmp = self.tmp_dir()
def _set_password_server_response(self, response_string):
- subp = mock.MagicMock(return_value=(response_string, ''))
+ subp = mock.MagicMock(return_value=(response_string, ""))
self.patches.enter_context(
- mock.patch('cloudinit.sources.DataSourceCloudStack.subp.subp',
- subp))
+ mock.patch(
+ "cloudinit.sources.DataSourceCloudStack.subp.subp", subp
+ )
+ )
return subp
def test_empty_password_doesnt_create_config(self):
- self._set_password_server_response('')
+ self._set_password_server_response("")
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
self.assertEqual({}, ds.get_config_obj())
def test_saved_password_doesnt_create_config(self):
- self._set_password_server_response('saved_password')
+ self._set_password_server_response("saved_password")
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
self.assertEqual({}, ds.get_config_obj())
- @mock.patch(DS_PATH + '.wait_for_metadata_service')
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_password_sets_password(self, m_wait):
m_wait.return_value = True
- password = 'SekritSquirrel'
+ password = "SekritSquirrel"
self._set_password_server_response(password)
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
- self.assertEqual(password, ds.get_config_obj()['password'])
+ self.assertEqual(password, ds.get_config_obj()["password"])
- @mock.patch(DS_PATH + '.wait_for_metadata_service')
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_bad_request_doesnt_stop_ds_from_working(self, m_wait):
m_wait.return_value = True
- self._set_password_server_response('bad_request')
+ self._set_password_server_response("bad_request")
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
self.assertTrue(ds.get_data())
def assertRequestTypesSent(self, subp, expected_request_types):
@@ -82,42 +92,44 @@ class TestCloudStackPasswordFetching(CiTestCase):
for call in subp.call_args_list:
args = call[0][0]
for arg in args:
- if arg.startswith('DomU_Request'):
+ if arg.startswith("DomU_Request"):
request_types.append(arg.split()[1])
self.assertEqual(expected_request_types, request_types)
- @mock.patch(DS_PATH + '.wait_for_metadata_service')
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_valid_response_means_password_marked_as_saved(self, m_wait):
m_wait.return_value = True
- password = 'SekritSquirrel'
+ password = "SekritSquirrel"
subp = self._set_password_server_response(password)
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
- self.assertRequestTypesSent(subp,
- ['send_my_password', 'saved_password'])
+ self.assertRequestTypesSent(
+ subp, ["send_my_password", "saved_password"]
+ )
def _check_password_not_saved_for(self, response_string):
subp = self._set_password_server_response(response_string)
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
- with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait:
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ with mock.patch(DS_PATH + ".wait_for_metadata_service") as m_wait:
m_wait.return_value = True
ds.get_data()
- self.assertRequestTypesSent(subp, ['send_my_password'])
+ self.assertRequestTypesSent(subp, ["send_my_password"])
def test_password_not_saved_if_empty(self):
- self._check_password_not_saved_for('')
+ self._check_password_not_saved_for("")
def test_password_not_saved_if_already_saved(self):
- self._check_password_not_saved_for('saved_password')
+ self._check_password_not_saved_for("saved_password")
def test_password_not_saved_if_bad_request(self):
- self._check_password_not_saved_for('bad_request')
+ self._check_password_not_saved_for("bad_request")
class TestGetLatestLease(CiTestCase):
-
def _populate_dir_list(self, bdir, files):
"""populate_dir_list([(name, data), (name, data)])
@@ -133,8 +145,9 @@ class TestGetLatestLease(CiTestCase):
def _pop_and_test(self, files, expected):
lease_d = self.tmp_dir()
self._populate_dir_list(lease_d, files)
- self.assertEqual(self.tmp_path(expected, lease_d),
- get_latest_lease(lease_d))
+ self.assertEqual(
+ self.tmp_path(expected, lease_d), get_latest_lease(lease_d)
+ )
def test_skips_dhcpv6_files(self):
"""files started with dhclient6 should be skipped."""
@@ -161,9 +174,15 @@ class TestGetLatestLease(CiTestCase):
def test_ignores_by_extension(self):
"""only .lease or .leases file should be considered."""
- self._pop_and_test(["dhclient.lease", "dhclient.lease.bk",
- "dhclient.lease-old", "dhclient.leaselease"],
- "dhclient.lease")
+ self._pop_and_test(
+ [
+ "dhclient.lease",
+ "dhclient.lease.bk",
+ "dhclient.lease-old",
+ "dhclient.leaselease",
+ ],
+ "dhclient.lease",
+ )
def test_selects_newest_matching(self):
"""If multiple files match, the newest written should be used."""
diff --git a/tests/unittests/sources/test_common.py b/tests/unittests/sources/test_common.py
index bb8fa530..a5bdb629 100644
--- a/tests/unittests/sources/test_common.py
+++ b/tests/unittests/sources/test_common.py
@@ -1,39 +1,34 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import settings
-from cloudinit import sources
-from cloudinit import type_utils
-from cloudinit.sources import (
- DataSource,
- DataSourceAliYun as AliYun,
- DataSourceAltCloud as AltCloud,
- DataSourceAzure as Azure,
- DataSourceBigstep as Bigstep,
- DataSourceCloudSigma as CloudSigma,
- DataSourceCloudStack as CloudStack,
- DataSourceConfigDrive as ConfigDrive,
- DataSourceDigitalOcean as DigitalOcean,
- DataSourceEc2 as Ec2,
- DataSourceExoscale as Exoscale,
- DataSourceGCE as GCE,
- DataSourceHetzner as Hetzner,
- DataSourceIBMCloud as IBMCloud,
- DataSourceLXD as LXD,
- DataSourceMAAS as MAAS,
- DataSourceNoCloud as NoCloud,
- DataSourceOpenNebula as OpenNebula,
- DataSourceOpenStack as OpenStack,
- DataSourceOracle as Oracle,
- DataSourceOVF as OVF,
- DataSourceRbxCloud as RbxCloud,
- DataSourceScaleway as Scaleway,
- DataSourceSmartOS as SmartOS,
- DataSourceUpCloud as UpCloud,
- DataSourceVultr as Vultr,
- DataSourceVMware as VMware,
-)
+from cloudinit import settings, sources, type_utils
+from cloudinit.sources import DataSource
+from cloudinit.sources import DataSourceAliYun as AliYun
+from cloudinit.sources import DataSourceAltCloud as AltCloud
+from cloudinit.sources import DataSourceAzure as Azure
+from cloudinit.sources import DataSourceBigstep as Bigstep
+from cloudinit.sources import DataSourceCloudSigma as CloudSigma
+from cloudinit.sources import DataSourceCloudStack as CloudStack
+from cloudinit.sources import DataSourceConfigDrive as ConfigDrive
+from cloudinit.sources import DataSourceDigitalOcean as DigitalOcean
+from cloudinit.sources import DataSourceEc2 as Ec2
+from cloudinit.sources import DataSourceExoscale as Exoscale
+from cloudinit.sources import DataSourceGCE as GCE
+from cloudinit.sources import DataSourceHetzner as Hetzner
+from cloudinit.sources import DataSourceIBMCloud as IBMCloud
+from cloudinit.sources import DataSourceLXD as LXD
+from cloudinit.sources import DataSourceMAAS as MAAS
+from cloudinit.sources import DataSourceNoCloud as NoCloud
from cloudinit.sources import DataSourceNone as DSNone
-
+from cloudinit.sources import DataSourceOpenNebula as OpenNebula
+from cloudinit.sources import DataSourceOpenStack as OpenStack
+from cloudinit.sources import DataSourceOracle as Oracle
+from cloudinit.sources import DataSourceOVF as OVF
+from cloudinit.sources import DataSourceRbxCloud as RbxCloud
+from cloudinit.sources import DataSourceScaleway as Scaleway
+from cloudinit.sources import DataSourceSmartOS as SmartOS
+from cloudinit.sources import DataSourceUpCloud as UpCloud
+from cloudinit.sources import DataSourceVMware as VMware
+from cloudinit.sources import DataSourceVultr as Vultr
from tests.unittests import helpers as test_helpers
DEFAULT_LOCAL = [
@@ -78,24 +73,27 @@ DEFAULT_NETWORK = [
class ExpectedDataSources(test_helpers.TestCase):
- builtin_list = settings.CFG_BUILTIN['datasource_list']
+ builtin_list = settings.CFG_BUILTIN["datasource_list"]
deps_local = [sources.DEP_FILESYSTEM]
deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
pkg_list = [type_utils.obj_name(sources)]
def test_expected_default_local_sources_found(self):
found = sources.list_sources(
- self.builtin_list, self.deps_local, self.pkg_list)
+ self.builtin_list, self.deps_local, self.pkg_list
+ )
self.assertEqual(set(DEFAULT_LOCAL), set(found))
def test_expected_default_network_sources_found(self):
found = sources.list_sources(
- self.builtin_list, self.deps_network, self.pkg_list)
+ self.builtin_list, self.deps_network, self.pkg_list
+ )
self.assertEqual(set(DEFAULT_NETWORK), set(found))
def test_expected_nondefault_network_sources_found(self):
found = sources.list_sources(
- ['AliYun'], self.deps_network, self.pkg_list)
+ ["AliYun"], self.deps_network, self.pkg_list
+ )
self.assertEqual(set([AliYun.DataSourceAliYun]), set(found))
@@ -103,19 +101,23 @@ class TestDataSourceInvariants(test_helpers.TestCase):
def test_data_sources_have_valid_network_config_sources(self):
for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
for cfg_src in ds.network_config_sources:
- fail_msg = ('{} has an invalid network_config_sources entry:'
- ' {}'.format(str(ds), cfg_src))
- self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src),
- fail_msg)
+ fail_msg = (
+ "{} has an invalid network_config_sources entry:"
+ " {}".format(str(ds), cfg_src)
+ )
+ self.assertTrue(
+ hasattr(sources.NetworkConfigSource, cfg_src), fail_msg
+ )
def test_expected_dsname_defined(self):
for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
fail_msg = (
- '{} has an invalid / missing dsname property: {}'.format(
+ "{} has an invalid / missing dsname property: {}".format(
str(ds), str(ds.dsname)
)
)
self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg)
self.assertIsNotNone(ds.dsname)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_configdrive.py b/tests/unittests/sources/test_configdrive.py
index 775d0622..1fc40a0e 100644
--- a/tests/unittests/sources/test_configdrive.py
+++ b/tests/unittests/sources/test_configdrive.py
@@ -1,139 +1,229 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from copy import copy, deepcopy
import json
import os
+from copy import copy, deepcopy
-from cloudinit import helpers
-from cloudinit.net import eni
-from cloudinit.net import network_state
-from cloudinit import settings
+from cloudinit import helpers, settings, util
+from cloudinit.net import eni, network_state
from cloudinit.sources import DataSourceConfigDrive as ds
from cloudinit.sources.helpers import openstack
-from cloudinit import util
-
from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
-
-PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
+PUBKEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n"
EC2_META = {
- 'ami-id': 'ami-00000001',
- 'ami-launch-index': 0,
- 'ami-manifest-path': 'FIXME',
- 'block-device-mapping': {
- 'ami': 'sda1',
- 'ephemeral0': 'sda2',
- 'root': '/dev/sda1',
- 'swap': 'sda3'},
- 'hostname': 'sm-foo-test.novalocal',
- 'instance-action': 'none',
- 'instance-id': 'i-00000001',
- 'instance-type': 'm1.tiny',
- 'local-hostname': 'sm-foo-test.novalocal',
- 'local-ipv4': None,
- 'placement': {'availability-zone': 'nova'},
- 'public-hostname': 'sm-foo-test.novalocal',
- 'public-ipv4': '',
- 'public-keys': {'0': {'openssh-key': PUBKEY}},
- 'reservation-id': 'r-iru5qm4m',
- 'security-groups': ['default']
+ "ami-id": "ami-00000001",
+ "ami-launch-index": 0,
+ "ami-manifest-path": "FIXME",
+ "block-device-mapping": {
+ "ami": "sda1",
+ "ephemeral0": "sda2",
+ "root": "/dev/sda1",
+ "swap": "sda3",
+ },
+ "hostname": "sm-foo-test.novalocal",
+ "instance-action": "none",
+ "instance-id": "i-00000001",
+ "instance-type": "m1.tiny",
+ "local-hostname": "sm-foo-test.novalocal",
+ "local-ipv4": None,
+ "placement": {"availability-zone": "nova"},
+ "public-hostname": "sm-foo-test.novalocal",
+ "public-ipv4": "",
+ "public-keys": {"0": {"openssh-key": PUBKEY}},
+ "reservation-id": "r-iru5qm4m",
+ "security-groups": ["default"],
}
-USER_DATA = b'#!/bin/sh\necho This is user data\n'
+USER_DATA = b"#!/bin/sh\necho This is user data\n"
OSTACK_META = {
- 'availability_zone': 'nova',
- 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
- {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
- 'hostname': 'sm-foo-test.novalocal',
- 'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
- 'name': 'sm-foo-test',
- 'public_keys': {'mykey': PUBKEY},
- 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
-
-CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
+ "availability_zone": "nova",
+ "files": [
+ {"content_path": "/content/0000", "path": "/etc/foo.cfg"},
+ {"content_path": "/content/0001", "path": "/etc/bar/bar.cfg"},
+ ],
+ "hostname": "sm-foo-test.novalocal",
+ "meta": {"dsmode": "local", "my-meta": "my-value"},
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
+}
+
+CONTENT_0 = b"This is contents of /etc/foo.cfg\n"
+CONTENT_1 = b"# this is /etc/bar/bar.cfg\n"
NETWORK_DATA = {
- 'services': [
- {'type': 'dns', 'address': '199.204.44.24'},
- {'type': 'dns', 'address': '199.204.47.54'}
+ "services": [
+ {"type": "dns", "address": "199.204.44.24"},
+ {"type": "dns", "address": "199.204.47.54"},
+ ],
+ "links": [
+ {
+ "vif_id": "2ecc7709-b3f7-4448-9580-e1ec32d75bbd",
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2ecc7709-b3",
+ },
+ {
+ "vif_id": "2f88d109-5b57-40e6-af32-2472df09dc33",
+ "ethernet_mac_address": "fa:16:3e:d4:57:ad",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2f88d109-5b",
+ },
+ {
+ "vif_id": "1a5382f8-04c5-4d75-ab98-d666c1ef52cc",
+ "ethernet_mac_address": "fa:16:3e:05:30:fe",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap1a5382f8-04",
+ "name": "nic0",
+ },
],
- 'links': [
- {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd',
- 'ethernet_mac_address': 'fa:16:3e:69:b0:58',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'},
- {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33',
- 'ethernet_mac_address': 'fa:16:3e:d4:57:ad',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
- {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc',
- 'ethernet_mac_address': 'fa:16:3e:05:30:fe',
- 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'}
+ "networks": [
+ {
+ "link": "tap2ecc7709-b3",
+ "type": "ipv4_dhcp",
+ "network_id": "6d6357ac-0f70-4afa-8bd7-c274cc4ea235",
+ "id": "network0",
+ },
+ {
+ "link": "tap2f88d109-5b",
+ "type": "ipv4_dhcp",
+ "network_id": "d227a9b3-6960-4d94-8976-ee5788b44f54",
+ "id": "network1",
+ },
+ {
+ "link": "tap1a5382f8-04",
+ "type": "ipv4_dhcp",
+ "network_id": "dab2ba57-cae2-4311-a5ed-010b263891f5",
+ "id": "network2",
+ },
],
- 'networks': [
- {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp',
- 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235',
- 'id': 'network0'},
- {'link': 'tap2f88d109-5b', 'type': 'ipv4_dhcp',
- 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54',
- 'id': 'network1'},
- {'link': 'tap1a5382f8-04', 'type': 'ipv4_dhcp',
- 'network_id': 'dab2ba57-cae2-4311-a5ed-010b263891f5',
- 'id': 'network2'}
- ]
}
NETWORK_DATA_2 = {
"services": [
{"type": "dns", "address": "1.1.1.191"},
- {"type": "dns", "address": "1.1.1.4"}],
+ {"type": "dns", "address": "1.1.1.4"},
+ ],
"networks": [
- {"network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", "type": "ipv4",
- "netmask": "255.255.255.248", "link": "eth0",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "2.2.2.9"}],
- "ip_address": "2.2.2.10", "id": "network0-ipv4"},
- {"network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", "type": "ipv4",
- "netmask": "255.255.255.224", "link": "eth1",
- "routes": [], "ip_address": "3.3.3.24", "id": "network1-ipv4"}],
+ {
+ "network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a",
+ "type": "ipv4",
+ "netmask": "255.255.255.248",
+ "link": "eth0",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "2.2.2.9",
+ }
+ ],
+ "ip_address": "2.2.2.10",
+ "id": "network0-ipv4",
+ },
+ {
+ "network_id": "ca447c83-6409-499b-aaef-6ad1ae995348",
+ "type": "ipv4",
+ "netmask": "255.255.255.224",
+ "link": "eth1",
+ "routes": [],
+ "ip_address": "3.3.3.24",
+ "id": "network1-ipv4",
+ },
+ ],
"links": [
- {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": 1500,
- "type": "vif", "id": "eth0", "vif_id": "vif-foo1"},
- {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": 1500,
- "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}]
+ {
+ "ethernet_mac_address": "fa:16:3e:dd:50:9a",
+ "mtu": 1500,
+ "type": "vif",
+ "id": "eth0",
+ "vif_id": "vif-foo1",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:a8:14:69",
+ "mtu": 1500,
+ "type": "vif",
+ "id": "eth1",
+ "vif_id": "vif-foo2",
+ },
+ ],
}
# This network data ha 'tap' or null type for a link.
NETWORK_DATA_3 = {
- "services": [{"type": "dns", "address": "172.16.36.11"},
- {"type": "dns", "address": "172.16.36.12"}],
+ "services": [
+ {"type": "dns", "address": "172.16.36.11"},
+ {"type": "dns", "address": "172.16.36.12"},
+ ],
"networks": [
- {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
- "type": "ipv4", "netmask": "255.255.255.128",
- "link": "tap77a0dc5b-72", "ip_address": "172.17.48.18",
- "id": "network0",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "172.17.48.1"}]},
- {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
- "type": "ipv6", "netmask": "ffff:ffff:ffff:ffff::",
- "link": "tap77a0dc5b-72",
- "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d",
- "id": "network1",
- "routes": [{"netmask": "::", "network": "::",
- "gateway": "fdb8:52d0:9d14::1"}]},
- {"network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54",
- "type": "ipv4", "netmask": "255.255.255.128",
- "link": "tap7d6b7bec-93", "ip_address": "172.16.48.13",
- "id": "network2",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "172.16.48.1"},
- {"netmask": "255.255.0.0", "network": "172.16.0.0",
- "gateway": "172.16.48.1"}]}],
+ {
+ "network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+ "type": "ipv4",
+ "netmask": "255.255.255.128",
+ "link": "tap77a0dc5b-72",
+ "ip_address": "172.17.48.18",
+ "id": "network0",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.17.48.1",
+ }
+ ],
+ },
+ {
+ "network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+ "type": "ipv6",
+ "netmask": "ffff:ffff:ffff:ffff::",
+ "link": "tap77a0dc5b-72",
+ "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d",
+ "id": "network1",
+ "routes": [
+ {
+ "netmask": "::",
+ "network": "::",
+ "gateway": "fdb8:52d0:9d14::1",
+ }
+ ],
+ },
+ {
+ "network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54",
+ "type": "ipv4",
+ "netmask": "255.255.255.128",
+ "link": "tap7d6b7bec-93",
+ "ip_address": "172.16.48.13",
+ "id": "network2",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.16.48.1",
+ },
+ {
+ "netmask": "255.255.0.0",
+ "network": "172.16.0.0",
+ "gateway": "172.16.48.1",
+ },
+ ],
+ },
+ ],
"links": [
- {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": None,
- "type": "tap", "id": "tap77a0dc5b-72",
- "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48"},
- {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": None,
- "type": None, "id": "tap7d6b7bec-93",
- "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5"}
- ]
+ {
+ "ethernet_mac_address": "fa:16:3e:dd:50:9a",
+ "mtu": None,
+ "type": "tap",
+ "id": "tap77a0dc5b-72",
+ "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:a8:14:69",
+ "mtu": None,
+ "type": None,
+ "id": "tap7d6b7bec-93",
+ "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5",
+ },
+ ],
}
BOND_MAC = "fa:16:3e:b3:72:36"
@@ -143,122 +233,182 @@ NETWORK_DATA_BOND = {
{"type": "dns", "address": "1.1.1.4"},
],
"networks": [
- {"id": "network2-ipv4", "ip_address": "2.2.2.13",
- "link": "vlan2", "netmask": "255.255.255.248",
- "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117",
- "type": "ipv4",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "2.2.2.9"}]},
- {"id": "network3-ipv4", "ip_address": "10.0.1.5",
- "link": "vlan3", "netmask": "255.255.255.248",
- "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
- "type": "ipv4",
- "routes": [{"netmask": "255.255.255.255",
- "network": "192.168.1.0", "gateway": "10.0.1.1"}]}
+ {
+ "id": "network2-ipv4",
+ "ip_address": "2.2.2.13",
+ "link": "vlan2",
+ "netmask": "255.255.255.248",
+ "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "2.2.2.9",
+ }
+ ],
+ },
+ {
+ "id": "network3-ipv4",
+ "ip_address": "10.0.1.5",
+ "link": "vlan3",
+ "netmask": "255.255.255.248",
+ "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "255.255.255.255",
+ "network": "192.168.1.0",
+ "gateway": "10.0.1.1",
+ }
+ ],
+ },
],
"links": [
- {"ethernet_mac_address": "0c:c4:7a:34:6e:3c",
- "id": "eth0", "mtu": 1500, "type": "phy"},
- {"ethernet_mac_address": "0c:c4:7a:34:6e:3d",
- "id": "eth1", "mtu": 1500, "type": "phy"},
- {"bond_links": ["eth0", "eth1"],
- "bond_miimon": 100, "bond_mode": "4",
- "bond_xmit_hash_policy": "layer3+4",
- "ethernet_mac_address": BOND_MAC,
- "id": "bond0", "type": "bond"},
- {"ethernet_mac_address": "fa:16:3e:b3:72:30",
- "id": "vlan2", "type": "vlan", "vlan_id": 602,
- "vlan_link": "bond0", "vlan_mac_address": "fa:16:3e:b3:72:30"},
- {"ethernet_mac_address": "fa:16:3e:66:ab:a6",
- "id": "vlan3", "type": "vlan", "vlan_id": 612, "vlan_link": "bond0",
- "vlan_mac_address": "fa:16:3e:66:ab:a6"}
- ]
+ {
+ "ethernet_mac_address": "0c:c4:7a:34:6e:3c",
+ "id": "eth0",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "ethernet_mac_address": "0c:c4:7a:34:6e:3d",
+ "id": "eth1",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "bond_links": ["eth0", "eth1"],
+ "bond_miimon": 100,
+ "bond_mode": "4",
+ "bond_xmit_hash_policy": "layer3+4",
+ "ethernet_mac_address": BOND_MAC,
+ "id": "bond0",
+ "type": "bond",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:b3:72:30",
+ "id": "vlan2",
+ "type": "vlan",
+ "vlan_id": 602,
+ "vlan_link": "bond0",
+ "vlan_mac_address": "fa:16:3e:b3:72:30",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:66:ab:a6",
+ "id": "vlan3",
+ "type": "vlan",
+ "vlan_id": 612,
+ "vlan_link": "bond0",
+ "vlan_mac_address": "fa:16:3e:66:ab:a6",
+ },
+ ],
}
NETWORK_DATA_VLAN = {
"services": [{"type": "dns", "address": "1.1.1.191"}],
"networks": [
- {"id": "network1-ipv4", "ip_address": "10.0.1.5",
- "link": "vlan1", "netmask": "255.255.255.248",
- "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
- "type": "ipv4",
- "routes": [{"netmask": "255.255.255.255",
- "network": "192.168.1.0", "gateway": "10.0.1.1"}]}
+ {
+ "id": "network1-ipv4",
+ "ip_address": "10.0.1.5",
+ "link": "vlan1",
+ "netmask": "255.255.255.248",
+ "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "255.255.255.255",
+ "network": "192.168.1.0",
+ "gateway": "10.0.1.1",
+ }
+ ],
+ }
],
"links": [
- {"ethernet_mac_address": "fa:16:3e:69:b0:58",
- "id": "eth0", "mtu": 1500, "type": "phy"},
- {"ethernet_mac_address": "fa:16:3e:b3:72:30",
- "id": "vlan1", "type": "vlan", "vlan_id": 602,
- "vlan_link": "eth0", "vlan_mac_address": "fa:16:3e:b3:72:30"},
- ]
+ {
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "id": "eth0",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:b3:72:30",
+ "id": "vlan1",
+ "type": "vlan",
+ "vlan_id": 602,
+ "vlan_link": "eth0",
+ "vlan_mac_address": "fa:16:3e:b3:72:30",
+ },
+ ],
}
KNOWN_MACS = {
- 'fa:16:3e:69:b0:58': 'enp0s1',
- 'fa:16:3e:d4:57:ad': 'enp0s2',
- 'fa:16:3e:dd:50:9a': 'foo1',
- 'fa:16:3e:a8:14:69': 'foo2',
- 'fa:16:3e:ed:9a:59': 'foo3',
- '0c:c4:7a:34:6e:3d': 'oeth1',
- '0c:c4:7a:34:6e:3c': 'oeth0',
+ "fa:16:3e:69:b0:58": "enp0s1",
+ "fa:16:3e:d4:57:ad": "enp0s2",
+ "fa:16:3e:dd:50:9a": "foo1",
+ "fa:16:3e:a8:14:69": "foo2",
+ "fa:16:3e:ed:9a:59": "foo3",
+ "0c:c4:7a:34:6e:3d": "oeth1",
+ "0c:c4:7a:34:6e:3c": "oeth0",
}
CFG_DRIVE_FILES_V2 = {
- 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
- 'ec2/2009-04-04/user-data': USER_DATA,
- 'ec2/latest/meta-data.json': json.dumps(EC2_META),
- 'ec2/latest/user-data': USER_DATA,
- 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/2012-08-10/user_data': USER_DATA,
- 'openstack/content/0000': CONTENT_0,
- 'openstack/content/0001': CONTENT_1,
- 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/latest/user_data': USER_DATA,
- 'openstack/latest/network_data.json': json.dumps(NETWORK_DATA),
- 'openstack/2015-10-15/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/2015-10-15/user_data': USER_DATA,
- 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)}
+ "ec2/2009-04-04/meta-data.json": json.dumps(EC2_META),
+ "ec2/2009-04-04/user-data": USER_DATA,
+ "ec2/latest/meta-data.json": json.dumps(EC2_META),
+ "ec2/latest/user-data": USER_DATA,
+ "openstack/2012-08-10/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/2012-08-10/user_data": USER_DATA,
+ "openstack/content/0000": CONTENT_0,
+ "openstack/content/0001": CONTENT_1,
+ "openstack/latest/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/latest/user_data": USER_DATA,
+ "openstack/latest/network_data.json": json.dumps(NETWORK_DATA),
+ "openstack/2015-10-15/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/2015-10-15/user_data": USER_DATA,
+ "openstack/2015-10-15/network_data.json": json.dumps(NETWORK_DATA),
+}
M_PATH = "cloudinit.sources.DataSourceConfigDrive."
class TestConfigDriveDataSource(CiTestCase):
-
def setUp(self):
super(TestConfigDriveDataSource, self).setUp()
self.add_patch(
- M_PATH + "util.find_devs_with",
- "m_find_devs_with", return_value=[])
+ M_PATH + "util.find_devs_with", "m_find_devs_with", return_value=[]
+ )
self.tmp = self.tmp_dir()
def test_ec2_metadata(self):
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
found = ds.read_config_drive(self.tmp)
- self.assertTrue('ec2-metadata' in found)
- ec2_md = found['ec2-metadata']
+ self.assertTrue("ec2-metadata" in found)
+ ec2_md = found["ec2-metadata"]
self.assertEqual(EC2_META, ec2_md)
def test_dev_os_remap(self):
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
found = ds.read_config_drive(self.tmp)
- cfg_ds.metadata = found['metadata']
+ cfg_ds.metadata = found["metadata"]
name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
}
for name, dev_name in name_tests.items():
with ExitStack() as mocks:
- provided_name = dev_name[len('/dev/'):]
+ provided_name = dev_name[len("/dev/") :]
provided_name = "s" + provided_name[1:]
find_mock = mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- return_value=[provided_name]))
+ mock.patch.object(
+ util, "find_devs_with", return_value=[provided_name]
+ )
+ )
# We want os.path.exists() to return False on its first call,
# and True on its second call. We use a handy generator as
# the mock side effect for this. The mocked function returns
@@ -267,9 +417,12 @@ class TestConfigDriveDataSource(CiTestCase):
def exists_side_effect():
yield False
yield True
+
exists_mock = mocks.enter_context(
- mock.patch.object(os.path, 'exists',
- side_effect=exists_side_effect()))
+ mock.patch.object(
+ os.path, "exists", side_effect=exists_side_effect()
+ )
+ )
self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
find_mock.assert_called_once_with(mock.ANY)
@@ -277,26 +430,28 @@ class TestConfigDriveDataSource(CiTestCase):
def test_dev_os_map(self):
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
found = ds.read_config_drive(self.tmp)
- os_md = found['metadata']
+ os_md = found["metadata"]
cfg_ds.metadata = os_md
name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
}
for name, dev_name in name_tests.items():
with ExitStack() as mocks:
find_mock = mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- return_value=[dev_name]))
+ mock.patch.object(
+ util, "find_devs_with", return_value=[dev_name]
+ )
+ )
exists_mock = mocks.enter_context(
- mock.patch.object(os.path, 'exists',
- return_value=True))
+ mock.patch.object(os.path, "exists", return_value=True)
+ )
self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
find_mock.assert_called_once_with(mock.ANY)
@@ -304,22 +459,22 @@ class TestConfigDriveDataSource(CiTestCase):
def test_dev_ec2_remap(self):
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
found = ds.read_config_drive(self.tmp)
- ec2_md = found['ec2-metadata']
- os_md = found['metadata']
+ ec2_md = found["ec2-metadata"]
+ os_md = found["metadata"]
cfg_ds.ec2_metadata = ec2_md
cfg_ds.metadata = os_md
name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
None: None,
- 'bob': None,
- 'root2k': None,
+ "bob": None,
+ "root2k": None,
}
for name, dev_name in name_tests.items():
# We want os.path.exists() to return False on its first call,
@@ -329,8 +484,10 @@ class TestConfigDriveDataSource(CiTestCase):
def exists_side_effect():
yield False
yield True
- with mock.patch.object(os.path, 'exists',
- side_effect=exists_side_effect()):
+
+ with mock.patch.object(
+ os.path, "exists", side_effect=exists_side_effect()
+ ):
self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
# We don't assert the call count for os.path.exists() because
# not all of the entries in name_tests results in two calls to
@@ -339,25 +496,25 @@ class TestConfigDriveDataSource(CiTestCase):
def test_dev_ec2_map(self):
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
found = ds.read_config_drive(self.tmp)
- ec2_md = found['ec2-metadata']
- os_md = found['metadata']
+ ec2_md = found["ec2-metadata"]
+ os_md = found["metadata"]
cfg_ds.ec2_metadata = ec2_md
cfg_ds.metadata = os_md
name_tests = {
- 'ami': '/dev/sda1',
- 'root': '/dev/sda1',
- 'ephemeral0': '/dev/sda2',
- 'swap': '/dev/sda3',
+ "ami": "/dev/sda1",
+ "root": "/dev/sda1",
+ "ephemeral0": "/dev/sda2",
+ "swap": "/dev/sda3",
None: None,
- 'bob': None,
- 'root2k': None,
+ "bob": None,
+ "root2k": None,
}
for name, dev_name in name_tests.items():
- with mock.patch.object(os.path, 'exists', return_value=True):
+ with mock.patch.object(os.path, "exists", return_value=True):
self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
def test_dir_valid(self):
@@ -368,14 +525,14 @@ class TestConfigDriveDataSource(CiTestCase):
found = ds.read_config_drive(self.tmp)
expected_md = copy(OSTACK_META)
- expected_md['instance-id'] = expected_md['uuid']
- expected_md['local-hostname'] = expected_md['hostname']
+ expected_md["instance-id"] = expected_md["uuid"]
+ expected_md["local-hostname"] = expected_md["hostname"]
- self.assertEqual(USER_DATA, found['userdata'])
- self.assertEqual(expected_md, found['metadata'])
- self.assertEqual(NETWORK_DATA, found['networkdata'])
- self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0)
- self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1)
+ self.assertEqual(USER_DATA, found["userdata"])
+ self.assertEqual(expected_md, found["metadata"])
+ self.assertEqual(NETWORK_DATA, found["networkdata"])
+ self.assertEqual(found["files"]["/etc/foo.cfg"], CONTENT_0)
+ self.assertEqual(found["files"]["/etc/bar/bar.cfg"], CONTENT_1)
def test_seed_dir_valid_extra(self):
"""Verify extra files do not affect datasource validity."""
@@ -389,10 +546,10 @@ class TestConfigDriveDataSource(CiTestCase):
found = ds.read_config_drive(self.tmp)
expected_md = copy(OSTACK_META)
- expected_md['instance-id'] = expected_md['uuid']
- expected_md['local-hostname'] = expected_md['hostname']
+ expected_md["instance-id"] = expected_md["uuid"]
+ expected_md["local-hostname"] = expected_md["hostname"]
- self.assertEqual(expected_md, found['metadata'])
+ self.assertEqual(expected_md, found["metadata"])
def test_seed_dir_bad_json_metadata(self):
"""Verify that bad json in metadata raises BrokenConfigDriveDir."""
@@ -404,8 +561,9 @@ class TestConfigDriveDataSource(CiTestCase):
populate_dir(self.tmp, data)
- self.assertRaises(openstack.BrokenMetadata,
- ds.read_config_drive, self.tmp)
+ self.assertRaises(
+ openstack.BrokenMetadata, ds.read_config_drive, self.tmp
+ )
def test_seed_dir_no_configdrive(self):
"""Verify that no metadata raises NonConfigDriveDir."""
@@ -416,20 +574,18 @@ class TestConfigDriveDataSource(CiTestCase):
data["openstack/latest/random-file.txt"] = "random-content"
data["content/foo"] = "foocontent"
- self.assertRaises(openstack.NonReadable,
- ds.read_config_drive, my_d)
+ self.assertRaises(openstack.NonReadable, ds.read_config_drive, my_d)
def test_seed_dir_missing(self):
"""Verify that missing seed_dir raises NonConfigDriveDir."""
my_d = os.path.join(self.tmp, "nonexistantdirectory")
- self.assertRaises(openstack.NonReadable,
- ds.read_config_drive, my_d)
+ self.assertRaises(openstack.NonReadable, ds.read_config_drive, my_d)
def test_find_candidates(self):
devs_with_answers = {}
def my_devs_with(*args, **kwargs):
- criteria = args[0] if len(args) else kwargs.pop('criteria', None)
+ criteria = args[0] if len(args) else kwargs.pop("criteria", None)
return devs_with_answers.get(criteria, [])
def my_is_partition(dev):
@@ -442,60 +598,67 @@ class TestConfigDriveDataSource(CiTestCase):
orig_is_partition = util.is_partition
util.is_partition = my_is_partition
- devs_with_answers = {"TYPE=vfat": [],
- "TYPE=iso9660": ["/dev/vdb"],
- "LABEL=config-2": ["/dev/vdb"]}
+ devs_with_answers = {
+ "TYPE=vfat": [],
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=config-2": ["/dev/vdb"],
+ }
self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
# add a vfat item
# zdd reverse sorts after vdb, but config-2 label is preferred
- devs_with_answers['TYPE=vfat'] = ["/dev/zdd"]
- self.assertEqual(["/dev/vdb", "/dev/zdd"],
- ds.find_candidate_devs())
+ devs_with_answers["TYPE=vfat"] = ["/dev/zdd"]
+ self.assertEqual(
+ ["/dev/vdb", "/dev/zdd"], ds.find_candidate_devs()
+ )
# verify that partitions are considered, that have correct label.
- devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
- "TYPE=iso9660": [],
- "LABEL=config-2": ["/dev/vdb3"]}
- self.assertEqual(["/dev/vdb3"],
- ds.find_candidate_devs())
+ devs_with_answers = {
+ "TYPE=vfat": ["/dev/sda1"],
+ "TYPE=iso9660": [],
+ "LABEL=config-2": ["/dev/vdb3"],
+ }
+ self.assertEqual(["/dev/vdb3"], ds.find_candidate_devs())
# Verify that uppercase labels are also found.
- devs_with_answers = {"TYPE=vfat": [],
- "TYPE=iso9660": ["/dev/vdb"],
- "LABEL=CONFIG-2": ["/dev/vdb"]}
+ devs_with_answers = {
+ "TYPE=vfat": [],
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=CONFIG-2": ["/dev/vdb"],
+ }
self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
finally:
util.find_devs_with = orig_find_devs_with
util.is_partition = orig_is_partition
- @mock.patch(M_PATH + 'on_first_boot')
+ @mock.patch(M_PATH + "on_first_boot")
def test_pubkeys_v2(self, on_first_boot):
"""Verify that public-keys work in config-drive-v2."""
myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
- self.assertEqual(myds.get_public_ssh_keys(),
- [OSTACK_META['public_keys']['mykey']])
- self.assertEqual('configdrive', myds.cloud_name)
- self.assertEqual('openstack', myds.platform)
- self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform)
+ self.assertEqual(
+ myds.get_public_ssh_keys(), [OSTACK_META["public_keys"]["mykey"]]
+ )
+ self.assertEqual("configdrive", myds.cloud_name)
+ self.assertEqual("openstack", myds.platform)
+ self.assertEqual("seed-dir (%s/seed)" % self.tmp, myds.subplatform)
def test_subplatform_config_drive_when_starts_with_dev(self):
"""subplatform reports config-drive when source starts with /dev/."""
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs:
- with mock.patch(M_PATH + 'util.mount_cb'):
- with mock.patch(M_PATH + 'on_first_boot'):
- m_find_devs.return_value = ['/dev/anything']
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ with mock.patch(M_PATH + "find_candidate_devs") as m_find_devs:
+ with mock.patch(M_PATH + "util.mount_cb"):
+ with mock.patch(M_PATH + "on_first_boot"):
+ m_find_devs.return_value = ["/dev/anything"]
self.assertEqual(True, cfg_ds.get_data())
- self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform)
+ self.assertEqual("config-disk (/dev/anything)", cfg_ds.subplatform)
@mock.patch(
"cloudinit.net.is_openvswitch_internal_interface",
- mock.Mock(return_value=False)
+ mock.Mock(return_value=False),
)
class TestNetJson(CiTestCase):
def setUp(self):
@@ -503,55 +666,74 @@ class TestNetJson(CiTestCase):
self.tmp = self.tmp_dir()
self.maxDiff = None
- @mock.patch(M_PATH + 'on_first_boot')
+ @mock.patch(M_PATH + "on_first_boot")
def test_network_data_is_found(self, on_first_boot):
"""Verify that network_data is present in ds in config-drive-v2."""
myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
self.assertIsNotNone(myds.network_json)
- @mock.patch(M_PATH + 'on_first_boot')
+ @mock.patch(M_PATH + "on_first_boot")
def test_network_config_is_converted(self, on_first_boot):
"""Verify that network_data is converted and present on ds object."""
myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
- network_config = openstack.convert_net_json(NETWORK_DATA,
- known_macs=KNOWN_MACS)
+ network_config = openstack.convert_net_json(
+ NETWORK_DATA, known_macs=KNOWN_MACS
+ )
self.assertEqual(myds.network_config, network_config)
def test_network_config_conversion_dhcp6(self):
"""Test some ipv6 input network json and check the expected
- conversions."""
+ conversions."""
in_data = {
- 'links': [
- {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd',
- 'ethernet_mac_address': 'fa:16:3e:69:b0:58',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'},
- {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33',
- 'ethernet_mac_address': 'fa:16:3e:d4:57:ad',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
+ "links": [
+ {
+ "vif_id": "2ecc7709-b3f7-4448-9580-e1ec32d75bbd",
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2ecc7709-b3",
+ },
+ {
+ "vif_id": "2f88d109-5b57-40e6-af32-2472df09dc33",
+ "ethernet_mac_address": "fa:16:3e:d4:57:ad",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2f88d109-5b",
+ },
+ ],
+ "networks": [
+ {
+ "link": "tap2ecc7709-b3",
+ "type": "ipv6_dhcpv6-stateless",
+ "network_id": "6d6357ac-0f70-4afa-8bd7-c274cc4ea235",
+ "id": "network0",
+ },
+ {
+ "link": "tap2f88d109-5b",
+ "type": "ipv6_dhcpv6-stateful",
+ "network_id": "d227a9b3-6960-4d94-8976-ee5788b44f54",
+ "id": "network1",
+ },
],
- 'networks': [
- {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless',
- 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235',
- 'id': 'network0'},
- {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful',
- 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54',
- 'id': 'network1'},
- ]
}
out_data = {
- 'version': 1,
- 'config': [
- {'mac_address': 'fa:16:3e:69:b0:58',
- 'mtu': None,
- 'name': 'enp0s1',
- 'subnets': [{'type': 'ipv6_dhcpv6-stateless'}],
- 'type': 'physical'},
- {'mac_address': 'fa:16:3e:d4:57:ad',
- 'mtu': None,
- 'name': 'enp0s2',
- 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}],
- 'type': 'physical',
- 'accept-ra': True}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "fa:16:3e:69:b0:58",
+ "mtu": None,
+ "name": "enp0s1",
+ "subnets": [{"type": "ipv6_dhcpv6-stateless"}],
+ "type": "physical",
+ },
+ {
+ "mac_address": "fa:16:3e:d4:57:ad",
+ "mtu": None,
+ "name": "enp0s2",
+ "subnets": [{"type": "ipv6_dhcpv6-stateful"}],
+ "type": "physical",
+ "accept-ra": True,
+ },
],
}
conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS)
@@ -559,107 +741,115 @@ class TestNetJson(CiTestCase):
def test_network_config_conversions(self):
"""Tests a bunch of input network json and checks the
- expected conversions."""
+ expected conversions."""
in_datas = [
NETWORK_DATA,
{
- 'services': [{'type': 'dns', 'address': '172.19.0.12'}],
- 'networks': [{
- 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4',
- 'type': 'ipv4',
- 'netmask': '255.255.252.0',
- 'link': 'tap1a81968a-79',
- 'routes': [{
- 'netmask': '0.0.0.0',
- 'network': '0.0.0.0',
- 'gateway': '172.19.3.254',
- }],
- 'ip_address': '172.19.1.34',
- 'id': 'network0',
- }],
- 'links': [{
- 'type': 'bridge',
- 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f',
- 'ethernet_mac_address': 'fa:16:3e:ed:9a:59',
- 'id': 'tap1a81968a-79',
- 'mtu': None,
- }],
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [
+ {
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ }
+ ],
+ "links": [
+ {
+ "type": "bridge",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "id": "tap1a81968a-79",
+ "mtu": None,
+ }
+ ],
},
]
out_datas = [
{
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
{
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:69:b0:58',
- 'name': 'enp0s1',
- 'mtu': None,
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:69:b0:58",
+ "name": "enp0s1",
+ "mtu": None,
},
{
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:d4:57:ad',
- 'name': 'enp0s2',
- 'mtu': None,
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:d4:57:ad",
+ "name": "enp0s2",
+ "mtu": None,
},
{
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:05:30:fe',
- 'name': 'nic0',
- 'mtu': None,
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:05:30:fe",
+ "name": "nic0",
+ "mtu": None,
},
{
- 'type': 'nameserver',
- 'address': '199.204.44.24',
+ "type": "nameserver",
+ "address": "199.204.44.24",
},
{
- 'type': 'nameserver',
- 'address': '199.204.47.54',
- }
+ "type": "nameserver",
+ "address": "199.204.47.54",
+ },
],
-
},
{
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
{
- 'name': 'foo3',
- 'mac_address': 'fa:16:3e:ed:9a:59',
- 'mtu': None,
- 'type': 'physical',
- 'subnets': [
+ "name": "foo3",
+ "mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None,
+ "type": "physical",
+ "subnets": [
{
- 'address': '172.19.1.34',
- 'netmask': '255.255.252.0',
- 'type': 'static',
- 'ipv4': True,
- 'routes': [{
- 'gateway': '172.19.3.254',
- 'netmask': '0.0.0.0',
- 'network': '0.0.0.0',
- }],
+ "address": "172.19.1.34",
+ "netmask": "255.255.252.0",
+ "type": "static",
+ "ipv4": True,
+ "routes": [
+ {
+ "gateway": "172.19.3.254",
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ }
+ ],
}
- ]
+ ],
},
{
- 'type': 'nameserver',
- 'address': '172.19.0.12',
- }
+ "type": "nameserver",
+ "address": "172.19.0.12",
+ },
],
},
]
for in_data, out_data in zip(in_datas, out_datas):
- conv_data = openstack.convert_net_json(in_data,
- known_macs=KNOWN_MACS)
+ conv_data = openstack.convert_net_json(
+ in_data, known_macs=KNOWN_MACS
+ )
self.assertEqual(out_data, conv_data)
@mock.patch(
"cloudinit.net.is_openvswitch_internal_interface",
- mock.Mock(return_value=False)
+ mock.Mock(return_value=False),
)
class TestConvertNetworkData(CiTestCase):
@@ -670,86 +860,105 @@ class TestConvertNetworkData(CiTestCase):
self.tmp = self.tmp_dir()
def _getnames_in_config(self, ncfg):
- return set([n['name'] for n in ncfg['config']
- if n['type'] == 'physical'])
+ return set(
+ [n["name"] for n in ncfg["config"] if n["type"] == "physical"]
+ )
def test_conversion_fills_names(self):
ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS)
- expected = set(['nic0', 'enp0s1', 'enp0s2'])
+ expected = set(["nic0", "enp0s1", "enp0s2"])
found = self._getnames_in_config(ncfg)
self.assertEqual(found, expected)
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac):
macs = KNOWN_MACS.copy()
- macs.update({'fa:16:3e:05:30:fe': 'foonic1',
- 'fa:16:3e:69:b0:58': 'ens1'})
+ macs.update(
+ {"fa:16:3e:05:30:fe": "foonic1", "fa:16:3e:69:b0:58": "ens1"}
+ )
get_interfaces_by_mac.return_value = macs
ncfg = openstack.convert_net_json(NETWORK_DATA)
- expected = set(['nic0', 'ens1', 'enp0s2'])
+ expected = set(["nic0", "ens1", "enp0s2"])
found = self._getnames_in_config(ncfg)
self.assertEqual(found, expected)
def test_convert_raises_value_error_on_missing_name(self):
- macs = {'aa:aa:aa:aa:aa:00': 'ens1'}
- self.assertRaises(ValueError, openstack.convert_net_json,
- NETWORK_DATA, known_macs=macs)
+ macs = {"aa:aa:aa:aa:aa:00": "ens1"}
+ self.assertRaises(
+ ValueError,
+ openstack.convert_net_json,
+ NETWORK_DATA,
+ known_macs=macs,
+ )
def test_conversion_with_route(self):
- ncfg = openstack.convert_net_json(NETWORK_DATA_2,
- known_macs=KNOWN_MACS)
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_2, known_macs=KNOWN_MACS
+ )
# not the best test, but see that we get a route in the
# network config and that it gets rendered to an ENI file
routes = []
- for n in ncfg['config']:
- for s in n.get('subnets', []):
- routes.extend(s.get('routes', []))
+ for n in ncfg["config"]:
+ for s in n.get("subnets", []):
+ routes.extend(s.get("routes", []))
self.assertIn(
- {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'},
- routes)
+ {"network": "0.0.0.0", "netmask": "0.0.0.0", "gateway": "2.2.2.9"},
+ routes,
+ )
eni_renderer = eni.Renderer()
eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), target=self.tmp)
- with open(os.path.join(self.tmp, "etc",
- "network", "interfaces"), 'r') as f:
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
eni_rendering = f.read()
self.assertIn("route add default gw 2.2.2.9", eni_rendering)
def test_conversion_with_tap(self):
- ncfg = openstack.convert_net_json(NETWORK_DATA_3,
- known_macs=KNOWN_MACS)
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_3, known_macs=KNOWN_MACS
+ )
physicals = set()
- for i in ncfg['config']:
- if i.get('type') == "physical":
- physicals.add(i['name'])
- self.assertEqual(physicals, set(('foo1', 'foo2')))
+ for i in ncfg["config"]:
+ if i.get("type") == "physical":
+ physicals.add(i["name"])
+ self.assertEqual(physicals, set(("foo1", "foo2")))
def test_bond_conversion(self):
# light testing of bond conversion and eni rendering of bond
- ncfg = openstack.convert_net_json(NETWORK_DATA_BOND,
- known_macs=KNOWN_MACS)
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_BOND, known_macs=KNOWN_MACS
+ )
eni_renderer = eni.Renderer()
eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), target=self.tmp)
- with open(os.path.join(self.tmp, "etc",
- "network", "interfaces"), 'r') as f:
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
eni_rendering = f.read()
# Verify there are expected interfaces in the net config.
interfaces = sorted(
- [i['name'] for i in ncfg['config']
- if i['type'] in ('vlan', 'bond', 'physical')])
+ [
+ i["name"]
+ for i in ncfg["config"]
+ if i["type"] in ("vlan", "bond", "physical")
+ ]
+ )
self.assertEqual(
sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]),
- interfaces)
+ interfaces,
+ )
words = eni_rendering.split()
# 'eth0' and 'eth1' are the ids. because their mac adresses
# map to other names, we should not see them in the ENI
- self.assertNotIn('eth0', words)
- self.assertNotIn('eth1', words)
+ self.assertNotIn("eth0", words)
+ self.assertNotIn("eth1", words)
# oeth0 and oeth1 are the interface names for eni.
# bond0 will be generated for the bond. Each should be auto.
@@ -762,13 +971,16 @@ class TestConvertNetworkData(CiTestCase):
def test_vlan(self):
# light testing of vlan config conversion and eni rendering
- ncfg = openstack.convert_net_json(NETWORK_DATA_VLAN,
- known_macs=KNOWN_MACS)
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_VLAN, known_macs=KNOWN_MACS
+ )
eni_renderer = eni.Renderer()
eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), target=self.tmp)
- with open(os.path.join(self.tmp, "etc",
- "network", "interfaces"), 'r') as f:
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
eni_rendering = f.read()
self.assertIn("iface enp0s1", eni_rendering)
@@ -778,52 +990,63 @@ class TestConvertNetworkData(CiTestCase):
def test_mac_addrs_can_be_upper_case(self):
# input mac addresses on rackspace may be upper case
my_netdata = deepcopy(NETWORK_DATA)
- for link in my_netdata['links']:
- link['ethernet_mac_address'] = link['ethernet_mac_address'].upper()
+ for link in my_netdata["links"]:
+ link["ethernet_mac_address"] = link["ethernet_mac_address"].upper()
ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
config_name2mac = {}
- for n in ncfg['config']:
- if n['type'] == 'physical':
- config_name2mac[n['name']] = n['mac_address']
-
- expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58',
- 'enp0s2': 'fa:16:3e:d4:57:ad'}
+ for n in ncfg["config"]:
+ if n["type"] == "physical":
+ config_name2mac[n["name"]] = n["mac_address"]
+
+ expected = {
+ "nic0": "fa:16:3e:05:30:fe",
+ "enp0s1": "fa:16:3e:69:b0:58",
+ "enp0s2": "fa:16:3e:d4:57:ad",
+ }
self.assertEqual(expected, config_name2mac)
def test_unknown_device_types_accepted(self):
# If we don't recognise a link, we should treat it as physical for a
# best-effort boot
my_netdata = deepcopy(NETWORK_DATA)
- my_netdata['links'][0]['type'] = 'my-special-link-type'
+ my_netdata["links"][0]["type"] = "my-special-link-type"
ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
config_name2mac = {}
- for n in ncfg['config']:
- if n['type'] == 'physical':
- config_name2mac[n['name']] = n['mac_address']
-
- expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58',
- 'enp0s2': 'fa:16:3e:d4:57:ad'}
+ for n in ncfg["config"]:
+ if n["type"] == "physical":
+ config_name2mac[n["name"]] = n["mac_address"]
+
+ expected = {
+ "nic0": "fa:16:3e:05:30:fe",
+ "enp0s1": "fa:16:3e:69:b0:58",
+ "enp0s2": "fa:16:3e:d4:57:ad",
+ }
self.assertEqual(expected, config_name2mac)
# We should, however, warn the user that we don't recognise the type
- self.assertIn('Unknown network_data link type (my-special-link-type)',
- self.logs.getvalue())
+ self.assertIn(
+ "Unknown network_data link type (my-special-link-type)",
+ self.logs.getvalue(),
+ )
def cfg_ds_from_dir(base_d, files=None):
run = os.path.join(base_d, "run")
os.mkdir(run)
cfg_ds = ds.DataSourceConfigDrive(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': run}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": run})
+ )
cfg_ds.seed_dir = os.path.join(base_d, "seed")
if files:
populate_dir(cfg_ds.seed_dir, files)
cfg_ds.known_macs = KNOWN_MACS.copy()
if not cfg_ds.get_data():
- raise RuntimeError("Data source did not extract itself from"
- " seed directory %s" % cfg_ds.seed_dir)
+ raise RuntimeError(
+ "Data source did not extract itself from seed directory %s"
+ % cfg_ds.seed_dir
+ )
return cfg_ds
@@ -832,13 +1055,14 @@ def populate_ds_from_read_config(cfg_ds, source, results):
read_config_drive_dir hopefully in line with what it would have
if cfg_ds.get_data had been successfully called"""
cfg_ds.source = source
- cfg_ds.metadata = results.get('metadata')
- cfg_ds.ec2_metadata = results.get('ec2-metadata')
- cfg_ds.userdata_raw = results.get('userdata')
- cfg_ds.version = results.get('version')
- cfg_ds.network_json = results.get('networkdata')
+ cfg_ds.metadata = results.get("metadata")
+ cfg_ds.ec2_metadata = results.get("ec2-metadata")
+ cfg_ds.userdata_raw = results.get("userdata")
+ cfg_ds.version = results.get("version")
+ cfg_ds.network_json = results.get("networkdata")
cfg_ds._network_config = openstack.convert_net_json(
- cfg_ds.network_json, known_macs=KNOWN_MACS)
+ cfg_ds.network_json, known_macs=KNOWN_MACS
+ )
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py
index 351bf7ba..f3e6224e 100644
--- a/tests/unittests/sources/test_digitalocean.py
+++ b/tests/unittests/sources/test_digitalocean.py
@@ -8,19 +8,20 @@
import json
-from cloudinit import helpers
-from cloudinit import settings
+from cloudinit import helpers, settings
from cloudinit.sources import DataSourceDigitalOcean
from cloudinit.sources.helpers import digitalocean
+from tests.unittests.helpers import CiTestCase, mock
-from tests.unittests.helpers import mock, CiTestCase
-
-DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
- "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"]
+DO_MULTIPLE_KEYS = [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co",
+]
DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co"
# the following JSON was taken from droplet (that's why its a string)
-DO_META = json.loads("""
+DO_META = json.loads(
+ """
{
"droplet_id": "22532410",
"hostname": "utl-96268",
@@ -76,89 +77,94 @@ DO_META = json.loads("""
]
}
}
-""")
+"""
+)
# This has no private interface
DO_META_2 = {
"droplet_id": 27223699,
"hostname": "smtest1",
- "vendor_data": "\n".join([
- ('"Content-Type: multipart/mixed; '
- 'boundary=\"===============8645434374073493512==\"'),
- 'MIME-Version: 1.0',
- '',
- '--===============8645434374073493512==',
- 'MIME-Version: 1.0'
- 'Content-Type: text/cloud-config; charset="us-ascii"'
- 'Content-Transfer-Encoding: 7bit'
- 'Content-Disposition: attachment; filename="cloud-config"'
- '',
- '#cloud-config',
- 'disable_root: false',
- 'manage_etc_hosts: true',
- '',
- '',
- '--===============8645434374073493512=='
- ]),
- "public_keys": [
- "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"
- ],
+ "vendor_data": "\n".join(
+ [
+ '"Content-Type: multipart/mixed; '
+ 'boundary="===============8645434374073493512=="',
+ "MIME-Version: 1.0",
+ "",
+ "--===============8645434374073493512==",
+ "MIME-Version: 1.0"
+ 'Content-Type: text/cloud-config; charset="us-ascii"'
+ "Content-Transfer-Encoding: 7bit"
+ 'Content-Disposition: attachment; filename="cloud-config"'
+ "",
+ "#cloud-config",
+ "disable_root: false",
+ "manage_etc_hosts: true",
+ "",
+ "",
+ "--===============8645434374073493512==",
+ ]
+ ),
+ "public_keys": ["ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"],
"auth_key": "88888888888888888888888888888888",
"region": "nyc3",
"interfaces": {
- "public": [{
- "ipv4": {
- "ip_address": "45.55.249.133",
- "netmask": "255.255.192.0",
- "gateway": "45.55.192.1"
- },
- "anchor_ipv4": {
- "ip_address": "10.17.0.5",
- "netmask": "255.255.0.0",
- "gateway": "10.17.0.1"
- },
- "mac": "ae:cc:08:7c:88:00",
- "type": "public"
- }]
+ "public": [
+ {
+ "ipv4": {
+ "ip_address": "45.55.249.133",
+ "netmask": "255.255.192.0",
+ "gateway": "45.55.192.1",
+ },
+ "anchor_ipv4": {
+ "ip_address": "10.17.0.5",
+ "netmask": "255.255.0.0",
+ "gateway": "10.17.0.1",
+ },
+ "mac": "ae:cc:08:7c:88:00",
+ "type": "public",
+ }
+ ]
},
"floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}},
"dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
"tags": None,
}
-DO_META['public_keys'] = DO_SINGLE_KEY
+DO_META["public_keys"] = DO_SINGLE_KEY
-MD_URL = 'http://169.254.169.254/metadata/v1.json'
+MD_URL = "http://169.254.169.254/metadata/v1.json"
def _mock_dmi():
- return (True, DO_META.get('id'))
+ return (True, DO_META.get("id"))
class TestDataSourceDigitalOcean(CiTestCase):
"""
Test reading the meta-data
"""
+
def setUp(self):
super(TestDataSourceDigitalOcean, self).setUp()
self.tmp = self.tmp_dir()
def get_ds(self, get_sysinfo=_mock_dmi):
ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.use_ip4LL = False
if get_sysinfo is not None:
ds._get_sysinfo = get_sysinfo
return ds
- @mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo')
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_sysinfo")
def test_returns_false_not_on_docean(self, m_read_sysinfo):
m_read_sysinfo.return_value = (False, None)
ds = self.get_ds(get_sysinfo=None)
self.assertEqual(False, ds.get_data())
self.assertTrue(m_read_sysinfo.called)
- @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata")
def test_metadata(self, mock_readmd):
mock_readmd.return_value = DO_META.copy()
@@ -168,22 +174,23 @@ class TestDataSourceDigitalOcean(CiTestCase):
self.assertTrue(mock_readmd.called)
- self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw())
- self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw())
- self.assertEqual(DO_META.get('region'), ds.availability_zone)
- self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id())
- self.assertEqual(DO_META.get('hostname'), ds.get_hostname())
+ self.assertEqual(DO_META.get("user_data"), ds.get_userdata_raw())
+ self.assertEqual(DO_META.get("vendor_data"), ds.get_vendordata_raw())
+ self.assertEqual(DO_META.get("region"), ds.availability_zone)
+ self.assertEqual(DO_META.get("droplet_id"), ds.get_instance_id())
+ self.assertEqual(DO_META.get("hostname"), ds.get_hostname())
# Single key
- self.assertEqual([DO_META.get('public_keys')],
- ds.get_public_ssh_keys())
+ self.assertEqual(
+ [DO_META.get("public_keys")], ds.get_public_ssh_keys()
+ )
self.assertIsInstance(ds.get_public_ssh_keys(), list)
- @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata")
def test_multiple_ssh_keys(self, mock_readmd):
metadata = DO_META.copy()
- metadata['public_keys'] = DO_MULTIPLE_KEYS
+ metadata["public_keys"] = DO_MULTIPLE_KEYS
mock_readmd.return_value = metadata.copy()
ds = self.get_ds()
@@ -193,38 +200,39 @@ class TestDataSourceDigitalOcean(CiTestCase):
self.assertTrue(mock_readmd.called)
# Multiple keys
- self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys())
+ self.assertEqual(metadata["public_keys"], ds.get_public_ssh_keys())
self.assertIsInstance(ds.get_public_ssh_keys(), list)
class TestNetworkConvert(CiTestCase):
-
def _get_networking(self):
self.m_get_by_mac.return_value = {
- '04:01:57:d1:9e:01': 'ens1',
- '04:01:57:d1:9e:02': 'ens2',
- 'b8:ae:ed:75:5f:9a': 'enp0s25',
- 'ae:cc:08:7c:88:00': 'meta2p1'}
+ "04:01:57:d1:9e:01": "ens1",
+ "04:01:57:d1:9e:02": "ens2",
+ "b8:ae:ed:75:5f:9a": "enp0s25",
+ "ae:cc:08:7c:88:00": "meta2p1",
+ }
netcfg = digitalocean.convert_network_configuration(
- DO_META['interfaces'], DO_META['dns']['nameservers'])
- self.assertIn('config', netcfg)
+ DO_META["interfaces"], DO_META["dns"]["nameservers"]
+ )
+ self.assertIn("config", netcfg)
return netcfg
def setUp(self):
super(TestNetworkConvert, self).setUp()
- self.add_patch('cloudinit.net.get_interfaces_by_mac', 'm_get_by_mac')
+ self.add_patch("cloudinit.net.get_interfaces_by_mac", "m_get_by_mac")
def test_networking_defined(self):
netcfg = self._get_networking()
self.assertIsNotNone(netcfg)
dns_defined = False
- for part in netcfg.get('config'):
- n_type = part.get('type')
+ for part in netcfg.get("config"):
+ n_type = part.get("type")
print("testing part ", n_type, "\n", json.dumps(part, indent=3))
- if n_type == 'nameserver':
- n_address = part.get('address')
+ if n_type == "nameserver":
+ n_address = part.get("address")
self.assertIsNotNone(n_address)
self.assertEqual(len(n_address), 3)
@@ -234,9 +242,9 @@ class TestNetworkConvert(CiTestCase):
dns_defined = True
else:
- n_subnets = part.get('type')
- n_name = part.get('name')
- n_mac = part.get('mac_address')
+ n_subnets = part.get("type")
+ n_name = part.get("name")
+ n_mac = part.get("mac_address")
self.assertIsNotNone(n_type)
self.assertIsNotNone(n_subnets)
@@ -247,21 +255,21 @@ class TestNetworkConvert(CiTestCase):
def _get_nic_definition(self, int_type, expected_name):
"""helper function to return if_type (i.e. public) and the expected
- name used by cloud-init (i.e eth0)"""
+        name used by cloud-init (i.e. eth0)"""
netcfg = self._get_networking()
- meta_def = (DO_META.get('interfaces')).get(int_type)[0]
+ meta_def = (DO_META.get("interfaces")).get(int_type)[0]
- self.assertEqual(int_type, meta_def.get('type'))
+ self.assertEqual(int_type, meta_def.get("type"))
- for nic_def in netcfg.get('config'):
+ for nic_def in netcfg.get("config"):
print(nic_def)
- if nic_def.get('name') == expected_name:
+ if nic_def.get("name") == expected_name:
return nic_def, meta_def
def _get_match_subn(self, subnets, ip_addr):
"""get the matching subnet definition based on ip address"""
for subn in subnets:
- address = subn.get('address')
+ address = subn.get("address")
self.assertIsNotNone(address)
# equals won't work because of ipv6 addressing being in
@@ -274,99 +282,108 @@ class TestNetworkConvert(CiTestCase):
"""test to make sure the eth0 ipv4 and ipv6 gateways are defined"""
netcfg = self._get_networking()
gateways = []
- for nic_def in netcfg.get('config'):
- if nic_def.get('type') != 'physical':
+ for nic_def in netcfg.get("config"):
+ if nic_def.get("type") != "physical":
continue
- for subn in nic_def.get('subnets'):
- if 'gateway' in subn:
- gateways.append(subn.get('gateway'))
+ for subn in nic_def.get("subnets"):
+ if "gateway" in subn:
+ gateways.append(subn.get("gateway"))
# we should have two gateways, one ipv4 and ipv6
self.assertEqual(len(gateways), 2)
# make that the ipv6 gateway is there
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv4_def = meta_def.get('ipv4')
- self.assertIn(ipv4_def.get('gateway'), gateways)
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("ipv4")
+ self.assertIn(ipv4_def.get("gateway"), gateways)
# make sure the the ipv6 gateway is there
- ipv6_def = meta_def.get('ipv6')
- self.assertIn(ipv6_def.get('gateway'), gateways)
+ ipv6_def = meta_def.get("ipv6")
+ self.assertIn(ipv6_def.get("gateway"), gateways)
def test_public_interface_defined(self):
"""test that the public interface is defined as eth0"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- self.assertEqual('eth0', nic_def.get('name'))
- self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
- self.assertEqual('physical', nic_def.get('type'))
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ self.assertEqual("eth0", nic_def.get("name"))
+ self.assertEqual(meta_def.get("mac"), nic_def.get("mac_address"))
+ self.assertEqual("physical", nic_def.get("type"))
def test_private_interface_defined(self):
"""test that the private interface is defined as eth1"""
- (nic_def, meta_def) = self._get_nic_definition('private', 'eth1')
- self.assertEqual('eth1', nic_def.get('name'))
- self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
- self.assertEqual('physical', nic_def.get('type'))
+ (nic_def, meta_def) = self._get_nic_definition("private", "eth1")
+ self.assertEqual("eth1", nic_def.get("name"))
+ self.assertEqual(meta_def.get("mac"), nic_def.get("mac_address"))
+ self.assertEqual("physical", nic_def.get("type"))
def test_public_interface_ipv6(self):
"""test public ipv6 addressing"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv6_def = meta_def.get('ipv6')
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv6_def = meta_def.get("ipv6")
self.assertIsNotNone(ipv6_def)
- subn_def = self._get_match_subn(nic_def.get('subnets'),
- ipv6_def.get('ip_address'))
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv6_def.get("ip_address")
+ )
- cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'),
- ipv6_def.get('cidr'))
+ cidr_notated_address = "{0}/{1}".format(
+ ipv6_def.get("ip_address"), ipv6_def.get("cidr")
+ )
- self.assertEqual(cidr_notated_address, subn_def.get('address'))
- self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway'))
+ self.assertEqual(cidr_notated_address, subn_def.get("address"))
+ self.assertEqual(ipv6_def.get("gateway"), subn_def.get("gateway"))
def test_public_interface_ipv4(self):
"""test public ipv4 addressing"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv4_def = meta_def.get('ipv4')
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("ipv4")
self.assertIsNotNone(ipv4_def)
- subn_def = self._get_match_subn(nic_def.get('subnets'),
- ipv4_def.get('ip_address'))
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv4_def.get("ip_address")
+ )
- self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
- self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway'))
+ self.assertEqual(ipv4_def.get("netmask"), subn_def.get("netmask"))
+ self.assertEqual(ipv4_def.get("gateway"), subn_def.get("gateway"))
def test_public_interface_anchor_ipv4(self):
"""test public ipv4 addressing"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv4_def = meta_def.get('anchor_ipv4')
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("anchor_ipv4")
self.assertIsNotNone(ipv4_def)
- subn_def = self._get_match_subn(nic_def.get('subnets'),
- ipv4_def.get('ip_address'))
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv4_def.get("ip_address")
+ )
- self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
- self.assertNotIn('gateway', subn_def)
+ self.assertEqual(ipv4_def.get("netmask"), subn_def.get("netmask"))
+ self.assertNotIn("gateway", subn_def)
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_convert_without_private(self, m_get_by_mac):
m_get_by_mac.return_value = {
- 'b8:ae:ed:75:5f:9a': 'enp0s25',
- 'ae:cc:08:7c:88:00': 'meta2p1'}
+ "b8:ae:ed:75:5f:9a": "enp0s25",
+ "ae:cc:08:7c:88:00": "meta2p1",
+ }
netcfg = digitalocean.convert_network_configuration(
- DO_META_2['interfaces'], DO_META_2['dns']['nameservers'])
+ DO_META_2["interfaces"], DO_META_2["dns"]["nameservers"]
+ )
# print(netcfg)
byname = {}
- for i in netcfg['config']:
- if 'name' in i:
- if i['name'] in byname:
- raise ValueError("name '%s' in config twice: %s" %
- (i['name'], netcfg))
- byname[i['name']] = i
- self.assertTrue('eth0' in byname)
- self.assertTrue('subnets' in byname['eth0'])
- eth0 = byname['eth0']
+ for i in netcfg["config"]:
+ if "name" in i:
+ if i["name"] in byname:
+ raise ValueError(
+ "name '%s' in config twice: %s" % (i["name"], netcfg)
+ )
+ byname[i["name"]] = i
+ self.assertTrue("eth0" in byname)
+ self.assertTrue("subnets" in byname["eth0"])
+ eth0 = byname["eth0"]
self.assertEqual(
- sorted(['45.55.249.133', '10.17.0.5']),
- sorted([i['address'] for i in eth0['subnets']]))
+ sorted(["45.55.249.133", "10.17.0.5"]),
+ sorted([i["address"] for i in eth0["subnets"]]),
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py
index 19c2bbcd..b376660d 100644
--- a/tests/unittests/sources/test_ec2.py
+++ b/tests/unittests/sources/test_ec2.py
@@ -1,35 +1,37 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
-import httpretty
import json
-import requests
from unittest import mock
+import httpretty
+import requests
+
from cloudinit import helpers
from cloudinit.sources import DataSourceEc2 as ec2
from tests.unittests import helpers as test_helpers
-
DYNAMIC_METADATA = {
"instance-identity": {
- "document": json.dumps({
- "devpayProductCodes": None,
- "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"],
- "availabilityZone": "us-west-2b",
- "privateIp": "10.158.112.84",
- "version": "2017-09-30",
- "instanceId": "my-identity-id",
- "billingProducts": None,
- "instanceType": "t2.micro",
- "accountId": "123456789012",
- "imageId": "ami-5fb8c835",
- "pendingTime": "2016-11-19T16:32:11Z",
- "architecture": "x86_64",
- "kernelId": None,
- "ramdiskId": None,
- "region": "us-west-2"
- })
+ "document": json.dumps(
+ {
+ "devpayProductCodes": None,
+ "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"],
+ "availabilityZone": "us-west-2b",
+ "privateIp": "10.158.112.84",
+ "version": "2017-09-30",
+ "instanceId": "my-identity-id",
+ "billingProducts": None,
+ "instanceType": "t2.micro",
+ "accountId": "123456789012",
+ "imageId": "ami-5fb8c835",
+ "pendingTime": "2016-11-19T16:32:11Z",
+ "architecture": "x86_64",
+ "kernelId": None,
+ "ramdiskId": None,
+ "region": "us-west-2",
+ }
+ )
}
}
@@ -52,7 +54,7 @@ DEFAULT_METADATA = {
"local-hostname": "ip-172-3-3-15.us-east-2.compute.internal",
"local-ipv4": "172.3.3.15",
"mac": "06:17:04:d7:26:09",
- "metrics": {"vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"},
+ "metrics": {"vhostmd": '<?xml version="1.0" encoding="UTF-8"?>'},
"network": {
"interfaces": {
"macs": {
@@ -61,13 +63,15 @@ DEFAULT_METADATA = {
"interface-id": "eni-e44ef49e",
"ipv4-associations": {"13.59.77.202": "172.3.3.15"},
"ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc",
- "local-hostname": ("ip-172-3-3-15.us-east-2."
- "compute.internal"),
+ "local-hostname": (
+ "ip-172-3-3-15.us-east-2.compute.internal"
+ ),
"local-ipv4s": "172.3.3.15",
"mac": "06:17:04:d7:26:09",
"owner-id": "950047163771",
- "public-hostname": ("ec2-13-59-77-202.us-east-2."
- "compute.amazonaws.com"),
+ "public-hostname": (
+ "ec2-13-59-77-202.us-east-2.compute.amazonaws.com"
+ ),
"public-ipv4s": "13.59.77.202",
"security-group-ids": "sg-5a61d333",
"security-groups": "wide-open",
@@ -77,20 +81,22 @@ DEFAULT_METADATA = {
"vpc-id": "vpc-87e72bee",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
"vpc-ipv4-cidr-blocks": "172.31.0.0/16",
- "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56"
+ "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56",
},
"06:17:04:d7:26:08": {
- "device-number": "1", # Only IPv4 local config
+ "device-number": "1", # Only IPv4 local config
"interface-id": "eni-e44ef49f",
"ipv4-associations": {"": "172.3.3.16"},
"ipv6s": "", # No IPv6 config
- "local-hostname": ("ip-172-3-3-16.us-east-2."
- "compute.internal"),
+ "local-hostname": (
+ "ip-172-3-3-16.us-east-2.compute.internal"
+ ),
"local-ipv4s": "172.3.3.16",
"mac": "06:17:04:d7:26:08",
"owner-id": "950047163771",
- "public-hostname": ("ec2-172-3-3-16.us-east-2."
- "compute.amazonaws.com"),
+ "public-hostname": (
+ "ec2-172-3-3-16.us-east-2.compute.amazonaws.com"
+ ),
"public-ipv4s": "", # No public ipv4 config
"security-group-ids": "sg-5a61d333",
"security-groups": "wide-open",
@@ -100,8 +106,8 @@ DEFAULT_METADATA = {
"vpc-id": "vpc-87e72bee",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
"vpc-ipv4-cidr-blocks": "172.31.0.0/16",
- "vpc-ipv6-cidr-blocks": ""
- }
+ "vpc-ipv6-cidr-blocks": "",
+ },
}
}
},
@@ -123,24 +129,17 @@ DEFAULT_METADATA = {
NIC1_MD_IPV4_IPV6_MULTI_IP = {
"device-number": "0",
"interface-id": "eni-0d6335689899ce9cc",
- "ipv4-associations": {
- "18.218.219.181": "172.31.44.13"
- },
+ "ipv4-associations": {"18.218.219.181": "172.31.44.13"},
"ipv6s": [
"2600:1f16:292:100:c187:593c:4349:136",
"2600:1f16:292:100:f153:12a3:c37c:11f9",
- "2600:1f16:292:100:f152:2222:3333:4444"
- ],
- "local-hostname": ("ip-172-31-44-13.us-east-2."
- "compute.internal"),
- "local-ipv4s": [
- "172.31.44.13",
- "172.31.45.70"
+ "2600:1f16:292:100:f152:2222:3333:4444",
],
+ "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+ "local-ipv4s": ["172.31.44.13", "172.31.45.70"],
"mac": "0a:07:84:3d:6e:38",
"owner-id": "329910648901",
- "public-hostname": ("ec2-18-218-219-181.us-east-2."
- "compute.amazonaws.com"),
+ "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com",
"public-ipv4s": "18.218.219.181",
"security-group-ids": "sg-0c387755222ba8d2e",
"security-groups": "launch-wizard-4",
@@ -150,7 +149,7 @@ NIC1_MD_IPV4_IPV6_MULTI_IP = {
"vpc-id": "vpc-a07f62c8",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
"vpc-ipv4-cidr-blocks": "172.31.0.0/16",
- "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56"
+ "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56",
}
NIC2_MD = {
@@ -166,30 +165,22 @@ NIC2_MD = {
"subnet-ipv4-cidr-block": "172.31.32.0/20",
"vpc-id": "vpc-a07f62c8",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
- "vpc-ipv4-cidr-blocks": "172.31.0.0/16"
+ "vpc-ipv4-cidr-blocks": "172.31.0.0/16",
}
SECONDARY_IP_METADATA_2018_09_24 = {
"ami-id": "ami-0986c2ac728528ac2",
"ami-launch-index": "0",
"ami-manifest-path": "(unknown)",
- "block-device-mapping": {
- "ami": "/dev/sda1",
- "root": "/dev/sda1"
- },
- "events": {
- "maintenance": {
- "history": "[]",
- "scheduled": "[]"
- }
- },
+ "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"},
+ "events": {"maintenance": {"history": "[]", "scheduled": "[]"}},
"hostname": "ip-172-31-44-13.us-east-2.compute.internal",
"identity-credentials": {
"ec2": {
"info": {
"AccountId": "329910648901",
"Code": "Success",
- "LastUpdated": "2019-07-06T14:22:56Z"
+ "LastUpdated": "2019-07-06T14:22:56Z",
}
}
},
@@ -199,9 +190,7 @@ SECONDARY_IP_METADATA_2018_09_24 = {
"local-hostname": "ip-172-31-44-13.us-east-2.compute.internal",
"local-ipv4": "172.31.44.13",
"mac": "0a:07:84:3d:6e:38",
- "metrics": {
- "vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
- },
+ "metrics": {"vhostmd": '<?xml version="1.0" encoding="UTF-8"?>'},
"network": {
"interfaces": {
"macs": {
@@ -209,27 +198,17 @@ SECONDARY_IP_METADATA_2018_09_24 = {
}
}
},
- "placement": {
- "availability-zone": "us-east-2c"
- },
+ "placement": {"availability-zone": "us-east-2c"},
"profile": "default-hvm",
- "public-hostname": (
- "ec2-18-218-219-181.us-east-2.compute.amazonaws.com"),
+ "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com",
"public-ipv4": "18.218.219.181",
- "public-keys": {
- "yourkeyname,e": [
- "ssh-rsa AAAAW...DZ yourkeyname"
- ]
- },
+ "public-keys": {"yourkeyname,e": ["ssh-rsa AAAAW...DZ yourkeyname"]},
"reservation-id": "r-09b4917135cdd33be",
"security-groups": "launch-wizard-4",
- "services": {
- "domain": "amazonaws.com",
- "partition": "aws"
- }
+ "services": {"domain": "amazonaws.com", "partition": "aws"},
}
-M_PATH_NET = 'cloudinit.sources.DataSourceEc2.net.'
+M_PATH_NET = "cloudinit.sources.DataSourceEc2.net."
def _register_ssh_keys(rfunc, base_url, keys_data):
@@ -250,9 +229,9 @@ def _register_ssh_keys(rfunc, base_url, keys_data):
"""
base_url = base_url.rstrip("/")
- odd_index = '\n'.join(
- ["{0}={1}".format(n, name)
- for n, name in enumerate(sorted(keys_data))])
+ odd_index = "\n".join(
+ ["{0}={1}".format(n, name) for n, name in enumerate(sorted(keys_data))]
+ )
rfunc(base_url, odd_index)
rfunc(base_url + "/", odd_index)
@@ -260,7 +239,7 @@ def _register_ssh_keys(rfunc, base_url, keys_data):
for n, name in enumerate(sorted(keys_data)):
val = keys_data[name]
if isinstance(val, list):
- val = '\n'.join(val)
+ val = "\n".join(val)
burl = base_url + "/%s" % n
rfunc(burl, "openssh-key")
rfunc(burl + "/", "openssh-key")
@@ -281,6 +260,7 @@ def register_mock_metaserver(base_url, data):
base_url/mac with 00:16:3e:00:00:00
In the index, references to lists or dictionaries have a trailing /.
"""
+
def register_helper(register, base_url, body):
if not isinstance(base_url, str):
register(base_url, body)
@@ -289,25 +269,24 @@ def register_mock_metaserver(base_url, data):
if isinstance(body, str):
register(base_url, body)
elif isinstance(body, list):
- register(base_url, '\n'.join(body) + '\n')
- register(base_url + '/', '\n'.join(body) + '\n')
+ register(base_url, "\n".join(body) + "\n")
+ register(base_url + "/", "\n".join(body) + "\n")
elif isinstance(body, dict):
vals = []
for k, v in body.items():
- if k == 'public-keys':
- _register_ssh_keys(
- register, base_url + '/public-keys/', v)
+ if k == "public-keys":
+ _register_ssh_keys(register, base_url + "/public-keys/", v)
continue
suffix = k.rstrip("/")
if not isinstance(v, (str, list)):
suffix += "/"
vals.append(suffix)
- url = base_url + '/' + suffix
+ url = base_url + "/" + suffix
register_helper(register, url, v)
- register(base_url, '\n'.join(vals) + '\n')
- register(base_url + '/', '\n'.join(vals) + '\n')
+ register(base_url, "\n".join(vals) + "\n")
+ register(base_url + "/", "\n".join(vals) + "\n")
elif body is None:
- register(base_url, 'not found', status=404)
+ register(base_url, "not found", status=404)
def myreg(*argc, **kwargs):
url = argc[0]
@@ -322,9 +301,9 @@ class TestEc2(test_helpers.HttprettyTestCase):
maxDiff = None
valid_platform_data = {
- 'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412',
- 'uuid_source': 'dmi',
- 'serial': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412',
+ "uuid": "ec212f79-87d1-2f1d-588f-d86dc0fd5412",
+ "uuid_source": "dmi",
+ "serial": "ec212f79-87d1-2f1d-588f-d86dc0fd5412",
}
def setUp(self):
@@ -333,9 +312,9 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.metadata_addr = self.datasource.metadata_urls[0]
self.tmp = self.tmp_dir()
- def data_url(self, version, data_item='meta-data'):
+ def data_url(self, version, data_item="meta-data"):
"""Return a metadata url based on the version provided."""
- return '/'.join([self.metadata_addr, version, data_item])
+ return "/".join([self.metadata_addr, version, data_item])
def _patch_add_cleanup(self, mpath, *args, **kwargs):
p = mock.patch(mpath, *args, **kwargs)
@@ -345,7 +324,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
def _setup_ds(self, sys_cfg, platform_data, md, md_version=None):
self.uris = []
distro = {}
- paths = helpers.Paths({'run_dir': self.tmp})
+ paths = helpers.Paths({"run_dir": self.tmp})
if sys_cfg is None:
sys_cfg = {}
ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths)
@@ -354,32 +333,39 @@ class TestEc2(test_helpers.HttprettyTestCase):
if platform_data is not None:
self._patch_add_cleanup(
"cloudinit.sources.DataSourceEc2._collect_platform_data",
- return_value=platform_data)
+ return_value=platform_data,
+ )
if md:
- all_versions = (
- [ds.min_metadata_version] + ds.extended_metadata_versions)
- token_url = self.data_url('latest', data_item='api/token')
- register_mock_metaserver(token_url, 'API-TOKEN')
+ all_versions = [
+ ds.min_metadata_version
+ ] + ds.extended_metadata_versions
+ token_url = self.data_url("latest", data_item="api/token")
+ register_mock_metaserver(token_url, "API-TOKEN")
for version in all_versions:
- metadata_url = self.data_url(version) + '/'
+ metadata_url = self.data_url(version) + "/"
if version == md_version:
# Register all metadata for desired version
register_mock_metaserver(
- metadata_url, md.get('md', DEFAULT_METADATA))
+ metadata_url, md.get("md", DEFAULT_METADATA)
+ )
userdata_url = self.data_url(
- version, data_item='user-data')
- register_mock_metaserver(userdata_url, md.get('ud', ''))
+ version, data_item="user-data"
+ )
+ register_mock_metaserver(userdata_url, md.get("ud", ""))
identity_url = self.data_url(
- version, data_item='dynamic/instance-identity')
+ version, data_item="dynamic/instance-identity"
+ )
register_mock_metaserver(
- identity_url, md.get('id', DYNAMIC_METADATA))
+ identity_url, md.get("id", DYNAMIC_METADATA)
+ )
else:
- instance_id_url = metadata_url + 'instance-id'
+ instance_id_url = metadata_url + "instance-id"
if version == ds.min_metadata_version:
# Add min_metadata_version service availability check
register_mock_metaserver(
- instance_id_url, DEFAULT_METADATA['instance-id'])
+ instance_id_url, DEFAULT_METADATA["instance-id"]
+ )
else:
# Register 404s for all unrequested extended versions
register_mock_metaserver(instance_id_url, None)
@@ -389,24 +375,33 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""network_config property returns network version 2 for metadata"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
- find_fallback_path = M_PATH_NET + 'find_fallback_nic'
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
with mock.patch(find_fallback_path) as m_find_fallback:
- m_find_fallback.return_value = 'eth9'
+ m_find_fallback.return_value = "eth9"
ds.get_data()
- mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': '06:17:04:d7:26:09'}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
- get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
+ mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": "06:17:04:d7:26:09"},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
- m_find_fallback.return_value = 'eth9'
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
@@ -418,24 +413,33 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
- find_fallback_path = M_PATH_NET + 'find_fallback_nic'
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
with mock.patch(find_fallback_path) as m_find_fallback:
- m_find_fallback.return_value = 'eth9'
+ m_find_fallback.return_value = "eth9"
ds.get_data()
- mac1 = '06:17:04:d7:26:08' # IPv4 only in DEFAULT_METADATA
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': mac1.lower()}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
- get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
+ mac1 = "06:17:04:d7:26:08" # IPv4 only in DEFAULT_METADATA
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1.lower()},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
- m_find_fallback.return_value = 'eth9'
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
@@ -447,27 +451,38 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': SECONDARY_IP_METADATA_2018_09_24})
- find_fallback_path = M_PATH_NET + 'find_fallback_nic'
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": SECONDARY_IP_METADATA_2018_09_24},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
with mock.patch(find_fallback_path) as m_find_fallback:
- m_find_fallback.return_value = 'eth9'
+ m_find_fallback.return_value = "eth9"
ds.get_data()
- mac1 = '0a:07:84:3d:6e:38' # 1 secondary IPv4 and 2 secondary IPv6
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': mac1}, 'set-name': 'eth9',
- 'addresses': ['172.31.45.70/20',
- '2600:1f16:292:100:f152:2222:3333:4444/128',
- '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
- 'dhcp4': True, 'dhcp6': True}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
- get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
+ mac1 = "0a:07:84:3d:6e:38" # 1 secondary IPv4 and 2 secondary IPv6
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1},
+ "set-name": "eth9",
+ "addresses": [
+ "172.31.45.70/20",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
- m_find_fallback.return_value = 'eth9'
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
@@ -475,12 +490,13 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""network_config property is cached in DataSourceEc2."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
- ds._network_config = {'cached': 'data'}
- self.assertEqual({'cached': 'data'}, ds.network_config)
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ ds._network_config = {"cached": "data"}
+ self.assertEqual({"cached": "data"}, ds.network_config)
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp):
"""Refresh the network_config Ec2 cache if network key is absent.
@@ -488,28 +504,39 @@ class TestEc2(test_helpers.HttprettyTestCase):
which lacked newly required network key.
"""
old_metadata = copy.deepcopy(DEFAULT_METADATA)
- old_metadata.pop('network')
+ old_metadata.pop("network")
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': old_metadata})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": old_metadata},
+ )
self.assertTrue(ds.get_data())
# Provide new revision of metadata that contains network data
register_mock_metaserver(
- 'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA)
- mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA
- get_interface_mac_path = M_PATH_NET + 'get_interfaces_by_mac'
- ds.fallback_nic = 'eth9'
+ "http://169.254.169.254/2009-04-04/meta-data/", DEFAULT_METADATA
+ )
+ mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA
+ get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac"
+ ds.fallback_nic = "eth9"
with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
nc = ds.network_config # Will re-crawl network metadata
self.assertIsNotNone(nc)
self.assertIn(
- 'Refreshing stale metadata from prior to upgrade',
- self.logs.getvalue())
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ "Refreshing stale metadata from prior to upgrade",
+ self.logs.getvalue(),
+ )
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(expected, ds.network_config)
def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self):
@@ -522,40 +549,46 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
# Mock 404s on all versions except latest
- all_versions = (
- [ds.min_metadata_version] + ds.extended_metadata_versions)
+ all_versions = [
+ ds.min_metadata_version
+ ] + ds.extended_metadata_versions
for ver in all_versions[:-1]:
register_mock_metaserver(
- 'http://169.254.169.254/{0}/meta-data/instance-id'.format(ver),
- None)
- ds.metadata_address = 'http://169.254.169.254'
+ "http://169.254.169.254/{0}/meta-data/instance-id".format(ver),
+ None,
+ )
+ ds.metadata_address = "http://169.254.169.254"
register_mock_metaserver(
- '{0}/{1}/meta-data/'.format(ds.metadata_address, all_versions[-1]),
- DEFAULT_METADATA)
+ "{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]),
+ DEFAULT_METADATA,
+ )
# Register dynamic/instance-identity document which we now read.
register_mock_metaserver(
- '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]),
- DYNAMIC_METADATA)
+ "{0}/{1}/dynamic/".format(ds.metadata_address, all_versions[-1]),
+ DYNAMIC_METADATA,
+ )
ds._cloud_name = ec2.CloudNames.AWS
# Setup cached metadata on the Datasource
ds.metadata = DEFAULT_METADATA
- self.assertEqual('my-identity-id', ds.get_instance_id())
+ self.assertEqual("my-identity-id", ds.get_instance_id())
def test_classic_instance_true(self):
"""If no vpc-id in metadata, is_classic_instance must return true."""
md_copy = copy.deepcopy(DEFAULT_METADATA)
- ifaces_md = md_copy.get('network', {}).get('interfaces', {})
- for _mac, mac_data in ifaces_md.get('macs', {}).items():
- if 'vpc-id' in mac_data:
- del mac_data['vpc-id']
+ ifaces_md = md_copy.get("network", {}).get("interfaces", {})
+ for _mac, mac_data in ifaces_md.get("macs", {}).items():
+ if "vpc-id" in mac_data:
+ del mac_data["vpc-id"]
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': md_copy})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": md_copy},
+ )
self.assertTrue(ds.get_data())
self.assertTrue(ds.is_classic_instance())
@@ -563,8 +596,9 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""If vpc-id in metadata, is_classic_instance must return false."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
self.assertTrue(ds.get_data())
self.assertFalse(ds.is_classic_instance())
@@ -572,108 +606,117 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""Inaccessibility of http://169.254.169.254 are retried."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md=None)
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md=None,
+ )
conn_error = requests.exceptions.ConnectionError(
- '[Errno 113] no route to host'
+ "[Errno 113] no route to host"
)
- mock_success = mock.MagicMock(contents=b'fakesuccess')
+ mock_success = mock.MagicMock(contents=b"fakesuccess")
mock_success.ok.return_value = True
- with mock.patch('cloudinit.url_helper.readurl') as m_readurl:
+ with mock.patch("cloudinit.url_helper.readurl") as m_readurl:
m_readurl.side_effect = (conn_error, conn_error, mock_success)
- with mock.patch('cloudinit.url_helper.time.sleep'):
+ with mock.patch("cloudinit.url_helper.time.sleep"):
self.assertTrue(ds.wait_for_metadata_service())
# Just one /latest/api/token request
self.assertEqual(3, len(m_readurl.call_args_list))
for readurl_call in m_readurl.call_args_list:
- self.assertIn('latest/api/token', readurl_call[0][0])
+ self.assertIn("latest/api/token", readurl_call[0][0])
def test_aws_token_403_fails_without_retries(self):
"""Verify that 403s fetching AWS tokens are not retried."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md=None)
- token_url = self.data_url('latest', data_item='api/token')
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md=None,
+ )
+ token_url = self.data_url("latest", data_item="api/token")
httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403)
self.assertFalse(ds.get_data())
# Just one /latest/api/token request
logs = self.logs.getvalue()
failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0'
expected_logs = [
- 'WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is'
- ' disabled. Aborting.',
+ "WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is"
+ " disabled. Aborting.",
"WARNING: IMDS's HTTP endpoint is probably disabled",
- failed_put_log
+ failed_put_log,
]
for log in expected_logs:
self.assertIn(log, logs)
self.assertEqual(
1,
- len([line for line in logs.splitlines() if failed_put_log in line])
+ len(
+ [line for line in logs.splitlines() if failed_put_log in line]
+ ),
)
def test_aws_token_redacted(self):
"""Verify that aws tokens are redacted when logged."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
self.assertTrue(ds.get_data())
all_logs = self.logs.getvalue().splitlines()
REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'"
REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'"
logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log]
logs_with_redacted = [log for log in all_logs if REDACT_TOK in log]
- logs_with_token = [log for log in all_logs if 'API-TOKEN' in log]
+ logs_with_token = [log for log in all_logs if "API-TOKEN" in log]
self.assertEqual(1, len(logs_with_redacted_ttl))
self.assertEqual(81, len(logs_with_redacted))
self.assertEqual(0, len(logs_with_token))
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_valid_platform_with_strict_true(self, m_dhcp):
"""Valid platform data should return true with strict_id true."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
self.assertEqual(0, m_dhcp.call_count)
- self.assertEqual('aws', ds.cloud_name)
- self.assertEqual('ec2', ds.platform_type)
- self.assertEqual('metadata (%s)' % ds.metadata_address, ds.subplatform)
+ self.assertEqual("aws", ds.cloud_name)
+ self.assertEqual("ec2", ds.platform_type)
+ self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform)
def test_valid_platform_with_strict_false(self):
"""Valid platform data should return true with strict_id false."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
def test_unknown_platform_with_strict_true(self):
"""Unknown platform data with strict_id true should return False."""
- uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a'
+ uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
ds = self._setup_ds(
- platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''},
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
+ platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""},
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertFalse(ret)
def test_unknown_platform_with_strict_false(self):
"""Unknown platform data with strict_id false should return True."""
- uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a'
+ uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
ds = self._setup_ds(
- platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''},
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""},
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
@@ -682,24 +725,28 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
platform_attrs = [
- attr for attr in ec2.CloudNames.__dict__.keys()
- if not attr.startswith('__')]
+ attr
+ for attr in ec2.CloudNames.__dict__.keys()
+ if not attr.startswith("__")
+ ]
for attr_name in platform_attrs:
platform_name = getattr(ec2.CloudNames, attr_name)
- if platform_name != 'aws':
+ if platform_name != "aws":
ds._cloud_name = platform_name
ret = ds.get_data()
- self.assertEqual('ec2', ds.platform_type)
+ self.assertEqual("ec2", ds.platform_type)
self.assertFalse(ret)
message = (
"Local Ec2 mode only supported on ('aws',),"
- ' not {0}'.format(platform_name))
+ " not {0}".format(platform_name)
+ )
self.assertIn(message, self.logs.getvalue())
- @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD')
+ @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd):
"""DataSourceEc2Local returns False on BSD.
@@ -709,20 +756,23 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertFalse(ret)
self.assertIn(
"FreeBSD doesn't support running dhclient with -sf",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- @mock.patch('cloudinit.net.find_fallback_nic')
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD')
- def test_ec2_local_performs_dhcp_on_non_bsd(self, m_is_bsd, m_dhcp,
- m_fallback_nic, m_net):
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
+ def test_ec2_local_performs_dhcp_on_non_bsd(
+ self, m_is_bsd, m_dhcp, m_fallback_nic, m_net
+ ):
"""Ec2Local returns True for valid platform data on non-BSD with dhcp.
DataSourceEc2Local will setup initial IPv4 network via dhcp discovery.
@@ -730,31 +780,41 @@ class TestEc2(test_helpers.HttprettyTestCase):
When the platform data is valid, return True.
"""
- m_fallback_nic.return_value = 'eth9'
+ m_fallback_nic.return_value = "eth9"
m_is_bsd.return_value = False
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'broadcast-address': '192.168.2.255'}]
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.2.255",
+ }
+ ]
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
- m_dhcp.assert_called_once_with('eth9', None)
+ m_dhcp.assert_called_once_with("eth9", None)
m_net.assert_called_once_with(
- broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
- prefix_or_mask='255.255.255.0', router='192.168.2.1',
- static_routes=None)
- self.assertIn('Crawl of metadata service took', self.logs.getvalue())
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
+ self.assertIn("Crawl of metadata service took", self.logs.getvalue())
class TestGetSecondaryAddresses(test_helpers.CiTestCase):
- mac = '06:17:04:d7:26:ff'
+ mac = "06:17:04:d7:26:ff"
with_logs = True
def test_md_with_no_secondary_addresses(self):
@@ -764,26 +824,34 @@ class TestGetSecondaryAddresses(test_helpers.CiTestCase):
def test_md_with_secondary_v4_and_v6_addresses(self):
"""All secondary addresses are returned from nic metadata"""
self.assertEqual(
- ['172.31.45.70/20', '2600:1f16:292:100:f152:2222:3333:4444/128',
- '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
- ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac))
+ [
+ "172.31.45.70/20",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac),
+ )
def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self):
"""Any invalid subnet-ipv(4|6)-cidr-block values use defaults"""
invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP)
- invalid_cidr_md['subnet-ipv4-cidr-block'] = "something-unexpected"
- invalid_cidr_md['subnet-ipv6-cidr-block'] = "not/sure/what/this/is"
+ invalid_cidr_md["subnet-ipv4-cidr-block"] = "something-unexpected"
+ invalid_cidr_md["subnet-ipv6-cidr-block"] = "not/sure/what/this/is"
self.assertEqual(
- ['172.31.45.70/24', '2600:1f16:292:100:f152:2222:3333:4444/128',
- '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
- ec2.get_secondary_addresses(invalid_cidr_md, self.mac))
+ [
+ "172.31.45.70/24",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ ec2.get_secondary_addresses(invalid_cidr_md, self.mac),
+ )
expected_logs = [
"WARNING: Could not parse subnet-ipv4-cidr-block"
" something-unexpected for mac 06:17:04:d7:26:ff."
" ipv4 network config prefix defaults to /24",
"WARNING: Could not parse subnet-ipv6-cidr-block"
" not/sure/what/this/is for mac 06:17:04:d7:26:ff."
- " ipv6 network config prefix defaults to /128"
+ " ipv6 network config prefix defaults to /128",
]
logs = self.logs.getvalue()
for log in expected_logs:
@@ -791,188 +859,267 @@ class TestGetSecondaryAddresses(test_helpers.CiTestCase):
class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
-
def setUp(self):
super(TestConvertEc2MetadataNetworkConfig, self).setUp()
- self.mac1 = '06:17:04:d7:26:09'
+ self.mac1 = "06:17:04:d7:26:09"
interface_dict = copy.deepcopy(
- DEFAULT_METADATA['network']['interfaces']['macs'][self.mac1])
+ DEFAULT_METADATA["network"]["interfaces"]["macs"][self.mac1]
+ )
# These tests are written assuming the base interface doesn't have IPv6
- interface_dict.pop('ipv6s')
+ interface_dict.pop("ipv6s")
self.network_metadata = {
- 'interfaces': {'macs': {self.mac1: interface_dict}}}
+ "interfaces": {"macs": {self.mac1: interface_dict}}
+ }
def test_convert_ec2_metadata_network_config_skips_absent_macs(self):
"""Any mac absent from metadata is skipped by network config."""
- macs_to_nics = {self.mac1: 'eth9', 'DE:AD:BE:EF:FF:FF': 'vitualnic2'}
+ macs_to_nics = {self.mac1: "eth9", "DE:AD:BE:EF:FF:FF": "vitualnic2"}
# DE:AD:BE:EF:FF:FF represented by OS but not in metadata
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- self.network_metadata, macs_to_nics))
+ self.network_metadata, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self):
"""Config dhcp6 when ipv6s is in metadata for a mac."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_ipv6['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- nic1_metadata.pop('public-ipv4s')
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s")
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_ipv6, macs_to_nics))
+ network_metadata_ipv6, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_local_only_dhcp4(self):
"""Config dhcp4 when there are no public addresses in public-ipv4s."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_ipv6['interfaces']['macs'][self.mac1])
- nic1_metadata['local-ipv4s'] = '172.3.3.15'
- nic1_metadata.pop('public-ipv4s')
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["local-ipv4s"] = "172.3.3.15"
+ nic1_metadata.pop("public-ipv4s")
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_ipv6, macs_to_nics))
+ network_metadata_ipv6, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self):
"""Config dhcp4 on fallback_nic when there are no ipv4 addresses."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_ipv6['interfaces']['macs'][self.mac1])
- nic1_metadata['public-ipv4s'] = ''
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["public-ipv4s"] = ""
# When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config.
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_ipv6, macs_to_nics, fallback_nic='eth9'))
+ network_metadata_ipv6, macs_to_nics, fallback_nic="eth9"
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self):
"""When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_both = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_both['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- nic1_metadata.pop('public-ipv4s')
- nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s")
+ nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_both, macs_to_nics))
+ network_metadata_both, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_multiple_nics(self):
"""DHCP route-metric increases on secondary NICs for IPv4 and IPv6."""
- mac2 = '06:17:04:d7:26:08'
- macs_to_nics = {self.mac1: 'eth9', mac2: 'eth10'}
+ mac2 = "06:17:04:d7:26:08"
+ macs_to_nics = {self.mac1: "eth9", mac2: "eth10"}
network_metadata_both = copy.deepcopy(self.network_metadata)
# Add 2nd nic info
- network_metadata_both['interfaces']['macs'][mac2] = NIC2_MD
- nic1_metadata = (
- network_metadata_both['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- nic1_metadata.pop('public-ipv4s') # No public-ipv4 IPs in cfg
- nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc
- expected = {'version': 2, 'ethernets': {
- 'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True, 'dhcp6-overrides': {'route-metric': 100}},
- 'eth10': {
- 'match': {'macaddress': mac2}, 'set-name': 'eth10',
- 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200},
- 'dhcp6': False}}}
+ network_metadata_both["interfaces"]["macs"][mac2] = NIC2_MD
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg
+ nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ },
+ "eth10": {
+ "match": {"macaddress": mac2},
+ "set-name": "eth10",
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ "dhcp6": False,
+ },
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_both, macs_to_nics))
+ network_metadata_both, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self):
"""Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_both = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_both['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_both, macs_to_nics))
+ network_metadata_both, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self):
"""Convert Ec2 Metadata calls get_interfaces_by_mac by default."""
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1},
- 'set-name': 'eth9', 'dhcp4': True, 'dhcp6': False}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
- m_get_interfaces_by_mac.return_value = {self.mac1: 'eth9'}
+ m_get_interfaces_by_mac.return_value = {self.mac1: "eth9"}
self.assertEqual(
expected,
- ec2.convert_ec2_metadata_network_config(self.network_metadata))
+ ec2.convert_ec2_metadata_network_config(self.network_metadata),
+ )
class TesIdentifyPlatform(test_helpers.CiTestCase):
-
def collmock(self, **kwargs):
"""return non-special _collect_platform_data updated with changes."""
unspecial = {
- 'asset_tag': '3857-0037-2746-7462-1818-3997-77',
- 'serial': 'H23-C4J3JV-R6',
- 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2',
- 'uuid_source': 'dmi',
- 'vendor': 'tothecloud',
+ "asset_tag": "3857-0037-2746-7462-1818-3997-77",
+ "serial": "H23-C4J3JV-R6",
+ "uuid": "81c7e555-6471-4833-9551-1ab366c4cfd2",
+ "uuid_source": "dmi",
+ "vendor": "tothecloud",
}
unspecial.update(**kwargs)
return unspecial
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_zstack(self, m_collect):
- """zstack should be identified if chassis-asset-tag ends in .zstack.io
+ """zstack should be identified if chassis-asset-tag
+ ends in .zstack.io
"""
- m_collect.return_value = self.collmock(asset_tag='123456.zstack.io')
+ m_collect.return_value = self.collmock(asset_tag="123456.zstack.io")
self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform())
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_zstack_full_domain_only(self, m_collect):
- """zstack asset-tag matching should match only on full domain boundary.
+ """zstack asset-tag matching should match only on
+ full domain boundary.
"""
- m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io')
+ m_collect.return_value = self.collmock(asset_tag="123456.buzzstack.io")
self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_e24cloud(self, m_collect):
"""e24cloud identified if vendor is e24cloud"""
- m_collect.return_value = self.collmock(vendor='e24cloud')
+ m_collect.return_value = self.collmock(vendor="e24cloud")
self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform())
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_e24cloud_negative(self, m_collect):
"""e24cloud identified if vendor is e24cloud"""
- m_collect.return_value = self.collmock(vendor='e24cloudyday')
+ m_collect.return_value = self.collmock(vendor="e24cloudyday")
self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_exoscale.py b/tests/unittests/sources/test_exoscale.py
index b0ffb7a5..591256d8 100644
--- a/tests/unittests/sources/test_exoscale.py
+++ b/tests/unittests/sources/test_exoscale.py
@@ -2,36 +2,33 @@
# Author: Christopher Glass <christopher.glass@exoscale.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import helpers
+import os
+
+import httpretty
+import requests
+
+from cloudinit import helpers, util
from cloudinit.sources.DataSourceExoscale import (
API_VERSION,
- DataSourceExoscale,
METADATA_URL,
- get_password,
PASSWORD_SERVER_PORT,
- read_metadata)
+ DataSourceExoscale,
+ get_password,
+ read_metadata,
+)
from tests.unittests.helpers import HttprettyTestCase, mock
-from cloudinit import util
-import httpretty
-import os
-import requests
+TEST_PASSWORD_URL = "{}:{}/{}/".format(
+ METADATA_URL, PASSWORD_SERVER_PORT, API_VERSION
+)
+TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, API_VERSION)
-TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL,
- PASSWORD_SERVER_PORT,
- API_VERSION)
-
-TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL,
- API_VERSION)
-
-TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL,
- API_VERSION)
+TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, API_VERSION)
@httpretty.activate
class TestDatasourceExoscale(HttprettyTestCase):
-
def setUp(self):
super(TestDatasourceExoscale, self).setUp()
self.tmp = self.tmp_dir()
@@ -42,37 +39,35 @@ class TestDatasourceExoscale(HttprettyTestCase):
def test_password_saved(self):
"""The password is not set when it is not found
in the metadata service."""
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body="saved_password")
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body="saved_password"
+ )
self.assertFalse(get_password())
def test_password_empty(self):
"""No password is set if the metadata service returns
an empty string."""
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body="")
+ httpretty.register_uri(httpretty.GET, self.password_url, body="")
self.assertFalse(get_password())
def test_password(self):
"""The password is set to what is found in the metadata
service."""
expected_password = "p@ssw0rd"
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_password)
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_password
+ )
password = get_password()
self.assertEqual(expected_password, password)
def test_activate_removes_set_passwords_semaphore(self):
"""Allow set_passwords to run every boot by removing the semaphore."""
- path = helpers.Paths({'cloud_dir': self.tmp})
- sem_dir = self.tmp_path('instance/sem', dir=self.tmp)
+ path = helpers.Paths({"cloud_dir": self.tmp})
+ sem_dir = self.tmp_path("instance/sem", dir=self.tmp)
util.ensure_dir(sem_dir)
- sem_file = os.path.join(sem_dir, 'config_set_passwords')
- with open(sem_file, 'w') as stream:
- stream.write('')
+ sem_file = os.path.join(sem_dir, "config_set_passwords")
+ with open(sem_file, "w") as stream:
+ stream.write("")
ds = DataSourceExoscale({}, None, path)
ds.activate(None, None)
self.assertFalse(os.path.exists(sem_file))
@@ -80,102 +75,130 @@ class TestDatasourceExoscale(HttprettyTestCase):
def test_get_data(self):
"""The datasource conforms to expected behavior when supplied
full test data."""
- path = helpers.Paths({'run_dir': self.tmp})
+ path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
ds._is_platform_viable = lambda: True
expected_password = "p@ssw0rd"
expected_id = "12345"
expected_hostname = "myname"
expected_userdata = "#cloud-config"
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_password)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_password
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
self.assertTrue(ds._get_data())
self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
- self.assertEqual(ds.metadata, {"instance-id": expected_id,
- "local-hostname": expected_hostname})
- self.assertEqual(ds.get_config_obj(),
- {'ssh_pwauth': True,
- 'password': expected_password,
- 'chpasswd': {
- 'expire': False,
- }})
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
+ self.assertEqual(
+ ds.get_config_obj(),
+ {
+ "ssh_pwauth": True,
+ "password": expected_password,
+ "chpasswd": {
+ "expire": False,
+ },
+ },
+ )
def test_get_data_saved_password(self):
"""The datasource conforms to expected behavior when saved_password is
returned by the password server."""
- path = helpers.Paths({'run_dir': self.tmp})
+ path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
ds._is_platform_viable = lambda: True
expected_answer = "saved_password"
expected_id = "12345"
expected_hostname = "myname"
expected_userdata = "#cloud-config"
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_answer)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_answer
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
self.assertTrue(ds._get_data())
self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
- self.assertEqual(ds.metadata, {"instance-id": expected_id,
- "local-hostname": expected_hostname})
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
self.assertEqual(ds.get_config_obj(), {})
def test_get_data_no_password(self):
"""The datasource conforms to expected behavior when no password is
returned by the password server."""
- path = helpers.Paths({'run_dir': self.tmp})
+ path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
ds._is_platform_viable = lambda: True
expected_answer = ""
expected_id = "12345"
expected_hostname = "myname"
expected_userdata = "#cloud-config"
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_answer)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_answer
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
self.assertTrue(ds._get_data())
self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
- self.assertEqual(ds.metadata, {"instance-id": expected_id,
- "local-hostname": expected_hostname})
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
self.assertEqual(ds.get_config_obj(), {})
- @mock.patch('cloudinit.sources.DataSourceExoscale.get_password')
+ @mock.patch("cloudinit.sources.DataSourceExoscale.get_password")
def test_read_metadata_when_password_server_unreachable(self, m_password):
"""The read_metadata function returns partial results in case the
password server (only) is unreachable."""
@@ -183,29 +206,36 @@ class TestDatasourceExoscale(HttprettyTestCase):
expected_hostname = "myname"
expected_userdata = "#cloud-config"
- m_password.side_effect = requests.Timeout('Fake Connection Timeout')
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
+ m_password.side_effect = requests.Timeout("Fake Connection Timeout")
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
result = read_metadata()
self.assertIsNone(result.get("password"))
- self.assertEqual(result.get("user-data").decode("utf-8"),
- expected_userdata)
+ self.assertEqual(
+ result.get("user-data").decode("utf-8"), expected_userdata
+ )
def test_non_viable_platform(self):
"""The datasource fails fast when the platform is not viable."""
- path = helpers.Paths({'run_dir': self.tmp})
+ path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
ds._is_platform_viable = lambda: False
self.assertFalse(ds._get_data())
diff --git a/tests/unittests/sources/test_gce.py b/tests/unittests/sources/test_gce.py
index dc768e99..e030931b 100644
--- a/tests/unittests/sources/test_gce.py
+++ b/tests/unittests/sources/test_gce.py
@@ -5,58 +5,57 @@
# This file is part of cloud-init. See LICENSE file for license information.
import datetime
-import httpretty
import json
import re
+from base64 import b64decode, b64encode
from unittest import mock
from urllib.parse import urlparse
-from base64 import b64encode, b64decode
+import httpretty
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import settings
+from cloudinit import distros, helpers, settings
from cloudinit.sources import DataSourceGCE
-
from tests.unittests import helpers as test_helpers
-
GCE_META = {
- 'instance/id': '123',
- 'instance/zone': 'foo/bar',
- 'instance/hostname': 'server.project-foo.local',
+ "instance/id": "123",
+ "instance/zone": "foo/bar",
+ "instance/hostname": "server.project-foo.local",
}
GCE_META_PARTIAL = {
- 'instance/id': '1234',
- 'instance/hostname': 'server.project-bar.local',
- 'instance/zone': 'bar/baz',
+ "instance/id": "1234",
+ "instance/hostname": "server.project-bar.local",
+ "instance/zone": "bar/baz",
}
GCE_META_ENCODING = {
- 'instance/id': '12345',
- 'instance/hostname': 'server.project-baz.local',
- 'instance/zone': 'baz/bang',
- 'instance/attributes': {
- 'user-data': b64encode(b'#!/bin/echo baz\n').decode('utf-8'),
- 'user-data-encoding': 'base64',
- }
+ "instance/id": "12345",
+ "instance/hostname": "server.project-baz.local",
+ "instance/zone": "baz/bang",
+ "instance/attributes": {
+ "user-data": b64encode(b"#!/bin/echo baz\n").decode("utf-8"),
+ "user-data-encoding": "base64",
+ },
}
GCE_USER_DATA_TEXT = {
- 'instance/id': '12345',
- 'instance/hostname': 'server.project-baz.local',
- 'instance/zone': 'baz/bang',
- 'instance/attributes': {
- 'user-data': '#!/bin/sh\necho hi mom\ntouch /run/up-now\n',
- }
+ "instance/id": "12345",
+ "instance/hostname": "server.project-baz.local",
+ "instance/zone": "baz/bang",
+ "instance/attributes": {
+ "user-data": "#!/bin/sh\necho hi mom\ntouch /run/up-now\n",
+ },
}
-HEADERS = {'Metadata-Flavor': 'Google'}
+HEADERS = {"Metadata-Flavor": "Google"}
MD_URL_RE = re.compile(
- r'http://metadata.google.internal/computeMetadata/v1/.*')
-GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
- 'v1/instance/guest-attributes/hostkeys/')
+ r"http://metadata.google.internal/computeMetadata/v1/.*"
+)
+GUEST_ATTRIBUTES_URL = (
+ "http://metadata.google.internal/computeMetadata/"
+ "v1/instance/guest-attributes/hostkeys/"
+)
def _set_mock_metadata(gce_meta=None):
@@ -65,10 +64,10 @@ def _set_mock_metadata(gce_meta=None):
def _request_callback(method, uri, headers):
url_path = urlparse(uri).path
- if url_path.startswith('/computeMetadata/v1/'):
- path = url_path.split('/computeMetadata/v1/')[1:][0]
- recursive = path.endswith('/')
- path = path.rstrip('/')
+ if url_path.startswith("/computeMetadata/v1/"):
+ path = url_path.split("/computeMetadata/v1/")[1:][0]
+ recursive = path.endswith("/")
+ path = path.rstrip("/")
else:
path = None
if path in gce_meta:
@@ -77,7 +76,7 @@ def _set_mock_metadata(gce_meta=None):
response = json.dumps(response)
return (200, headers, response)
else:
- return (404, headers, '')
+ return (404, headers, "")
# reset is needed. https://github.com/gabrielfalcao/HTTPretty/issues/316
httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback)
@@ -85,28 +84,28 @@ def _set_mock_metadata(gce_meta=None):
@httpretty.activate
class TestDataSourceGCE(test_helpers.HttprettyTestCase):
-
def _make_distro(self, dtype, def_user=None):
cfg = dict(settings.CFG_BUILTIN)
- cfg['system_info']['distro'] = dtype
- paths = helpers.Paths(cfg['system_info']['paths'])
+ cfg["system_info"]["distro"] = dtype
+ paths = helpers.Paths(cfg["system_info"]["paths"])
distro_cls = distros.fetch(dtype)
if def_user:
- cfg['system_info']['default_user'] = def_user.copy()
- distro = distro_cls(dtype, cfg['system_info'], paths)
+ cfg["system_info"]["default_user"] = def_user.copy()
+ distro = distro_cls(dtype, cfg["system_info"], paths)
return distro
def setUp(self):
tmp = self.tmp_dir()
self.ds = DataSourceGCE.DataSourceGCE(
- settings.CFG_BUILTIN, None,
- helpers.Paths({'run_dir': tmp}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp})
+ )
ppatch = self.m_platform_reports_gce = mock.patch(
- 'cloudinit.sources.DataSourceGCE.platform_reports_gce')
+ "cloudinit.sources.DataSourceGCE.platform_reports_gce"
+ )
self.m_platform_reports_gce = ppatch.start()
self.m_platform_reports_gce.return_value = True
self.addCleanup(ppatch.stop)
- self.add_patch('time.sleep', 'm_sleep') # just to speed up tests
+ self.add_patch("time.sleep", "m_sleep") # just to speed up tests
super(TestDataSourceGCE, self).setUp()
def test_connection(self):
@@ -121,30 +120,33 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
def test_metadata(self):
# UnicodeDecodeError if set to ds.userdata instead of userdata_raw
meta = GCE_META.copy()
- meta['instance/attributes/user-data'] = b'/bin/echo \xff\n'
+ meta["instance/attributes/user-data"] = b"/bin/echo \xff\n"
_set_mock_metadata()
self.ds.get_data()
- shostname = GCE_META.get('instance/hostname').split('.')[0]
- self.assertEqual(shostname,
- self.ds.get_hostname())
+ shostname = GCE_META.get("instance/hostname").split(".")[0]
+ self.assertEqual(shostname, self.ds.get_hostname())
- self.assertEqual(GCE_META.get('instance/id'),
- self.ds.get_instance_id())
+ self.assertEqual(
+ GCE_META.get("instance/id"), self.ds.get_instance_id()
+ )
- self.assertEqual(GCE_META.get('instance/attributes/user-data'),
- self.ds.get_userdata_raw())
+ self.assertEqual(
+ GCE_META.get("instance/attributes/user-data"),
+ self.ds.get_userdata_raw(),
+ )
# test partial metadata (missing user-data in particular)
def test_metadata_partial(self):
_set_mock_metadata(GCE_META_PARTIAL)
self.ds.get_data()
- self.assertEqual(GCE_META_PARTIAL.get('instance/id'),
- self.ds.get_instance_id())
+ self.assertEqual(
+ GCE_META_PARTIAL.get("instance/id"), self.ds.get_instance_id()
+ )
- shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0]
+ shostname = GCE_META_PARTIAL.get("instance/hostname").split(".")[0]
self.assertEqual(shostname, self.ds.get_hostname())
def test_userdata_no_encoding(self):
@@ -152,21 +154,25 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
_set_mock_metadata(GCE_USER_DATA_TEXT)
self.ds.get_data()
self.assertEqual(
- GCE_USER_DATA_TEXT['instance/attributes']['user-data'].encode(),
- self.ds.get_userdata_raw())
+ GCE_USER_DATA_TEXT["instance/attributes"]["user-data"].encode(),
+ self.ds.get_userdata_raw(),
+ )
def test_metadata_encoding(self):
"""user-data is base64 encoded if user-data-encoding is 'base64'."""
_set_mock_metadata(GCE_META_ENCODING)
self.ds.get_data()
- instance_data = GCE_META_ENCODING.get('instance/attributes')
- decoded = b64decode(instance_data.get('user-data'))
+ instance_data = GCE_META_ENCODING.get("instance/attributes")
+ decoded = b64decode(instance_data.get("user-data"))
self.assertEqual(decoded, self.ds.get_userdata_raw())
def test_missing_required_keys_return_false(self):
- for required_key in ['instance/id', 'instance/zone',
- 'instance/hostname']:
+ for required_key in [
+ "instance/id",
+ "instance/zone",
+ "instance/hostname",
+ ]:
meta = GCE_META_PARTIAL.copy()
del meta[required_key]
_set_mock_metadata(meta)
@@ -179,29 +185,35 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.assertEqual([], self.ds.get_public_ssh_keys())
def test_cloudinit_ssh_keys(self):
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
project_attributes = {
- 'sshKeys': '\n'.join([
- 'cloudinit:{0}'.format(valid_key.format(0)),
- 'user:{0}'.format(invalid_key.format(0)),
- ]),
- 'ssh-keys': '\n'.join([
- 'cloudinit:{0}'.format(valid_key.format(1)),
- 'user:{0}'.format(invalid_key.format(1)),
- ]),
+ "sshKeys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(0)),
+ "user:{0}".format(invalid_key.format(0)),
+ ]
+ ),
+ "ssh-keys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(1)),
+ "user:{0}".format(invalid_key.format(1)),
+ ]
+ ),
}
instance_attributes = {
- 'ssh-keys': '\n'.join([
- 'cloudinit:{0}'.format(valid_key.format(2)),
- 'user:{0}'.format(invalid_key.format(2)),
- ]),
- 'block-project-ssh-keys': 'False',
+ "ssh-keys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(2)),
+ "user:{0}".format(invalid_key.format(2)),
+ ]
+ ),
+ "block-project-ssh-keys": "False",
}
meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
_set_mock_metadata(meta)
self.ds.get_data()
@@ -212,34 +224,42 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
@mock.patch("cloudinit.sources.DataSourceGCE.ug_util")
def test_default_user_ssh_keys(self, mock_ug_util):
mock_ug_util.normalize_users_groups.return_value = None, None
- mock_ug_util.extract_default.return_value = 'ubuntu', None
+ mock_ug_util.extract_default.return_value = "ubuntu", None
ubuntu_ds = DataSourceGCE.DataSourceGCE(
- settings.CFG_BUILTIN, self._make_distro('ubuntu'),
- helpers.Paths({'run_dir': self.tmp_dir()}))
+ settings.CFG_BUILTIN,
+ self._make_distro("ubuntu"),
+ helpers.Paths({"run_dir": self.tmp_dir()}),
+ )
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
project_attributes = {
- 'sshKeys': '\n'.join([
- 'ubuntu:{0}'.format(valid_key.format(0)),
- 'user:{0}'.format(invalid_key.format(0)),
- ]),
- 'ssh-keys': '\n'.join([
- 'ubuntu:{0}'.format(valid_key.format(1)),
- 'user:{0}'.format(invalid_key.format(1)),
- ]),
+ "sshKeys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(0)),
+ "user:{0}".format(invalid_key.format(0)),
+ ]
+ ),
+ "ssh-keys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(1)),
+ "user:{0}".format(invalid_key.format(1)),
+ ]
+ ),
}
instance_attributes = {
- 'ssh-keys': '\n'.join([
- 'ubuntu:{0}'.format(valid_key.format(2)),
- 'user:{0}'.format(invalid_key.format(2)),
- ]),
- 'block-project-ssh-keys': 'False',
+ "ssh-keys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(2)),
+ "user:{0}".format(invalid_key.format(2)),
+ ]
+ ),
+ "block-project-ssh-keys": "False",
}
meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
_set_mock_metadata(meta)
ubuntu_ds.get_data()
@@ -248,21 +268,21 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys()))
def test_instance_ssh_keys_override(self):
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
project_attributes = {
- 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)),
- 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)),
+ "sshKeys": "cloudinit:{0}".format(invalid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(invalid_key.format(1)),
}
instance_attributes = {
- 'sshKeys': 'cloudinit:{0}'.format(valid_key.format(0)),
- 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(1)),
- 'block-project-ssh-keys': 'False',
+ "sshKeys": "cloudinit:{0}".format(valid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(valid_key.format(1)),
+ "block-project-ssh-keys": "False",
}
meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
_set_mock_metadata(meta)
self.ds.get_data()
@@ -271,20 +291,20 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
def test_block_project_ssh_keys_override(self):
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
project_attributes = {
- 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)),
- 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)),
+ "sshKeys": "cloudinit:{0}".format(invalid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(invalid_key.format(1)),
}
instance_attributes = {
- 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(0)),
- 'block-project-ssh-keys': 'True',
+ "ssh-keys": "cloudinit:{0}".format(valid_key.format(0)),
+ "block-project-ssh-keys": "True",
}
meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
_set_mock_metadata(meta)
self.ds.get_data()
@@ -296,7 +316,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
_set_mock_metadata()
r = self.ds.get_data()
self.assertEqual(True, r)
- self.assertEqual('bar', self.ds.availability_zone)
+ self.assertEqual("bar", self.ds.availability_zone)
@mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher")
def test_get_data_returns_false_if_not_on_gce(self, m_fetcher):
@@ -306,9 +326,8 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
m_fetcher.assert_not_called()
def test_has_expired(self):
-
def _get_timestamp(days):
- format_str = '%Y-%m-%dT%H:%M:%S+0000'
+ format_str = "%Y-%m-%dT%H:%M:%S+0000"
today = datetime.datetime.now()
timestamp = today + datetime.timedelta(days=days)
return timestamp.strftime(format_str)
@@ -317,12 +336,12 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
future = _get_timestamp(1)
ssh_keys = {
None: False,
- '': False,
- 'Invalid': False,
- 'user:ssh-rsa key user@domain.com': False,
+ "": False,
+ "Invalid": False,
+ "user:ssh-rsa key user@domain.com": False,
'user:ssh-rsa key google {"expireOn":"%s"}' % past: False,
- 'user:ssh-rsa key google-ssh': False,
- 'user:ssh-rsa key google-ssh {invalid:json}': False,
+ "user:ssh-rsa key google-ssh": False,
+ "user:ssh-rsa key google-ssh {invalid:json}": False,
'user:ssh-rsa key google-ssh {"userName":"user"}': False,
'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False,
'user:xyz key google-ssh {"expireOn":"%s"}' % future: False,
@@ -334,28 +353,36 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
def test_parse_public_keys_non_ascii(self):
public_key_data = [
- 'cloudinit:rsa ssh-ke%s invalid' % chr(165),
- 'use%sname:rsa ssh-key' % chr(174),
- 'cloudinit:test 1',
- 'default:test 2',
- 'user:test 3',
+ "cloudinit:rsa ssh-ke%s invalid" % chr(165),
+ "use%sname:rsa ssh-key" % chr(174),
+ "cloudinit:test 1",
+ "default:test 2",
+ "user:test 3",
]
- expected = ['test 1', 'test 2']
+ expected = ["test 1", "test 2"]
found = DataSourceGCE._parse_public_keys(
- public_key_data, default_user='default')
+ public_key_data, default_user="default"
+ )
self.assertEqual(sorted(found), sorted(expected))
@mock.patch("cloudinit.url_helper.readurl")
def test_publish_host_keys(self, m_readurl):
- hostkeys = [('ssh-rsa', 'asdfasdf'),
- ('ssh-ed25519', 'qwerqwer')]
+ hostkeys = [("ssh-rsa", "asdfasdf"), ("ssh-ed25519", "qwerqwer")]
readurl_expected_calls = [
- mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS,
- request_method='PUT',
- url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')),
- mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS,
- request_method='PUT',
- url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')),
+ mock.call(
+ check_status=False,
+ data=b"asdfasdf",
+ headers=HEADERS,
+ request_method="PUT",
+ url="%s%s" % (GUEST_ATTRIBUTES_URL, "ssh-rsa"),
+ ),
+ mock.call(
+ check_status=False,
+ data=b"qwerqwer",
+ headers=HEADERS,
+ request_method="PUT",
+ url="%s%s" % (GUEST_ATTRIBUTES_URL, "ssh-ed25519"),
+ ),
]
self.ds.publish_host_keys(hostkeys)
m_readurl.assert_has_calls(readurl_expected_calls, any_order=True)
@@ -385,4 +412,5 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
ds._get_data()
assert m_dhcp.call_count == 0
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_hetzner.py b/tests/unittests/sources/test_hetzner.py
index 5af0f3db..9e70de34 100644
--- a/tests/unittests/sources/test_hetzner.py
+++ b/tests/unittests/sources/test_hetzner.py
@@ -4,16 +4,17 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.sources import DataSourceHetzner
-import cloudinit.sources.helpers.hetzner as hc_helper
-from cloudinit import util, settings, helpers
-
-from tests.unittests.helpers import mock, CiTestCase
-
import base64
+
import pytest
-METADATA = util.load_yaml("""
+import cloudinit.sources.helpers.hetzner as hc_helper
+from cloudinit import helpers, settings, util
+from cloudinit.sources import DataSourceHetzner
+from tests.unittests.helpers import CiTestCase, mock
+
+METADATA = util.load_yaml(
+ """
hostname: cloudinit-test
instance-id: 123456
local-ipv4: ''
@@ -52,7 +53,8 @@ public-keys:
AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh \
test-key@workstation
vendor_data: "test"
-""")
+"""
+)
USERDATA = b"""#cloud-config
runcmd:
@@ -64,55 +66,59 @@ class TestDataSourceHetzner(CiTestCase):
"""
Test reading the meta-data
"""
+
def setUp(self):
super(TestDataSourceHetzner, self).setUp()
self.tmp = self.tmp_dir()
def get_ds(self):
ds = DataSourceHetzner.DataSourceHetzner(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
return ds
- @mock.patch('cloudinit.net.EphemeralIPv4Network')
- @mock.patch('cloudinit.net.find_fallback_nic')
- @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata')
- @mock.patch('cloudinit.sources.helpers.hetzner.read_userdata')
- @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data')
- def test_read_data(self, m_get_hcloud_data, m_usermd, m_readmd,
- m_fallback_nic, m_net):
- m_get_hcloud_data.return_value = (True,
- str(METADATA.get('instance-id')))
+ @mock.patch("cloudinit.net.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata")
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_userdata")
+ @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data")
+ def test_read_data(
+ self, m_get_hcloud_data, m_usermd, m_readmd, m_fallback_nic, m_net
+ ):
+ m_get_hcloud_data.return_value = (
+ True,
+ str(METADATA.get("instance-id")),
+ )
m_readmd.return_value = METADATA.copy()
m_usermd.return_value = USERDATA
- m_fallback_nic.return_value = 'eth0'
+ m_fallback_nic.return_value = "eth0"
ds = self.get_ds()
ret = ds.get_data()
self.assertTrue(ret)
m_net.assert_called_once_with(
- 'eth0', '169.254.0.1',
- 16, '169.254.255.255'
+ "eth0", "169.254.0.1", 16, "169.254.255.255"
)
self.assertTrue(m_readmd.called)
- self.assertEqual(METADATA.get('hostname'), ds.get_hostname())
+ self.assertEqual(METADATA.get("hostname"), ds.get_hostname())
- self.assertEqual(METADATA.get('public-keys'),
- ds.get_public_ssh_keys())
+ self.assertEqual(METADATA.get("public-keys"), ds.get_public_ssh_keys())
self.assertIsInstance(ds.get_public_ssh_keys(), list)
self.assertEqual(ds.get_userdata_raw(), USERDATA)
- self.assertEqual(ds.get_vendordata_raw(), METADATA.get('vendor_data'))
-
- @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata')
- @mock.patch('cloudinit.net.find_fallback_nic')
- @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data')
- def test_not_on_hetzner_returns_false(self, m_get_hcloud_data,
- m_find_fallback, m_read_md):
+ self.assertEqual(ds.get_vendordata_raw(), METADATA.get("vendor_data"))
+
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data")
+ def test_not_on_hetzner_returns_false(
+ self, m_get_hcloud_data, m_find_fallback, m_read_md
+ ):
"""If helper 'get_hcloud_data' returns False,
- return False from get_data."""
+ return False from get_data."""
m_get_hcloud_data.return_value = (False, None)
ds = self.get_ds()
ret = ds.get_data()
@@ -132,11 +138,14 @@ class TestMaybeB64Decode:
with pytest.raises(TypeError):
hc_helper.maybe_b64decode(invalid_input)
- @pytest.mark.parametrize("in_data,expected", [
- # If data is not b64 encoded, then return value should be the same.
- (b"this is my data", b"this is my data"),
- # If data is b64 encoded, then return value should be decoded.
- (base64.b64encode(b"data"), b"data"),
- ])
+ @pytest.mark.parametrize(
+ "in_data,expected",
+ [
+ # If data is not b64 encoded, then return value should be the same.
+ (b"this is my data", b"this is my data"),
+ # If data is b64 encoded, then return value should be decoded.
+ (base64.b64encode(b"data"), b"data"),
+ ],
+ )
def test_happy_path(self, in_data, expected):
assert expected == hc_helper.maybe_b64decode(in_data)
diff --git a/tests/unittests/sources/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py
index 38e8e892..17a8be64 100644
--- a/tests/unittests/sources/test_ibmcloud.py
+++ b/tests/unittests/sources/test_ibmcloud.py
@@ -1,15 +1,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.helpers import Paths
-from cloudinit.sources import DataSourceIBMCloud as ibm
-from tests.unittests import helpers as test_helpers
-from cloudinit import util
-
import base64
import copy
import json
from textwrap import dedent
+from cloudinit import util
+from cloudinit.helpers import Paths
+from cloudinit.sources import DataSourceIBMCloud as ibm
+from tests.unittests import helpers as test_helpers
+
mock = test_helpers.mock
D_PATH = "cloudinit.sources.DataSourceIBMCloud."
@@ -23,24 +23,36 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
blkid_base = {
"/dev/xvda1": {
- "DEVNAME": "/dev/xvda1", "LABEL": "cloudimg-bootfs",
- "TYPE": "ext3"},
+ "DEVNAME": "/dev/xvda1",
+ "LABEL": "cloudimg-bootfs",
+ "TYPE": "ext3",
+ },
"/dev/xvda2": {
- "DEVNAME": "/dev/xvda2", "LABEL": "cloudimg-rootfs",
- "TYPE": "ext4"},
+ "DEVNAME": "/dev/xvda2",
+ "LABEL": "cloudimg-rootfs",
+ "TYPE": "ext4",
+ },
}
blkid_metadata_disk = {
"/dev/xvdh1": {
- "DEVNAME": "/dev/xvdh1", "LABEL": "METADATA", "TYPE": "vfat",
- "SEC_TYPE": "msdos", "UUID": "681B-8C5D",
- "PARTUUID": "3d631e09-01"},
+ "DEVNAME": "/dev/xvdh1",
+ "LABEL": "METADATA",
+ "TYPE": "vfat",
+ "SEC_TYPE": "msdos",
+ "UUID": "681B-8C5D",
+ "PARTUUID": "3d631e09-01",
+ },
}
blkid_oscode_disk = {
"/dev/xvdh": {
- "DEVNAME": "/dev/xvdh", "LABEL": "config-2", "TYPE": "vfat",
- "SEC_TYPE": "msdos", "UUID": ibm.IBM_CONFIG_UUID}
+ "DEVNAME": "/dev/xvdh",
+ "LABEL": "config-2",
+ "TYPE": "vfat",
+ "SEC_TYPE": "msdos",
+ "UUID": ibm.IBM_CONFIG_UUID,
+ }
}
def setUp(self):
@@ -56,7 +68,8 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
m_is_prov.return_value = False
self.assertEqual(
(ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"),
- ibm.get_ibm_platform())
+ ibm.get_ibm_platform(),
+ )
def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen):
"""identify TEMPLATE_PROVISIONING_METADATA."""
@@ -64,7 +77,8 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
m_is_prov.return_value = True
self.assertEqual(
(ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"),
- ibm.get_ibm_platform())
+ ibm.get_ibm_platform(),
+ )
def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen):
"""identify TEMPLATE_PROVISIONING_NODATA."""
@@ -72,14 +86,16 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
m_is_prov.return_value = True
self.assertEqual(
(ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None),
- ibm.get_ibm_platform())
+ ibm.get_ibm_platform(),
+ )
def test_id_os_code(self, m_blkid, m_is_prov, _m_xen):
"""Identify OS_CODE."""
m_blkid.return_value = self.blkid_oscode
m_is_prov.return_value = False
- self.assertEqual((ibm.Platforms.OS_CODE, "/dev/xvdh"),
- ibm.get_ibm_platform())
+ self.assertEqual(
+ (ibm.Platforms.OS_CODE, "/dev/xvdh"), ibm.get_ibm_platform()
+ )
def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen):
"""Test against false positive on openstack with non-ibm UUID."""
@@ -116,7 +132,8 @@ class TestReadMD(test_helpers.CiTestCase):
"public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"},
}
- content_interfaces = dedent("""\
+ content_interfaces = dedent(
+ """\
auto lo
iface lo inet loopback
@@ -125,71 +142,107 @@ class TestReadMD(test_helpers.CiTestCase):
iface eth0 inet static
address 10.82.43.5
netmask 255.255.255.192
- """)
+ """
+ )
userdata = b"#!/bin/sh\necho hi mom\n"
# meta.js file gets json encoded userdata as a list.
meta_js = '["#!/bin/sh\necho hi mom\n"]'
vendor_data = {
- "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"}
+ "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"
+ }
network_data = {
"links": [
- {"id": "interface_29402281", "name": "eth0", "mtu": None,
- "type": "phy", "ethernet_mac_address": "06:00:f1:bd:da:25"},
- {"id": "interface_29402279", "name": "eth1", "mtu": None,
- "type": "phy", "ethernet_mac_address": "06:98:5e:d0:7f:86"}
+ {
+ "id": "interface_29402281",
+ "name": "eth0",
+ "mtu": None,
+ "type": "phy",
+ "ethernet_mac_address": "06:00:f1:bd:da:25",
+ },
+ {
+ "id": "interface_29402279",
+ "name": "eth1",
+ "mtu": None,
+ "type": "phy",
+ "ethernet_mac_address": "06:98:5e:d0:7f:86",
+ },
],
"networks": [
- {"id": "network_109887563", "link": "interface_29402281",
- "type": "ipv4", "ip_address": "10.82.43.2",
- "netmask": "255.255.255.192",
- "routes": [
- {"network": "10.0.0.0", "netmask": "255.0.0.0",
- "gateway": "10.82.43.1"},
- {"network": "161.26.0.0", "netmask": "255.255.0.0",
- "gateway": "10.82.43.1"}]},
- {"id": "network_109887551", "link": "interface_29402279",
- "type": "ipv4", "ip_address": "108.168.194.252",
- "netmask": "255.255.255.248",
- "routes": [
- {"network": "0.0.0.0", "netmask": "0.0.0.0",
- "gateway": "108.168.194.249"}]}
+ {
+ "id": "network_109887563",
+ "link": "interface_29402281",
+ "type": "ipv4",
+ "ip_address": "10.82.43.2",
+ "netmask": "255.255.255.192",
+ "routes": [
+ {
+ "network": "10.0.0.0",
+ "netmask": "255.0.0.0",
+ "gateway": "10.82.43.1",
+ },
+ {
+ "network": "161.26.0.0",
+ "netmask": "255.255.0.0",
+ "gateway": "10.82.43.1",
+ },
+ ],
+ },
+ {
+ "id": "network_109887551",
+ "link": "interface_29402279",
+ "type": "ipv4",
+ "ip_address": "108.168.194.252",
+ "netmask": "255.255.255.248",
+ "routes": [
+ {
+ "network": "0.0.0.0",
+ "netmask": "0.0.0.0",
+ "gateway": "108.168.194.249",
+ }
+ ],
+ },
],
"services": [
{"type": "dns", "address": "10.0.80.11"},
- {"type": "dns", "address": "10.0.80.12"}
+ {"type": "dns", "address": "10.0.80.12"},
],
}
- sysuuid = '7f79ebf5-d791-43c3-a723-854e8389d59f'
+ sysuuid = "7f79ebf5-d791-43c3-a723-854e8389d59f"
def _get_expected_metadata(self, os_md):
"""return expected 'metadata' for data loaded from meta_data.json."""
os_md = copy.deepcopy(os_md)
renames = (
- ('hostname', 'local-hostname'),
- ('uuid', 'instance-id'),
- ('public_keys', 'public-keys'))
+ ("hostname", "local-hostname"),
+ ("uuid", "instance-id"),
+ ("public_keys", "public-keys"),
+ )
ret = {}
for osname, mdname in renames:
if osname in os_md:
ret[mdname] = os_md[osname]
- if 'random_seed' in os_md:
- ret['random_seed'] = base64.b64decode(os_md['random_seed'])
+ if "random_seed" in os_md:
+ ret["random_seed"] = base64.b64decode(os_md["random_seed"])
return ret
def test_provisioning_md(self, m_platform, m_sysuuid):
"""Provisioning env with a metadata disk should return None."""
m_platform.return_value = (
- ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh")
+ ibm.Platforms.TEMPLATE_PROVISIONING_METADATA,
+ "/dev/xvdh",
+ )
self.assertIsNone(ibm.read_md())
def test_provisioning_no_metadata(self, m_platform, m_sysuuid):
"""Provisioning env with no metadata disk should return None."""
m_platform.return_value = (
- ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None)
+ ibm.Platforms.TEMPLATE_PROVISIONING_NODATA,
+ None,
+ )
self.assertIsNone(ibm.read_md())
def test_provisioning_not_ibm(self, m_platform, m_sysuuid):
@@ -201,62 +254,83 @@ class TestReadMD(test_helpers.CiTestCase):
"""Template live environment should be identified."""
tmpdir = self.tmp_dir()
m_platform.return_value = (
- ibm.Platforms.TEMPLATE_LIVE_METADATA, tmpdir)
+ ibm.Platforms.TEMPLATE_LIVE_METADATA,
+ tmpdir,
+ )
m_sysuuid.return_value = self.sysuuid
- test_helpers.populate_dir(tmpdir, {
- 'openstack/latest/meta_data.json': json.dumps(self.template_md),
- 'openstack/latest/user_data': self.userdata,
- 'openstack/content/interfaces': self.content_interfaces,
- 'meta.js': self.meta_js})
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(
+ self.template_md
+ ),
+ "openstack/latest/user_data": self.userdata,
+ "openstack/content/interfaces": self.content_interfaces,
+ "meta.js": self.meta_js,
+ },
+ )
ret = ibm.read_md()
- self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA,
- ret['platform'])
- self.assertEqual(tmpdir, ret['source'])
- self.assertEqual(self.userdata, ret['userdata'])
- self.assertEqual(self._get_expected_metadata(self.template_md),
- ret['metadata'])
- self.assertEqual(self.sysuuid, ret['system-uuid'])
+ self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertEqual(self.userdata, ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.template_md), ret["metadata"]
+ )
+ self.assertEqual(self.sysuuid, ret["system-uuid"])
def test_os_code_live(self, m_platform, m_sysuuid):
"""Verify an os_code metadata path."""
tmpdir = self.tmp_dir()
m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir)
netdata = json.dumps(self.network_data)
- test_helpers.populate_dir(tmpdir, {
- 'openstack/latest/meta_data.json': json.dumps(self.oscode_md),
- 'openstack/latest/user_data': self.userdata,
- 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data),
- 'openstack/latest/network_data.json': netdata,
- })
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(self.oscode_md),
+ "openstack/latest/user_data": self.userdata,
+ "openstack/latest/vendor_data.json": json.dumps(
+ self.vendor_data
+ ),
+ "openstack/latest/network_data.json": netdata,
+ },
+ )
ret = ibm.read_md()
- self.assertEqual(ibm.Platforms.OS_CODE, ret['platform'])
- self.assertEqual(tmpdir, ret['source'])
- self.assertEqual(self.userdata, ret['userdata'])
- self.assertEqual(self._get_expected_metadata(self.oscode_md),
- ret['metadata'])
+ self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertEqual(self.userdata, ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.oscode_md), ret["metadata"]
+ )
def test_os_code_live_no_userdata(self, m_platform, m_sysuuid):
"""Verify os_code without user-data."""
tmpdir = self.tmp_dir()
m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir)
- test_helpers.populate_dir(tmpdir, {
- 'openstack/latest/meta_data.json': json.dumps(self.oscode_md),
- 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data),
- })
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(self.oscode_md),
+ "openstack/latest/vendor_data.json": json.dumps(
+ self.vendor_data
+ ),
+ },
+ )
ret = ibm.read_md()
- self.assertEqual(ibm.Platforms.OS_CODE, ret['platform'])
- self.assertEqual(tmpdir, ret['source'])
- self.assertIsNone(ret['userdata'])
- self.assertEqual(self._get_expected_metadata(self.oscode_md),
- ret['metadata'])
+ self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertIsNone(ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.oscode_md), ret["metadata"]
+ )
class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
"""Test the _is_ibm_provisioning method."""
+
inst_log = "/root/swinstall.log"
prov_cfg = "/root/provisioningConfiguration.cfg"
boot_ref = "/proc/1/environ"
@@ -279,9 +353,11 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
def test_config_with_old_log(self):
"""A config with a log from previous boot is not provisioning."""
rootd = self.tmp_dir()
- data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
- self.inst_log: ("log data\n", -30),
- self.boot_ref: ("PWD=/", 0)}
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", -30),
+ self.boot_ref: ("PWD=/", 0),
+ }
test_helpers.populate_dir_with_ts(rootd, data)
self.assertFalse(self._call_with_root(rootd=rootd))
self.assertIn("from previous boot", self.logs.getvalue())
@@ -289,9 +365,11 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
def test_config_with_new_log(self):
"""A config with a log from this boot is provisioning."""
rootd = self.tmp_dir()
- data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
- self.inst_log: ("log data\n", 30),
- self.boot_ref: ("PWD=/", 0)}
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", 30),
+ self.boot_ref: ("PWD=/", 0),
+ }
test_helpers.populate_dir_with_ts(rootd, data)
self.assertTrue(self._call_with_root(rootd=rootd))
self.assertIn("from current boot", self.logs.getvalue())
@@ -300,44 +378,49 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
"""If the config and log existed, but no reference, assume not."""
rootd = self.tmp_dir()
test_helpers.populate_dir(
- rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"})
+ rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"}
+ )
self.assertFalse(self._call_with_root(rootd=rootd))
self.assertIn("no reference file", self.logs.getvalue())
class TestDataSourceIBMCloud(test_helpers.CiTestCase):
-
def setUp(self):
super(TestDataSourceIBMCloud, self).setUp()
self.tmp = self.tmp_dir()
- self.cloud_dir = self.tmp_path('cloud', dir=self.tmp)
+ self.cloud_dir = self.tmp_path("cloud", dir=self.tmp)
util.ensure_dir(self.cloud_dir)
- paths = Paths({'run_dir': self.tmp, 'cloud_dir': self.cloud_dir})
- self.ds = ibm.DataSourceIBMCloud(
- sys_cfg={}, distro=None, paths=paths)
+ paths = Paths({"run_dir": self.tmp, "cloud_dir": self.cloud_dir})
+ self.ds = ibm.DataSourceIBMCloud(sys_cfg={}, distro=None, paths=paths)
def test_get_data_false(self):
"""When read_md returns None, get_data returns False."""
- with mock.patch(D_PATH + 'read_md', return_value=None):
+ with mock.patch(D_PATH + "read_md", return_value=None):
self.assertFalse(self.ds.get_data())
def test_get_data_processes_read_md(self):
"""get_data processes and caches content returned by read_md."""
md = {
- 'metadata': {}, 'networkdata': 'net', 'platform': 'plat',
- 'source': 'src', 'system-uuid': 'uuid', 'userdata': 'ud',
- 'vendordata': 'vd'}
- with mock.patch(D_PATH + 'read_md', return_value=md):
+ "metadata": {},
+ "networkdata": "net",
+ "platform": "plat",
+ "source": "src",
+ "system-uuid": "uuid",
+ "userdata": "ud",
+ "vendordata": "vd",
+ }
+ with mock.patch(D_PATH + "read_md", return_value=md):
self.assertTrue(self.ds.get_data())
- self.assertEqual('src', self.ds.source)
- self.assertEqual('plat', self.ds.platform)
+ self.assertEqual("src", self.ds.source)
+ self.assertEqual("plat", self.ds.platform)
self.assertEqual({}, self.ds.metadata)
- self.assertEqual('ud', self.ds.userdata_raw)
- self.assertEqual('net', self.ds.network_json)
- self.assertEqual('vd', self.ds.vendordata_pure)
- self.assertEqual('uuid', self.ds.system_uuid)
- self.assertEqual('ibmcloud', self.ds.cloud_name)
- self.assertEqual('ibmcloud', self.ds.platform_type)
- self.assertEqual('plat (src)', self.ds.subplatform)
+ self.assertEqual("ud", self.ds.userdata_raw)
+ self.assertEqual("net", self.ds.network_json)
+ self.assertEqual("vd", self.ds.vendordata_pure)
+ self.assertEqual("uuid", self.ds.system_uuid)
+ self.assertEqual("ibmcloud", self.ds.cloud_name)
+ self.assertEqual("ibmcloud", self.ds.platform_type)
+ self.assertEqual("plat (src)", self.ds.subplatform)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py
index a1d19518..745a7fa6 100644
--- a/tests/unittests/sources/test_init.py
+++ b/tests/unittests/sources/test_init.py
@@ -5,46 +5,60 @@ import inspect
import os
import stat
+from cloudinit import importer, util
from cloudinit.event import EventScope, EventType
from cloudinit.helpers import Paths
-from cloudinit import importer
from cloudinit.sources import (
- EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE,
- METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource,
- canonical_cloud_id, redact_sensitive_keys)
-from tests.unittests.helpers import CiTestCase, mock
+ EXPERIMENTAL_TEXT,
+ INSTANCE_JSON_FILE,
+ INSTANCE_JSON_SENSITIVE_FILE,
+ METADATA_UNKNOWN,
+ REDACT_SENSITIVE_VALUE,
+ UNSET,
+ DataSource,
+ canonical_cloud_id,
+ redact_sensitive_keys,
+)
from cloudinit.user_data import UserDataProcessor
-from cloudinit import util
+from tests.unittests.helpers import CiTestCase, mock
class DataSourceTestSubclassNet(DataSource):
- dsname = 'MyTestSubclass'
+ dsname = "MyTestSubclass"
url_max_wait = 55
- def __init__(self, sys_cfg, distro, paths, custom_metadata=None,
- custom_userdata=None, get_data_retval=True):
- super(DataSourceTestSubclassNet, self).__init__(
- sys_cfg, distro, paths)
+ def __init__(
+ self,
+ sys_cfg,
+ distro,
+ paths,
+ custom_metadata=None,
+ custom_userdata=None,
+ get_data_retval=True,
+ ):
+ super(DataSourceTestSubclassNet, self).__init__(sys_cfg, distro, paths)
self._custom_userdata = custom_userdata
self._custom_metadata = custom_metadata
self._get_data_retval = get_data_retval
def _get_cloud_name(self):
- return 'SubclassCloudName'
+ return "SubclassCloudName"
def _get_data(self):
if self._custom_metadata:
self.metadata = self._custom_metadata
else:
- self.metadata = {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'}
+ self.metadata = {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ }
if self._custom_userdata:
self.userdata_raw = self._custom_userdata
else:
- self.userdata_raw = 'userdata_raw'
- self.vendordata_raw = 'vendordata_raw'
+ self.userdata_raw = "userdata_raw"
+ self.vendordata_raw = "vendordata_raw"
return self._get_data_retval
@@ -59,8 +73,8 @@ class TestDataSource(CiTestCase):
def setUp(self):
super(TestDataSource, self).setUp()
- self.sys_cfg = {'datasource': {'_undef': {'key1': False}}}
- self.distro = 'distrotest' # generally should be a Distro object
+ self.sys_cfg = {"datasource": {"_undef": {"key1": False}}}
+ self.distro = "distrotest" # generally should be a Distro object
self.paths = Paths({})
self.datasource = DataSource(self.sys_cfg, self.distro, self.paths)
@@ -74,22 +88,23 @@ class TestDataSource(CiTestCase):
self.assertIsNone(self.datasource.userdata_raw)
self.assertIsNone(self.datasource.vendordata)
self.assertIsNone(self.datasource.vendordata_raw)
- self.assertEqual({'key1': False}, self.datasource.ds_cfg)
+ self.assertEqual({"key1": False}, self.datasource.ds_cfg)
self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor)
def test_datasource_init_gets_ds_cfg_using_dsname(self):
"""Init uses DataSource.dsname for sourcing ds_cfg."""
- sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
- distro = 'distrotest' # generally should be a Distro object
+ sys_cfg = {"datasource": {"MyTestSubclass": {"key2": False}}}
+ distro = "distrotest" # generally should be a Distro object
datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
- self.assertEqual({'key2': False}, datasource.ds_cfg)
+ self.assertEqual({"key2": False}, datasource.ds_cfg)
def test_str_is_classname(self):
"""The string representation of the datasource is the classname."""
- self.assertEqual('DataSource', str(self.datasource))
+ self.assertEqual("DataSource", str(self.datasource))
self.assertEqual(
- 'DataSourceTestSubclassNet',
- str(DataSourceTestSubclassNet('', '', self.paths)))
+ "DataSourceTestSubclassNet",
+ str(DataSourceTestSubclassNet("", "", self.paths)),
+ )
def test_datasource_get_url_params_defaults(self):
"""get_url_params default url config settings for the datasource."""
@@ -97,16 +112,21 @@ class TestDataSource(CiTestCase):
self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait)
self.assertEqual(params.timeout_seconds, self.datasource.url_timeout)
self.assertEqual(params.num_retries, self.datasource.url_retries)
- self.assertEqual(params.sec_between_retries,
- self.datasource.url_sec_between_retries)
+ self.assertEqual(
+ params.sec_between_retries, self.datasource.url_sec_between_retries
+ )
def test_datasource_get_url_params_subclassed(self):
"""Subclasses can override get_url_params defaults."""
- sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
- distro = 'distrotest' # generally should be a Distro object
+ sys_cfg = {"datasource": {"MyTestSubclass": {"key2": False}}}
+ distro = "distrotest" # generally should be a Distro object
datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
- expected = (datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries, datasource.url_sec_between_retries)
+ expected = (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ )
url_params = datasource.get_url_params()
self.assertNotEqual(self.datasource.get_url_params(), url_params)
self.assertEqual(expected, url_params)
@@ -114,40 +134,64 @@ class TestDataSource(CiTestCase):
def test_datasource_get_url_params_ds_config_override(self):
"""Datasource configuration options can override url param defaults."""
sys_cfg = {
- 'datasource': {
- 'MyTestSubclass': {
- 'max_wait': '1', 'timeout': '2',
- 'retries': '3', 'sec_between_retries': 4
- }}}
+ "datasource": {
+ "MyTestSubclass": {
+ "max_wait": "1",
+ "timeout": "2",
+ "retries": "3",
+ "sec_between_retries": 4,
+ }
+ }
+ }
datasource = DataSourceTestSubclassNet(
- sys_cfg, self.distro, self.paths)
+ sys_cfg, self.distro, self.paths
+ )
expected = (1, 2, 3, 4)
url_params = datasource.get_url_params()
self.assertNotEqual(
- (datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries, datasource.url_sec_between_retries),
- url_params)
+ (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ ),
+ url_params,
+ )
self.assertEqual(expected, url_params)
def test_datasource_get_url_params_is_zero_or_greater(self):
"""get_url_params ignores timeouts with a value below 0."""
# Set an override that is below 0 which gets ignored.
- sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}}
+ sys_cfg = {"datasource": {"_undef": {"timeout": "-1"}}}
datasource = DataSource(sys_cfg, self.distro, self.paths)
- (_max_wait, timeout, _retries,
- _sec_between_retries) = datasource.get_url_params()
+ (
+ _max_wait,
+ timeout,
+ _retries,
+ _sec_between_retries,
+ ) = datasource.get_url_params()
self.assertEqual(0, timeout)
def test_datasource_get_url_uses_defaults_on_errors(self):
"""On invalid system config values for url_params defaults are used."""
# All invalid values should be logged
- sys_cfg = {'datasource': {
- '_undef': {
- 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}}
+ sys_cfg = {
+ "datasource": {
+ "_undef": {
+ "max_wait": "nope",
+ "timeout": "bug",
+ "retries": "nonint",
+ }
+ }
+ }
datasource = DataSource(sys_cfg, self.distro, self.paths)
url_params = datasource.get_url_params()
- expected = (datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries, datasource.url_sec_between_retries)
+ expected = (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ )
self.assertEqual(expected, url_params)
logs = self.logs.getvalue()
expected_logs = [
@@ -158,27 +202,28 @@ class TestDataSource(CiTestCase):
for log in expected_logs:
self.assertIn(log, logs)
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
def test_fallback_interface_is_discovered(self, m_get_fallback_nic):
"""The fallback_interface is discovered via find_fallback_nic."""
- m_get_fallback_nic.return_value = 'nic9'
- self.assertEqual('nic9', self.datasource.fallback_interface)
+ m_get_fallback_nic.return_value = "nic9"
+ self.assertEqual("nic9", self.datasource.fallback_interface)
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic):
"""Log a warning when fallback_interface can not discover the nic."""
- self.datasource._cloud_name = 'MySupahCloud'
+ self.datasource._cloud_name = "MySupahCloud"
m_get_fallback_nic.return_value = None # Couldn't discover nic
self.assertIsNone(self.datasource.fallback_interface)
self.assertEqual(
- 'WARNING: Did not find a fallback interface on MySupahCloud.\n',
- self.logs.getvalue())
+ "WARNING: Did not find a fallback interface on MySupahCloud.\n",
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic):
"""The fallback_interface is cached and won't be rediscovered."""
- self.datasource._fallback_interface = 'nic10'
- self.assertEqual('nic10', self.datasource.fallback_interface)
+ self.datasource._fallback_interface = "nic10"
+ self.assertEqual("nic10", self.datasource.fallback_interface)
m_get_fallback_nic.assert_not_called()
def test__get_data_unimplemented(self):
@@ -186,80 +231,95 @@ class TestDataSource(CiTestCase):
with self.assertRaises(NotImplementedError) as context_manager:
self.datasource.get_data()
self.assertIn(
- 'Subclasses of DataSource must implement _get_data',
- str(context_manager.exception))
+ "Subclasses of DataSource must implement _get_data",
+ str(context_manager.exception),
+ )
datasource2 = InvalidDataSourceTestSubclassNet(
- self.sys_cfg, self.distro, self.paths)
+ self.sys_cfg, self.distro, self.paths
+ )
with self.assertRaises(NotImplementedError) as context_manager:
datasource2.get_data()
self.assertIn(
- 'Subclasses of DataSource must implement _get_data',
- str(context_manager.exception))
+ "Subclasses of DataSource must implement _get_data",
+ str(context_manager.exception),
+ )
def test_get_data_calls_subclass__get_data(self):
"""Datasource.get_data uses the subclass' version of _get_data."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
self.assertTrue(datasource.get_data())
self.assertEqual(
- {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'},
- datasource.metadata)
- self.assertEqual('userdata_raw', datasource.userdata_raw)
- self.assertEqual('vendordata_raw', datasource.vendordata_raw)
+ {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ },
+ datasource.metadata,
+ )
+ self.assertEqual("userdata_raw", datasource.userdata_raw)
+ self.assertEqual("vendordata_raw", datasource.vendordata_raw)
def test_get_hostname_strips_local_hostname_without_domain(self):
"""Datasource.get_hostname strips metadata local-hostname of domain."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
self.assertTrue(datasource.get_data())
self.assertEqual(
- 'test-subclass-hostname', datasource.metadata['local-hostname'])
- self.assertEqual('test-subclass-hostname', datasource.get_hostname())
- datasource.metadata['local-hostname'] = 'hostname.my.domain.com'
- self.assertEqual('hostname', datasource.get_hostname())
+ "test-subclass-hostname", datasource.metadata["local-hostname"]
+ )
+ self.assertEqual("test-subclass-hostname", datasource.get_hostname())
+ datasource.metadata["local-hostname"] = "hostname.my.domain.com"
+ self.assertEqual("hostname", datasource.get_hostname())
def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self):
"""Datasource.get_hostname with fqdn set gets qualified hostname."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
self.assertTrue(datasource.get_data())
- datasource.metadata['local-hostname'] = 'hostname.my.domain.com'
+ datasource.metadata["local-hostname"] = "hostname.my.domain.com"
self.assertEqual(
- 'hostname.my.domain.com', datasource.get_hostname(fqdn=True))
+ "hostname.my.domain.com", datasource.get_hostname(fqdn=True)
+ )
def test_get_hostname_without_metadata_uses_system_hostname(self):
"""Datasource.gethostname runs util.get_hostname when no metadata."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
self.assertEqual({}, datasource.metadata)
- mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
- with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
with mock.patch(mock_fqdn) as m_fqdn:
- m_gethost.return_value = 'systemhostname.domain.com'
+ m_gethost.return_value = "systemhostname.domain.com"
m_fqdn.return_value = None # No maching fqdn in /etc/hosts
- self.assertEqual('systemhostname', datasource.get_hostname())
+ self.assertEqual("systemhostname", datasource.get_hostname())
self.assertEqual(
- 'systemhostname.domain.com',
- datasource.get_hostname(fqdn=True))
+ "systemhostname.domain.com",
+ datasource.get_hostname(fqdn=True),
+ )
def test_get_hostname_without_metadata_returns_none(self):
"""Datasource.gethostname returns None when metadata_only and no MD."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
self.assertEqual({}, datasource.metadata)
- mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
- with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
with mock.patch(mock_fqdn) as m_fqdn:
self.assertIsNone(datasource.get_hostname(metadata_only=True))
self.assertIsNone(
- datasource.get_hostname(fqdn=True, metadata_only=True))
+ datasource.get_hostname(fqdn=True, metadata_only=True)
+ )
self.assertEqual([], m_gethost.call_args_list)
self.assertEqual([], m_fqdn.call_args_list)
@@ -267,78 +327,99 @@ class TestDataSource(CiTestCase):
"""Datasource.gethostname prefers /etc/hosts to util.get_hostname."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
self.assertEqual({}, datasource.metadata)
- mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
- with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
with mock.patch(mock_fqdn) as m_fqdn:
- m_gethost.return_value = 'systemhostname.domain.com'
- m_fqdn.return_value = 'fqdnhostname.domain.com'
- self.assertEqual('fqdnhostname', datasource.get_hostname())
- self.assertEqual('fqdnhostname.domain.com',
- datasource.get_hostname(fqdn=True))
+ m_gethost.return_value = "systemhostname.domain.com"
+ m_fqdn.return_value = "fqdnhostname.domain.com"
+ self.assertEqual("fqdnhostname", datasource.get_hostname())
+ self.assertEqual(
+ "fqdnhostname.domain.com",
+ datasource.get_hostname(fqdn=True),
+ )
def test_get_data_does_not_write_instance_data_on_failure(self):
"""get_data does not write INSTANCE_JSON_FILE on get_data False."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- get_data_retval=False)
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ get_data_retval=False,
+ )
self.assertFalse(datasource.get_data())
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
self.assertFalse(
- os.path.exists(json_file), 'Found unexpected file %s' % json_file)
+ os.path.exists(json_file), "Found unexpected file %s" % json_file
+ )
def test_get_data_writes_json_instance_data_on_success(self):
"""get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
sys_info = {
"python": "3.7",
- "platform":
- "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
- "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
- "x86_64"],
- "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
with mock.patch("cloudinit.util.system_info", return_value=sys_info):
datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
expected = {
- 'base64_encoded_keys': [],
- 'merged_cfg': REDACT_SENSITIVE_VALUE,
- 'sensitive_keys': ['merged_cfg'],
- 'sys_info': sys_info,
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': 'myaz',
- 'availability_zone': 'myaz',
- 'cloud-name': 'subclasscloudname',
- 'cloud_name': 'subclasscloudname',
- 'distro': 'ubuntu',
- 'distro_release': 'focal',
- 'distro_version': '20.04',
- 'instance-id': 'iid-datasource',
- 'instance_id': 'iid-datasource',
- 'local-hostname': 'test-subclass-hostname',
- 'local_hostname': 'test-subclass-hostname',
- 'kernel_release': '5.4.0-24-generic',
- 'machine': 'x86_64',
- 'platform': 'mytestsubclass',
- 'public_ssh_keys': [],
- 'python_version': '3.7',
- 'region': 'myregion',
- 'system_platform':
- 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
- 'subplatform': 'unknown',
- 'variant': 'ubuntu'},
- 'ds': {
-
- '_doc': EXPERIMENTAL_TEXT,
- 'meta_data': {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'}}}
+ "base64_encoded_keys": [],
+ "merged_cfg": REDACT_SENSITIVE_VALUE,
+ "sensitive_keys": ["merged_cfg"],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "kernel_release": "5.4.0-24-generic",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "subplatform": "unknown",
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ },
+ },
+ }
self.assertEqual(expected, util.load_json(content))
file_stat = os.stat(json_file)
self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
@@ -348,63 +429,89 @@ class TestDataSource(CiTestCase):
"""get_data writes redacted content to public INSTANCE_JSON_FILE."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
custom_metadata={
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {'security-credentials': {
- 'cred1': 'sekret', 'cred2': 'othersekret'}}})
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ )
self.assertCountEqual(
- ('merged_cfg', 'security-credentials',),
- datasource.sensitive_metadata_keys)
+ (
+ "merged_cfg",
+ "security-credentials",
+ ),
+ datasource.sensitive_metadata_keys,
+ )
sys_info = {
"python": "3.7",
- "platform":
- "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
- "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
- "x86_64"],
- "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
with mock.patch("cloudinit.util.system_info", return_value=sys_info):
datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
redacted = util.load_json(util.load_file(json_file))
expected = {
- 'base64_encoded_keys': [],
- 'merged_cfg': REDACT_SENSITIVE_VALUE,
- 'sensitive_keys': [
- 'ds/meta_data/some/security-credentials', 'merged_cfg'],
- 'sys_info': sys_info,
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': 'myaz',
- 'availability_zone': 'myaz',
- 'cloud-name': 'subclasscloudname',
- 'cloud_name': 'subclasscloudname',
- 'distro': 'ubuntu',
- 'distro_release': 'focal',
- 'distro_version': '20.04',
- 'instance-id': 'iid-datasource',
- 'instance_id': 'iid-datasource',
- 'local-hostname': 'test-subclass-hostname',
- 'local_hostname': 'test-subclass-hostname',
- 'kernel_release': '5.4.0-24-generic',
- 'machine': 'x86_64',
- 'platform': 'mytestsubclass',
- 'public_ssh_keys': [],
- 'python_version': '3.7',
- 'region': 'myregion',
- 'system_platform':
- 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
- 'subplatform': 'unknown',
- 'variant': 'ubuntu'},
- 'ds': {
- '_doc': EXPERIMENTAL_TEXT,
- 'meta_data': {
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
+ "base64_encoded_keys": [],
+ "merged_cfg": REDACT_SENSITIVE_VALUE,
+ "sensitive_keys": [
+ "ds/meta_data/some/security-credentials",
+ "merged_cfg",
+ ],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "kernel_release": "5.4.0-24-generic",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "subplatform": "unknown",
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {"security-credentials": REDACT_SENSITIVE_VALUE},
+ },
+ },
}
self.assertCountEqual(expected, redacted)
file_stat = os.stat(json_file)
@@ -416,71 +523,101 @@ class TestDataSource(CiTestCase):
"""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
custom_metadata={
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {'security-credentials': {
- 'cred1': 'sekret', 'cred2': 'othersekret'}}})
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ )
sys_info = {
"python": "3.7",
- "platform":
- "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
- "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
- "x86_64"],
- "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
self.assertCountEqual(
- ('merged_cfg', 'security-credentials',),
- datasource.sensitive_metadata_keys)
+ (
+ "merged_cfg",
+ "security-credentials",
+ ),
+ datasource.sensitive_metadata_keys,
+ )
with mock.patch("cloudinit.util.system_info", return_value=sys_info):
datasource.get_data()
sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
content = util.load_file(sensitive_json_file)
expected = {
- 'base64_encoded_keys': [],
- 'merged_cfg': {
- '_doc': (
- 'Merged cloud-init system config from '
- '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'
+ "base64_encoded_keys": [],
+ "merged_cfg": {
+ "_doc": (
+ "Merged cloud-init system config from "
+ "/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/"
+ ),
+ "datasource": {"_undef": {"key1": False}},
+ },
+ "sensitive_keys": [
+ "ds/meta_data/some/security-credentials",
+ "merged_cfg",
+ ],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "kernel_release": "5.4.0-24-generic",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "subplatform": "unknown",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
),
- 'datasource': {'_undef': {'key1': False}}},
- 'sensitive_keys': [
- 'ds/meta_data/some/security-credentials', 'merged_cfg'],
- 'sys_info': sys_info,
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': 'myaz',
- 'availability_zone': 'myaz',
- 'cloud-name': 'subclasscloudname',
- 'cloud_name': 'subclasscloudname',
- 'distro': 'ubuntu',
- 'distro_release': 'focal',
- 'distro_version': '20.04',
- 'instance-id': 'iid-datasource',
- 'instance_id': 'iid-datasource',
- 'kernel_release': '5.4.0-24-generic',
- 'local-hostname': 'test-subclass-hostname',
- 'local_hostname': 'test-subclass-hostname',
- 'machine': 'x86_64',
- 'platform': 'mytestsubclass',
- 'public_ssh_keys': [],
- 'python_version': '3.7',
- 'region': 'myregion',
- 'subplatform': 'unknown',
- 'system_platform':
- 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
- 'variant': 'ubuntu'},
- 'ds': {
- '_doc': EXPERIMENTAL_TEXT,
- 'meta_data': {
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {
- 'security-credentials':
- {'cred1': 'sekret', 'cred2': 'othersekret'}}}}
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ },
}
self.assertCountEqual(expected, util.load_json(content))
file_stat = os.stat(sensitive_json_file)
@@ -491,69 +628,81 @@ class TestDataSource(CiTestCase):
"""get_data warns unserializable content in INSTANCE_JSON_FILE."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={"key1": "val1", "key2": {"key2.1": self.paths}},
+ )
datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
expected_metadata = {
- 'key1': 'val1',
- 'key2': {
- 'key2.1': "Warning: redacted unserializable type <class"
- " 'cloudinit.helpers.Paths'>"}}
+ "key1": "val1",
+ "key2": {
+ "key2.1": (
+ "Warning: redacted unserializable type <class"
+ " 'cloudinit.helpers.Paths'>"
+ )
+ },
+ }
instance_json = util.load_json(content)
- self.assertEqual(
- expected_metadata, instance_json['ds']['meta_data'])
+ self.assertEqual(expected_metadata, instance_json["ds"]["meta_data"])
def test_persist_instance_data_writes_ec2_metadata_when_set(self):
"""When ec2_metadata class attribute is set, persist to json."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
datasource.ec2_metadata = UNSET
datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
instance_data = util.load_json(util.load_file(json_file))
- self.assertNotIn('ec2_metadata', instance_data['ds'])
- datasource.ec2_metadata = {'ec2stuff': 'is good'}
+ self.assertNotIn("ec2_metadata", instance_data["ds"])
+ datasource.ec2_metadata = {"ec2stuff": "is good"}
datasource.persist_instance_data()
instance_data = util.load_json(util.load_file(json_file))
self.assertEqual(
- {'ec2stuff': 'is good'},
- instance_data['ds']['ec2_metadata'])
+ {"ec2stuff": "is good"}, instance_data["ds"]["ec2_metadata"]
+ )
def test_persist_instance_data_writes_network_json_when_set(self):
"""When network_data.json class attribute is set, persist to json."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
instance_data = util.load_json(util.load_file(json_file))
- self.assertNotIn('network_json', instance_data['ds'])
- datasource.network_json = {'network_json': 'is good'}
+ self.assertNotIn("network_json", instance_data["ds"])
+ datasource.network_json = {"network_json": "is good"}
datasource.persist_instance_data()
instance_data = util.load_json(util.load_file(json_file))
self.assertEqual(
- {'network_json': 'is good'},
- instance_data['ds']['network_json'])
+ {"network_json": "is good"}, instance_data["ds"]["network_json"]
+ )
def test_get_data_base64encodes_unserializable_bytes(self):
"""On py3, get_data base64encodes any unserializable content."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={"key1": "val1", "key2": {"key2.1": b"\x123"}},
+ )
self.assertTrue(datasource.get_data())
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
instance_json = util.load_json(content)
self.assertCountEqual(
- ['ds/meta_data/key2/key2.1'],
- instance_json['base64_encoded_keys'])
+ ["ds/meta_data/key2/key2.1"], instance_json["base64_encoded_keys"]
+ )
self.assertEqual(
- {'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
- instance_json['ds']['meta_data'])
+ {"key1": "val1", "key2": {"key2.1": "EjM="}},
+ instance_json["ds"]["meta_data"],
+ )
def test_get_hostname_subclass_support(self):
"""Validate get_hostname signature on all subclasses of DataSource."""
@@ -561,23 +710,24 @@ class TestDataSource(CiTestCase):
# Import all DataSource subclasses so we can inspect them.
modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
for _loc, name in modules.items():
- mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], [])
+ mod_locs, _ = importer.find_module(name, ["cloudinit.sources"], [])
if mod_locs:
importer.import_module(mod_locs[0])
for child in DataSource.__subclasses__():
- if 'Test' in child.dsname:
+ if "Test" in child.dsname:
continue
self.assertEqual(
base_args,
inspect.getfullargspec(child.get_hostname),
- '%s does not implement DataSource.get_hostname params'
- % child)
+ "%s does not implement DataSource.get_hostname params" % child,
+ )
for grandchild in child.__subclasses__():
self.assertEqual(
base_args,
inspect.getfullargspec(grandchild.get_hostname),
- '%s does not implement DataSource.get_hostname params'
- % grandchild)
+ "%s does not implement DataSource.get_hostname params"
+ % grandchild,
+ )
def test_clear_cached_attrs_resets_cached_attr_class_attributes(self):
"""Class attributes listed in cached_attr_defaults are reset."""
@@ -598,7 +748,7 @@ class TestDataSource(CiTestCase):
for attr, _ in self.datasource.cached_attr_defaults:
setattr(self.datasource, attr, count)
count += 1
- self.datasource._dirty_cache = False # Fake clean cache
+ self.datasource._dirty_cache = False # Fake clean cache
self.datasource.clear_cached_attrs()
count = 0
for attr, _ in self.datasource.cached_attr_defaults:
@@ -609,163 +759,194 @@ class TestDataSource(CiTestCase):
"""Skip any cached_attr_defaults which aren't class attributes."""
self.datasource._dirty_cache = True
self.datasource.clear_cached_attrs()
- for attr in ('ec2_metadata', 'network_json'):
+ for attr in ("ec2_metadata", "network_json"):
self.assertFalse(hasattr(self.datasource, attr))
def test_clear_cached_attrs_of_custom_attrs(self):
"""Custom attr_values can be passed to clear_cached_attrs."""
self.datasource._dirty_cache = True
cached_attr_name = self.datasource.cached_attr_defaults[0][0]
- setattr(self.datasource, cached_attr_name, 'himom')
- self.datasource.myattr = 'orig'
+ setattr(self.datasource, cached_attr_name, "himom")
+ self.datasource.myattr = "orig"
self.datasource.clear_cached_attrs(
- attr_defaults=(('myattr', 'updated'),))
- self.assertEqual('himom', getattr(self.datasource, cached_attr_name))
- self.assertEqual('updated', self.datasource.myattr)
-
- @mock.patch.dict(DataSource.default_update_events, {
- EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
- @mock.patch.dict(DataSource.supported_update_events, {
- EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
+ attr_defaults=(("myattr", "updated"),)
+ )
+ self.assertEqual("himom", getattr(self.datasource, cached_attr_name))
+ self.assertEqual("updated", self.datasource.myattr)
+
+ @mock.patch.dict(
+ DataSource.default_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ @mock.patch.dict(
+ DataSource.supported_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
def test_update_metadata_only_acts_on_supported_update_events(self):
"""update_metadata_if_supported wont get_data on unsupported events."""
self.assertEqual(
{EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])},
- self.datasource.default_update_events
+ self.datasource.default_update_events,
)
def fake_get_data():
- raise Exception('get_data should not be called')
+ raise Exception("get_data should not be called")
self.datasource.get_data = fake_get_data
self.assertFalse(
self.datasource.update_metadata_if_supported(
- source_event_types=[EventType.BOOT]))
+ source_event_types=[EventType.BOOT]
+ )
+ )
- @mock.patch.dict(DataSource.supported_update_events, {
- EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
+ @mock.patch.dict(
+ DataSource.supported_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
def test_update_metadata_returns_true_on_supported_update_event(self):
"""update_metadata_if_supported returns get_data on supported events"""
+
def fake_get_data():
return True
self.datasource.get_data = fake_get_data
- self.datasource._network_config = 'something'
+ self.datasource._network_config = "something"
self.datasource._dirty_cache = True
self.assertTrue(
self.datasource.update_metadata_if_supported(
source_event_types=[
- EventType.BOOT, EventType.BOOT_NEW_INSTANCE]))
+ EventType.BOOT,
+ EventType.BOOT_NEW_INSTANCE,
+ ]
+ )
+ )
self.assertEqual(UNSET, self.datasource._network_config)
self.assertIn(
"DEBUG: Update datasource metadata and network config due to"
" events: boot-new-instance",
- self.logs.getvalue()
+ self.logs.getvalue(),
)
class TestRedactSensitiveData(CiTestCase):
-
def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
"""When sensitive_keys is absent or empty from metadata do nothing."""
- md = {'my': 'data'}
+ md = {"my": "data"}
self.assertEqual(
- md, redact_sensitive_keys(md, redact_value='redacted'))
- md['sensitive_keys'] = []
+ md, redact_sensitive_keys(md, redact_value="redacted")
+ )
+ md["sensitive_keys"] = []
self.assertEqual(
- md, redact_sensitive_keys(md, redact_value='redacted'))
+ md, redact_sensitive_keys(md, redact_value="redacted")
+ )
def test_redact_sensitive_data_redacts_exact_match_name(self):
"""Only exact matched sensitive_keys are redacted from metadata."""
- md = {'sensitive_keys': ['md/secure'],
- 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+ md = {
+ "sensitive_keys": ["md/secure"],
+ "md": {"secure": "s3kr1t", "insecure": "publik"},
+ }
secure_md = copy.deepcopy(md)
- secure_md['md']['secure'] = 'redacted'
+ secure_md["md"]["secure"] = "redacted"
self.assertEqual(
- secure_md,
- redact_sensitive_keys(md, redact_value='redacted'))
+ secure_md, redact_sensitive_keys(md, redact_value="redacted")
+ )
def test_redact_sensitive_data_does_redacts_with_default_string(self):
"""When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
- md = {'sensitive_keys': ['md/secure'],
- 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+ md = {
+ "sensitive_keys": ["md/secure"],
+ "md": {"secure": "s3kr1t", "insecure": "publik"},
+ }
secure_md = copy.deepcopy(md)
- secure_md['md']['secure'] = 'redacted for non-root user'
- self.assertEqual(
- secure_md,
- redact_sensitive_keys(md))
+ secure_md["md"]["secure"] = "redacted for non-root user"
+ self.assertEqual(secure_md, redact_sensitive_keys(md))
class TestCanonicalCloudID(CiTestCase):
-
def test_cloud_id_returns_platform_on_unknowns(self):
"""When region and cloud_name are unknown, return platform."""
self.assertEqual(
- 'platform',
- canonical_cloud_id(cloud_name=METADATA_UNKNOWN,
- region=METADATA_UNKNOWN,
- platform='platform'))
+ "platform",
+ canonical_cloud_id(
+ cloud_name=METADATA_UNKNOWN,
+ region=METADATA_UNKNOWN,
+ platform="platform",
+ ),
+ )
def test_cloud_id_returns_platform_on_none(self):
"""When region and cloud_name are unknown, return platform."""
self.assertEqual(
- 'platform',
- canonical_cloud_id(cloud_name=None,
- region=None,
- platform='platform'))
+ "platform",
+ canonical_cloud_id(
+ cloud_name=None, region=None, platform="platform"
+ ),
+ )
def test_cloud_id_returns_cloud_name_on_unknown_region(self):
"""When region is unknown, return cloud_name."""
for region in (None, METADATA_UNKNOWN):
self.assertEqual(
- 'cloudname',
- canonical_cloud_id(cloud_name='cloudname',
- region=region,
- platform='platform'))
+ "cloudname",
+ canonical_cloud_id(
+ cloud_name="cloudname", region=region, platform="platform"
+ ),
+ )
def test_cloud_id_returns_platform_on_unknown_cloud_name(self):
"""When region is set but cloud_name is unknown return cloud_name."""
self.assertEqual(
- 'platform',
- canonical_cloud_id(cloud_name=METADATA_UNKNOWN,
- region='region',
- platform='platform'))
+ "platform",
+ canonical_cloud_id(
+ cloud_name=METADATA_UNKNOWN,
+ region="region",
+ platform="platform",
+ ),
+ )
def test_cloud_id_aws_based_on_region_and_cloud_name(self):
"""When cloud_name is aws, return proper cloud-id based on region."""
self.assertEqual(
- 'aws-china',
- canonical_cloud_id(cloud_name='aws',
- region='cn-north-1',
- platform='platform'))
+ "aws-china",
+ canonical_cloud_id(
+ cloud_name="aws", region="cn-north-1", platform="platform"
+ ),
+ )
self.assertEqual(
- 'aws',
- canonical_cloud_id(cloud_name='aws',
- region='us-east-1',
- platform='platform'))
+ "aws",
+ canonical_cloud_id(
+ cloud_name="aws", region="us-east-1", platform="platform"
+ ),
+ )
self.assertEqual(
- 'aws-gov',
- canonical_cloud_id(cloud_name='aws',
- region='us-gov-1',
- platform='platform'))
+ "aws-gov",
+ canonical_cloud_id(
+ cloud_name="aws", region="us-gov-1", platform="platform"
+ ),
+ )
self.assertEqual( # Overrideen non-aws cloud_name is returned
- '!aws',
- canonical_cloud_id(cloud_name='!aws',
- region='us-gov-1',
- platform='platform'))
+ "!aws",
+ canonical_cloud_id(
+ cloud_name="!aws", region="us-gov-1", platform="platform"
+ ),
+ )
def test_cloud_id_azure_based_on_region_and_cloud_name(self):
"""Report cloud-id when cloud_name is azure and region is in china."""
self.assertEqual(
- 'azure-china',
- canonical_cloud_id(cloud_name='azure',
- region='chinaeast',
- platform='platform'))
+ "azure-china",
+ canonical_cloud_id(
+ cloud_name="azure", region="chinaeast", platform="platform"
+ ),
+ )
self.assertEqual(
- 'azure',
- canonical_cloud_id(cloud_name='azure',
- region='!chinaeast',
- platform='platform'))
+ "azure",
+ canonical_cloud_id(
+ cloud_name="azure", region="!chinaeast", platform="platform"
+ ),
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py
index a6e51f3b..ad1508a0 100644
--- a/tests/unittests/sources/test_lxd.py
+++ b/tests/unittests/sources/test_lxd.py
@@ -1,18 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from collections import namedtuple
-from copy import deepcopy
import json
import re
import stat
+from collections import namedtuple
+from copy import deepcopy
from unittest import mock
-import yaml
import pytest
+import yaml
+
+from cloudinit.sources import UNSET
+from cloudinit.sources import DataSourceLXD as lxd
+from cloudinit.sources import InvalidMetaDataException
-from cloudinit.sources import (
- DataSourceLXD as lxd, InvalidMetaDataException, UNSET
-)
DS_PATH = "cloudinit.sources.DataSourceLXD."
@@ -23,10 +24,11 @@ NETWORK_V1 = {
"version": 1,
"config": [
{
- "type": "physical", "name": "eth0",
- "subnets": [{"type": "dhcp", "control": "auto"}]
+ "type": "physical",
+ "name": "eth0",
+ "subnets": [{"type": "dhcp", "control": "auto"}],
}
- ]
+ ],
}
@@ -43,12 +45,10 @@ LXD_V1_METADATA = {
"user-data": "#cloud-config\npackages: [sl]\n",
"vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n",
"config": {
- "user.user-data":
- "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
- "user.vendor-data":
- "#cloud-config\nruncmd: ['echo vendor-data']\n",
+ "user.user-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "user.vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n",
"user.network-config": yaml.safe_dump(NETWORK_V1),
- }
+ },
}
@@ -76,9 +76,9 @@ def lxd_ds(request, paths, lxd_metadata):
class TestGenerateFallbackNetworkConfig:
-
@pytest.mark.parametrize(
- "uname_machine,systemd_detect_virt,expected", (
+ "uname_machine,systemd_detect_virt,expected",
+ (
# None for systemd_detect_virt returns None from which
({}, None, NETWORK_V1),
({}, None, NETWORK_V1),
@@ -86,8 +86,8 @@ class TestGenerateFallbackNetworkConfig:
# `uname -m` on kvm determines devname
("x86_64", "kvm\n", _add_network_v1_device("enp5s0")),
("ppc64le", "kvm\n", _add_network_v1_device("enp0s5")),
- ("s390x", "kvm\n", _add_network_v1_device("enc9"))
- )
+ ("s390x", "kvm\n", _add_network_v1_device("enc9")),
+ ),
)
@mock.patch(DS_PATH + "util.system_info")
@mock.patch(DS_PATH + "subp.subp")
@@ -145,11 +145,12 @@ class TestDataSourceLXD:
class TestIsPlatformViable:
@pytest.mark.parametrize(
- "exists,lstat_mode,expected", (
+ "exists,lstat_mode,expected",
+ (
(False, None, False),
(True, stat.S_IFREG, False),
(True, stat.S_IFSOCK, True),
- )
+ ),
)
@mock.patch(DS_PATH + "os.lstat")
@mock.patch(DS_PATH + "os.path.exists")
@@ -169,7 +170,8 @@ class TestIsPlatformViable:
class TestReadMetadata:
@pytest.mark.parametrize(
- "url_responses,expected,logs", (
+ "url_responses,expected,logs",
+ (
( # Assert non-JSON format from config route
{
"http://lxd/1.0/meta-data": "local-hostname: md\n",
@@ -178,31 +180,38 @@ class TestReadMetadata:
InvalidMetaDataException(
"Unable to determine cloud-init config from"
" http://lxd/1.0/config. Expected JSON but found:"
- " [NOT_JSON"),
- ["[GET] [HTTP:200] http://lxd/1.0/meta-data",
- "[GET] [HTTP:200] http://lxd/1.0/config"],
+ " [NOT_JSON"
+ ),
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ ],
),
- ( # Assert success on just meta-data
+ ( # Assert success on just meta-data
{
"http://lxd/1.0/meta-data": "local-hostname: md\n",
"http://lxd/1.0/config": "[]",
},
{
"_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
- "config": {}, "meta-data": "local-hostname: md\n"
+ "config": {},
+ "meta-data": "local-hostname: md\n",
},
- ["[GET] [HTTP:200] http://lxd/1.0/meta-data",
- "[GET] [HTTP:200] http://lxd/1.0/config"],
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ ],
),
- ( # Assert 404s for config routes log skipping
+ ( # Assert 404s for config routes log skipping
{
"http://lxd/1.0/meta-data": "local-hostname: md\n",
- "http://lxd/1.0/config":
+ "http://lxd/1.0/config": (
'["/1.0/config/user.custom1",'
' "/1.0/config/user.meta-data",'
' "/1.0/config/user.network-config",'
' "/1.0/config/user.user-data",'
- ' "/1.0/config/user.vendor-data"]',
+ ' "/1.0/config/user.vendor-data"]'
+ ),
"http://lxd/1.0/config/user.custom1": "custom1",
"http://lxd/1.0/config/user.meta-data": "", # 404
"http://lxd/1.0/config/user.network-config": "net-config",
@@ -212,7 +221,7 @@ class TestReadMetadata:
{
"_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
"config": {
- "user.custom1": "custom1", # Not promoted
+ "user.custom1": "custom1", # Not promoted
"user.network-config": "net-config",
},
"meta-data": "local-hostname: md\n",
@@ -231,15 +240,16 @@ class TestReadMetadata:
" http://lxd/1.0/config/user.network-config",
],
),
- ( # Assert all CONFIG_KEY_ALIASES promoted to top-level keys
+ ( # Assert all CONFIG_KEY_ALIASES promoted to top-level keys
{
"http://lxd/1.0/meta-data": "local-hostname: md\n",
- "http://lxd/1.0/config":
+ "http://lxd/1.0/config": (
'["/1.0/config/user.custom1",'
' "/1.0/config/user.meta-data",'
' "/1.0/config/user.network-config",'
' "/1.0/config/user.user-data",'
- ' "/1.0/config/user.vendor-data"]',
+ ' "/1.0/config/user.vendor-data"]'
+ ),
"http://lxd/1.0/config/user.custom1": "custom1",
"http://lxd/1.0/config/user.meta-data": "meta-data",
"http://lxd/1.0/config/user.network-config": "net-config",
@@ -249,7 +259,7 @@ class TestReadMetadata:
{
"_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
"config": {
- "user.custom1": "custom1", # Not promoted
+ "user.custom1": "custom1", # Not promoted
"user.meta-data": "meta-data",
"user.network-config": "net-config",
"user.user-data": "user-data",
@@ -271,31 +281,38 @@ class TestReadMetadata:
"[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data",
],
),
- ( # Assert cloud-init.* config key values prefered over user.*
+ ( # Assert cloud-init.* config key values preferred over user.*
{
"http://lxd/1.0/meta-data": "local-hostname: md\n",
- "http://lxd/1.0/config":
+ "http://lxd/1.0/config": (
'["/1.0/config/user.meta-data",'
' "/1.0/config/user.network-config",'
' "/1.0/config/user.user-data",'
' "/1.0/config/user.vendor-data",'
' "/1.0/config/cloud-init.network-config",'
' "/1.0/config/cloud-init.user-data",'
- ' "/1.0/config/cloud-init.vendor-data"]',
+ ' "/1.0/config/cloud-init.vendor-data"]'
+ ),
"http://lxd/1.0/config/user.meta-data": "user.meta-data",
- "http://lxd/1.0/config/user.network-config":
- "user.network-config",
+ "http://lxd/1.0/config/user.network-config": (
+ "user.network-config"
+ ),
"http://lxd/1.0/config/user.user-data": "user.user-data",
- "http://lxd/1.0/config/user.vendor-data":
- "user.vendor-data",
- "http://lxd/1.0/config/cloud-init.meta-data":
- "cloud-init.meta-data",
- "http://lxd/1.0/config/cloud-init.network-config":
- "cloud-init.network-config",
- "http://lxd/1.0/config/cloud-init.user-data":
- "cloud-init.user-data",
- "http://lxd/1.0/config/cloud-init.vendor-data":
- "cloud-init.vendor-data",
+ "http://lxd/1.0/config/user.vendor-data": (
+ "user.vendor-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.meta-data": (
+ "cloud-init.meta-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.network-config": (
+ "cloud-init.network-config"
+ ),
+ "http://lxd/1.0/config/cloud-init.user-data": (
+ "cloud-init.user-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.vendor-data": (
+ "cloud-init.vendor-data"
+ ),
},
{
"_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
@@ -304,11 +321,11 @@ class TestReadMetadata:
"user.network-config": "user.network-config",
"user.user-data": "user.user-data",
"user.vendor-data": "user.vendor-data",
- "cloud-init.network-config":
- "cloud-init.network-config",
+ "cloud-init.network-config": (
+ "cloud-init.network-config"
+ ),
"cloud-init.user-data": "cloud-init.user-data",
- "cloud-init.vendor-data":
- "cloud-init.vendor-data",
+ "cloud-init.vendor-data": "cloud-init.vendor-data",
},
"meta-data": "local-hostname: md\n",
"network-config": "cloud-init.network-config",
@@ -337,9 +354,9 @@ class TestReadMetadata:
" cloud-init.vendor-data value.",
],
),
- )
+ ),
)
- @mock.patch.object(lxd.requests.Session, 'get')
+ @mock.patch.object(lxd.requests.Session, "get")
def test_read_metadata_handles_unexpected_content_or_http_status(
self, session_get, url_responses, expected, logs, caplog
):
@@ -348,7 +365,7 @@ class TestReadMetadata:
def fake_get(url):
"""Mock Response json, ok, status_code, text from url_responses."""
m_resp = mock.MagicMock()
- content = url_responses.get(url, '')
+ content = url_responses.get(url, "")
m_resp.json.side_effect = lambda: json.loads(content)
if content:
mock_ok = mock.PropertyMock(return_value=True)
@@ -373,4 +390,5 @@ class TestReadMetadata:
for log in logs:
assert log in caplogs
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_maas.py b/tests/unittests/sources/test_maas.py
index 34b79587..e95ba374 100644
--- a/tests/unittests/sources/test_maas.py
+++ b/tests/unittests/sources/test_maas.py
@@ -1,19 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from copy import copy
import os
import shutil
import tempfile
-import yaml
+from copy import copy
from unittest import mock
-from cloudinit.sources import DataSourceMAAS
+import yaml
+
from cloudinit import url_helper
+from cloudinit.sources import DataSourceMAAS
from tests.unittests.helpers import CiTestCase, populate_dir
class TestMAASDataSource(CiTestCase):
-
def setUp(self):
super(TestMAASDataSource, self).setUp()
# Make a temp directoy for tests to use.
@@ -23,11 +23,13 @@ class TestMAASDataSource(CiTestCase):
def test_seed_dir_valid(self):
"""Verify a valid seeddir is read as such."""
- userdata = b'valid01-userdata'
- data = {'meta-data/instance-id': 'i-valid01',
- 'meta-data/local-hostname': 'valid01-hostname',
- 'user-data': userdata,
- 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
+ userdata = b"valid01-userdata"
+ data = {
+ "meta-data/instance-id": "i-valid01",
+ "meta-data/local-hostname": "valid01-hostname",
+ "user-data": userdata,
+ "public-keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname",
+ }
my_d = os.path.join(self.tmp, "valid")
populate_dir(my_d, data)
@@ -35,20 +37,23 @@ class TestMAASDataSource(CiTestCase):
ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)
self.assertEqual(userdata, ud)
- for key in ('instance-id', 'local-hostname'):
+ for key in ("instance-id", "local-hostname"):
self.assertEqual(data["meta-data/" + key], md[key])
# verify that 'userdata' is not returned as part of the metadata
- self.assertFalse(('user-data' in md))
+ self.assertFalse(("user-data" in md))
self.assertIsNone(vd)
def test_seed_dir_valid_extra(self):
"""Verify extra files do not affect seed_dir validity."""
- userdata = b'valid-extra-userdata'
- data = {'meta-data/instance-id': 'i-valid-extra',
- 'meta-data/local-hostname': 'valid-extra-hostname',
- 'user-data': userdata, 'foo': 'bar'}
+ userdata = b"valid-extra-userdata"
+ data = {
+ "meta-data/instance-id": "i-valid-extra",
+ "meta-data/local-hostname": "valid-extra-hostname",
+ "user-data": userdata,
+ "foo": "bar",
+ }
my_d = os.path.join(self.tmp, "valid_extra")
populate_dir(my_d, data)
@@ -56,62 +61,77 @@ class TestMAASDataSource(CiTestCase):
ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d)
self.assertEqual(userdata, ud)
- for key in ('instance-id', 'local-hostname'):
- self.assertEqual(data['meta-data/' + key], md[key])
+ for key in ("instance-id", "local-hostname"):
+ self.assertEqual(data["meta-data/" + key], md[key])
# additional files should not just appear as keys in metadata atm
- self.assertFalse(('foo' in md))
+ self.assertFalse(("foo" in md))
def test_seed_dir_invalid(self):
"""Verify that invalid seed_dir raises MAASSeedDirMalformed."""
- valid = {'instance-id': 'i-instanceid',
- 'local-hostname': 'test-hostname', 'user-data': ''}
+ valid = {
+ "instance-id": "i-instanceid",
+ "local-hostname": "test-hostname",
+ "user-data": "",
+ }
my_based = os.path.join(self.tmp, "valid_extra")
# missing 'userdata' file
my_d = "%s-01" % my_based
invalid_data = copy(valid)
- del invalid_data['local-hostname']
+ del invalid_data["local-hostname"]
populate_dir(my_d, invalid_data)
- self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
- DataSourceMAAS.read_maas_seed_dir, my_d)
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirMalformed,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
# missing 'instance-id'
my_d = "%s-02" % my_based
invalid_data = copy(valid)
- del invalid_data['instance-id']
+ del invalid_data["instance-id"]
populate_dir(my_d, invalid_data)
- self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
- DataSourceMAAS.read_maas_seed_dir, my_d)
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirMalformed,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
def test_seed_dir_none(self):
"""Verify that empty seed_dir raises MAASSeedDirNone."""
my_d = os.path.join(self.tmp, "valid_empty")
- self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
- DataSourceMAAS.read_maas_seed_dir, my_d)
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirNone,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
def test_seed_dir_missing(self):
"""Verify that missing seed_dir raises MAASSeedDirNone."""
- self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
- DataSourceMAAS.read_maas_seed_dir,
- os.path.join(self.tmp, "nonexistantdirectory"))
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirNone,
+ DataSourceMAAS.read_maas_seed_dir,
+ os.path.join(self.tmp, "nonexistantdirectory"),
+ )
def mock_read_maas_seed_url(self, data, seed, version="19991231"):
"""mock up readurl to appear as a web server at seed has provided data.
return what read_maas_seed_url returns."""
+
def my_readurl(*args, **kwargs):
if len(args):
url = args[0]
else:
- url = kwargs['url']
+ url = kwargs["url"]
prefix = "%s/%s/" % (seed, version)
if not url.startswith(prefix):
raise ValueError("unexpected call %s" % url)
- short = url[len(prefix):]
+ short = url[len(prefix) :]
if short not in data:
raise url_helper.UrlError("not found", code=404, url=url)
return url_helper.StringResponse(data[short])
@@ -124,44 +144,48 @@ class TestMAASDataSource(CiTestCase):
def test_seed_url_valid(self):
"""Verify that valid seed_url is read as such."""
valid = {
- 'meta-data/instance-id': 'i-instanceid',
- 'meta-data/local-hostname': 'test-hostname',
- 'meta-data/public-keys': 'test-hostname',
- 'meta-data/vendor-data': b'my-vendordata',
- 'user-data': b'foodata',
+ "meta-data/instance-id": "i-instanceid",
+ "meta-data/local-hostname": "test-hostname",
+ "meta-data/public-keys": "test-hostname",
+ "meta-data/vendor-data": b"my-vendordata",
+ "user-data": b"foodata",
}
my_seed = "http://example.com/xmeta"
my_ver = "1999-99-99"
ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver)
- self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
+ self.assertEqual(valid["meta-data/instance-id"], md["instance-id"])
self.assertEqual(
- valid['meta-data/local-hostname'], md['local-hostname'])
- self.assertEqual(valid['meta-data/public-keys'], md['public-keys'])
- self.assertEqual(valid['user-data'], ud)
+ valid["meta-data/local-hostname"], md["local-hostname"]
+ )
+ self.assertEqual(valid["meta-data/public-keys"], md["public-keys"])
+ self.assertEqual(valid["user-data"], ud)
# vendor-data is yaml, which decodes a string
- self.assertEqual(valid['meta-data/vendor-data'].decode(), vd)
+ self.assertEqual(valid["meta-data/vendor-data"].decode(), vd)
def test_seed_url_vendor_data_dict(self):
- expected_vd = {'key1': 'value1'}
+ expected_vd = {"key1": "value1"}
valid = {
- 'meta-data/instance-id': 'i-instanceid',
- 'meta-data/local-hostname': 'test-hostname',
- 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(),
+ "meta-data/instance-id": "i-instanceid",
+ "meta-data/local-hostname": "test-hostname",
+ "meta-data/vendor-data": yaml.safe_dump(expected_vd).encode(),
}
_ud, md, vd = self.mock_read_maas_seed_url(
- valid, "http://example.com/foo")
+ valid, "http://example.com/foo"
+ )
- self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
+ self.assertEqual(valid["meta-data/instance-id"], md["instance-id"])
self.assertEqual(expected_vd, vd)
@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper")
class TestGetOauthHelper(CiTestCase):
- base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY',
- 'token_key': 'FAKE_TOKEN_KEY',
- 'token_secret': 'FAKE_TOKEN_SECRET',
- 'consumer_secret': None}
+ base_cfg = {
+ "consumer_key": "FAKE_CONSUMER_KEY",
+ "token_key": "FAKE_TOKEN_KEY",
+ "token_secret": "FAKE_TOKEN_SECRET",
+ "consumer_secret": None,
+ }
def test_all_required(self, m_helper):
"""Valid config as expected."""
@@ -171,17 +195,20 @@ class TestGetOauthHelper(CiTestCase):
def test_other_fields_not_passed_through(self, m_helper):
"""Only relevant fields are passed through."""
mycfg = self.base_cfg.copy()
- mycfg['unrelated_field'] = 'unrelated'
+ mycfg["unrelated_field"] = "unrelated"
DataSourceMAAS.get_oauth_helper(mycfg)
m_helper.assert_has_calls([mock.call(**self.base_cfg)])
class TestGetIdHash(CiTestCase):
- v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY',
- 'token_secret': 'TSEC'}
+ v1_cfg = {
+ "consumer_key": "CKEY",
+ "token_key": "TKEY",
+ "token_secret": "TSEC",
+ }
v1_id = (
- 'v1:'
- '403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392')
+ "v1:403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392"
+ )
def test_v1_expected(self):
"""Test v1 id generated as expected working behavior from config."""
@@ -191,8 +218,8 @@ class TestGetIdHash(CiTestCase):
def test_v1_extra_fields_are_ignored(self):
"""Test v1 id ignores unused entries in config."""
cfg = self.v1_cfg.copy()
- cfg['consumer_secret'] = "BOO"
- cfg['unrelated'] = "HI MOM"
+ cfg["consumer_secret"] = "BOO"
+ cfg["unrelated"] = "HI MOM"
result = DataSourceMAAS.get_id_from_ds_cfg(cfg)
self.assertEqual(self.v1_id, result)
diff --git a/tests/unittests/sources/test_nocloud.py b/tests/unittests/sources/test_nocloud.py
index 26f91054..1f6b722d 100644
--- a/tests/unittests/sources/test_nocloud.py
+++ b/tests/unittests/sources/test_nocloud.py
@@ -1,27 +1,27 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import dmi
-from cloudinit import helpers
-from cloudinit.sources.DataSourceNoCloud import (
- DataSourceNoCloud as dsNoCloud,
- _maybe_remove_top_network,
- parse_cmdline_data)
-from cloudinit import util
-from tests.unittests.helpers import CiTestCase, populate_dir, mock, ExitStack
-
import os
import textwrap
+
import yaml
+from cloudinit import dmi, helpers, util
+from cloudinit.sources.DataSourceNoCloud import DataSourceNoCloud as dsNoCloud
+from cloudinit.sources.DataSourceNoCloud import (
+ _maybe_remove_top_network,
+ parse_cmdline_data,
+)
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
+
-@mock.patch('cloudinit.sources.DataSourceNoCloud.util.is_lxd')
+@mock.patch("cloudinit.sources.DataSourceNoCloud.util.is_lxd")
class TestNoCloudDataSource(CiTestCase):
-
def setUp(self):
super(TestNoCloudDataSource, self).setUp()
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
self.cmdline = "root=TESTCMDLINE"
@@ -29,77 +29,77 @@ class TestNoCloudDataSource(CiTestCase):
self.addCleanup(self.mocks.close)
self.mocks.enter_context(
- mock.patch.object(util, 'get_cmdline', return_value=self.cmdline))
+ mock.patch.object(util, "get_cmdline", return_value=self.cmdline)
+ )
self.mocks.enter_context(
- mock.patch.object(dmi, 'read_dmi_data', return_value=None))
+ mock.patch.object(dmi, "read_dmi_data", return_value=None)
+ )
def _test_fs_config_is_read(self, fs_label, fs_label_to_search):
- vfat_device = 'device-1'
+ vfat_device = "device-1"
def m_mount_cb(device, callback, mtype):
- if (device == vfat_device):
- return {'meta-data': yaml.dump({'instance-id': 'IID'})}
+ if device == vfat_device:
+ return {"meta-data": yaml.dump({"instance-id": "IID"})}
else:
return {}
- def m_find_devs_with(query='', path=''):
- if 'TYPE=vfat' == query:
+ def m_find_devs_with(query="", path=""):
+ if "TYPE=vfat" == query:
return [vfat_device]
- elif 'LABEL={}'.format(fs_label) == query:
+ elif "LABEL={}".format(fs_label) == query:
return [vfat_device]
else:
return []
self.mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- side_effect=m_find_devs_with))
+ mock.patch.object(
+ util, "find_devs_with", side_effect=m_find_devs_with
+ )
+ )
self.mocks.enter_context(
- mock.patch.object(util, 'mount_cb',
- side_effect=m_mount_cb))
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}}
+ mock.patch.object(util, "mount_cb", side_effect=m_mount_cb)
+ )
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": fs_label_to_search}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
- self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
+ self.assertEqual(dsrc.metadata.get("instance-id"), "IID")
self.assertTrue(ret)
def test_nocloud_seed_dir_on_lxd(self, m_is_lxd):
- md = {'instance-id': 'IID', 'dsmode': 'local'}
+ md = {"instance-id": "IID", "dsmode": "local"}
ud = b"USER_DATA_HERE"
seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
- populate_dir(seed_dir,
- {'user-data': ud, 'meta-data': yaml.safe_dump(md)})
+ populate_dir(
+ seed_dir, {"user-data": ud, "meta-data": yaml.safe_dump(md)}
+ )
- sys_cfg = {
- 'datasource': {'NoCloud': {'fs_label': None}}
- }
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertEqual(dsrc.userdata_raw, ud)
self.assertEqual(dsrc.metadata, md)
- self.assertEqual(dsrc.platform_type, 'lxd')
- self.assertEqual(
- dsrc.subplatform, 'seed-dir (%s)' % seed_dir)
+ self.assertEqual(dsrc.platform_type, "lxd")
+ self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir)
self.assertTrue(ret)
def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd):
"""Non-lxd environments will list nocloud as the platform."""
m_is_lxd.return_value = False
- md = {'instance-id': 'IID', 'dsmode': 'local'}
+ md = {"instance-id": "IID", "dsmode": "local"}
seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
- populate_dir(seed_dir,
- {'user-data': '', 'meta-data': yaml.safe_dump(md)})
+ populate_dir(
+ seed_dir, {"user-data": "", "meta-data": yaml.safe_dump(md)}
+ )
- sys_cfg = {
- 'datasource': {'NoCloud': {'fs_label': None}}
- }
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertTrue(dsrc.get_data())
- self.assertEqual(dsrc.platform_type, 'nocloud')
- self.assertEqual(
- dsrc.subplatform, 'seed-dir (%s)' % seed_dir)
+ self.assertEqual(dsrc.platform_type, "nocloud")
+ self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir)
def test_fs_label(self, m_is_lxd):
# find_devs_with should not be called ff fs_label is None
@@ -107,65 +107,70 @@ class TestNoCloudDataSource(CiTestCase):
pass
self.mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- side_effect=PsuedoException))
+ mock.patch.object(
+ util, "find_devs_with", side_effect=PsuedoException
+ )
+ )
# by default, NoCloud should search for filesystems by label
- sys_cfg = {'datasource': {'NoCloud': {}}}
+ sys_cfg = {"datasource": {"NoCloud": {}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertRaises(PsuedoException, dsrc.get_data)
# but disabling searching should just end up with None found
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertFalse(ret)
def test_fs_config_lowercase_label(self, m_is_lxd):
- self._test_fs_config_is_read('cidata', 'cidata')
+ self._test_fs_config_is_read("cidata", "cidata")
def test_fs_config_uppercase_label(self, m_is_lxd):
- self._test_fs_config_is_read('CIDATA', 'cidata')
+ self._test_fs_config_is_read("CIDATA", "cidata")
def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd):
- self._test_fs_config_is_read('cidata', 'CIDATA')
+ self._test_fs_config_is_read("cidata", "CIDATA")
def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd):
- self._test_fs_config_is_read('CIDATA', 'CIDATA')
+ self._test_fs_config_is_read("CIDATA", "CIDATA")
def test_no_datasource_expected(self, m_is_lxd):
# no source should be found if no cmdline, config, and fs_label=None
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertFalse(dsrc.get_data())
def test_seed_in_config(self, m_is_lxd):
data = {
- 'fs_label': None,
- 'meta-data': yaml.safe_dump({'instance-id': 'IID'}),
- 'user-data': b"USER_DATA_RAW",
+ "fs_label": None,
+ "meta-data": yaml.safe_dump({"instance-id": "IID"}),
+ "user-data": b"USER_DATA_RAW",
}
- sys_cfg = {'datasource': {'NoCloud': data}}
+ sys_cfg = {"datasource": {"NoCloud": data}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW")
- self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
+ self.assertEqual(dsrc.metadata.get("instance-id"), "IID")
self.assertTrue(ret)
def test_nocloud_seed_with_vendordata(self, m_is_lxd):
- md = {'instance-id': 'IID', 'dsmode': 'local'}
+ md = {"instance-id": "IID", "dsmode": "local"}
ud = b"USER_DATA_HERE"
vd = b"THIS IS MY VENDOR_DATA"
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': ud, 'meta-data': yaml.safe_dump(md),
- 'vendor-data': vd})
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {
+ "user-data": ud,
+ "meta-data": yaml.safe_dump(md),
+ "vendor-data": vd,
+ },
+ )
- sys_cfg = {
- 'datasource': {'NoCloud': {'fs_label': None}}
- }
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -175,10 +180,12 @@ class TestNoCloudDataSource(CiTestCase):
self.assertTrue(ret)
def test_nocloud_no_vendordata(self, m_is_lxd):
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud", 'meta-data': "instance-id: IID\n"})
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {"user-data": b"ud", "meta-data": "instance-id: IID\n"},
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -189,23 +196,28 @@ class TestNoCloudDataSource(CiTestCase):
def test_metadata_network_interfaces(self, m_is_lxd):
gateway = "103.225.10.1"
md = {
- 'instance-id': 'i-abcd',
- 'local-hostname': 'hostname1',
- 'network-interfaces': textwrap.dedent("""\
+ "instance-id": "i-abcd",
+ "local-hostname": "hostname1",
+ "network-interfaces": textwrap.dedent(
+ """\
auto eth0
iface eth0 inet static
hwaddr 00:16:3e:70:e1:04
address 103.225.10.12
netmask 255.255.255.0
- gateway """ + gateway + """
- dns-servers 8.8.8.8""")}
+ gateway """
+ + gateway
+ + """
+ dns-servers 8.8.8.8"""
+ ),
+ }
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': yaml.dump(md) + "\n"})
+ {"user-data": b"ud", "meta-data": yaml.dump(md) + "\n"},
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -215,16 +227,26 @@ class TestNoCloudDataSource(CiTestCase):
def test_metadata_network_config(self, m_is_lxd):
# network-config needs to get into network_config
- netconf = {'version': 1,
- 'config': [{'type': 'physical', 'name': 'interface0',
- 'subnets': [{'type': 'dhcp'}]}]}
+ netconf = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': "instance-id: IID\n",
- 'network-config': yaml.dump(netconf) + "\n"})
+ {
+ "user-data": b"ud",
+ "meta-data": "instance-id: IID\n",
+ "network-config": yaml.dump(netconf) + "\n",
+ },
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -233,14 +255,17 @@ class TestNoCloudDataSource(CiTestCase):
def test_metadata_network_config_with_toplevel_network(self, m_is_lxd):
"""network-config may have 'network' top level key."""
- netconf = {'config': 'disabled'}
+ netconf = {"config": "disabled"}
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': "instance-id: IID\n",
- 'network-config': yaml.dump({'network': netconf}) + "\n"})
+ {
+ "user-data": b"ud",
+ "meta-data": "instance-id: IID\n",
+ "network-config": yaml.dump({"network": netconf}) + "\n",
+ },
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -251,27 +276,42 @@ class TestNoCloudDataSource(CiTestCase):
# network-config should override meta-data/network-interfaces
gateway = "103.225.10.1"
md = {
- 'instance-id': 'i-abcd',
- 'local-hostname': 'hostname1',
- 'network-interfaces': textwrap.dedent("""\
+ "instance-id": "i-abcd",
+ "local-hostname": "hostname1",
+ "network-interfaces": textwrap.dedent(
+ """\
auto eth0
iface eth0 inet static
hwaddr 00:16:3e:70:e1:04
address 103.225.10.12
netmask 255.255.255.0
- gateway """ + gateway + """
- dns-servers 8.8.8.8""")}
+ gateway """
+ + gateway
+ + """
+ dns-servers 8.8.8.8"""
+ ),
+ }
- netconf = {'version': 1,
- 'config': [{'type': 'physical', 'name': 'interface0',
- 'subnets': [{'type': 'dhcp'}]}]}
+ netconf = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': yaml.dump(md) + "\n",
- 'network-config': yaml.dump(netconf) + "\n"})
+ {
+ "user-data": b"ud",
+ "meta-data": yaml.dump(md) + "\n",
+ "network-config": yaml.dump(netconf) + "\n",
+ },
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -281,17 +321,24 @@ class TestNoCloudDataSource(CiTestCase):
@mock.patch("cloudinit.util.blkid")
def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid):
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud", 'meta-data': "instance-id: IID\n"})
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {"user-data": b"ud", "meta-data": "instance-id: IID\n"},
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
self.mocks.enter_context(
- mock.patch.object(util, 'is_FreeBSD', return_value=True))
+ mock.patch.object(util, "is_FreeBSD", return_value=True)
+ )
def _mfind_devs_with_freebsd(
- criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
+ criteria=None,
+ oformat="device",
+ tag=None,
+ no_cache=False,
+ path=None,
+ ):
if not criteria:
return ["/dev/msdosfs/foo", "/dev/iso9660/foo"]
if criteria.startswith("LABEL="):
@@ -304,17 +351,19 @@ class TestNoCloudDataSource(CiTestCase):
self.mocks.enter_context(
mock.patch.object(
- util, 'find_devs_with_freebsd',
- side_effect=_mfind_devs_with_freebsd))
+ util,
+ "find_devs_with_freebsd",
+ side_effect=_mfind_devs_with_freebsd,
+ )
+ )
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
- ret = dsrc._get_devices('foo')
- self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret)
+ ret = dsrc._get_devices("foo")
+ self.assertEqual(["/dev/msdosfs/foo", "/dev/iso9660/foo"], ret)
fake_blkid.assert_not_called()
class TestParseCommandLineData(CiTestCase):
-
def test_parse_cmdline_data_valid(self):
ds_id = "ds=nocloud"
pairs = (
@@ -322,18 +371,21 @@ class TestParseCommandLineData(CiTestCase):
("%(ds_id)s; root=/dev/foo", {}),
("%(ds_id)s", {}),
("%(ds_id)s;", {}),
- ("%(ds_id)s;s=SEED", {'seedfrom': 'SEED'}),
- ("%(ds_id)s;seedfrom=SEED;local-hostname=xhost",
- {'seedfrom': 'SEED', 'local-hostname': 'xhost'}),
- ("%(ds_id)s;h=xhost",
- {'local-hostname': 'xhost'}),
- ("%(ds_id)s;h=xhost;i=IID",
- {'local-hostname': 'xhost', 'instance-id': 'IID'}),
+ ("%(ds_id)s;s=SEED", {"seedfrom": "SEED"}),
+ (
+ "%(ds_id)s;seedfrom=SEED;local-hostname=xhost",
+ {"seedfrom": "SEED", "local-hostname": "xhost"},
+ ),
+ ("%(ds_id)s;h=xhost", {"local-hostname": "xhost"}),
+ (
+ "%(ds_id)s;h=xhost;i=IID",
+ {"local-hostname": "xhost", "instance-id": "IID"},
+ ),
)
for (fmt, expected) in pairs:
fill = {}
- cmdline = fmt % {'ds_id': ds_id}
+ cmdline = fmt % {"ds_id": ds_id}
ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline)
self.assertEqual(expected, fill)
self.assertTrue(ret)
@@ -358,36 +410,44 @@ class TestParseCommandLineData(CiTestCase):
class TestMaybeRemoveToplevelNetwork(CiTestCase):
"""test _maybe_remove_top_network function."""
- basecfg = [{'type': 'physical', 'name': 'interface0',
- 'subnets': [{'type': 'dhcp'}]}]
+
+ basecfg = [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ]
def test_should_remove_safely(self):
- mcfg = {'config': self.basecfg, 'version': 1}
- self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg}))
+ mcfg = {"config": self.basecfg, "version": 1}
+ self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg}))
def test_no_remove_if_other_keys(self):
"""should not shift if other keys at top level."""
- mcfg = {'network': {'config': self.basecfg, 'version': 1},
- 'unknown_keyname': 'keyval'}
+ mcfg = {
+ "network": {"config": self.basecfg, "version": 1},
+ "unknown_keyname": "keyval",
+ }
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
def test_no_remove_if_non_dict(self):
"""should not shift if not a dict."""
- mcfg = {'network': '"content here'}
+ mcfg = {"network": '"content here'}
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
def test_no_remove_if_missing_config_or_version(self):
"""should not shift unless network entry has config and version."""
- mcfg = {'network': {'config': self.basecfg}}
+ mcfg = {"network": {"config": self.basecfg}}
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
- mcfg = {'network': {'version': 1}}
+ mcfg = {"network": {"version": 1}}
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
def test_remove_with_config_disabled(self):
"""network/config=disabled should be shifted."""
- mcfg = {'config': 'disabled'}
- self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg}))
+ mcfg = {"config": "disabled"}
+ self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg}))
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_opennebula.py b/tests/unittests/sources/test_opennebula.py
index e5963f5a..e05c4749 100644
--- a/tests/unittests/sources/test_opennebula.py
+++ b/tests/unittests/sources/test_opennebula.py
@@ -1,62 +1,61 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import helpers
-from cloudinit.sources import DataSourceOpenNebula as ds
-from cloudinit import util
-from tests.unittests.helpers import mock, populate_dir, CiTestCase
-
import os
import pwd
import unittest
import pytest
+from cloudinit import helpers, util
+from cloudinit.sources import DataSourceOpenNebula as ds
+from tests.unittests.helpers import CiTestCase, mock, populate_dir
TEST_VARS = {
- 'VAR1': 'single',
- 'VAR2': 'double word',
- 'VAR3': 'multi\nline\n',
- 'VAR4': "'single'",
- 'VAR5': "'double word'",
- 'VAR6': "'multi\nline\n'",
- 'VAR7': 'single\\t',
- 'VAR8': 'double\\tword',
- 'VAR9': 'multi\\t\nline\n',
- 'VAR10': '\\', # expect '\'
- 'VAR11': '\'', # expect '
- 'VAR12': '$', # expect $
+ "VAR1": "single",
+ "VAR2": "double word",
+ "VAR3": "multi\nline\n",
+ "VAR4": "'single'",
+ "VAR5": "'double word'",
+ "VAR6": "'multi\nline\n'",
+ "VAR7": "single\\t",
+ "VAR8": "double\\tword",
+ "VAR9": "multi\\t\nline\n",
+ "VAR10": "\\", # expect '\'
+ "VAR11": "'", # expect '
+ "VAR12": "$", # expect $
}
-INVALID_CONTEXT = ';'
-USER_DATA = '#cloud-config\napt_upgrade: true'
-SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
-HOSTNAME = 'foo.example.com'
-PUBLIC_IP = '10.0.0.3'
-MACADDR = '02:00:0a:12:01:01'
-IP_BY_MACADDR = '10.18.1.1'
-IP4_PREFIX = '24'
-IP6_GLOBAL = '2001:db8:1:0:400:c0ff:fea8:1ba'
-IP6_ULA = 'fd01:dead:beaf:0:400:c0ff:fea8:1ba'
-IP6_GW = '2001:db8:1::ffff'
-IP6_PREFIX = '48'
+INVALID_CONTEXT = ";"
+USER_DATA = "#cloud-config\napt_upgrade: true"
+SSH_KEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i"
+HOSTNAME = "foo.example.com"
+PUBLIC_IP = "10.0.0.3"
+MACADDR = "02:00:0a:12:01:01"
+IP_BY_MACADDR = "10.18.1.1"
+IP4_PREFIX = "24"
+IP6_GLOBAL = "2001:db8:1:0:400:c0ff:fea8:1ba"
+IP6_ULA = "fd01:dead:beaf:0:400:c0ff:fea8:1ba"
+IP6_GW = "2001:db8:1::ffff"
+IP6_PREFIX = "48"
DS_PATH = "cloudinit.sources.DataSourceOpenNebula"
class TestOpenNebulaDataSource(CiTestCase):
parsed_user = None
- allowed_subp = ['bash']
+ allowed_subp = ["bash"]
def setUp(self):
super(TestOpenNebulaDataSource, self).setUp()
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
# defaults for few tests
self.ds = ds.DataSourceOpenNebula
self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula")
- self.sys_cfg = {'datasource': {'OpenNebula': {'dsmode': 'local'}}}
+ self.sys_cfg = {"datasource": {"OpenNebula": {"dsmode": "local"}}}
# we don't want 'sudo' called in tests. so we patch switch_user_cmd
def my_switch_user_cmd(user):
@@ -86,7 +85,7 @@ class TestOpenNebulaDataSource(CiTestCase):
try:
# dont' try to lookup for CDs
util.find_devs_with = lambda n: []
- populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
+ populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT})
dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
finally:
@@ -97,18 +96,19 @@ class TestOpenNebulaDataSource(CiTestCase):
try:
# generate non-existing system user name
sys_cfg = self.sys_cfg
- invalid_user = 'invalid'
- while not sys_cfg['datasource']['OpenNebula'].get('parseuser'):
+ invalid_user = "invalid"
+ while not sys_cfg["datasource"]["OpenNebula"].get("parseuser"):
try:
pwd.getpwnam(invalid_user)
- invalid_user += 'X'
+ invalid_user += "X"
except KeyError:
- sys_cfg['datasource']['OpenNebula']['parseuser'] = \
- invalid_user
+ sys_cfg["datasource"]["OpenNebula"][
+ "parseuser"
+ ] = invalid_user
# dont' try to lookup for CDs
util.find_devs_with = lambda n: []
- populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
+ populate_context_dir(self.seed_dir, {"KEY1": "val1"})
dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
finally:
@@ -119,227 +119,265 @@ class TestOpenNebulaDataSource(CiTestCase):
try:
# dont' try to lookup for CDs
util.find_devs_with = lambda n: []
- populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
+ populate_context_dir(self.seed_dir, {"KEY1": "val1"})
dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertTrue(ret)
finally:
util.find_devs_with = orig_find_devs_with
- self.assertEqual('opennebula', dsrc.cloud_name)
- self.assertEqual('opennebula', dsrc.platform_type)
+ self.assertEqual("opennebula", dsrc.cloud_name)
+ self.assertEqual("opennebula", dsrc.platform_type)
self.assertEqual(
- 'seed-dir (%s/seed/opennebula)' % self.tmp, dsrc.subplatform)
+ "seed-dir (%s/seed/opennebula)" % self.tmp, dsrc.subplatform
+ )
def test_seed_dir_non_contextdisk(self):
- self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir,
- self.seed_dir, mock.Mock())
+ self.assertRaises(
+ ds.NonContextDiskDir,
+ ds.read_context_disk_dir,
+ self.seed_dir,
+ mock.Mock(),
+ )
def test_seed_dir_empty1_context(self):
- populate_dir(self.seed_dir, {'context.sh': ''})
+ populate_dir(self.seed_dir, {"context.sh": ""})
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertIsNone(results['userdata'])
- self.assertEqual(results['metadata'], {})
+ self.assertIsNone(results["userdata"])
+ self.assertEqual(results["metadata"], {})
def test_seed_dir_empty2_context(self):
populate_context_dir(self.seed_dir, {})
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertIsNone(results['userdata'])
- self.assertEqual(results['metadata'], {})
+ self.assertIsNone(results["userdata"])
+ self.assertEqual(results["metadata"], {})
def test_seed_dir_broken_context(self):
- populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
+ populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT})
- self.assertRaises(ds.BrokenContextDiskDir,
- ds.read_context_disk_dir,
- self.seed_dir, mock.Mock())
+ self.assertRaises(
+ ds.BrokenContextDiskDir,
+ ds.read_context_disk_dir,
+ self.seed_dir,
+ mock.Mock(),
+ )
def test_context_parser(self):
populate_context_dir(self.seed_dir, TEST_VARS)
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('metadata' in results)
- self.assertEqual(TEST_VARS, results['metadata'])
+ self.assertTrue("metadata" in results)
+ self.assertEqual(TEST_VARS, results["metadata"])
def test_ssh_key(self):
- public_keys = ['first key', 'second key']
+ public_keys = ["first key", "second key"]
for c in range(4):
- for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'):
+ for k in ("SSH_KEY", "SSH_PUBLIC_KEY"):
my_d = os.path.join(self.tmp, "%s-%i" % (k, c))
- populate_context_dir(my_d, {k: '\n'.join(public_keys)})
+ populate_context_dir(my_d, {k: "\n".join(public_keys)})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('metadata' in results)
- self.assertTrue('public-keys' in results['metadata'])
- self.assertEqual(public_keys,
- results['metadata']['public-keys'])
+ self.assertTrue("metadata" in results)
+ self.assertTrue("public-keys" in results["metadata"])
+ self.assertEqual(
+ public_keys, results["metadata"]["public-keys"]
+ )
public_keys.append(SSH_KEY % (c + 1,))
def test_user_data_plain(self):
- for k in ('USER_DATA', 'USERDATA'):
+ for k in ("USER_DATA", "USERDATA"):
my_d = os.path.join(self.tmp, k)
- populate_context_dir(my_d, {k: USER_DATA,
- 'USERDATA_ENCODING': ''})
+ populate_context_dir(my_d, {k: USER_DATA, "USERDATA_ENCODING": ""})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('userdata' in results)
- self.assertEqual(USER_DATA, results['userdata'])
+ self.assertTrue("userdata" in results)
+ self.assertEqual(USER_DATA, results["userdata"])
def test_user_data_encoding_required_for_decode(self):
b64userdata = util.b64e(USER_DATA)
- for k in ('USER_DATA', 'USERDATA'):
+ for k in ("USER_DATA", "USERDATA"):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: b64userdata})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('userdata' in results)
- self.assertEqual(b64userdata, results['userdata'])
+ self.assertTrue("userdata" in results)
+ self.assertEqual(b64userdata, results["userdata"])
def test_user_data_base64_encoding(self):
- for k in ('USER_DATA', 'USERDATA'):
+ for k in ("USER_DATA", "USERDATA"):
my_d = os.path.join(self.tmp, k)
- populate_context_dir(my_d, {k: util.b64e(USER_DATA),
- 'USERDATA_ENCODING': 'base64'})
+ populate_context_dir(
+ my_d, {k: util.b64e(USER_DATA), "USERDATA_ENCODING": "base64"}
+ )
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('userdata' in results)
- self.assertEqual(USER_DATA, results['userdata'])
+ self.assertTrue("userdata" in results)
+ self.assertEqual(USER_DATA, results["userdata"])
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
def test_hostname(self, m_get_phys_by_mac):
- for dev in ('eth0', 'ens3'):
+ for dev in ("eth0", "ens3"):
m_get_phys_by_mac.return_value = {MACADDR: dev}
- for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC',
- 'ETH0_IP'):
+ for k in (
+ "SET_HOSTNAME",
+ "HOSTNAME",
+ "PUBLIC_IP",
+ "IP_PUBLIC",
+ "ETH0_IP",
+ ):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: PUBLIC_IP})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('metadata' in results)
- self.assertTrue('local-hostname' in results['metadata'])
+ self.assertTrue("metadata" in results)
+ self.assertTrue("local-hostname" in results["metadata"])
self.assertEqual(
- PUBLIC_IP, results['metadata']['local-hostname'])
+ PUBLIC_IP, results["metadata"]["local-hostname"]
+ )
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
def test_network_interfaces(self, m_get_phys_by_mac):
- for dev in ('eth0', 'ens3'):
+ for dev in ("eth0", "ens3"):
m_get_phys_by_mac.return_value = {MACADDR: dev}
# without ETH0_MAC
# for Older OpenNebula?
- populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR})
+ populate_context_dir(self.seed_dir, {"ETH0_IP": IP_BY_MACADDR})
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP and ETH0_MAC
populate_context_dir(
- self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR})
+ self.seed_dir, {"ETH0_IP": IP_BY_MACADDR, "ETH0_MAC": MACADDR}
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP with empty string and ETH0_MAC
# in the case of using Virtual Network contains
# "AR = [ TYPE = ETHER ]"
populate_context_dir(
- self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR})
+ self.seed_dir, {"ETH0_IP": "", "ETH0_MAC": MACADDR}
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_MASK
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_MAC': MACADDR,
- 'ETH0_MASK': '255.255.0.0'
- })
+ self.seed_dir,
+ {
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_MAC": MACADDR,
+ "ETH0_MASK": "255.255.0.0",
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/16' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/16"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_MASK with empty string
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_MAC': MACADDR,
- 'ETH0_MASK': ''
- })
+ self.seed_dir,
+ {
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_MAC": MACADDR,
+ "ETH0_MASK": "",
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_GLOBAL + '/64' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_GLOBAL + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6_ULA
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6_ULA': IP6_ULA,
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_ULA + '/64' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_ULA + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_GLOBAL + '/' + IP6_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_GLOBAL + "/" + IP6_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH with empty string
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': '',
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_GLOBAL + '/64' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_GLOBAL + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
def test_find_candidates(self):
def my_devs_with(criteria):
@@ -352,25 +390,28 @@ class TestOpenNebulaDataSource(CiTestCase):
orig_find_devs_with = util.find_devs_with
try:
util.find_devs_with = my_devs_with
- self.assertEqual(["/dev/sdb", "/dev/sr0", "/dev/vdb"],
- ds.find_candidate_devs())
+ self.assertEqual(
+ ["/dev/sdb", "/dev/sr0", "/dev/vdb"], ds.find_candidate_devs()
+ )
finally:
util.find_devs_with = orig_find_devs_with
-@mock.patch(DS_PATH + '.net.get_interfaces_by_mac', mock.Mock(return_value={}))
+@mock.patch(DS_PATH + ".net.get_interfaces_by_mac", mock.Mock(return_value={}))
class TestOpenNebulaNetwork(unittest.TestCase):
- system_nics = ('eth0', 'ens3')
+ system_nics = ("eth0", "ens3")
def test_context_devname(self):
"""Verify context_devname correctly returns mac and name."""
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH1_MAC': '02:00:0a:12:0f:0f', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH1_MAC": "02:00:0a:12:0f:0f",
+ }
expected = {
- '02:00:0a:12:01:01': 'ETH0',
- '02:00:0a:12:0f:0f': 'ETH1', }
+ "02:00:0a:12:01:01": "ETH0",
+ "02:00:0a:12:0f:0f": "ETH1",
+ }
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(expected, net.context_devname)
@@ -380,28 +421,30 @@ class TestOpenNebulaNetwork(unittest.TestCase):
and search domains.
"""
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_SEARCH_DOMAIN': 'example.com example.org', }
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
+ }
expected = {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com', 'example.org']}
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ }
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_nameservers('eth0')
+ val = net.get_nameservers("eth0")
self.assertEqual(expected, val)
def test_get_mtu(self):
"""Verify get_mtu('device') correctly returns MTU size."""
- context = {'ETH0_MTU': '1280'}
+ context = {"ETH0_MTU": "1280"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_mtu('eth0')
- self.assertEqual('1280', val)
+ val = net.get_mtu("eth0")
+ self.assertEqual("1280", val)
def test_get_ip(self):
"""Verify get_ip('device') correctly returns IPv4 address."""
- context = {'ETH0_IP': PUBLIC_IP}
+ context = {"ETH0_IP": PUBLIC_IP}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip('eth0', MACADDR)
+ val = net.get_ip("eth0", MACADDR)
self.assertEqual(PUBLIC_IP, val)
def test_get_ip_emptystring(self):
@@ -410,9 +453,9 @@ class TestOpenNebulaNetwork(unittest.TestCase):
It returns IP address created by MAC address if ETH0_IP has empty
string.
"""
- context = {'ETH0_IP': ''}
+ context = {"ETH0_IP": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip('eth0', MACADDR)
+ val = net.get_ip("eth0", MACADDR)
self.assertEqual(IP_BY_MACADDR, val)
def test_get_ip6(self):
@@ -421,11 +464,12 @@ class TestOpenNebulaNetwork(unittest.TestCase):
In this case, IPv6 address is Given by ETH0_IP6.
"""
context = {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_ULA': '', }
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_ULA": "",
+ }
expected = [IP6_GLOBAL]
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6('eth0')
+ val = net.get_ip6("eth0")
self.assertEqual(expected, val)
def test_get_ip6_ula(self):
@@ -434,11 +478,12 @@ class TestOpenNebulaNetwork(unittest.TestCase):
In this case, IPv6 address is Given by ETH0_IP6_ULA.
"""
context = {
- 'ETH0_IP6': '',
- 'ETH0_IP6_ULA': IP6_ULA, }
+ "ETH0_IP6": "",
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
expected = [IP6_ULA]
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6('eth0')
+ val = net.get_ip6("eth0")
self.assertEqual(expected, val)
def test_get_ip6_dual(self):
@@ -447,20 +492,21 @@ class TestOpenNebulaNetwork(unittest.TestCase):
In this case, IPv6 addresses are Given by ETH0_IP6 and ETH0_IP6_ULA.
"""
context = {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_ULA': IP6_ULA, }
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
expected = [IP6_GLOBAL, IP6_ULA]
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6('eth0')
+ val = net.get_ip6("eth0")
self.assertEqual(expected, val)
def test_get_ip6_prefix(self):
"""
Verify get_ip6_prefix('device') correctly returns IPv6 prefix.
"""
- context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX}
+ context = {"ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6_prefix('eth0')
+ val = net.get_ip6_prefix("eth0")
self.assertEqual(IP6_PREFIX, val)
def test_get_ip6_prefix_emptystring(self):
@@ -469,59 +515,59 @@ class TestOpenNebulaNetwork(unittest.TestCase):
It returns default value '64' if ETH0_IP6_PREFIX_LENGTH has empty
string.
"""
- context = {'ETH0_IP6_PREFIX_LENGTH': ''}
+ context = {"ETH0_IP6_PREFIX_LENGTH": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6_prefix('eth0')
- self.assertEqual('64', val)
+ val = net.get_ip6_prefix("eth0")
+ self.assertEqual("64", val)
def test_get_gateway(self):
"""
Verify get_gateway('device') correctly returns IPv4 default gateway
address.
"""
- context = {'ETH0_GATEWAY': '1.2.3.5'}
+ context = {"ETH0_GATEWAY": "1.2.3.5"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_gateway('eth0')
- self.assertEqual('1.2.3.5', val)
+ val = net.get_gateway("eth0")
+ self.assertEqual("1.2.3.5", val)
def test_get_gateway6(self):
"""
Verify get_gateway6('device') correctly returns IPv6 default gateway
address.
"""
- for k in ('GATEWAY6', 'IP6_GATEWAY'):
- context = {'ETH0_' + k: IP6_GW}
+ for k in ("GATEWAY6", "IP6_GATEWAY"):
+ context = {"ETH0_" + k: IP6_GW}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_gateway6('eth0')
+ val = net.get_gateway6("eth0")
self.assertEqual(IP6_GW, val)
def test_get_mask(self):
"""
Verify get_mask('device') correctly returns IPv4 subnet mask.
"""
- context = {'ETH0_MASK': '255.255.0.0'}
+ context = {"ETH0_MASK": "255.255.0.0"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_mask('eth0')
- self.assertEqual('255.255.0.0', val)
+ val = net.get_mask("eth0")
+ self.assertEqual("255.255.0.0", val)
def test_get_mask_emptystring(self):
"""
Verify get_mask('device') correctly returns IPv4 subnet mask.
It returns default value '255.255.255.0' if ETH0_MASK has empty string.
"""
- context = {'ETH0_MASK': ''}
+ context = {"ETH0_MASK": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_mask('eth0')
- self.assertEqual('255.255.255.0', val)
+ val = net.get_mask("eth0")
+ self.assertEqual("255.255.255.0", val)
def test_get_network(self):
"""
Verify get_network('device') correctly returns IPv4 network address.
"""
- context = {'ETH0_NETWORK': '1.2.3.0'}
+ context = {"ETH0_NETWORK": "1.2.3.0"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_network('eth0', MACADDR)
- self.assertEqual('1.2.3.0', val)
+ val = net.get_network("eth0", MACADDR)
+ self.assertEqual("1.2.3.0", val)
def test_get_network_emptystring(self):
"""
@@ -529,48 +575,48 @@ class TestOpenNebulaNetwork(unittest.TestCase):
It returns network address created by MAC address if ETH0_NETWORK has
empty string.
"""
- context = {'ETH0_NETWORK': ''}
+ context = {"ETH0_NETWORK": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_network('eth0', MACADDR)
- self.assertEqual('10.18.1.0', val)
+ val = net.get_network("eth0", MACADDR)
+ self.assertEqual("10.18.1.0", val)
def test_get_field(self):
"""
Verify get_field('device', 'name') returns *context* value.
"""
- context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
+ context = {"ETH9_DUMMY": "DUMMY_VALUE"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy')
- self.assertEqual('DUMMY_VALUE', val)
+ val = net.get_field("eth9", "dummy")
+ self.assertEqual("DUMMY_VALUE", val)
def test_get_field_withdefaultvalue(self):
"""
Verify get_field('device', 'name', 'default value') returns *context*
value.
"""
- context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
+ context = {"ETH9_DUMMY": "DUMMY_VALUE"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
- self.assertEqual('DUMMY_VALUE', val)
+ val = net.get_field("eth9", "dummy", "DEFAULT_VALUE")
+ self.assertEqual("DUMMY_VALUE", val)
def test_get_field_withdefaultvalue_emptycontext(self):
"""
Verify get_field('device', 'name', 'default value') returns *default*
value if context value is empty string.
"""
- context = {'ETH9_DUMMY': ''}
+ context = {"ETH9_DUMMY": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
- self.assertEqual('DEFAULT_VALUE', val)
+ val = net.get_field("eth9", "dummy", "DEFAULT_VALUE")
+ self.assertEqual("DEFAULT_VALUE", val)
def test_get_field_emptycontext(self):
"""
Verify get_field('device', 'name') returns None if context value is
empty string.
"""
- context = {'ETH9_DUMMY': ''}
+ context = {"ETH9_DUMMY": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy')
+ val = net.get_field("eth9", "dummy")
self.assertEqual(None, val)
def test_get_field_nonecontext(self):
@@ -578,9 +624,9 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_field('device', 'name') returns None if context value is
None.
"""
- context = {'ETH9_DUMMY': None}
+ context = {"ETH9_DUMMY": None}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy')
+ val = net.get_field("eth9", "dummy")
self.assertEqual(None, val)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
@@ -589,31 +635,39 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_GATEWAY
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_GATEWAY
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY': '1.2.3.5', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY": "1.2.3.5",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'gateway4': '1.2.3.5',
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "gateway4": "1.2.3.5",
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -624,31 +678,39 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_GATEWAY6
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY6': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY6": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_GATEWAY6
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY6': IP6_GW, }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY6": IP6_GW,
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'gateway6': IP6_GW,
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "gateway6": IP6_GW,
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -659,37 +721,46 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_IP6': '',
- 'ETH0_IP6_ULA': '',
- 'ETH0_IP6_PREFIX_LENGTH': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_IP6": "",
+ "ETH0_IP6_ULA": "",
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
- 'ETH0_IP6_ULA': IP6_ULA, }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [
- IP_BY_MACADDR + '/' + IP4_PREFIX,
- IP6_GLOBAL + '/' + IP6_PREFIX,
- IP6_ULA + '/' + IP6_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [
+ IP_BY_MACADDR + "/" + IP4_PREFIX,
+ IP6_GLOBAL + "/" + IP6_PREFIX,
+ IP6_ULA + "/" + IP6_PREFIX,
+ ],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -700,37 +771,46 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'DNS': '',
- 'ETH0_DNS': '',
- 'ETH0_SEARCH_DOMAIN': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "DNS": "",
+ "ETH0_DNS": "",
+ "ETH0_SEARCH_DOMAIN": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_SEARCH_DOMAIN': 'example.com example.org', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com', 'example.org']},
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ },
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -741,31 +821,39 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_MTU
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MTU': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MTU": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_MTU
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MTU': '1280', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MTU": "1280",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'mtu': '1280',
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "mtu": "1280",
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -776,11 +864,14 @@ class TestOpenNebulaNetwork(unittest.TestCase):
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork({}, mock.Mock())
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
self.assertEqual(net.gen_conf(), expected)
@@ -795,71 +886,82 @@ class TestOpenNebulaNetwork(unittest.TestCase):
def test_eth0_override(self):
self.maxDiff = None
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_GATEWAY': '1.2.3.5',
- 'ETH0_GATEWAY6': '',
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_IP6': '',
- 'ETH0_IP6_PREFIX_LENGTH': '',
- 'ETH0_IP6_ULA': '',
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MASK': '255.255.0.0',
- 'ETH0_MTU': '',
- 'ETH0_NETWORK': '10.18.0.0',
- 'ETH0_SEARCH_DOMAIN': '',
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": "",
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_IP6": "",
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_IP6_ULA": "",
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "",
}
for nic in self.system_nics:
- net = ds.OpenNebulaNetwork(context, mock.Mock(),
- system_nics_by_mac={MACADDR: nic})
+ net = ds.OpenNebulaNetwork(
+ context, mock.Mock(), system_nics_by_mac={MACADDR: nic}
+ )
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/16'],
- 'gateway4': '1.2.3.5',
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/16"],
+ "gateway4": "1.2.3.5",
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"]
+ },
+ }
+ },
+ }
self.assertEqual(expected, net.gen_conf())
def test_eth0_v4v6_override(self):
self.maxDiff = None
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_GATEWAY': '1.2.3.5',
- 'ETH0_GATEWAY6': IP6_GW,
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
- 'ETH0_IP6_ULA': IP6_ULA,
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MASK': '255.255.0.0',
- 'ETH0_MTU': '1280',
- 'ETH0_NETWORK': '10.18.0.0',
- 'ETH0_SEARCH_DOMAIN': 'example.com example.org',
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": IP6_GW,
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "1280",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
}
for nic in self.system_nics:
- net = ds.OpenNebulaNetwork(context, mock.Mock(),
- system_nics_by_mac={MACADDR: nic})
+ net = ds.OpenNebulaNetwork(
+ context, mock.Mock(), system_nics_by_mac={MACADDR: nic}
+ )
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [
- IP_BY_MACADDR + '/16',
- IP6_GLOBAL + '/' + IP6_PREFIX,
- IP6_ULA + '/' + IP6_PREFIX],
- 'gateway4': '1.2.3.5',
- 'gateway6': IP6_GW,
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com', 'example.org']},
- 'mtu': '1280'}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [
+ IP_BY_MACADDR + "/16",
+ IP6_GLOBAL + "/" + IP6_PREFIX,
+ IP6_ULA + "/" + IP6_PREFIX,
+ ],
+ "gateway4": "1.2.3.5",
+ "gateway6": IP6_GW,
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ },
+ "mtu": "1280",
+ }
+ },
+ }
self.assertEqual(expected, net.gen_conf())
@@ -869,62 +971,67 @@ class TestOpenNebulaNetwork(unittest.TestCase):
MAC_1 = "02:00:0a:12:01:01"
MAC_2 = "02:00:0a:12:01:02"
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_GATEWAY': '1.2.3.5',
- 'ETH0_GATEWAY6': IP6_GW,
- 'ETH0_IP': '10.18.1.1',
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': '',
- 'ETH0_IP6_ULA': IP6_ULA,
- 'ETH0_MAC': MAC_2,
- 'ETH0_MASK': '255.255.0.0',
- 'ETH0_MTU': '1280',
- 'ETH0_NETWORK': '10.18.0.0',
- 'ETH0_SEARCH_DOMAIN': 'example.com',
- 'ETH3_DNS': '10.3.1.2',
- 'ETH3_GATEWAY': '10.3.0.1',
- 'ETH3_GATEWAY6': '',
- 'ETH3_IP': '10.3.1.3',
- 'ETH3_IP6': '',
- 'ETH3_IP6_PREFIX_LENGTH': '',
- 'ETH3_IP6_ULA': '',
- 'ETH3_MAC': MAC_1,
- 'ETH3_MASK': '255.255.0.0',
- 'ETH3_MTU': '',
- 'ETH3_NETWORK': '10.3.0.0',
- 'ETH3_SEARCH_DOMAIN': 'third.example.com third.example.org',
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": IP6_GW,
+ "ETH0_IP": "10.18.1.1",
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": MAC_2,
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "1280",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "example.com",
+ "ETH3_DNS": "10.3.1.2",
+ "ETH3_GATEWAY": "10.3.0.1",
+ "ETH3_GATEWAY6": "",
+ "ETH3_IP": "10.3.1.3",
+ "ETH3_IP6": "",
+ "ETH3_IP6_PREFIX_LENGTH": "",
+ "ETH3_IP6_ULA": "",
+ "ETH3_MAC": MAC_1,
+ "ETH3_MASK": "255.255.0.0",
+ "ETH3_MTU": "",
+ "ETH3_NETWORK": "10.3.0.0",
+ "ETH3_SEARCH_DOMAIN": "third.example.com third.example.org",
}
net = ds.OpenNebulaNetwork(
context,
mock.Mock(),
- system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'}
+ system_nics_by_mac={MAC_1: "enp0s25", MAC_2: "enp1s2"},
)
expected = {
- 'version': 2,
- 'ethernets': {
- 'enp1s2': {
- 'match': {'macaddress': MAC_2},
- 'addresses': [
- '10.18.1.1/16',
- IP6_GLOBAL + '/64',
- IP6_ULA + '/64'],
- 'gateway4': '1.2.3.5',
- 'gateway6': IP6_GW,
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com']},
- 'mtu': '1280'},
- 'enp0s25': {
- 'match': {'macaddress': MAC_1},
- 'addresses': ['10.3.1.3/16'],
- 'gateway4': '10.3.0.1',
- 'nameservers': {
- 'addresses': ['10.3.1.2', '1.2.3.8'],
- 'search': [
- 'third.example.com',
- 'third.example.org']}}}}
+ "version": 2,
+ "ethernets": {
+ "enp1s2": {
+ "match": {"macaddress": MAC_2},
+ "addresses": [
+ "10.18.1.1/16",
+ IP6_GLOBAL + "/64",
+ IP6_ULA + "/64",
+ ],
+ "gateway4": "1.2.3.5",
+ "gateway6": IP6_GW,
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com"],
+ },
+ "mtu": "1280",
+ },
+ "enp0s25": {
+ "match": {"macaddress": MAC_1},
+ "addresses": ["10.3.1.3/16"],
+ "gateway4": "10.3.0.1",
+ "nameservers": {
+ "addresses": ["10.3.1.2", "1.2.3.8"],
+ "search": ["third.example.com", "third.example.org"],
+ },
+ },
+ },
+ }
self.assertEqual(expected, net.gen_conf())
@@ -932,7 +1039,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
class TestParseShellConfig:
@pytest.mark.allow_subp_for("bash")
def test_no_seconds(self):
- cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"])
+ cfg = "\n".join(["foo=bar", "SECONDS=2", "xx=foo"])
# we could test 'sleep 2', but that would make the test run slower.
ret = ds.parse_shell_config(cfg)
assert ret == {"foo": "bar", "xx": "foo"}
@@ -971,7 +1078,8 @@ class TestGetPhysicalNicsByMac:
def populate_context_dir(path, variables):
data = "# Context variables generated by OpenNebula\n"
for k, v in variables.items():
- data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''")))
- populate_dir(path, {'context.sh': data})
+ data += "%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))
+ populate_dir(path, {"context.sh": data})
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py
index 0d6fb04a..c111bbcd 100644
--- a/tests/unittests/sources/test_openstack.py
+++ b/tests/unittests/sources/test_openstack.py
@@ -5,74 +5,74 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
-import httpretty as hp
import json
import re
from io import StringIO
from urllib.parse import urlparse
-from tests.unittests import helpers as test_helpers
+import httpretty as hp
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET
+from cloudinit import helpers, settings, util
+from cloudinit.sources import UNSET, BrokenMetadata
from cloudinit.sources import DataSourceOpenStack as ds
+from cloudinit.sources import convert_vendordata
from cloudinit.sources.helpers import openstack
-from cloudinit import util
+from tests.unittests import helpers as test_helpers
BASE_URL = "http://169.254.169.254"
-PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
+PUBKEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n"
EC2_META = {
- 'ami-id': 'ami-00000001',
- 'ami-launch-index': '0',
- 'ami-manifest-path': 'FIXME',
- 'hostname': 'sm-foo-test.novalocal',
- 'instance-action': 'none',
- 'instance-id': 'i-00000001',
- 'instance-type': 'm1.tiny',
- 'local-hostname': 'sm-foo-test.novalocal',
- 'local-ipv4': '0.0.0.0',
- 'public-hostname': 'sm-foo-test.novalocal',
- 'public-ipv4': '0.0.0.1',
- 'reservation-id': 'r-iru5qm4m',
+ "ami-id": "ami-00000001",
+ "ami-launch-index": "0",
+ "ami-manifest-path": "FIXME",
+ "hostname": "sm-foo-test.novalocal",
+ "instance-action": "none",
+ "instance-id": "i-00000001",
+ "instance-type": "m1.tiny",
+ "local-hostname": "sm-foo-test.novalocal",
+ "local-ipv4": "0.0.0.0",
+ "public-hostname": "sm-foo-test.novalocal",
+ "public-ipv4": "0.0.0.1",
+ "reservation-id": "r-iru5qm4m",
}
-USER_DATA = b'#!/bin/sh\necho This is user data\n'
+USER_DATA = b"#!/bin/sh\necho This is user data\n"
VENDOR_DATA = {
- 'magic': '',
-}
-VENDOR_DATA2 = {
- 'static': {}
+ "magic": "",
}
+VENDOR_DATA2 = {"static": {}}
OSTACK_META = {
- 'availability_zone': 'nova',
- 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
- {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
- 'hostname': 'sm-foo-test.novalocal',
- 'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
- 'name': 'sm-foo-test',
- 'public_keys': {'mykey': PUBKEY},
- 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c',
+ "availability_zone": "nova",
+ "files": [
+ {"content_path": "/content/0000", "path": "/etc/foo.cfg"},
+ {"content_path": "/content/0001", "path": "/etc/bar/bar.cfg"},
+ ],
+ "hostname": "sm-foo-test.novalocal",
+ "meta": {"dsmode": "local", "my-meta": "my-value"},
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
}
-CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
+CONTENT_0 = b"This is contents of /etc/foo.cfg\n"
+CONTENT_1 = b"# this is /etc/bar/bar.cfg\n"
OS_FILES = {
- 'openstack/content/0000': CONTENT_0,
- 'openstack/content/0001': CONTENT_1,
- 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/latest/network_data.json': json.dumps(
- {'links': [], 'networks': [], 'services': []}),
- 'openstack/latest/user_data': USER_DATA,
- 'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA),
- 'openstack/latest/vendor_data2.json': json.dumps(VENDOR_DATA2),
+ "openstack/content/0000": CONTENT_0,
+ "openstack/content/0001": CONTENT_1,
+ "openstack/latest/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/latest/network_data.json": json.dumps(
+ {"links": [], "networks": [], "services": []}
+ ),
+ "openstack/latest/user_data": USER_DATA,
+ "openstack/latest/vendor_data.json": json.dumps(VENDOR_DATA),
+ "openstack/latest/vendor_data2.json": json.dumps(VENDOR_DATA2),
}
EC2_FILES = {
- 'latest/user-data': USER_DATA,
+ "latest/user-data": USER_DATA,
}
EC2_VERSIONS = [
- 'latest',
+ "latest",
]
-MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.'
+MOCK_PATH = "cloudinit.sources.DataSourceOpenStack."
# TODO _register_uris should leverage test_ec2.register_mock_metaserver.
@@ -87,7 +87,7 @@ def _register_uris(version, ec2_files, ec2_meta, os_files):
path = uri.path.lstrip("/")
if path in ec2_files:
return (200, headers, ec2_files.get(path))
- if path == 'latest/meta-data/':
+ if path == "latest/meta-data/":
buf = StringIO()
for (k, v) in ec2_meta.items():
if isinstance(v, (list, tuple)):
@@ -96,7 +96,7 @@ def _register_uris(version, ec2_files, ec2_meta, os_files):
buf.write("%s" % (k))
buf.write("\n")
return (200, headers, buf.getvalue())
- if path.startswith('latest/meta-data/'):
+ if path.startswith("latest/meta-data/"):
value = None
pieces = path.split("/")
if path.endswith("/"):
@@ -107,26 +107,29 @@ def _register_uris(version, ec2_files, ec2_meta, os_files):
value = util.get_cfg_by_path(ec2_meta, pieces)
if value is not None:
return (200, headers, str(value))
- return (404, headers, '')
+ return (404, headers, "")
def match_os_uri(uri, headers):
path = uri.path.strip("/")
- if path == 'openstack':
+ if path == "openstack":
return (200, headers, "\n".join([openstack.OS_LATEST]))
path = uri.path.lstrip("/")
if path in os_files:
return (200, headers, os_files.get(path))
- return (404, headers, '')
+ return (404, headers, "")
def get_request_callback(method, uri, headers):
uri = urlparse(uri)
path = uri.path.lstrip("/").split("/")
- if path[0] == 'openstack':
+ if path[0] == "openstack":
return match_os_uri(uri, headers)
return match_ec2_url(uri, headers)
- hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'),
- body=get_request_callback)
+ hp.register_uri(
+ hp.GET,
+ re.compile(r"http://169.254.169.254/.*"),
+ body=get_request_callback,
+ )
def _read_metadata_service():
@@ -136,7 +139,7 @@ def _read_metadata_service():
class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
with_logs = True
- VERSION = 'latest'
+ VERSION = "latest"
def setUp(self):
super(TestOpenStackDataSource, self).setUp()
@@ -145,40 +148,43 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
def test_successful(self):
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertEqual(2, len(f['files']))
- self.assertEqual(USER_DATA, f.get('userdata'))
- self.assertEqual(EC2_META, f.get('ec2-metadata'))
- self.assertEqual(2, f.get('version'))
- metadata = f['metadata']
- self.assertEqual('nova', metadata.get('availability_zone'))
- self.assertEqual('sm-foo-test.novalocal', metadata.get('hostname'))
- self.assertEqual('sm-foo-test.novalocal',
- metadata.get('local-hostname'))
- self.assertEqual('sm-foo-test', metadata.get('name'))
- self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
- metadata.get('uuid'))
- self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
- metadata.get('instance-id'))
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertEqual(2, len(f["files"]))
+ self.assertEqual(USER_DATA, f.get("userdata"))
+ self.assertEqual(EC2_META, f.get("ec2-metadata"))
+ self.assertEqual(2, f.get("version"))
+ metadata = f["metadata"]
+ self.assertEqual("nova", metadata.get("availability_zone"))
+ self.assertEqual("sm-foo-test.novalocal", metadata.get("hostname"))
+ self.assertEqual(
+ "sm-foo-test.novalocal", metadata.get("local-hostname")
+ )
+ self.assertEqual("sm-foo-test", metadata.get("name"))
+ self.assertEqual(
+ "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("uuid")
+ )
+ self.assertEqual(
+ "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("instance-id")
+ )
def test_no_ec2(self):
_register_uris(self.VERSION, {}, {}, OS_FILES)
f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertEqual(USER_DATA, f.get('userdata'))
- self.assertEqual({}, f.get('ec2-metadata'))
- self.assertEqual(2, f.get('version'))
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertEqual(USER_DATA, f.get("userdata"))
+ self.assertEqual({}, f.get("ec2-metadata"))
+ self.assertEqual(2, f.get("version"))
def test_bad_metadata(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
+ if k.endswith("meta_data.json"):
os_files.pop(k, None)
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(openstack.NonReadable, _read_metadata_service)
@@ -186,9 +192,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
def test_bad_uuid(self):
os_files = copy.deepcopy(OS_FILES)
os_meta = copy.deepcopy(OSTACK_META)
- os_meta.pop('uuid')
+ os_meta.pop("uuid")
for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
+ if k.endswith("meta_data.json"):
os_files[k] = json.dumps(os_meta)
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(BrokenMetadata, _read_metadata_service)
@@ -196,77 +202,78 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
def test_userdata_empty(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
- if k.endswith('user_data'):
+ if k.endswith("user_data"):
os_files.pop(k, None)
_register_uris(self.VERSION, {}, {}, os_files)
f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertFalse(f.get('userdata'))
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("userdata"))
def test_vendordata_empty(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
- if k.endswith('vendor_data.json'):
+ if k.endswith("vendor_data.json"):
os_files.pop(k, None)
_register_uris(self.VERSION, {}, {}, os_files)
f = _read_metadata_service()
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertFalse(f.get('vendordata'))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("vendordata"))
def test_vendordata2_empty(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
- if k.endswith('vendor_data2.json'):
+ if k.endswith("vendor_data2.json"):
os_files.pop(k, None)
_register_uris(self.VERSION, {}, {}, os_files)
f = _read_metadata_service()
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertFalse(f.get('vendordata2'))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("vendordata2"))
def test_vendordata_invalid(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
- if k.endswith('vendor_data.json'):
- os_files[k] = '{' # some invalid json
+ if k.endswith("vendor_data.json"):
+ os_files[k] = "{" # some invalid json
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(BrokenMetadata, _read_metadata_service)
def test_vendordata2_invalid(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
- if k.endswith('vendor_data2.json'):
- os_files[k] = '{' # some invalid json
+ if k.endswith("vendor_data2.json"):
+ os_files[k] = "{" # some invalid json
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(BrokenMetadata, _read_metadata_service)
def test_metadata_invalid(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = '{' # some invalid json
+ if k.endswith("meta_data.json"):
+ os_files[k] = "{" # some invalid json
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(BrokenMetadata, _read_metadata_service)
- @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_datasource(self, m_dhcp):
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
+ mock_path = MOCK_PATH + "detect_openstack"
with test_helpers.mock.patch(mock_path) as m_detect_os:
m_detect_os.return_value = True
found = ds_os.get_data()
self.assertTrue(found)
self.assertEqual(2, ds_os.version)
md = dict(ds_os.metadata)
- md.pop('instance-id', None)
- md.pop('local-hostname', None)
+ md.pop("instance-id", None)
+ md.pop("local-hostname", None)
self.assertEqual(OSTACK_META, md)
self.assertEqual(EC2_META, ds_os.ec2_metadata)
self.assertEqual(USER_DATA, ds_os.userdata_raw)
@@ -277,29 +284,35 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
m_dhcp.assert_not_called()
@hp.activate
- @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ @test_helpers.mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_local_datasource(self, m_dhcp, m_net):
"""OpenStackLocal calls EphemeralDHCPNetwork and gets instance data."""
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
ds_os_local = ds.DataSourceOpenStackLocal(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- ds_os_local._fallback_interface = 'eth9' # Monkey patch for dhcp
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'broadcast-address': '192.168.2.255'}]
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os_local._fallback_interface = "eth9" # Monkey patch for dhcp
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.2.255",
+ }
+ ]
self.assertIsNone(ds_os_local.version)
- mock_path = MOCK_PATH + 'detect_openstack'
+ mock_path = MOCK_PATH + "detect_openstack"
with test_helpers.mock.patch(mock_path) as m_detect_os:
m_detect_os.return_value = True
found = ds_os_local.get_data()
self.assertTrue(found)
self.assertEqual(2, ds_os_local.version)
md = dict(ds_os_local.metadata)
- md.pop('instance-id', None)
- md.pop('local-hostname', None)
+ md.pop("instance-id", None)
+ md.pop("local-hostname", None)
self.assertEqual(OSTACK_META, md)
self.assertEqual(EC2_META, ds_os_local.ec2_metadata)
self.assertEqual(USER_DATA, ds_os_local.userdata_raw)
@@ -307,44 +320,45 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure)
self.assertIsNone(ds_os_local.vendordata_raw)
- m_dhcp.assert_called_with('eth9', None)
+ m_dhcp.assert_called_with("eth9", None)
def test_bad_datasource_meta(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = '{' # some invalid json
+ if k.endswith("meta_data.json"):
+ os_files[k] = "{" # some invalid json
_register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({'run_dir': self.tmp}))
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
+ mock_path = MOCK_PATH + "detect_openstack"
with test_helpers.mock.patch(mock_path) as m_detect_os:
m_detect_os.return_value = True
found = ds_os.get_data()
self.assertFalse(found)
self.assertIsNone(ds_os.version)
self.assertIn(
- 'InvalidMetaDataException: Broken metadata address'
- ' http://169.254.169.25',
- self.logs.getvalue())
+ "InvalidMetaDataException: Broken metadata address"
+ " http://169.254.169.25",
+ self.logs.getvalue(),
+ )
def test_no_datasource(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
+ if k.endswith("meta_data.json"):
os_files.pop(k)
_register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({'run_dir': self.tmp}))
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds_os.ds_cfg = {
- 'max_wait': 0,
- 'timeout': 0,
+ "max_wait": 0,
+ "timeout": 0,
}
self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
+ mock_path = MOCK_PATH + "detect_openstack"
with test_helpers.mock.patch(mock_path) as m_detect_os:
m_detect_os.return_value = True
found = ds_os.get_data()
@@ -353,12 +367,16 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
def test_network_config_disabled_by_datasource_config(self):
"""The network_config can be disabled from datasource config."""
- mock_path = MOCK_PATH + 'openstack.convert_net_json'
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- ds_os.ds_cfg = {'apply_network_config': False}
- sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
- 'networks': [], 'services': []}
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os.ds_cfg = {"apply_network_config": False}
+ sample_json = {
+ "links": [{"ethernet_mac_address": "mymac"}],
+ "networks": [],
+ "services": [],
+ }
ds_os.network_json = sample_json # Ignore this content from metadata
with test_helpers.mock.patch(mock_path) as m_convert_json:
self.assertIsNone(ds_os.network_config)
@@ -366,26 +384,32 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
def test_network_config_from_network_json(self):
"""The datasource gets network_config from network_data.json."""
- mock_path = MOCK_PATH + 'openstack.convert_net_json'
- example_cfg = {'version': 1, 'config': []}
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
+ example_cfg = {"version": 1, "config": []}
ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
- 'networks': [], 'services': []}
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ sample_json = {
+ "links": [{"ethernet_mac_address": "mymac"}],
+ "networks": [],
+ "services": [],
+ }
ds_os.network_json = sample_json
with test_helpers.mock.patch(mock_path) as m_convert_json:
m_convert_json.return_value = example_cfg
self.assertEqual(example_cfg, ds_os.network_config)
self.assertIn(
- 'network config provided via network_json', self.logs.getvalue())
+ "network config provided via network_json", self.logs.getvalue()
+ )
m_convert_json.assert_called_with(sample_json, known_macs=None)
def test_network_config_cached(self):
"""The datasource caches the network_config property."""
- mock_path = MOCK_PATH + 'openstack.convert_net_json'
- example_cfg = {'version': 1, 'config': []}
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
+ example_cfg = {"version": 1, "config": []}
ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds_os._network_config = example_cfg
with test_helpers.mock.patch(mock_path) as m_convert_json:
self.assertEqual(example_cfg, ds_os.network_config)
@@ -394,22 +418,22 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
def test_disabled_datasource(self):
os_files = copy.deepcopy(OS_FILES)
os_meta = copy.deepcopy(OSTACK_META)
- os_meta['meta'] = {
- 'dsmode': 'disabled',
+ os_meta["meta"] = {
+ "dsmode": "disabled",
}
for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
+ if k.endswith("meta_data.json"):
os_files[k] = json.dumps(os_meta)
_register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({'run_dir': self.tmp}))
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds_os.ds_cfg = {
- 'max_wait': 0,
- 'timeout': 0,
+ "max_wait": 0,
+ "timeout": 0,
}
self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
+ mock_path = MOCK_PATH + "detect_openstack"
with test_helpers.mock.patch(mock_path) as m_detect_os:
m_detect_os.return_value = True
found = ds_os.get_data()
@@ -421,30 +445,42 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
"""_crawl_metadata returns current metadata and does not cache."""
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
crawled_data = ds_os._crawl_metadata()
self.assertEqual(UNSET, ds_os.ec2_metadata)
self.assertIsNone(ds_os.userdata_raw)
self.assertEqual(0, len(ds_os.files))
self.assertIsNone(ds_os.vendordata_raw)
self.assertEqual(
- ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata',
- 'userdata', 'vendordata', 'vendordata2', 'version'],
- sorted(crawled_data.keys()))
- self.assertEqual('local', crawled_data['dsmode'])
- self.assertEqual(EC2_META, crawled_data['ec2-metadata'])
- self.assertEqual(2, len(crawled_data['files']))
- md = copy.deepcopy(crawled_data['metadata'])
- md.pop('instance-id')
- md.pop('local-hostname')
+ [
+ "dsmode",
+ "ec2-metadata",
+ "files",
+ "metadata",
+ "networkdata",
+ "userdata",
+ "vendordata",
+ "vendordata2",
+ "version",
+ ],
+ sorted(crawled_data.keys()),
+ )
+ self.assertEqual("local", crawled_data["dsmode"])
+ self.assertEqual(EC2_META, crawled_data["ec2-metadata"])
+ self.assertEqual(2, len(crawled_data["files"]))
+ md = copy.deepcopy(crawled_data["metadata"])
+ md.pop("instance-id")
+ md.pop("local-hostname")
self.assertEqual(OSTACK_META, md)
self.assertEqual(
- json.loads(OS_FILES['openstack/latest/network_data.json']),
- crawled_data['networkdata'])
- self.assertEqual(USER_DATA, crawled_data['userdata'])
- self.assertEqual(VENDOR_DATA, crawled_data['vendordata'])
- self.assertEqual(VENDOR_DATA2, crawled_data['vendordata2'])
- self.assertEqual(2, crawled_data['version'])
+ json.loads(OS_FILES["openstack/latest/network_data.json"]),
+ crawled_data["networkdata"],
+ )
+ self.assertEqual(USER_DATA, crawled_data["userdata"])
+ self.assertEqual(VENDOR_DATA, crawled_data["vendordata"])
+ self.assertEqual(VENDOR_DATA2, crawled_data["vendordata2"])
+ self.assertEqual(2, crawled_data["version"])
class TestVendorDataLoading(test_helpers.TestCase):
@@ -459,261 +495,289 @@ class TestVendorDataLoading(test_helpers.TestCase):
self.assertEqual(self.cvj("foobar"), "foobar")
def test_vd_load_list(self):
- data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])]
+ data = [{"foo": "bar"}, "mystring", list(["another", "list"])]
self.assertEqual(self.cvj(data), data)
def test_vd_load_dict_no_ci(self):
- self.assertIsNone(self.cvj({'foo': 'bar'}))
+ self.assertIsNone(self.cvj({"foo": "bar"}))
def test_vd_load_dict_ci_dict(self):
- self.assertRaises(ValueError, self.cvj,
- {'foo': 'bar', 'cloud-init': {'x': 1}})
+ self.assertRaises(
+ ValueError, self.cvj, {"foo": "bar", "cloud-init": {"x": 1}}
+ )
def test_vd_load_dict_ci_string(self):
- data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'}
- self.assertEqual(self.cvj(data), data['cloud-init'])
+ data = {"foo": "bar", "cloud-init": "VENDOR_DATA"}
+ self.assertEqual(self.cvj(data), data["cloud-init"])
def test_vd_load_dict_ci_list(self):
- data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']}
- self.assertEqual(self.cvj(data), data['cloud-init'])
+ data = {"foo": "bar", "cloud-init": ["VD_1", "VD_2"]}
+ self.assertEqual(self.cvj(data), data["cloud-init"])
-@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86')
+@test_helpers.mock.patch(MOCK_PATH + "util.is_x86")
class TestDetectOpenStack(test_helpers.CiTestCase):
-
def test_detect_openstack_non_intel_x86(self, m_is_x86):
"""Return True on non-intel platforms because dmi isn't conclusive."""
m_is_x86.return_value = False
self.assertTrue(
- ds.detect_openstack(), 'Expected detect_openstack == True')
-
- @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env,
- m_is_x86):
+ ds.detect_openstack(), "Expected detect_openstack == True"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env")
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_not_detect_openstack_intel_x86_ec2(
+ self, m_dmi, m_proc_env, m_is_x86
+ ):
"""Return False on EC2 platforms."""
m_is_x86.return_value = True
# No product_name in proc/1/environ
- m_proc_env.return_value = {'HOME': '/'}
+ m_proc_env.return_value = {"HOME": "/"}
def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'HVM domU' # Nothing 'openstackish' on EC2
- if dmi_key == 'chassis-asset-tag':
- return '' # Empty string on EC2
- assert False, 'Unexpected dmi read of %s' % dmi_key
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish' on EC2
+ if dmi_key == "chassis-asset-tag":
+ return "" # Empty string on EC2
+ assert False, "Unexpected dmi read of %s" % dmi_key
m_dmi.side_effect = fake_dmi_read
self.assertFalse(
- ds.detect_openstack(), 'Expected detect_openstack == False on EC2')
+ ds.detect_openstack(), "Expected detect_openstack == False on EC2"
+ )
m_proc_env.assert_called_with(1)
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_intel_product_name_compute(self, m_dmi,
- m_is_x86):
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_intel_product_name_compute(
+ self, m_dmi, m_is_x86
+ ):
"""Return True on OpenStack compute and nova instances."""
m_is_x86.return_value = True
- openstack_product_names = ['OpenStack Nova', 'OpenStack Compute']
+ openstack_product_names = ["OpenStack Nova", "OpenStack Compute"]
for product_name in openstack_product_names:
m_dmi.return_value = product_name
self.assertTrue(
- ds.detect_openstack(), 'Failed to detect_openstack')
+ ds.detect_openstack(), "Failed to detect_openstack"
+ )
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi,
- m_is_x86):
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_opentelekomcloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
"""Return True on OpenStack reporting OpenTelekomCloud asset-tag."""
m_is_x86.return_value = True
def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'HVM domU' # Nothing 'openstackish' on OpenTelekomCloud
- if dmi_key == 'chassis-asset-tag':
- return 'OpenTelekomCloud'
- assert False, 'Unexpected dmi read of %s' % dmi_key
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish' on OpenTelekomCloud
+ if dmi_key == "chassis-asset-tag":
+ return "OpenTelekomCloud"
+ assert False, "Unexpected dmi read of %s" % dmi_key
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
ds.detect_openstack(),
- 'Expected detect_openstack == True on OpenTelekomCloud')
+ "Expected detect_openstack == True on OpenTelekomCloud",
+ )
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_sapccloud_chassis_asset_tag(self, m_dmi,
- m_is_x86):
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_sapccloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
"""Return True on OpenStack reporting SAP CCloud VM asset-tag."""
m_is_x86.return_value = True
def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'VMware Virtual Platform' # SAP CCloud uses VMware
- if dmi_key == 'chassis-asset-tag':
- return 'SAP CCloud VM'
- assert False, 'Unexpected dmi read of %s' % dmi_key
+ if dmi_key == "system-product-name":
+ return "VMware Virtual Platform" # SAP CCloud uses VMware
+ if dmi_key == "chassis-asset-tag":
+ return "SAP CCloud VM"
+ assert False, "Unexpected dmi read of %s" % dmi_key
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
ds.detect_openstack(),
- 'Expected detect_openstack == True on SAP CCloud VM')
+ "Expected detect_openstack == True on SAP CCloud VM",
+ )
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi,
- m_is_x86):
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_oraclecloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
"""Return True on OpenStack reporting Oracle cloud asset-tag."""
m_is_x86.return_value = True
def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'Standard PC (i440FX + PIIX, 1996)' # No match
- if dmi_key == 'chassis-asset-tag':
- return 'OracleCloud.com'
- assert False, 'Unexpected dmi read of %s' % dmi_key
+ if dmi_key == "system-product-name":
+ return "Standard PC (i440FX + PIIX, 1996)" # No match
+ if dmi_key == "chassis-asset-tag":
+ return "OracleCloud.com"
+ assert False, "Unexpected dmi read of %s" % dmi_key
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
ds.detect_openstack(accept_oracle=True),
- 'Expected detect_openstack == True on OracleCloud.com')
+ "Expected detect_openstack == True on OracleCloud.com",
+ )
self.assertFalse(
ds.detect_openstack(accept_oracle=False),
- 'Expected detect_openstack == False.')
+ "Expected detect_openstack == False.",
+ )
- def _test_detect_openstack_nova_compute_chassis_asset_tag(self, m_dmi,
- m_is_x86,
- chassis_tag):
+ def _test_detect_openstack_nova_compute_chassis_asset_tag(
+ self, m_dmi, m_is_x86, chassis_tag
+ ):
"""Return True on OpenStack reporting generic asset-tag."""
m_is_x86.return_value = True
def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'Generic OpenStack Platform'
- if dmi_key == 'chassis-asset-tag':
+ if dmi_key == "system-product-name":
+ return "Generic OpenStack Platform"
+ if dmi_key == "chassis-asset-tag":
return chassis_tag
- assert False, 'Unexpected dmi read of %s' % dmi_key
+ assert False, "Unexpected dmi read of %s" % dmi_key
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
ds.detect_openstack(),
- 'Expected detect_openstack == True on Generic OpenStack Platform')
+ "Expected detect_openstack == True on Generic OpenStack Platform",
+ )
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi,
- m_is_x86):
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi, m_is_x86):
self._test_detect_openstack_nova_compute_chassis_asset_tag(
- m_dmi, m_is_x86, 'OpenStack Nova')
+ m_dmi, m_is_x86, "OpenStack Nova"
+ )
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi,
- m_is_x86):
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi, m_is_x86):
self._test_detect_openstack_nova_compute_chassis_asset_tag(
- m_dmi, m_is_x86, 'OpenStack Compute')
-
- @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env,
- m_is_x86):
+ m_dmi, m_is_x86, "OpenStack Compute"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env")
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_by_proc_1_environ(
+ self, m_dmi, m_proc_env, m_is_x86
+ ):
"""Return True when nova product_name specified in /proc/1/environ."""
m_is_x86.return_value = True
# Nova product_name in proc/1/environ
m_proc_env.return_value = {
- 'HOME': '/', 'product_name': 'OpenStack Nova'}
+ "HOME": "/",
+ "product_name": "OpenStack Nova",
+ }
def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'HVM domU' # Nothing 'openstackish'
- if dmi_key == 'chassis-asset-tag':
- return '' # Nothin 'openstackish'
- assert False, 'Unexpected dmi read of %s' % dmi_key
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish'
+ if dmi_key == "chassis-asset-tag":
+            return ""  # Nothing 'openstackish'
+ assert False, "Unexpected dmi read of %s" % dmi_key
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
ds.detect_openstack(),
- 'Expected detect_openstack == True on OpenTelekomCloud')
+ "Expected detect_openstack == True on OpenTelekomCloud",
+ )
m_proc_env.assert_called_with(1)
class TestMetadataReader(test_helpers.HttprettyTestCase):
"""Test the MetadataReader."""
- burl = 'http://169.254.169.254/'
+
+ burl = "http://169.254.169.254/"
md_base = {
- 'availability_zone': 'myaz1',
- 'hostname': 'sm-foo-test.novalocal',
+ "availability_zone": "myaz1",
+ "hostname": "sm-foo-test.novalocal",
"keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}],
- 'launch_index': 0,
- 'name': 'sm-foo-test',
- 'public_keys': {'mykey': PUBKEY},
- 'project_id': '6a103f813b774b9fb15a4fcd36e1c056',
- 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
+ "launch_index": 0,
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "project_id": "6a103f813b774b9fb15a4fcd36e1c056",
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
+ }
def register(self, path, body=None, status=200):
- content = body if not isinstance(body, str) else body.encode('utf-8')
+ content = body if not isinstance(body, str) else body.encode("utf-8")
hp.register_uri(
- hp.GET, self.burl + "openstack" + path, status=status,
- body=content)
+ hp.GET, self.burl + "openstack" + path, status=status, body=content
+ )
def register_versions(self, versions):
- self.register("", '\n'.join(versions))
- self.register("/", '\n'.join(versions))
+ self.register("", "\n".join(versions))
+ self.register("/", "\n".join(versions))
def register_version(self, version, data):
- content = '\n'.join(sorted(data.keys()))
+ content = "\n".join(sorted(data.keys()))
self.register(version, content)
self.register(version + "/", content)
for path, content in data.items():
self.register("/%s/%s" % (version, path), content)
self.register("/%s/%s" % (version, path), content)
- if 'user_data' not in data:
+ if "user_data" not in data:
self.register("/%s/user_data" % version, "nodata", status=404)
def test__find_working_version(self):
"""Test a working version ignores unsupported."""
unsup = "2016-11-09"
self.register_versions(
- [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup,
- openstack.OS_LATEST])
+ [
+ openstack.OS_FOLSOM,
+ openstack.OS_LIBERTY,
+ unsup,
+ openstack.OS_LATEST,
+ ]
+ )
self.assertEqual(
openstack.OS_LIBERTY,
- openstack.MetadataReader(self.burl)._find_working_version())
+ openstack.MetadataReader(self.burl)._find_working_version(),
+ )
def test__find_working_version_uses_latest(self):
"""'latest' should be used if no supported versions."""
- unsup1, unsup2 = ("2016-11-09", '2017-06-06')
+ unsup1, unsup2 = ("2016-11-09", "2017-06-06")
self.register_versions([unsup1, unsup2, openstack.OS_LATEST])
self.assertEqual(
openstack.OS_LATEST,
- openstack.MetadataReader(self.burl)._find_working_version())
+ openstack.MetadataReader(self.burl)._find_working_version(),
+ )
def test_read_v2_os_ocata(self):
"""Validate return value of read_v2 for os_ocata data."""
md = copy.deepcopy(self.md_base)
- md['devices'] = []
- network_data = {'links': [], 'networks': [], 'services': []}
+ md["devices"] = []
+ network_data = {"links": [], "networks": [], "services": []}
vendor_data = {}
vendor_data2 = {"static": {}}
data = {
- 'meta_data.json': json.dumps(md),
- 'network_data.json': json.dumps(network_data),
- 'vendor_data.json': json.dumps(vendor_data),
- 'vendor_data2.json': json.dumps(vendor_data2),
+ "meta_data.json": json.dumps(md),
+ "network_data.json": json.dumps(network_data),
+ "vendor_data.json": json.dumps(vendor_data),
+ "vendor_data2.json": json.dumps(vendor_data2),
}
self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST])
self.register_version(openstack.OS_OCATA, data)
mock_read_ec2 = test_helpers.mock.MagicMock(
- return_value={'instance-id': 'unused-ec2'})
+ return_value={"instance-id": "unused-ec2"}
+ )
expected_md = copy.deepcopy(md)
expected_md.update(
- {'instance-id': md['uuid'], 'local-hostname': md['hostname']})
+ {"instance-id": md["uuid"], "local-hostname": md["hostname"]}
+ )
expected = {
- 'userdata': '', # Annoying, no user-data results in empty string.
- 'version': 2,
- 'metadata': expected_md,
- 'vendordata': vendor_data,
- 'vendordata2': vendor_data2,
- 'networkdata': network_data,
- 'ec2-metadata': mock_read_ec2.return_value,
- 'files': {},
+ "userdata": "", # Annoying, no user-data results in empty string.
+ "version": 2,
+ "metadata": expected_md,
+ "vendordata": vendor_data,
+ "vendordata2": vendor_data2,
+ "networkdata": network_data,
+ "ec2-metadata": mock_read_ec2.return_value,
+ "files": {},
}
reader = openstack.MetadataReader(self.burl)
reader._read_ec2_metadata = mock_read_ec2
diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py
index 2aab097c..e0e79c8c 100644
--- a/tests/unittests/sources/test_oracle.py
+++ b/tests/unittests/sources/test_oracle.py
@@ -11,8 +11,8 @@ import pytest
from cloudinit.sources import DataSourceOracle as oracle
from cloudinit.sources import NetworkConfigSource
from cloudinit.sources.DataSourceOracle import OpcMetadata
-from tests.unittests import helpers as test_helpers
from cloudinit.url_helper import UrlError
+from tests.unittests import helpers as test_helpers
DS_PATH = "cloudinit.sources.DataSourceOracle"
@@ -119,7 +119,9 @@ def oracle_ds(request, fixture_utils, paths, metadata_version):
return_value=metadata,
):
yield oracle.DataSourceOracle(
- sys_cfg=sys_cfg, distro=mock.Mock(), paths=paths,
+ sys_cfg=sys_cfg,
+ distro=mock.Mock(),
+ paths=paths,
)
@@ -129,18 +131,22 @@ class TestDataSourceOracle:
assert "oracle" == oracle_ds.platform_type
def test_subplatform_before_fetch(self, oracle_ds):
- assert 'unknown' == oracle_ds.subplatform
+ assert "unknown" == oracle_ds.subplatform
def test_platform_info_after_fetch(self, oracle_ds):
oracle_ds._get_data()
- assert 'metadata (http://169.254.169.254/opc/v2/)' == \
- oracle_ds.subplatform
+ assert (
+ "metadata (http://169.254.169.254/opc/v2/)"
+ == oracle_ds.subplatform
+ )
- @pytest.mark.parametrize('metadata_version', [1])
+ @pytest.mark.parametrize("metadata_version", [1])
def test_v1_platform_info_after_fetch(self, oracle_ds):
oracle_ds._get_data()
- assert 'metadata (http://169.254.169.254/opc/v1/)' == \
- oracle_ds.subplatform
+ assert (
+ "metadata (http://169.254.169.254/opc/v1/)"
+ == oracle_ds.subplatform
+ )
def test_secondary_nics_disabled_by_default(self, oracle_ds):
assert not oracle_ds.ds_cfg["configure_secondary_nics"]
@@ -153,29 +159,30 @@ class TestDataSourceOracle:
class TestIsPlatformViable(test_helpers.CiTestCase):
- @mock.patch(DS_PATH + ".dmi.read_dmi_data",
- return_value=oracle.CHASSIS_ASSET_TAG)
+ @mock.patch(
+ DS_PATH + ".dmi.read_dmi_data", return_value=oracle.CHASSIS_ASSET_TAG
+ )
def test_expected_viable(self, m_read_dmi_data):
"""System with known chassis tag is viable."""
self.assertTrue(oracle._is_platform_viable())
- m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
@mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value=None)
def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data):
"""System without known chassis tag is not viable."""
self.assertFalse(oracle._is_platform_viable())
- m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
@mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value="LetsGoCubs")
def test_expected_not_viable_other(self, m_read_dmi_data):
"""System with unnown chassis tag is not viable."""
self.assertFalse(oracle._is_platform_viable())
- m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
@mock.patch(
"cloudinit.net.is_openvswitch_internal_interface",
- mock.Mock(return_value=False)
+ mock.Mock(return_value=False),
)
class TestNetworkConfigFromOpcImds:
def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds):
@@ -192,222 +199,317 @@ class TestNetworkConfigFromOpcImds:
# operations are used
oracle_ds._network_config = object()
oracle_ds._add_network_config_from_opc_imds()
- assert 'bare metal machine' in caplog.text
+ assert "bare metal machine" in caplog.text
def test_missing_mac_skipped(self, oracle_ds, caplog):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
oracle_ds._network_config = {
- 'version': 1, 'config': [{'primary': 'nic'}]
+ "version": 1,
+ "config": [{"primary": "nic"}],
}
with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
oracle_ds._add_network_config_from_opc_imds()
- assert 1 == len(oracle_ds.network_config['config'])
- assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
- caplog.text
+ assert 1 == len(oracle_ds.network_config["config"])
+ assert (
+ "Interface with MAC 00:00:17:02:2b:b1 not found; skipping"
+ in caplog.text
+ )
def test_missing_mac_skipped_v2(self, oracle_ds, caplog):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
oracle_ds._network_config = {
- 'version': 2, 'ethernets': {'primary': {'nic': {}}}
+ "version": 2,
+ "ethernets": {"primary": {"nic": {}}},
}
with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
oracle_ds._add_network_config_from_opc_imds()
- assert 1 == len(oracle_ds.network_config['ethernets'])
- assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
- caplog.text
+ assert 1 == len(oracle_ds.network_config["ethernets"])
+ assert (
+ "Interface with MAC 00:00:17:02:2b:b1 not found; skipping"
+ in caplog.text
+ )
def test_secondary_nic(self, oracle_ds):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
oracle_ds._network_config = {
- 'version': 1, 'config': [{'primary': 'nic'}]
+ "version": 1,
+ "config": [{"primary": "nic"}],
}
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- with mock.patch(DS_PATH + ".get_interfaces_by_mac",
- return_value={mac_addr: nic_name}):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ with mock.patch(
+ DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name},
+ ):
oracle_ds._add_network_config_from_opc_imds()
# The input is mutated
- assert 2 == len(oracle_ds.network_config['config'])
+ assert 2 == len(oracle_ds.network_config["config"])
- secondary_nic_cfg = oracle_ds.network_config['config'][1]
- assert nic_name == secondary_nic_cfg['name']
- assert 'physical' == secondary_nic_cfg['type']
- assert mac_addr == secondary_nic_cfg['mac_address']
- assert 9000 == secondary_nic_cfg['mtu']
+ secondary_nic_cfg = oracle_ds.network_config["config"][1]
+ assert nic_name == secondary_nic_cfg["name"]
+ assert "physical" == secondary_nic_cfg["type"]
+ assert mac_addr == secondary_nic_cfg["mac_address"]
+ assert 9000 == secondary_nic_cfg["mtu"]
- assert 1 == len(secondary_nic_cfg['subnets'])
- subnet_cfg = secondary_nic_cfg['subnets'][0]
+ assert 1 == len(secondary_nic_cfg["subnets"])
+ subnet_cfg = secondary_nic_cfg["subnets"][0]
# These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- assert '10.0.0.231' == subnet_cfg['address']
+ assert "10.0.0.231" == subnet_cfg["address"]
def test_secondary_nic_v2(self, oracle_ds):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
oracle_ds._network_config = {
- 'version': 2, 'ethernets': {'primary': {'nic': {}}}
+ "version": 2,
+ "ethernets": {"primary": {"nic": {}}},
}
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- with mock.patch(DS_PATH + ".get_interfaces_by_mac",
- return_value={mac_addr: nic_name}):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ with mock.patch(
+ DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name},
+ ):
oracle_ds._add_network_config_from_opc_imds()
# The input is mutated
- assert 2 == len(oracle_ds.network_config['ethernets'])
+ assert 2 == len(oracle_ds.network_config["ethernets"])
- secondary_nic_cfg = oracle_ds.network_config['ethernets']['ens3']
- assert secondary_nic_cfg['dhcp4'] is False
- assert secondary_nic_cfg['dhcp6'] is False
- assert mac_addr == secondary_nic_cfg['match']['macaddress']
- assert 9000 == secondary_nic_cfg['mtu']
+ secondary_nic_cfg = oracle_ds.network_config["ethernets"]["ens3"]
+ assert secondary_nic_cfg["dhcp4"] is False
+ assert secondary_nic_cfg["dhcp6"] is False
+ assert mac_addr == secondary_nic_cfg["match"]["macaddress"]
+ assert 9000 == secondary_nic_cfg["mtu"]
- assert 1 == len(secondary_nic_cfg['addresses'])
+ assert 1 == len(secondary_nic_cfg["addresses"])
# These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- assert '10.0.0.231' == secondary_nic_cfg['addresses'][0]
+ assert "10.0.0.231" == secondary_nic_cfg["addresses"][0]
class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
-
def setUp(self):
super(TestNetworkConfigFiltersNetFailover, self).setUp()
- self.add_patch(DS_PATH + '.get_interfaces_by_mac',
- 'm_get_interfaces_by_mac')
- self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master')
+ self.add_patch(
+ DS_PATH + ".get_interfaces_by_mac", "m_get_interfaces_by_mac"
+ )
+ self.add_patch(DS_PATH + ".is_netfail_master", "m_netfail_master")
def test_ignore_bogus_network_config(self):
- netcfg = {'something': 'here'}
+ netcfg = {"something": "here"}
passed_netcfg = copy.copy(netcfg)
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
def test_ignore_network_config_unknown_versions(self):
- netcfg = {'something': 'here', 'version': 3}
+ netcfg = {"something": "here", "version": 3}
passed_netcfg = copy.copy(netcfg)
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
def test_checks_v1_type_physical_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
self.m_get_interfaces_by_mac.return_value = {
mac_addr: nic_name,
}
- netcfg = {'version': 1, 'config': [
- {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr,
- 'subnets': [{'type': 'dhcp4'}]}]}
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": nic_name,
+ "mac_address": mac_addr,
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
passed_netcfg = copy.copy(netcfg)
self.m_netfail_master.return_value = False
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual([mock.call(nic_name)],
- self.m_netfail_master.call_args_list)
+ self.assertEqual(
+ [mock.call(nic_name)], self.m_netfail_master.call_args_list
+ )
def test_checks_v1_skips_non_phys_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0'
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "bond0"
self.m_get_interfaces_by_mac.return_value = {
mac_addr: nic_name,
}
- netcfg = {'version': 1, 'config': [
- {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr,
- 'subnets': [{'type': 'dhcp4'}]}]}
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "bond",
+ "name": nic_name,
+ "mac_address": mac_addr,
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
passed_netcfg = copy.copy(netcfg)
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
self.assertEqual(0, self.m_netfail_master.call_count)
def test_removes_master_mac_property_v1(self):
- nic_master, mac_master = 'ens3', self.random_string()
- nic_other, mac_other = 'ens7', self.random_string()
- nic_extra, mac_extra = 'enp0s1f2', self.random_string()
+ nic_master, mac_master = "ens3", self.random_string()
+ nic_other, mac_other = "ens7", self.random_string()
+ nic_extra, mac_extra = "enp0s1f2", self.random_string()
self.m_get_interfaces_by_mac.return_value = {
mac_master: nic_master,
mac_other: nic_other,
mac_extra: nic_extra,
}
- netcfg = {'version': 1, 'config': [
- {'type': 'physical', 'name': nic_master,
- 'mac_address': mac_master},
- {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
- {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
- ]}
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": nic_master,
+ "mac_address": mac_master,
+ },
+ {
+ "type": "physical",
+ "name": nic_other,
+ "mac_address": mac_other,
+ },
+ {
+ "type": "physical",
+ "name": nic_extra,
+ "mac_address": mac_extra,
+ },
+ ],
+ }
def _is_netfail_master(iface):
- if iface == 'ens3':
+ if iface == "ens3":
return True
return False
+
self.m_netfail_master.side_effect = _is_netfail_master
- expected_cfg = {'version': 1, 'config': [
- {'type': 'physical', 'name': nic_master},
- {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
- {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
- ]}
+ expected_cfg = {
+ "version": 1,
+ "config": [
+ {"type": "physical", "name": nic_master},
+ {
+ "type": "physical",
+ "name": nic_other,
+ "mac_address": mac_other,
+ },
+ {
+ "type": "physical",
+ "name": nic_extra,
+ "mac_address": mac_extra,
+ },
+ ],
+ }
oracle._ensure_netfailover_safe(netcfg)
self.assertEqual(expected_cfg, netcfg)
def test_checks_v2_type_ethernet_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
self.m_get_interfaces_by_mac.return_value = {
mac_addr: nic_name,
}
- netcfg = {'version': 2, 'ethernets': {
- nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
- 'match': {'macaddress': mac_addr}}}}
+ netcfg = {
+ "version": 2,
+ "ethernets": {
+ nic_name: {
+ "dhcp4": True,
+ "critical": True,
+ "set-name": nic_name,
+ "match": {"macaddress": mac_addr},
+ }
+ },
+ }
passed_netcfg = copy.copy(netcfg)
self.m_netfail_master.return_value = False
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual([mock.call(nic_name)],
- self.m_netfail_master.call_args_list)
+ self.assertEqual(
+ [mock.call(nic_name)], self.m_netfail_master.call_args_list
+ )
def test_skips_v2_non_ethernet_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0'
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "wlps0"
self.m_get_interfaces_by_mac.return_value = {
mac_addr: nic_name,
}
- netcfg = {'version': 2, 'wifis': {
- nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
- 'match': {'macaddress': mac_addr}}}}
+ netcfg = {
+ "version": 2,
+ "wifis": {
+ nic_name: {
+ "dhcp4": True,
+ "critical": True,
+ "set-name": nic_name,
+ "match": {"macaddress": mac_addr},
+ }
+ },
+ }
passed_netcfg = copy.copy(netcfg)
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
self.assertEqual(0, self.m_netfail_master.call_count)
def test_removes_master_mac_property_v2(self):
- nic_master, mac_master = 'ens3', self.random_string()
- nic_other, mac_other = 'ens7', self.random_string()
- nic_extra, mac_extra = 'enp0s1f2', self.random_string()
+ nic_master, mac_master = "ens3", self.random_string()
+ nic_other, mac_other = "ens7", self.random_string()
+ nic_extra, mac_extra = "enp0s1f2", self.random_string()
self.m_get_interfaces_by_mac.return_value = {
mac_master: nic_master,
mac_other: nic_other,
mac_extra: nic_extra,
}
- netcfg = {'version': 2, 'ethernets': {
- nic_extra: {'dhcp4': True, 'set-name': nic_extra,
- 'match': {'macaddress': mac_extra}},
- nic_other: {'dhcp4': True, 'set-name': nic_other,
- 'match': {'macaddress': mac_other}},
- nic_master: {'dhcp4': True, 'set-name': nic_master,
- 'match': {'macaddress': mac_master}},
- }}
+ netcfg = {
+ "version": 2,
+ "ethernets": {
+ nic_extra: {
+ "dhcp4": True,
+ "set-name": nic_extra,
+ "match": {"macaddress": mac_extra},
+ },
+ nic_other: {
+ "dhcp4": True,
+ "set-name": nic_other,
+ "match": {"macaddress": mac_other},
+ },
+ nic_master: {
+ "dhcp4": True,
+ "set-name": nic_master,
+ "match": {"macaddress": mac_master},
+ },
+ },
+ }
def _is_netfail_master(iface):
- if iface == 'ens3':
+ if iface == "ens3":
return True
return False
+
self.m_netfail_master.side_effect = _is_netfail_master
- expected_cfg = {'version': 2, 'ethernets': {
- nic_master: {'dhcp4': True, 'match': {'name': nic_master}},
- nic_extra: {'dhcp4': True, 'set-name': nic_extra,
- 'match': {'macaddress': mac_extra}},
- nic_other: {'dhcp4': True, 'set-name': nic_other,
- 'match': {'macaddress': mac_other}},
- }}
+ expected_cfg = {
+ "version": 2,
+ "ethernets": {
+ nic_master: {"dhcp4": True, "match": {"name": nic_master}},
+ nic_extra: {
+ "dhcp4": True,
+ "set-name": nic_extra,
+ "match": {"macaddress": mac_extra},
+ },
+ nic_other: {
+ "dhcp4": True,
+ "set-name": nic_other,
+ "match": {"macaddress": mac_other},
+ },
+ },
+ }
oracle._ensure_netfailover_safe(netcfg)
import pprint
+
pprint.pprint(netcfg)
- print('---- ^^ modified ^^ ---- vv original vv ----')
+ print("---- ^^ modified ^^ ---- vv original vv ----")
pprint.pprint(expected_cfg)
self.assertEqual(expected_cfg, netcfg)
@@ -425,12 +527,12 @@ def _mock_v2_urls(httpretty):
httpretty.register_uri(
httpretty.GET,
"http://169.254.169.254/opc/v2/instance/",
- body=instance_callback
+ body=instance_callback,
)
httpretty.register_uri(
httpretty.GET,
"http://169.254.169.254/opc/v2/vnics/",
- body=vnics_callback
+ body=vnics_callback,
)
@@ -443,12 +545,12 @@ def _mock_no_v2_urls(httpretty):
httpretty.register_uri(
httpretty.GET,
"http://169.254.169.254/opc/v1/instance/",
- body=OPC_V1_METADATA
+ body=OPC_V1_METADATA,
)
httpretty.register_uri(
httpretty.GET,
"http://169.254.169.254/opc/v1/vnics/",
- body=OPC_BM_SECONDARY_VNIC_RESPONSE
+ body=OPC_BM_SECONDARY_VNIC_RESPONSE,
)
@@ -459,18 +561,34 @@ class TestReadOpcMetadata:
@mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
@pytest.mark.parametrize(
- 'version,setup_urls,instance_data,fetch_vnics,vnics_data', [
- (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), True,
- json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+ "version,setup_urls,instance_data,fetch_vnics,vnics_data",
+ [
+ (
+ 2,
+ _mock_v2_urls,
+ json.loads(OPC_V2_METADATA),
+ True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE),
+ ),
(2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None),
- (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), True,
- json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+ (
+ 1,
+ _mock_no_v2_urls,
+ json.loads(OPC_V1_METADATA),
+ True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE),
+ ),
(1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None),
- ]
+ ],
)
def test_metadata_returned(
- self, version, setup_urls, instance_data,
- fetch_vnics, vnics_data, httpretty
+ self,
+ version,
+ setup_urls,
+ instance_data,
+ fetch_vnics,
+ vnics_data,
+ httpretty,
):
setup_urls(httpretty)
metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics)
@@ -490,10 +608,16 @@ class TestReadOpcMetadata:
(3, 1, json.loads(OPC_V1_METADATA), does_not_raise()),
(3, 2, json.loads(OPC_V1_METADATA), does_not_raise()),
(3, 3, None, pytest.raises(UrlError)),
- ]
+ ],
)
- def test_retries(self, v2_failure_count, v1_failure_count,
- expected_body, expectation, httpretty):
+ def test_retries(
+ self,
+ v2_failure_count,
+ v1_failure_count,
+ expected_body,
+ expectation,
+ httpretty,
+ ):
v2_responses = [httpretty.Response("", status=404)] * v2_failure_count
v2_responses.append(httpretty.Response(OPC_V2_METADATA))
v1_responses = [httpretty.Response("", status=404)] * v1_failure_count
@@ -548,7 +672,8 @@ class TestCommon_GetDataBehaviour:
DS_PATH + "._is_platform_viable", mock.Mock(return_value=False)
)
def test_false_if_platform_not_viable(
- self, parameterized_oracle_ds,
+ self,
+ parameterized_oracle_ds,
):
assert not parameterized_oracle_ds._get_data()
@@ -571,7 +696,10 @@ class TestCommon_GetDataBehaviour:
),
)
def test_metadata_keys_set_correctly(
- self, keyname, expected_value, parameterized_oracle_ds,
+ self,
+ keyname,
+ expected_value,
+ parameterized_oracle_ds,
):
assert parameterized_oracle_ds._get_data()
assert expected_value == parameterized_oracle_ds.metadata[keyname]
@@ -591,7 +719,10 @@ class TestCommon_GetDataBehaviour:
DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid")
)
def test_attributes_set_correctly(
- self, attribute_name, expected_value, parameterized_oracle_ds,
+ self,
+ attribute_name,
+ expected_value,
+ parameterized_oracle_ds,
):
assert parameterized_oracle_ds._get_data()
assert expected_value == getattr(
@@ -624,7 +755,8 @@ class TestCommon_GetDataBehaviour:
instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys
metadata = OpcMetadata(None, instance_data, None)
with mock.patch(
- DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
):
assert parameterized_oracle_ds._get_data()
assert (
@@ -638,7 +770,8 @@ class TestCommon_GetDataBehaviour:
del instance_data["metadata"]["user_data"]
metadata = OpcMetadata(None, instance_data, None)
with mock.patch(
- DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
):
assert parameterized_oracle_ds._get_data()
@@ -651,7 +784,8 @@ class TestCommon_GetDataBehaviour:
del instance_data["metadata"]
metadata = OpcMetadata(None, instance_data, None)
with mock.patch(
- DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
):
assert parameterized_oracle_ds._get_data()
@@ -697,11 +831,9 @@ class TestNonIscsiRoot_GetDataBehaviour:
mock.call(
iface=m_find_fallback_nic.return_value,
connectivity_url_data={
- 'headers': {
- 'Authorization': 'Bearer Oracle'
- },
- 'url': 'http://169.254.169.254/opc/v2/instance/'
- }
+ "headers": {"Authorization": "Bearer Oracle"},
+ "url": "http://169.254.169.254/opc/v2/instance/",
+ },
)
] == m_EphemeralDHCPv4.call_args_list
@@ -761,9 +893,10 @@ class TestNetworkConfig:
def side_effect(self):
self._network_config["secondary_added"] = mock.sentinel.needle
- oracle_ds._vnics_data = 'DummyData'
+ oracle_ds._vnics_data = "DummyData"
with mock.patch.object(
- oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
+ oracle.DataSourceOracle,
+ "_add_network_config_from_opc_imds",
new=side_effect,
):
was_secondary_added = "secondary_added" in oracle_ds.network_config
@@ -779,8 +912,9 @@ class TestNetworkConfig:
oracle_ds._vnics_data = "DummyData"
with mock.patch.object(
- oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
- side_effect=Exception()
+ oracle.DataSourceOracle,
+ "_add_network_config_from_opc_imds",
+ side_effect=Exception(),
):
network_config = oracle_ds.network_config
assert network_config == m_read_initramfs_config.return_value
diff --git a/tests/unittests/sources/test_ovf.py b/tests/unittests/sources/test_ovf.py
index da516731..c2c87f12 100644
--- a/tests/unittests/sources/test_ovf.py
+++ b/tests/unittests/sources/test_ovf.py
@@ -6,20 +6,19 @@
import base64
import os
-
from collections import OrderedDict
from textwrap import dedent
-from cloudinit import subp
-from cloudinit import util
-from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
+from cloudinit import subp, util
from cloudinit.helpers import Paths
+from cloudinit.safeyaml import YAMLError
from cloudinit.sources import DataSourceOVF as dsovf
from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
- CustomScriptNotFound)
-from cloudinit.safeyaml import YAMLError
+ CustomScriptNotFound,
+)
+from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
-MPATH = 'cloudinit.sources.DataSourceOVF.'
+MPATH = "cloudinit.sources.DataSourceOVF."
NOT_FOUND = None
@@ -50,7 +49,7 @@ def fill_properties(props, template=OVF_ENV_CONTENT):
for key, val in props.items():
lines.append(prop_tmpl.format(key=key, val=val))
indent = " "
- properties = ''.join([indent + line + "\n" for line in lines])
+ properties = "".join([indent + line + "\n" for line in lines])
return template.format(properties=properties)
@@ -58,13 +57,16 @@ class TestReadOvfEnv(CiTestCase):
def test_with_b64_userdata(self):
user_data = "#!/bin/sh\necho hello world\n"
user_data_b64 = base64.b64encode(user_data.encode()).decode()
- props = {"user-data": user_data_b64, "password": "passw0rd",
- "instance-id": "inst-001"}
+ props = {
+ "user-data": user_data_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
env = fill_properties(props)
md, ud, cfg = dsovf.read_ovf_environment(env)
self.assertEqual({"instance-id": "inst-001"}, md)
self.assertEqual(user_data.encode(), ud)
- self.assertEqual({'password': "passw0rd"}, cfg)
+ self.assertEqual({"password": "passw0rd"}, cfg)
def test_with_non_b64_userdata(self):
user_data = "my-user-data"
@@ -80,11 +82,12 @@ class TestReadOvfEnv(CiTestCase):
env = fill_properties(props)
md, ud, cfg = dsovf.read_ovf_environment(env)
self.assertEqual({"instance-id": "inst-001"}, md)
- self.assertEqual({'password': "passw0rd"}, cfg)
+ self.assertEqual({"password": "passw0rd"}, cfg)
self.assertIsNone(ud)
def test_with_b64_network_config_enable_read_network(self):
- network_config = dedent("""\
+ network_config = dedent(
+ """\
network:
version: 2
ethernets:
@@ -101,30 +104,41 @@ class TestReadOvfEnv(CiTestCase):
dhcp4: false
addresses:
- 10.10.10.1/24
- """)
+ """
+ )
network_config_b64 = base64.b64encode(network_config.encode()).decode()
- props = {"network-config": network_config_b64,
- "password": "passw0rd",
- "instance-id": "inst-001"}
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
env = fill_properties(props)
md, ud, cfg = dsovf.read_ovf_environment(env, True)
self.assertEqual("inst-001", md["instance-id"])
- self.assertEqual({'password': "passw0rd"}, cfg)
+ self.assertEqual({"password": "passw0rd"}, cfg)
self.assertEqual(
- {'version': 2, 'ethernets':
- {'nics':
- {'nameservers':
- {'addresses': ['127.0.0.53'],
- 'search': ['eng.vmware.com', 'vmware.com']},
- 'match': {'name': 'eth*'},
- 'gateway4': '10.10.10.253',
- 'dhcp4': False,
- 'addresses': ['10.10.10.1/24']}}},
- md["network-config"])
+ {
+ "version": 2,
+ "ethernets": {
+ "nics": {
+ "nameservers": {
+ "addresses": ["127.0.0.53"],
+ "search": ["eng.vmware.com", "vmware.com"],
+ },
+ "match": {"name": "eth*"},
+ "gateway4": "10.10.10.253",
+ "dhcp4": False,
+ "addresses": ["10.10.10.1/24"],
+ }
+ },
+ },
+ md["network-config"],
+ )
self.assertIsNone(ud)
def test_with_non_b64_network_config_enable_read_network(self):
- network_config = dedent("""\
+ network_config = dedent(
+ """\
network:
version: 2
ethernets:
@@ -141,18 +155,22 @@ class TestReadOvfEnv(CiTestCase):
dhcp4: false
addresses:
- 10.10.10.1/24
- """)
- props = {"network-config": network_config,
- "password": "passw0rd",
- "instance-id": "inst-001"}
+ """
+ )
+ props = {
+ "network-config": network_config,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
env = fill_properties(props)
md, ud, cfg = dsovf.read_ovf_environment(env, True)
self.assertEqual({"instance-id": "inst-001"}, md)
- self.assertEqual({'password': "passw0rd"}, cfg)
+ self.assertEqual({"password": "passw0rd"}, cfg)
self.assertIsNone(ud)
def test_with_b64_network_config_disable_read_network(self):
- network_config = dedent("""\
+ network_config = dedent(
+ """\
network:
version: 2
ethernets:
@@ -169,20 +187,22 @@ class TestReadOvfEnv(CiTestCase):
dhcp4: false
addresses:
- 10.10.10.1/24
- """)
+ """
+ )
network_config_b64 = base64.b64encode(network_config.encode()).decode()
- props = {"network-config": network_config_b64,
- "password": "passw0rd",
- "instance-id": "inst-001"}
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
env = fill_properties(props)
md, ud, cfg = dsovf.read_ovf_environment(env)
self.assertEqual({"instance-id": "inst-001"}, md)
- self.assertEqual({'password': "passw0rd"}, cfg)
+ self.assertEqual({"password": "passw0rd"}, cfg)
self.assertIsNone(ud)
class TestMarkerFiles(CiTestCase):
-
def setUp(self):
super(TestMarkerFiles, self).setUp()
self.tdir = self.tmp_dir()
@@ -190,25 +210,23 @@ class TestMarkerFiles(CiTestCase):
def test_false_when_markerid_none(self):
"""Return False when markerid provided is None."""
self.assertFalse(
- dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir))
+ dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)
+ )
def test_markerid_file_exist(self):
"""Return False when markerid file path does not exist,
True otherwise."""
- self.assertFalse(
- dsovf.check_marker_exists('123', self.tdir))
+ self.assertFalse(dsovf.check_marker_exists("123", self.tdir))
- marker_file = self.tmp_path('.markerfile-123.txt', self.tdir)
- util.write_file(marker_file, '')
- self.assertTrue(
- dsovf.check_marker_exists('123', self.tdir)
- )
+ marker_file = self.tmp_path(".markerfile-123.txt", self.tdir)
+ util.write_file(marker_file, "")
+ self.assertTrue(dsovf.check_marker_exists("123", self.tdir))
def test_marker_file_setup(self):
"""Test creation of marker files."""
- markerfilepath = self.tmp_path('.markerfile-hi.txt', self.tdir)
+ markerfilepath = self.tmp_path(".markerfile-hi.txt", self.tdir)
self.assertFalse(os.path.exists(markerfilepath))
- dsovf.setup_marker_files(markerid='hi', marker_dir=self.tdir)
+ dsovf.setup_marker_files(markerid="hi", marker_dir=self.tdir)
self.assertTrue(os.path.exists(markerfilepath))
@@ -223,233 +241,298 @@ class TestDatasourceOVF(CiTestCase):
def test_get_data_false_on_none_dmi_data(self):
"""When dmi for system-product-name is None, get_data returns False."""
- paths = Paths({'cloud_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
retcode = wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': None,
- 'transport_iso9660': NOT_FOUND,
- 'transport_vmware_guestinfo': NOT_FOUND},
- ds.get_data)
- self.assertFalse(retcode, 'Expected False return from ds.get_data')
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": None,
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
self.assertIn(
- 'DEBUG: No system-product-name found', self.logs.getvalue())
+ "DEBUG: No system-product-name found", self.logs.getvalue()
+ )
def test_get_data_vmware_customization_disabled(self):
"""When vmware customization is disabled via sys_cfg and
allow_raw_data is disabled via ds_cfg, log a message.
"""
- paths = Paths({'cloud_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': True,
- 'datasource': {'OVF': {'allow_raw_data': False}}},
- distro={}, paths=paths)
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ sys_cfg={
+ "disable_vmware_customization": True,
+ "datasource": {"OVF": {"allow_raw_data": False}},
+ },
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[MISC]
MARKER-ID = 12345345
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
retcode = wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'transport_iso9660': NOT_FOUND,
- 'transport_vmware_guestinfo': NOT_FOUND,
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file},
- ds.get_data)
- self.assertFalse(retcode, 'Expected False return from ds.get_data')
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
self.assertIn(
- 'DEBUG: Customization for VMware platform is disabled.',
- self.logs.getvalue())
+ "DEBUG: Customization for VMware platform is disabled.",
+ self.logs.getvalue(),
+ )
def test_get_data_vmware_customization_sys_cfg_disabled(self):
"""When vmware customization is disabled via sys_cfg and
no meta data is found, log a message.
"""
- paths = Paths({'cloud_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': True,
- 'datasource': {'OVF': {'allow_raw_data': True}}},
- distro={}, paths=paths)
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ sys_cfg={
+ "disable_vmware_customization": True,
+ "datasource": {"OVF": {"allow_raw_data": True}},
+ },
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[MISC]
MARKER-ID = 12345345
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
retcode = wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'transport_iso9660': NOT_FOUND,
- 'transport_vmware_guestinfo': NOT_FOUND,
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file},
- ds.get_data)
- self.assertFalse(retcode, 'Expected False return from ds.get_data')
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
self.assertIn(
- 'DEBUG: Customization using VMware config is disabled.',
- self.logs.getvalue())
+ "DEBUG: Customization using VMware config is disabled.",
+ self.logs.getvalue(),
+ )
def test_get_data_allow_raw_data_disabled(self):
"""When allow_raw_data is disabled via ds_cfg and
meta data is found, log a message.
"""
- paths = Paths({'cloud_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False,
- 'datasource': {'OVF': {'allow_raw_data': False}}},
- distro={}, paths=paths)
+ sys_cfg={
+ "disable_vmware_customization": False,
+ "datasource": {"OVF": {"allow_raw_data": False}},
+ },
+ distro={},
+ paths=paths,
+ )
# Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[CLOUDINIT]
METADATA = test-meta
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
# Prepare the meta data file
- metadata_file = self.tmp_path('test-meta', self.tdir)
+ metadata_file = self.tmp_path("test-meta", self.tdir)
util.write_file(metadata_file, "This is meta data")
retcode = wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'transport_iso9660': NOT_FOUND,
- 'transport_vmware_guestinfo': NOT_FOUND,
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'collect_imc_file_paths': [self.tdir + '/test-meta', '', '']},
- ds.get_data)
- self.assertFalse(retcode, 'Expected False return from ds.get_data')
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [self.tdir + "/test-meta", "", ""],
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
self.assertIn(
- 'DEBUG: Customization using raw data is disabled.',
- self.logs.getvalue())
+ "DEBUG: Customization using raw data is disabled.",
+ self.logs.getvalue(),
+ )
def test_get_data_vmware_customization_enabled(self):
"""When cloud-init workflow for vmware is enabled via sys_cfg log a
message.
"""
- paths = Paths({'cloud_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[CUSTOM-SCRIPT]
SCRIPT-NAME = test-script
[MISC]
MARKER-ID = 12345345
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
- with mock.patch(MPATH + 'get_tools_config', return_value='true'):
+ with mock.patch(MPATH + "get_tools_config", return_value="true"):
with self.assertRaises(CustomScriptNotFound) as context:
wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
- customscript = self.tmp_path('test-script', self.tdir)
- self.assertIn('Script %s not found!!' % customscript,
- str(context.exception))
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
def test_get_data_cust_script_disabled(self):
"""If custom script is disabled by VMware tools configuration,
raise a RuntimeError.
"""
- paths = Paths({'cloud_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
# Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[CUSTOM-SCRIPT]
SCRIPT-NAME = test-script
[MISC]
MARKER-ID = 12345346
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
# Prepare the custom sript
- customscript = self.tmp_path('test-script', self.tdir)
+ customscript = self.tmp_path("test-script", self.tdir)
util.write_file(customscript, "This is the post cust script")
- with mock.patch(MPATH + 'get_tools_config', return_value='invalid'):
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
+ with mock.patch(MPATH + "get_tools_config", return_value="invalid"):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
with self.assertRaises(RuntimeError) as context:
wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
- self.assertIn('Custom script is disabled by VM Administrator',
- str(context.exception))
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ self.assertIn(
+ "Custom script is disabled by VM Administrator",
+ str(context.exception),
+ )
def test_get_data_cust_script_enabled(self):
"""If custom script is enabled by VMware tools configuration,
execute the script.
"""
- paths = Paths({'cloud_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
# Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[CUSTOM-SCRIPT]
SCRIPT-NAME = test-script
[MISC]
MARKER-ID = 12345346
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
# Mock custom script is enabled by return true when calling
# get_tools_config
- with mock.patch(MPATH + 'get_tools_config', return_value="true"):
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
+ with mock.patch(MPATH + "get_tools_config", return_value="true"):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
with self.assertRaises(CustomScriptNotFound) as context:
wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
# Verify custom script is trying to be executed
- customscript = self.tmp_path('test-script', self.tdir)
- self.assertIn('Script %s not found!!' % customscript,
- str(context.exception))
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
def test_get_data_force_run_post_script_is_yes(self):
"""If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if
enable-custom-scripts is not defined in VM Tools configuration
"""
- paths = Paths({'cloud_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
# Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_file = self.tmp_path("test-cust", self.tdir)
# set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts
# default value is TRUE
- conf_content = dedent("""\
+ conf_content = dedent(
+ """\
[CUSTOM-SCRIPT]
SCRIPT-NAME = test-script
[MISC]
MARKER-ID = 12345346
DEFAULT-RUN-POST-CUST-SCRIPT = yes
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
# Mock get_tools_config(section, key, defaultVal) to return
@@ -457,81 +540,89 @@ class TestDatasourceOVF(CiTestCase):
def my_get_tools_config(*args, **kwargs):
return args[2]
- with mock.patch(MPATH + 'get_tools_config',
- side_effect=my_get_tools_config):
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
+ with mock.patch(
+ MPATH + "get_tools_config", side_effect=my_get_tools_config
+ ):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
with self.assertRaises(CustomScriptNotFound) as context:
wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
# Verify custom script still runs although it is
# disabled by VMware Tools
- customscript = self.tmp_path('test-script', self.tdir)
- self.assertIn('Script %s not found!!' % customscript,
- str(context.exception))
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
def test_get_data_non_vmware_seed_platform_info(self):
"""Platform info properly reports when on non-vmware platforms."""
- paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
# Write ovf-env.xml seed file
- seed_dir = self.tmp_path('seed', dir=self.tdir)
- ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir)
+ seed_dir = self.tmp_path("seed", dir=self.tdir)
+ ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir)
util.write_file(ovf_env, OVF_ENV_CONTENT)
ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
- self.assertEqual('ovf', ds.cloud_name)
- self.assertEqual('ovf', ds.platform_type)
- with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='!VMware'):
- with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd:
- with mock.patch(MPATH + 'transport_iso9660') as m_iso9660:
+ self.assertEqual("ovf", ds.cloud_name)
+ self.assertEqual("ovf", ds.platform_type)
+ with mock.patch(MPATH + "dmi.read_dmi_data", return_value="!VMware"):
+ with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd:
+ with mock.patch(MPATH + "transport_iso9660") as m_iso9660:
m_iso9660.return_value = NOT_FOUND
m_guestd.return_value = NOT_FOUND
self.assertTrue(ds.get_data())
self.assertEqual(
- 'ovf (%s/seed/ovf-env.xml)' % self.tdir,
- ds.subplatform)
+ "ovf (%s/seed/ovf-env.xml)" % self.tdir, ds.subplatform
+ )
def test_get_data_vmware_seed_platform_info(self):
"""Platform info properly reports when on VMware platform."""
- paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
# Write ovf-env.xml seed file
- seed_dir = self.tmp_path('seed', dir=self.tdir)
- ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir)
+ seed_dir = self.tmp_path("seed", dir=self.tdir)
+ ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir)
util.write_file(ovf_env, OVF_ENV_CONTENT)
ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
- self.assertEqual('ovf', ds.cloud_name)
- self.assertEqual('ovf', ds.platform_type)
- with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='VMWare'):
- with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd:
- with mock.patch(MPATH + 'transport_iso9660') as m_iso9660:
+ self.assertEqual("ovf", ds.cloud_name)
+ self.assertEqual("ovf", ds.platform_type)
+ with mock.patch(MPATH + "dmi.read_dmi_data", return_value="VMWare"):
+ with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd:
+ with mock.patch(MPATH + "transport_iso9660") as m_iso9660:
m_iso9660.return_value = NOT_FOUND
m_guestd.return_value = NOT_FOUND
self.assertTrue(ds.get_data())
self.assertEqual(
- 'vmware (%s/seed/ovf-env.xml)' % self.tdir,
- ds.subplatform)
+ "vmware (%s/seed/ovf-env.xml)" % self.tdir,
+ ds.subplatform,
+ )
- @mock.patch('cloudinit.subp.subp')
- @mock.patch('cloudinit.sources.DataSource.persist_instance_data')
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch("cloudinit.sources.DataSource.persist_instance_data")
def test_get_data_vmware_guestinfo_with_network_config(
self, m_persist, m_subp
):
self._test_get_data_with_network_config(guestinfo=False, iso=True)
- @mock.patch('cloudinit.subp.subp')
- @mock.patch('cloudinit.sources.DataSource.persist_instance_data')
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch("cloudinit.sources.DataSource.persist_instance_data")
def test_get_data_iso9660_with_network_config(self, m_persist, m_subp):
self._test_get_data_with_network_config(guestinfo=True, iso=False)
def _test_get_data_with_network_config(self, guestinfo, iso):
- network_config = dedent("""\
+ network_config = dedent(
+ """\
network:
version: 2
ethernets:
@@ -547,50 +638,69 @@ class TestDatasourceOVF(CiTestCase):
dhcp4: false
addresses:
- 10.10.10.1/24
- """)
+ """
+ )
network_config_b64 = base64.b64encode(network_config.encode()).decode()
- props = {"network-config": network_config_b64,
- "password": "passw0rd",
- "instance-id": "inst-001"}
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
env = fill_properties(props)
- paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
- with mock.patch(MPATH + 'transport_vmware_guestinfo',
- return_value=env if guestinfo else NOT_FOUND):
- with mock.patch(MPATH + 'transport_iso9660',
- return_value=env if iso else NOT_FOUND):
+ with mock.patch(
+ MPATH + "transport_vmware_guestinfo",
+ return_value=env if guestinfo else NOT_FOUND,
+ ):
+ with mock.patch(
+ MPATH + "transport_iso9660",
+ return_value=env if iso else NOT_FOUND,
+ ):
self.assertTrue(ds.get_data())
- self.assertEqual('inst-001', ds.metadata['instance-id'])
+ self.assertEqual("inst-001", ds.metadata["instance-id"])
self.assertEqual(
- {'version': 2, 'ethernets':
- {'nics':
- {'nameservers':
- {'addresses': ['127.0.0.53'],
- 'search': ['vmware.com']},
- 'match': {'name': 'eth*'},
- 'gateway4': '10.10.10.253',
- 'dhcp4': False,
- 'addresses': ['10.10.10.1/24']}}},
- ds.network_config)
+ {
+ "version": 2,
+ "ethernets": {
+ "nics": {
+ "nameservers": {
+ "addresses": ["127.0.0.53"],
+ "search": ["vmware.com"],
+ },
+ "match": {"name": "eth*"},
+ "gateway4": "10.10.10.253",
+ "dhcp4": False,
+ "addresses": ["10.10.10.1/24"],
+ }
+ },
+ },
+ ds.network_config,
+ )
def test_get_data_cloudinit_metadata_json(self):
"""Test metadata can be loaded to cloud-init metadata and network.
The metadata format is json.
"""
- paths = Paths({'cloud_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': True}, distro={},
- paths=paths)
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
# Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[CLOUDINIT]
METADATA = test-meta
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
# Prepare the meta data file
- metadata_file = self.tmp_path('test-meta', self.tdir)
- metadata_content = dedent("""\
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
{
"instance-id": "cloud-vm",
"local-hostname": "my-host.domain.com",
@@ -606,45 +716,59 @@ class TestDatasourceOVF(CiTestCase):
}
}
}
- """)
+ """
+ )
util.write_file(metadata_file, metadata_content)
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
result = wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''],
- 'get_nics_to_enable': ''},
- ds._get_data)
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
self.assertTrue(result)
- self.assertEqual("cloud-vm", ds.metadata['instance-id'])
- self.assertEqual("my-host.domain.com", ds.metadata['local-hostname'])
- self.assertEqual(2, ds.network_config['version'])
- self.assertTrue(ds.network_config['ethernets']['eths']['dhcp4'])
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
+ self.assertEqual(2, ds.network_config["version"])
+ self.assertTrue(ds.network_config["ethernets"]["eths"]["dhcp4"])
def test_get_data_cloudinit_metadata_yaml(self):
"""Test metadata can be loaded to cloud-init metadata and network.
The metadata format is yaml.
"""
- paths = Paths({'cloud_dir': self.tdir})
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': True}, distro={},
- paths=paths)
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
# Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[CLOUDINIT]
METADATA = test-meta
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
# Prepare the meta data file
- metadata_file = self.tmp_path('test-meta', self.tdir)
- metadata_content = dedent("""\
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
instance-id: cloud-vm
local-hostname: my-host.domain.com
network:
@@ -654,116 +778,147 @@ class TestDatasourceOVF(CiTestCase):
match:
name: ens*
dhcp4: yes
- """)
+ """
+ )
util.write_file(metadata_file, metadata_content)
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
result = wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''],
- 'get_nics_to_enable': ''},
- ds._get_data)
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
self.assertTrue(result)
- self.assertEqual("cloud-vm", ds.metadata['instance-id'])
- self.assertEqual("my-host.domain.com", ds.metadata['local-hostname'])
- self.assertEqual(2, ds.network_config['version'])
- self.assertTrue(ds.network_config['ethernets']['nics']['dhcp4'])
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
+ self.assertEqual(2, ds.network_config["version"])
+ self.assertTrue(ds.network_config["ethernets"]["nics"]["dhcp4"])
def test_get_data_cloudinit_metadata_not_valid(self):
- """Test metadata is not JSON or YAML format.
- """
- paths = Paths({'cloud_dir': self.tdir})
+ """Test metadata is not JSON or YAML format."""
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': True}, distro={},
- paths=paths)
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
# Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[CLOUDINIT]
METADATA = test-meta
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
# Prepare the meta data file
- metadata_file = self.tmp_path('test-meta', self.tdir)
+ metadata_file = self.tmp_path("test-meta", self.tdir)
metadata_content = "[This is not json or yaml format]a=b"
util.write_file(metadata_file, metadata_content)
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
with self.assertRaises(YAMLError) as context:
wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'collect_imc_file_paths': [
- self.tdir + '/test-meta', '', ''
- ],
- 'get_nics_to_enable': ''},
- ds.get_data)
-
- self.assertIn("expected '<document start>', but found '<scalar>'",
- str(context.exception))
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+
+ self.assertIn(
+ "expected '<document start>', but found '<scalar>'",
+ str(context.exception),
+ )
def test_get_data_cloudinit_metadata_not_found(self):
- """Test metadata file can't be found.
- """
- paths = Paths({'cloud_dir': self.tdir})
+ """Test metadata file can't be found."""
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': True}, distro={},
- paths=paths)
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
# Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[CLOUDINIT]
METADATA = test-meta
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
# Don't prepare the meta data file
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
with self.assertRaises(FileNotFoundError) as context:
wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
- self.assertIn('is not found', str(context.exception))
+ self.assertIn("is not found", str(context.exception))
def test_get_data_cloudinit_userdata(self):
- """Test user data can be loaded to cloud-init user data.
- """
- paths = Paths({'cloud_dir': self.tdir})
+ """Test user data can be loaded to cloud-init user data."""
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
# Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[CLOUDINIT]
METADATA = test-meta
USERDATA = test-user
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
# Prepare the meta data file
- metadata_file = self.tmp_path('test-meta', self.tdir)
- metadata_content = dedent("""\
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
instance-id: cloud-vm
local-hostname: my-host.domain.com
network:
@@ -773,51 +928,63 @@ class TestDatasourceOVF(CiTestCase):
match:
name: ens*
dhcp4: yes
- """)
+ """
+ )
util.write_file(metadata_file, metadata_content)
# Prepare the user data file
- userdata_file = self.tmp_path('test-user', self.tdir)
+ userdata_file = self.tmp_path("test-user", self.tdir)
userdata_content = "This is the user data"
util.write_file(userdata_file, userdata_content)
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
result = wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'collect_imc_file_paths': [self.tdir + '/test-meta',
- self.tdir + '/test-user', ''],
- 'get_nics_to_enable': ''},
- ds._get_data)
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ self.tdir + "/test-user",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
self.assertTrue(result)
- self.assertEqual("cloud-vm", ds.metadata['instance-id'])
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
self.assertEqual(userdata_content, ds.userdata_raw)
def test_get_data_cloudinit_userdata_not_found(self):
- """Test userdata file can't be found.
- """
- paths = Paths({'cloud_dir': self.tdir})
+ """Test userdata file can't be found."""
+ paths = Paths({"cloud_dir": self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': True}, distro={},
- paths=paths)
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
# Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
[CLOUDINIT]
METADATA = test-meta
USERDATA = test-user
- """)
+ """
+ )
util.write_file(conf_file, conf_content)
# Prepare the meta data file
- metadata_file = self.tmp_path('test-meta', self.tdir)
- metadata_content = dedent("""\
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
instance-id: cloud-vm
local-hostname: my-host.domain.com
network:
@@ -827,45 +994,49 @@ class TestDatasourceOVF(CiTestCase):
match:
name: ens*
dhcp4: yes
- """)
+ """
+ )
util.write_file(metadata_file, metadata_content)
# Don't prepare the user data file
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
with self.assertRaises(FileNotFoundError) as context:
wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
- self.assertIn('is not found', str(context.exception))
+ self.assertIn("is not found", str(context.exception))
class TestTransportIso9660(CiTestCase):
-
def setUp(self):
super(TestTransportIso9660, self).setUp()
- self.add_patch('cloudinit.util.find_devs_with',
- 'm_find_devs_with')
- self.add_patch('cloudinit.util.mounts', 'm_mounts')
- self.add_patch('cloudinit.util.mount_cb', 'm_mount_cb')
- self.add_patch('cloudinit.sources.DataSourceOVF.get_ovf_env',
- 'm_get_ovf_env')
- self.m_get_ovf_env.return_value = ('myfile', 'mycontent')
+ self.add_patch("cloudinit.util.find_devs_with", "m_find_devs_with")
+ self.add_patch("cloudinit.util.mounts", "m_mounts")
+ self.add_patch("cloudinit.util.mount_cb", "m_mount_cb")
+ self.add_patch(
+ "cloudinit.sources.DataSourceOVF.get_ovf_env", "m_get_ovf_env"
+ )
+ self.m_get_ovf_env.return_value = ("myfile", "mycontent")
def test_find_already_mounted(self):
"""Check we call get_ovf_env from on matching mounted devices"""
mounts = {
- '/dev/sr9': {
- 'fstype': 'iso9660',
- 'mountpoint': 'wark/media/sr9',
- 'opts': 'ro',
+ "/dev/sr9": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
}
}
self.m_mounts.return_value = mounts
@@ -875,33 +1046,34 @@ class TestTransportIso9660(CiTestCase):
def test_find_already_mounted_skips_non_iso9660(self):
"""Check we call get_ovf_env ignoring non iso9660"""
mounts = {
- '/dev/xvdb': {
- 'fstype': 'vfat',
- 'mountpoint': 'wark/foobar',
- 'opts': 'defaults,noatime',
+ "/dev/xvdb": {
+ "fstype": "vfat",
+ "mountpoint": "wark/foobar",
+ "opts": "defaults,noatime",
+ },
+ "/dev/xvdc": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
},
- '/dev/xvdc': {
- 'fstype': 'iso9660',
- 'mountpoint': 'wark/media/sr9',
- 'opts': 'ro',
- }
}
# We use an OrderedDict here to ensure we check xvdb before xvdc
# as we're not mocking the regex matching, however, if we place
# an entry in the results then we can be reasonably sure that
# we're skipping an entry which fails to match.
- self.m_mounts.return_value = (
- OrderedDict(sorted(mounts.items(), key=lambda t: t[0])))
+ self.m_mounts.return_value = OrderedDict(
+ sorted(mounts.items(), key=lambda t: t[0])
+ )
self.assertEqual("mycontent", dsovf.transport_iso9660())
def test_find_already_mounted_matches_kname(self):
"""Check we dont regex match on basename of the device"""
mounts = {
- '/dev/foo/bar/xvdc': {
- 'fstype': 'iso9660',
- 'mountpoint': 'wark/media/sr9',
- 'opts': 'ro',
+ "/dev/foo/bar/xvdc": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
}
}
# we're skipping an entry which fails to match.
@@ -912,28 +1084,33 @@ class TestTransportIso9660(CiTestCase):
def test_mount_cb_called_on_blkdevs_with_iso9660(self):
"""Check we call mount_cb on blockdevs with iso9660 only"""
self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/sr0']
+ self.m_find_devs_with.return_value = ["/dev/sr0"]
self.m_mount_cb.return_value = ("myfile", "mycontent")
self.assertEqual("mycontent", dsovf.transport_iso9660())
self.m_mount_cb.assert_called_with(
- "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660")
+ "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660"
+ )
def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self):
"""Check we call mount_cb on blockdevs with iso9660 and match regex"""
self.m_mounts.return_value = {}
self.m_find_devs_with.return_value = [
- '/dev/abc', '/dev/my-cdrom', '/dev/sr0']
+ "/dev/abc",
+ "/dev/my-cdrom",
+ "/dev/sr0",
+ ]
self.m_mount_cb.return_value = ("myfile", "mycontent")
self.assertEqual("mycontent", dsovf.transport_iso9660())
self.m_mount_cb.assert_called_with(
- "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660")
+ "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660"
+ )
def test_mount_cb_not_called_no_matches(self):
"""Check we don't call mount_cb if nothing matches"""
self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/vg/myovf']
+ self.m_find_devs_with.return_value = ["/dev/vg/myovf"]
self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
self.assertEqual(0, self.m_mount_cb.call_count)
@@ -941,76 +1118,85 @@ class TestTransportIso9660(CiTestCase):
def test_mount_cb_called_require_iso_false(self):
"""Check we call mount_cb on blockdevs with require_iso=False"""
self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/xvdz']
+ self.m_find_devs_with.return_value = ["/dev/xvdz"]
self.m_mount_cb.return_value = ("myfile", "mycontent")
self.assertEqual(
- "mycontent", dsovf.transport_iso9660(require_iso=False))
+ "mycontent", dsovf.transport_iso9660(require_iso=False)
+ )
self.m_mount_cb.assert_called_with(
- "/dev/xvdz", dsovf.get_ovf_env, mtype=None)
+ "/dev/xvdz", dsovf.get_ovf_env, mtype=None
+ )
def test_maybe_cdrom_device_none(self):
"""Test maybe_cdrom_device returns False for none/empty input"""
self.assertFalse(dsovf.maybe_cdrom_device(None))
- self.assertFalse(dsovf.maybe_cdrom_device(''))
+ self.assertFalse(dsovf.maybe_cdrom_device(""))
def test_maybe_cdrom_device_non_string_exception(self):
"""Test maybe_cdrom_device raises ValueError on non-string types"""
with self.assertRaises(ValueError):
- dsovf.maybe_cdrom_device({'a': 'eleven'})
+ dsovf.maybe_cdrom_device({"a": "eleven"})
def test_maybe_cdrom_device_false_on_multi_dir_paths(self):
"""Test maybe_cdrom_device is false on /dev[/.*]/* paths"""
- self.assertFalse(dsovf.maybe_cdrom_device('/dev/foo/sr0'))
- self.assertFalse(dsovf.maybe_cdrom_device('foo/sr0'))
- self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0'))
- self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0'))
+ self.assertFalse(dsovf.maybe_cdrom_device("/dev/foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0"))
def test_maybe_cdrom_device_true_on_hd_partitions(self):
"""Test maybe_cdrom_device is false on /dev/hd[a-z][0-9]+ paths"""
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/hda1'))
- self.assertTrue(dsovf.maybe_cdrom_device('hdz9'))
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/hda1"))
+ self.assertTrue(dsovf.maybe_cdrom_device("hdz9"))
def test_maybe_cdrom_device_true_on_valid_relative_paths(self):
"""Test maybe_cdrom_device normalizes paths"""
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/wark/../sr9'))
- self.assertTrue(dsovf.maybe_cdrom_device('///sr0'))
- self.assertTrue(dsovf.maybe_cdrom_device('/sr0'))
- self.assertTrue(dsovf.maybe_cdrom_device('//dev//hda'))
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/wark/../sr9"))
+ self.assertTrue(dsovf.maybe_cdrom_device("///sr0"))
+ self.assertTrue(dsovf.maybe_cdrom_device("/sr0"))
+ self.assertTrue(dsovf.maybe_cdrom_device("//dev//hda"))
def test_maybe_cdrom_device_true_on_xvd_partitions(self):
"""Test maybe_cdrom_device returns true on xvd*"""
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda'))
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda1'))
- self.assertTrue(dsovf.maybe_cdrom_device('xvdza1'))
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda"))
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda1"))
+ self.assertTrue(dsovf.maybe_cdrom_device("xvdza1"))
@mock.patch(MPATH + "subp.which")
@mock.patch(MPATH + "subp.subp")
class TestTransportVmwareGuestinfo(CiTestCase):
"""Test the com.vmware.guestInfo transport implemented in
- transport_vmware_guestinfo."""
+ transport_vmware_guestinfo."""
- rpctool = 'vmware-rpctool'
+ rpctool = "vmware-rpctool"
with_logs = True
- rpctool_path = '/not/important/vmware-rpctool'
+ rpctool_path = "/not/important/vmware-rpctool"
def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which):
m_which.return_value = None
self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
- self.assertEqual(0, m_subp.call_count,
- "subp should not be called if no rpctool in path.")
+ self.assertEqual(
+ 0,
+ m_subp.call_count,
+ "subp should not be called if no rpctool in path.",
+ )
def test_notfound_on_exit_code_1(self, m_subp, m_which):
"""If vmware-rpctool exits 1, then must return not found."""
m_which.return_value = self.rpctool_path
m_subp.side_effect = subp.ProcessExecutionError(
- stdout="", stderr="No value found", exit_code=1, cmd=["unused"])
+ stdout="", stderr="No value found", exit_code=1, cmd=["unused"]
+ )
self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
self.assertEqual(1, m_subp.call_count)
- self.assertNotIn("WARNING", self.logs.getvalue(),
- "exit code of 1 by rpctool should not cause warning.")
+ self.assertNotIn(
+ "WARNING",
+ self.logs.getvalue(),
+ "exit code of 1 by rpctool should not cause warning.",
+ )
def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which):
"""If vmware-rpctool exited 0 with no stdout is normal not-found.
@@ -1020,7 +1206,7 @@ class TestTransportVmwareGuestinfo(CiTestCase):
the case where it exited 0 and just wrote nothing to stdout.
"""
m_which.return_value = self.rpctool_path
- m_subp.return_value = ('', '')
+ m_subp.return_value = ("", "")
self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
self.assertEqual(1, m_subp.call_count)
@@ -1028,19 +1214,24 @@ class TestTransportVmwareGuestinfo(CiTestCase):
"""If vmware-rpctool exits non zero or 1, warnings should be logged."""
m_which.return_value = self.rpctool_path
m_subp.side_effect = subp.ProcessExecutionError(
- stdout=None, stderr="No value found", exit_code=2, cmd=["unused"])
+ stdout=None, stderr="No value found", exit_code=2, cmd=["unused"]
+ )
self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
self.assertEqual(1, m_subp.call_count)
- self.assertIn("WARNING", self.logs.getvalue(),
- "exit code of 2 by rpctool should log WARNING.")
+ self.assertIn(
+ "WARNING",
+ self.logs.getvalue(),
+ "exit code of 2 by rpctool should log WARNING.",
+ )
def test_found_when_guestinfo_present(self, m_subp, m_which):
"""When there is a ovf info, transport should return it."""
m_which.return_value = self.rpctool_path
content = fill_properties({})
- m_subp.return_value = (content, '')
+ m_subp.return_value = (content, "")
self.assertEqual(content, dsovf.transport_vmware_guestinfo())
self.assertEqual(1, m_subp.call_count)
+
#
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_rbx.py b/tests/unittests/sources/test_rbx.py
index c1294c92..475bf498 100644
--- a/tests/unittests/sources/test_rbx.py
+++ b/tests/unittests/sources/test_rbx.py
@@ -1,38 +1,42 @@
import json
-from cloudinit import helpers
-from cloudinit import distros
+from cloudinit import distros, helpers, subp
from cloudinit.sources import DataSourceRbxCloud as ds
-from tests.unittests.helpers import mock, CiTestCase, populate_dir
-from cloudinit import subp
+from tests.unittests.helpers import CiTestCase, mock, populate_dir
DS_PATH = "cloudinit.sources.DataSourceRbxCloud"
-CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \
- "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \
- "tToyGP41.s1"
+CRYPTO_PASS = (
+ "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f"
+ "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5"
+ "tToyGP41.s1"
+)
CLOUD_METADATA = {
"vm": {
"memory": 4,
"cpu": 2,
"name": "vm-image-builder",
- "_id": "5beab44f680cffd11f0e60fc"
+ "_id": "5beab44f680cffd11f0e60fc",
},
"additionalMetadata": {
"username": "guru",
"sshKeys": ["ssh-rsa ..."],
- "password": {
- "sha512": CRYPTO_PASS
- }
+ "password": {"sha512": CRYPTO_PASS},
},
"disk": [
- {"size": 10, "type": "ssd",
- "name": "vm-image-builder-os",
- "_id": "5beab450680cffd11f0e60fe"},
- {"size": 2, "type": "ssd",
- "name": "ubuntu-1804-bionic",
- "_id": "5bef002c680cffd11f107590"}
+ {
+ "size": 10,
+ "type": "ssd",
+ "name": "vm-image-builder-os",
+ "_id": "5beab450680cffd11f0e60fe",
+ },
+ {
+ "size": 2,
+ "type": "ssd",
+ "name": "ubuntu-1804-bionic",
+ "_id": "5bef002c680cffd11f107590",
+ },
],
"netadp": [
{
@@ -44,12 +48,12 @@ CLOUD_METADATA = {
"netmask": "255.255.248.0",
"name": "public",
"type": "public",
- "_id": "5784e97be2627505227b578c"
+ "_id": "5784e97be2627505227b578c",
},
"speed": 1000,
"type": "hv",
"macaddress": "00:15:5D:FF:0F:03",
- "_id": "5beab450680cffd11f0e6102"
+ "_id": "5beab450680cffd11f0e6102",
},
{
"ip": [{"address": "10.209.78.11"}],
@@ -60,21 +64,21 @@ CLOUD_METADATA = {
"netmask": "255.255.255.0",
"name": "network-determined-bardeen",
"type": "private",
- "_id": "5beaec64680cffd11f0e7c31"
+ "_id": "5beaec64680cffd11f0e7c31",
},
"speed": 1000,
"type": "hv",
"macaddress": "00:15:5D:FF:0F:24",
- "_id": "5bec18c6680cffd11f0f0d8b"
- }
+ "_id": "5bec18c6680cffd11f0f0d8b",
+ },
],
- "dvddrive": [{"iso": {}}]
+ "dvddrive": [{"iso": {}}],
}
class TestRbxDataSource(CiTestCase):
parsed_user = None
- allowed_subp = ['bash']
+ allowed_subp = ["bash"]
def _fetch_distro(self, kind):
cls = distros.fetch(kind)
@@ -85,30 +89,30 @@ class TestRbxDataSource(CiTestCase):
super(TestRbxDataSource, self).setUp()
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp}
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
)
# defaults for few tests
self.ds = ds.DataSourceRbxCloud
self.seed_dir = self.paths.seed_dir
- self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}}
+ self.sys_cfg = {"datasource": {"RbxCloud": {"dsmode": "local"}}}
def test_seed_read_user_data_callback_empty_file(self):
- populate_user_metadata(self.seed_dir, '')
+ populate_user_metadata(self.seed_dir, "")
populate_cloud_metadata(self.seed_dir, {})
results = ds.read_user_data_callback(self.seed_dir)
self.assertIsNone(results)
def test_seed_read_user_data_callback_valid_disk(self):
- populate_user_metadata(self.seed_dir, '')
+ populate_user_metadata(self.seed_dir, "")
populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
results = ds.read_user_data_callback(self.seed_dir)
self.assertNotEqual(results, None)
- self.assertTrue('userdata' in results)
- self.assertTrue('metadata' in results)
- self.assertTrue('cfg' in results)
+ self.assertTrue("userdata" in results)
+ self.assertTrue("metadata" in results)
+ self.assertTrue("cfg" in results)
def test_seed_read_user_data_callback_userdata(self):
userdata = "#!/bin/sh\nexit 1"
@@ -118,121 +122,120 @@ class TestRbxDataSource(CiTestCase):
results = ds.read_user_data_callback(self.seed_dir)
self.assertNotEqual(results, None)
- self.assertTrue('userdata' in results)
- self.assertEqual(results['userdata'], userdata)
+ self.assertTrue("userdata" in results)
+ self.assertEqual(results["userdata"], userdata)
def test_generate_network_config(self):
expected = {
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
{
- 'subnets': [
- {'control': 'auto',
- 'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
- 'netmask': '255.255.248.0',
- 'address': '62.181.8.174',
- 'type': 'static', 'gateway': '62.181.8.1'}
+ "subnets": [
+ {
+ "control": "auto",
+ "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
+ "netmask": "255.255.248.0",
+ "address": "62.181.8.174",
+ "type": "static",
+ "gateway": "62.181.8.1",
+ }
],
- 'type': 'physical',
- 'name': 'eth0',
- 'mac_address': '00:15:5d:ff:0f:03'
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:15:5d:ff:0f:03",
},
{
- 'subnets': [
- {'control': 'auto',
- 'dns_nameservers': ['9.9.9.9', '8.8.8.8'],
- 'netmask': '255.255.255.0',
- 'address': '10.209.78.11',
- 'type': 'static',
- 'gateway': '10.209.78.1'}
+ "subnets": [
+ {
+ "control": "auto",
+ "dns_nameservers": ["9.9.9.9", "8.8.8.8"],
+ "netmask": "255.255.255.0",
+ "address": "10.209.78.11",
+ "type": "static",
+ "gateway": "10.209.78.1",
+ }
],
- 'type': 'physical',
- 'name': 'eth1',
- 'mac_address': '00:15:5d:ff:0f:24'
- }
- ]
+ "type": "physical",
+ "name": "eth1",
+ "mac_address": "00:15:5d:ff:0f:24",
+ },
+ ],
}
self.assertTrue(
- ds.generate_network_config(CLOUD_METADATA['netadp']),
- expected
+ ds.generate_network_config(CLOUD_METADATA["netadp"]), expected
)
- @mock.patch(DS_PATH + '.subp.subp')
+ @mock.patch(DS_PATH + ".subp.subp")
def test_gratuitous_arp_run_standard_arping(self, m_subp):
"""Test handle run arping & parameters."""
items = [
+ {"destination": "172.17.0.2", "source": "172.16.6.104"},
{
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104'
- },
- {
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104',
+ "destination": "172.17.0.2",
+ "source": "172.16.6.104",
},
]
- ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
- self.assertEqual([
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ]),
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ])
- ], m_subp.call_args_list
+ ds.gratuitous_arp(items, self._fetch_distro("ubuntu"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ ],
+ m_subp.call_args_list,
)
- @mock.patch(DS_PATH + '.subp.subp')
+ @mock.patch(DS_PATH + ".subp.subp")
def test_handle_rhel_like_arping(self, m_subp):
"""Test handle on RHEL-like distros."""
items = [
{
- 'source': '172.16.6.104',
- 'destination': '172.17.0.2',
+ "source": "172.16.6.104",
+ "destination": "172.17.0.2",
}
]
- ds.gratuitous_arp(items, self._fetch_distro('fedora'))
- self.assertEqual([
- mock.call(
- ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2']
- )],
- m_subp.call_args_list
+ ds.gratuitous_arp(items, self._fetch_distro("fedora"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-s", "172.16.6.104", "172.17.0.2"]
+ )
+ ],
+ m_subp.call_args_list,
)
@mock.patch(
- DS_PATH + '.subp.subp',
- side_effect=subp.ProcessExecutionError()
+ DS_PATH + ".subp.subp", side_effect=subp.ProcessExecutionError()
)
def test_continue_on_arping_error(self, m_subp):
"""Continue when command error"""
items = [
+ {"destination": "172.17.0.2", "source": "172.16.6.104"},
{
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104'
- },
- {
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104',
+ "destination": "172.17.0.2",
+ "source": "172.16.6.104",
},
]
- ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
- self.assertEqual([
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ]),
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ])
- ], m_subp.call_args_list
+ ds.gratuitous_arp(items, self._fetch_distro("ubuntu"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ ],
+ m_subp.call_args_list,
)
def populate_cloud_metadata(path, data):
- populate_dir(path, {'cloud.json': json.dumps(data)})
+ populate_dir(path, {"cloud.json": json.dumps(data)})
def populate_user_metadata(path, data):
- populate_dir(path, {'user.data': data})
+ populate_dir(path, {"user.data": data})
diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py
index 33ae26b8..d7e8b969 100644
--- a/tests/unittests/sources/test_scaleway.py
+++ b/tests/unittests/sources/test_scaleway.py
@@ -5,12 +5,9 @@ import json
import httpretty
import requests
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit import sources
+from cloudinit import helpers, settings, sources
from cloudinit.sources import DataSourceScaleway
-
-from tests.unittests.helpers import mock, HttprettyTestCase, CiTestCase
+from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock
class DataResponses(object):
@@ -24,11 +21,11 @@ class DataResponses(object):
@staticmethod
def rate_limited(method, uri, headers):
- return 429, headers, ''
+ return 429, headers, ""
@staticmethod
def api_error(method, uri, headers):
- return 500, headers, ''
+ return 500, headers, ""
@classmethod
def get_ok(cls, method, uri, headers):
@@ -39,7 +36,7 @@ class DataResponses(object):
"""
No user data for this server.
"""
- return 404, headers, ''
+ return 404, headers, ""
class MetadataResponses(object):
@@ -48,18 +45,21 @@ class MetadataResponses(object):
"""
FAKE_METADATA = {
- 'id': '00000000-0000-0000-0000-000000000000',
- 'hostname': 'scaleway.host',
- 'tags': [
+ "id": "00000000-0000-0000-0000-000000000000",
+ "hostname": "scaleway.host",
+ "tags": [
"AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
],
- 'ssh_public_keys': [{
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- 'fingerprint': '2048 06:ae:... login (RSA)'
- }, {
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'fingerprint': '2048 06:ff:... login2 (RSA)'
- }]
+ "ssh_public_keys": [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ],
}
@classmethod
@@ -68,46 +68,49 @@ class MetadataResponses(object):
class TestOnScaleway(CiTestCase):
-
def setUp(self):
super(TestOnScaleway, self).setUp()
self.tmp = self.tmp_dir()
def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline):
mock, faked = fake_dmi
- mock.return_value = 'Scaleway' if faked else 'Whatever'
+ mock.return_value = "Scaleway" if faked else "Whatever"
mock, faked = fake_file_exists
mock.return_value = faked
mock, faked = fake_cmdline
- mock.return_value = \
- 'initrd=initrd showopts scaleway nousb' if faked \
- else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic'
-
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
+ mock.return_value = (
+ "initrd=initrd showopts scaleway nousb"
+ if faked
+ else "BOOT_IMAGE=/vmlinuz-3.11.0-26-generic"
+ )
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_not_on_scaleway(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
self.install_mocks(
fake_dmi=(m_read_dmi_data, False),
fake_file_exists=(m_file_exists, False),
- fake_cmdline=(m_get_cmdline, False)
+ fake_cmdline=(m_get_cmdline, False),
)
self.assertFalse(DataSourceScaleway.on_scaleway())
# When not on Scaleway, get_data() returns False.
datasource = DataSourceScaleway.DataSourceScaleway(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
)
self.assertFalse(datasource.get_data())
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_dmi(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
"""
dmidecode returns "Scaleway".
"""
@@ -115,37 +118,39 @@ class TestOnScaleway(CiTestCase):
self.install_mocks(
fake_dmi=(m_read_dmi_data, True),
fake_file_exists=(m_file_exists, False),
- fake_cmdline=(m_get_cmdline, False)
+ fake_cmdline=(m_get_cmdline, False),
)
self.assertTrue(DataSourceScaleway.on_scaleway())
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_var_run_scaleway(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
"""
/var/run/scaleway exists.
"""
self.install_mocks(
fake_dmi=(m_read_dmi_data, False),
fake_file_exists=(m_file_exists, True),
- fake_cmdline=(m_get_cmdline, False)
+ fake_cmdline=(m_get_cmdline, False),
)
self.assertTrue(DataSourceScaleway.on_scaleway())
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_cmdline(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
"""
"scaleway" in /proc/cmdline.
"""
self.install_mocks(
fake_dmi=(m_read_dmi_data, False),
fake_file_exists=(m_file_exists, False),
- fake_cmdline=(m_get_cmdline, True)
+ fake_cmdline=(m_get_cmdline, True),
)
self.assertTrue(DataSourceScaleway.on_scaleway())
@@ -160,65 +165,86 @@ def get_source_address_adapter(*args, **kwargs):
This function removes the bind on a privileged address, since anyway the
HTTP call is mocked by httpretty.
"""
- kwargs.pop('source_address')
+ kwargs.pop("source_address")
return requests.adapters.HTTPAdapter(*args, **kwargs)
class TestDataSourceScaleway(HttprettyTestCase):
-
def setUp(self):
tmp = self.tmp_dir()
self.datasource = DataSourceScaleway.DataSourceScaleway(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp})
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp})
)
super(TestDataSourceScaleway, self).setUp()
- self.metadata_url = \
- DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url']
- self.userdata_url = \
- DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url']
- self.vendordata_url = \
- DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url']
+ self.metadata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "metadata_url"
+ ]
+ self.userdata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "userdata_url"
+ ]
+ self.vendordata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "vendordata_url"
+ ]
- self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway',
- '_m_on_scaleway', return_value=True)
self.add_patch(
- 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic',
- '_m_find_fallback_nic', return_value='scalewaynic0')
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
+ "cloudinit.sources.DataSourceScaleway.on_scaleway",
+ "_m_on_scaleway",
+ return_value=True,
+ )
+ self.add_patch(
+ "cloudinit.sources.DataSourceScaleway.net.find_fallback_nic",
+ "_m_find_fallback_nic",
+ return_value="scalewaynic0",
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4):
"""
get_data() returns metadata, user data and vendor data.
"""
- m_get_cmdline.return_value = 'scaleway'
+ m_get_cmdline.return_value = "scaleway"
# Make user data API return a valid response
- httpretty.register_uri(httpretty.GET, self.metadata_url,
- body=MetadataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.userdata_url,
- body=DataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.vendordata_url,
- body=DataResponses.get_ok)
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=DataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.get_ok
+ )
self.datasource.get_data()
- self.assertEqual(self.datasource.get_instance_id(),
- MetadataResponses.FAKE_METADATA['id'])
- self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- ].sort())
- self.assertEqual(self.datasource.get_hostname(),
- MetadataResponses.FAKE_METADATA['hostname'])
- self.assertEqual(self.datasource.get_userdata_raw(),
- DataResponses.FAKE_USER_DATA)
- self.assertEqual(self.datasource.get_vendordata_raw(),
- DataResponses.FAKE_USER_DATA)
+ self.assertEqual(
+ self.datasource.get_instance_id(),
+ MetadataResponses.FAKE_METADATA["id"],
+ )
+ self.assertEqual(
+ self.datasource.get_public_ssh_keys().sort(),
+ [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ ].sort(),
+ )
+ self.assertEqual(
+ self.datasource.get_hostname(),
+ MetadataResponses.FAKE_METADATA["hostname"],
+ )
+ self.assertEqual(
+ self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA
+ )
+ self.assertEqual(
+ self.datasource.get_vendordata_raw(), DataResponses.FAKE_USER_DATA
+ )
self.assertIsNone(self.datasource.availability_zone)
self.assertIsNone(self.datasource.region)
self.assertEqual(sleep.call_count, 0)
@@ -228,246 +254,273 @@ class TestDataSourceScaleway(HttprettyTestCase):
get_public_ssh_keys() should return empty list if no ssh key are
available
"""
- self.datasource.metadata['tags'] = []
- self.datasource.metadata['ssh_public_keys'] = []
+ self.datasource.metadata["tags"] = []
+ self.datasource.metadata["ssh_public_keys"] = []
self.assertEqual(self.datasource.get_public_ssh_keys(), [])
def test_ssh_keys_only_tags(self):
"""
get_public_ssh_keys() should return list of keys available in tags
"""
- self.datasource.metadata['tags'] = [
+ self.datasource.metadata["tags"] = [
"AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
"AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC",
]
- self.datasource.metadata['ssh_public_keys'] = []
- self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- ].sort())
+ self.datasource.metadata["ssh_public_keys"] = []
+ self.assertEqual(
+ self.datasource.get_public_ssh_keys().sort(),
+ [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ ].sort(),
+ )
def test_ssh_keys_only_conf(self):
"""
get_public_ssh_keys() should return list of keys available in
ssh_public_keys field
"""
- self.datasource.metadata['tags'] = []
- self.datasource.metadata['ssh_public_keys'] = [{
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- 'fingerprint': '2048 06:ae:... login (RSA)'
- }, {
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'fingerprint': '2048 06:ff:... login2 (RSA)'
- }]
- self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- ].sort())
+ self.datasource.metadata["tags"] = []
+ self.datasource.metadata["ssh_public_keys"] = [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ]
+ self.assertEqual(
+ self.datasource.get_public_ssh_keys().sort(),
+ [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ ].sort(),
+ )
def test_ssh_keys_both(self):
"""
get_public_ssh_keys() should return a merge of keys available
in ssh_public_keys and tags
"""
- self.datasource.metadata['tags'] = [
+ self.datasource.metadata["tags"] = [
"AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
]
- self.datasource.metadata['ssh_public_keys'] = [{
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- 'fingerprint': '2048 06:ae:... login (RSA)'
- }, {
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'fingerprint': '2048 06:ff:... login2 (RSA)'
- }]
- self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- ].sort())
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
+ self.datasource.metadata["ssh_public_keys"] = [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ]
+ self.assertEqual(
+ self.datasource.get_public_ssh_keys().sort(),
+ [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ ].sort(),
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4):
"""
get_data() returns metadata, but no user data nor vendor data.
"""
- m_get_cmdline.return_value = 'scaleway'
+ m_get_cmdline.return_value = "scaleway"
# Make user and vendor data APIs return HTTP/404, which means there is
# no user / vendor data for the server.
- httpretty.register_uri(httpretty.GET, self.metadata_url,
- body=MetadataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.userdata_url,
- body=DataResponses.empty)
- httpretty.register_uri(httpretty.GET, self.vendordata_url,
- body=DataResponses.empty)
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=DataResponses.empty
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.empty
+ )
self.datasource.get_data()
self.assertIsNone(self.datasource.get_userdata_raw())
self.assertIsNone(self.datasource.get_vendordata_raw())
self.assertEqual(sleep.call_count, 0)
- @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4):
"""
get_data() is rate limited two times by the metadata API when fetching
user data.
"""
- m_get_cmdline.return_value = 'scaleway'
+ m_get_cmdline.return_value = "scaleway"
- httpretty.register_uri(httpretty.GET, self.metadata_url,
- body=MetadataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.vendordata_url,
- body=DataResponses.empty)
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.empty
+ )
httpretty.register_uri(
- httpretty.GET, self.userdata_url,
+ httpretty.GET,
+ self.userdata_url,
responses=[
httpretty.Response(body=DataResponses.rate_limited),
httpretty.Response(body=DataResponses.rate_limited),
httpretty.Response(body=DataResponses.get_ok),
- ]
+ ],
)
self.datasource.get_data()
- self.assertEqual(self.datasource.get_userdata_raw(),
- DataResponses.FAKE_USER_DATA)
+ self.assertEqual(
+ self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA
+ )
self.assertEqual(sleep.call_count, 2)
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
def test_network_config_ok(self, m_get_cmdline, fallback_nic):
"""
network_config will only generate IPv4 config if no ipv6 data is
available in the metadata
"""
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = None
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
netcfg = self.datasource.network_config
resp = {
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
}
- ]
+ ],
}
self.assertEqual(netcfg, resp)
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic):
"""
network_config will only generate IPv4/v6 configs if ipv6 data is
available in the metadata
"""
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = {
- 'address': '2000:abc:4444:9876::42:999',
- 'gateway': '2000:abc:4444:9876::42:000',
- 'netmask': '127',
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = {
+ "address": "2000:abc:4444:9876::42:999",
+ "gateway": "2000:abc:4444:9876::42:000",
+ "netmask": "127",
}
netcfg = self.datasource.network_config
resp = {
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [
+ {"type": "dhcp4"},
{
- 'type': 'dhcp4'
+ "type": "static",
+ "address": "2000:abc:4444:9876::42:999",
+ "gateway": "2000:abc:4444:9876::42:000",
+ "netmask": "127",
},
- {
- 'type': 'static',
- 'address': '2000:abc:4444:9876::42:999',
- 'gateway': '2000:abc:4444:9876::42:000',
- 'netmask': '127',
- }
- ]
+ ],
}
- ]
+ ],
}
self.assertEqual(netcfg, resp)
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
def test_network_config_existing(self, m_get_cmdline, fallback_nic):
"""
network_config() should return the same data if a network config
already exists
"""
- m_get_cmdline.return_value = 'scaleway'
- self.datasource._network_config = '0xdeadbeef'
+ m_get_cmdline.return_value = "scaleway"
+ self.datasource._network_config = "0xdeadbeef"
netcfg = self.datasource.network_config
- self.assertEqual(netcfg, '0xdeadbeef')
+ self.assertEqual(netcfg, "0xdeadbeef")
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
def test_network_config_unset(self, m_get_cmdline, fallback_nic):
"""
_network_config will be set to sources.UNSET after the first boot.
Make sure it behave correctly.
"""
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = None
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
self.datasource._network_config = sources.UNSET
resp = {
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
}
- ]
+ ],
}
netcfg = self.datasource.network_config
self.assertEqual(netcfg, resp)
- @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning')
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
- def test_network_config_cached_none(self, m_get_cmdline, fallback_nic,
- logwarning):
+ @mock.patch("cloudinit.sources.DataSourceScaleway.LOG.warning")
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_cached_none(
+ self, m_get_cmdline, fallback_nic, logwarning
+ ):
"""
network_config() should return config data if cached data is None
rather than sources.UNSET
"""
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = None
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
self.datasource._network_config = None
resp = {
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
}
- ]
+ ],
}
netcfg = self.datasource.network_config
self.assertEqual(netcfg, resp)
- logwarning.assert_called_with('Found None as cached _network_config. '
- 'Resetting to %s', sources.UNSET)
+ logwarning.assert_called_with(
+ "Found None as cached _network_config. Resetting to %s",
+ sources.UNSET,
+ )
diff --git a/tests/unittests/sources/test_smartos.py b/tests/unittests/sources/test_smartos.py
index e306eded..55239c4e 100644
--- a/tests/unittests/sources/test_smartos.py
+++ b/tests/unittests/sources/test_smartos.py
@@ -5,14 +5,13 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''This is a testcase for the SmartOS datasource.
+"""This is a testcase for the SmartOS datasource.
It replicates a serial console and acts like the SmartOS console does in
order to validate return responses.
-'''
+"""
-from binascii import crc32
import json
import multiprocessing
import os
@@ -22,32 +21,40 @@ import signal
import stat
import unittest
import uuid
+from binascii import crc32
+from cloudinit import helpers as c_helpers
from cloudinit import serial
+from cloudinit.event import EventScope, EventType
from cloudinit.sources import DataSourceSmartOS
+from cloudinit.sources.DataSourceSmartOS import SERIAL_DEVICE, SMARTOS_ENV_KVM
from cloudinit.sources.DataSourceSmartOS import (
convert_smartos_network_data as convert_net,
- SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ,
- identify_file)
-from cloudinit.event import EventScope, EventType
-
-from cloudinit import helpers as c_helpers
-from cloudinit.util import (b64e, write_file)
-from cloudinit.subp import (subp, ProcessExecutionError, which)
-
+)
+from cloudinit.sources.DataSourceSmartOS import (
+ get_smartos_environ,
+ identify_file,
+)
+from cloudinit.subp import ProcessExecutionError, subp, which
+from cloudinit.util import b64e, write_file
from tests.unittests.helpers import (
- CiTestCase, mock, FilesystemMockingTestCase, skipIf)
-
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipIf,
+)
try:
import serial as _pyserial
+
assert _pyserial # avoid pyflakes error F401: import unused
HAS_PYSERIAL = True
except ImportError:
HAS_PYSERIAL = False
-DSMOS = 'cloudinit.sources.DataSourceSmartOS'
-SDC_NICS = json.loads("""
+DSMOS = "cloudinit.sources.DataSourceSmartOS"
+SDC_NICS = json.loads(
+ """
[
{
"nic_tag": "external",
@@ -87,10 +94,12 @@ SDC_NICS = json.loads("""
]
}
]
-""")
+"""
+)
-SDC_NICS_ALT = json.loads("""
+SDC_NICS_ALT = json.loads(
+ """
[
{
"interface": "net0",
@@ -126,9 +135,11 @@ SDC_NICS_ALT = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_DHCP = json.loads("""
+SDC_NICS_DHCP = json.loads(
+ """
[
{
"interface": "net0",
@@ -164,9 +175,11 @@ SDC_NICS_DHCP = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_MIP = json.loads("""
+SDC_NICS_MIP = json.loads(
+ """
[
{
"interface": "net0",
@@ -204,9 +217,11 @@ SDC_NICS_MIP = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_MIP_IPV6 = json.loads("""
+SDC_NICS_MIP_IPV6 = json.loads(
+ """
[
{
"interface": "net0",
@@ -243,9 +258,11 @@ SDC_NICS_MIP_IPV6 = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_IPV4_IPV6 = json.loads("""
+SDC_NICS_IPV4_IPV6 = json.loads(
+ """
[
{
"interface": "net0",
@@ -277,9 +294,11 @@ SDC_NICS_IPV4_IPV6 = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_SINGLE_GATEWAY = json.loads("""
+SDC_NICS_SINGLE_GATEWAY = json.loads(
+ """
[
{
"interface":"net0",
@@ -309,32 +328,33 @@ SDC_NICS_SINGLE_GATEWAY = json.loads("""
"mtu":1500
}
]
-""")
+"""
+)
MOCK_RETURNS = {
- 'hostname': 'test-host',
- 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname',
- 'disable_iptables_flag': None,
- 'enable_motd_sys_info': None,
- 'test-var1': 'some data',
- 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
- 'sdc:datacenter_name': 'somewhere2',
- 'sdc:operator-script': '\n'.join(['bin/true', '']),
- 'sdc:uuid': str(uuid.uuid4()),
- 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']),
- 'user-data': '\n'.join(['something', '']),
- 'user-script': '\n'.join(['/bin/true', '']),
- 'sdc:nics': json.dumps(SDC_NICS),
+ "hostname": "test-host",
+ "root_authorized_keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname",
+ "disable_iptables_flag": None,
+ "enable_motd_sys_info": None,
+ "test-var1": "some data",
+ "cloud-init:user-data": "\n".join(["#!/bin/sh", "/bin/true", ""]),
+ "sdc:datacenter_name": "somewhere2",
+ "sdc:operator-script": "\n".join(["bin/true", ""]),
+ "sdc:uuid": str(uuid.uuid4()),
+ "sdc:vendor-data": "\n".join(["VENDOR_DATA", ""]),
+ "user-data": "\n".join(["something", ""]),
+ "user-script": "\n".join(["/bin/true", ""]),
+ "sdc:nics": json.dumps(SDC_NICS),
}
-DMI_DATA_RETURN = 'smartdc'
+DMI_DATA_RETURN = "smartdc"
# Useful for calculating the length of a frame body. A SUCCESS body will be
# followed by more characters or be one character less if SUCCESS with no
# payload. See Section 4.3 of https://eng.joyent.com/mdata/protocol.html.
-SUCCESS_LEN = len('0123abcd SUCCESS ')
-NOTFOUND_LEN = len('0123abcd NOTFOUND')
+SUCCESS_LEN = len("0123abcd SUCCESS ")
+NOTFOUND_LEN = len("0123abcd NOTFOUND")
class PsuedoJoyentClient(object):
@@ -364,11 +384,11 @@ class PsuedoJoyentClient(object):
return True
def open_transport(self):
- assert(not self._is_open)
+ assert not self._is_open
self._is_open = True
def close_transport(self):
- assert(self._is_open)
+ assert self._is_open
self._is_open = False
@@ -381,21 +401,35 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ")
self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact")
- self.legacy_user_d = self.tmp_path('legacy_user_tmp')
+ self.legacy_user_d = self.tmp_path("legacy_user_tmp")
os.mkdir(self.legacy_user_d)
- self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d",
- autospec=False, new=self.legacy_user_d)
- self.add_patch(DSMOS + ".identify_file", "m_identify_file",
- return_value="text/plain")
+ self.add_patch(
+ DSMOS + ".LEGACY_USER_D",
+ "m_legacy_user_d",
+ autospec=False,
+ new=self.legacy_user_d,
+ )
+ self.add_patch(
+ DSMOS + ".identify_file",
+ "m_identify_file",
+ return_value="text/plain",
+ )
- def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
- sys_cfg=None, ds_cfg=None):
+ def _get_ds(
+ self,
+ mockdata=None,
+ mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
+ sys_cfg=None,
+ ds_cfg=None,
+ ):
self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata)
self.get_smartos_environ.return_value = mode
tmpd = self.tmp_dir()
- dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd),
- 'run_dir': self.tmp_path('run_dir')}
+ dirs = {
+ "cloud_dir": self.tmp_path("cloud_dir", tmpd),
+ "run_dir": self.tmp_path("run_dir"),
+ }
for d in dirs.values():
os.mkdir(d)
paths = c_helpers.Paths(dirs)
@@ -404,14 +438,15 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
sys_cfg = {}
if ds_cfg is not None:
- sys_cfg['datasource'] = sys_cfg.get('datasource', {})
- sys_cfg['datasource']['SmartOS'] = ds_cfg
+ sys_cfg["datasource"] = sys_cfg.get("datasource", {})
+ sys_cfg["datasource"]["SmartOS"] = ds_cfg
return DataSourceSmartOS.DataSourceSmartOS(
- sys_cfg, distro=None, paths=paths)
+ sys_cfg, distro=None, paths=paths
+ )
def test_no_base64(self):
- ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
+ ds_cfg = {"no_base64_decode": ["test_var1"], "all_base": True}
dsrc = self._get_ds(ds_cfg=ds_cfg)
ret = dsrc.get_data()
self.assertTrue(ret)
@@ -420,166 +455,180 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['sdc:uuid'],
- dsrc.metadata['instance-id'])
+ self.assertEqual(
+ MOCK_RETURNS["sdc:uuid"], dsrc.metadata["instance-id"]
+ )
def test_platform_info(self):
"""All platform-related attributes are properly set."""
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- self.assertEqual('joyent', dsrc.cloud_name)
- self.assertEqual('joyent', dsrc.platform_type)
- self.assertEqual('serial (/dev/ttyS1)', dsrc.subplatform)
+ self.assertEqual("joyent", dsrc.cloud_name)
+ self.assertEqual("joyent", dsrc.platform_type)
+ self.assertEqual("serial (/dev/ttyS1)", dsrc.subplatform)
def test_root_keys(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['root_authorized_keys'],
- dsrc.metadata['public-keys'])
+ self.assertEqual(
+ MOCK_RETURNS["root_authorized_keys"], dsrc.metadata["public-keys"]
+ )
def test_hostname_b64(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ MOCK_RETURNS["hostname"], dsrc.metadata["local-hostname"]
+ )
def test_hostname(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ MOCK_RETURNS["hostname"], dsrc.metadata["local-hostname"]
+ )
def test_hostname_if_no_sdc_hostname(self):
my_returns = MOCK_RETURNS.copy()
- my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
+ my_returns["sdc:hostname"] = "sdc-" + my_returns["hostname"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ my_returns["hostname"], dsrc.metadata["local-hostname"]
+ )
def test_sdc_hostname_if_no_hostname(self):
my_returns = MOCK_RETURNS.copy()
- my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
- del my_returns['hostname']
+ my_returns["sdc:hostname"] = "sdc-" + my_returns["hostname"]
+ del my_returns["hostname"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['sdc:hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ my_returns["sdc:hostname"], dsrc.metadata["local-hostname"]
+ )
def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self):
my_returns = MOCK_RETURNS.copy()
- del my_returns['hostname']
+ del my_returns["hostname"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['sdc:uuid'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ my_returns["sdc:uuid"], dsrc.metadata["local-hostname"]
+ )
def test_userdata(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-data'],
- dsrc.metadata['legacy-user-data'])
- self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
- dsrc.userdata_raw)
+ self.assertEqual(
+ MOCK_RETURNS["user-data"], dsrc.metadata["legacy-user-data"]
+ )
+ self.assertEqual(
+ MOCK_RETURNS["cloud-init:user-data"], dsrc.userdata_raw
+ )
def test_sdc_nics(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']),
- dsrc.metadata['network-data'])
+ self.assertEqual(
+ json.loads(MOCK_RETURNS["sdc:nics"]), dsrc.metadata["network-data"]
+ )
def test_sdc_scripts(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-script'],
- dsrc.metadata['user-script'])
+ self.assertEqual(
+ MOCK_RETURNS["user-script"], dsrc.metadata["user-script"]
+ )
legacy_script_f = "%s/user-script" % self.legacy_user_d
print("legacy_script_f=%s" % legacy_script_f)
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
- self.assertEqual(user_script_perm, '700')
+ self.assertEqual(user_script_perm, "700")
def test_scripts_shebanged(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-script'],
- dsrc.metadata['user-script'])
+ self.assertEqual(
+ MOCK_RETURNS["user-script"], dsrc.metadata["user-script"]
+ )
legacy_script_f = "%s/user-script" % self.legacy_user_d
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
shebang = None
- with open(legacy_script_f, 'r') as f:
+ with open(legacy_script_f, "r") as f:
shebang = f.readlines()[0].strip()
self.assertEqual(shebang, "#!/bin/bash")
user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
- self.assertEqual(user_script_perm, '700')
+ self.assertEqual(user_script_perm, "700")
def test_scripts_shebang_not_added(self):
"""
- Test that the SmartOS requirement that plain text scripts
- are executable. This test makes sure that plain texts scripts
- with out file magic have it added appropriately by cloud-init.
+ Test that the SmartOS requirement that plain text scripts
+ are executable. This test makes sure that plain texts scripts
+ with out file magic have it added appropriately by cloud-init.
"""
my_returns = MOCK_RETURNS.copy()
- my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
- 'print("hi")', ''])
+ my_returns["user-script"] = "\n".join(
+ ["#!/usr/bin/perl", 'print("hi")', ""]
+ )
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['user-script'],
- dsrc.metadata['user-script'])
+ self.assertEqual(
+ my_returns["user-script"], dsrc.metadata["user-script"]
+ )
legacy_script_f = "%s/user-script" % self.legacy_user_d
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
shebang = None
- with open(legacy_script_f, 'r') as f:
+ with open(legacy_script_f, "r") as f:
shebang = f.readlines()[0].strip()
self.assertEqual(shebang, "#!/usr/bin/perl")
def test_userdata_removed(self):
"""
- User-data in the SmartOS world is supposed to be written to a file
- each and every boot. This tests to make sure that in the event the
- legacy user-data is removed, the existing user-data is backed-up
- and there is no /var/db/user-data left.
+ User-data in the SmartOS world is supposed to be written to a file
+ each and every boot. This tests to make sure that in the event the
+ legacy user-data is removed, the existing user-data is backed-up
+ and there is no /var/db/user-data left.
"""
user_data_f = "%s/mdata-user-data" % self.legacy_user_d
- with open(user_data_f, 'w') as f:
+ with open(user_data_f, "w") as f:
f.write("PREVIOUS")
my_returns = MOCK_RETURNS.copy()
- del my_returns['user-data']
+ del my_returns["user-data"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertFalse(dsrc.metadata.get('legacy-user-data'))
+ self.assertFalse(dsrc.metadata.get("legacy-user-data"))
found_new = False
for root, _dirs, files in os.walk(self.legacy_user_d):
for name in files:
name_f = os.path.join(root, name)
permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:]
- if re.match(r'.*\/mdata-user-data$', name_f):
+ if re.match(r".*\/mdata-user-data$", name_f):
found_new = True
print(name_f)
- self.assertEqual(permissions, '400')
+ self.assertEqual(permissions, "400")
self.assertFalse(found_new)
@@ -587,17 +636,18 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['sdc:vendor-data'],
- dsrc.metadata['vendor-data'])
+ self.assertEqual(
+ MOCK_RETURNS["sdc:vendor-data"], dsrc.metadata["vendor-data"]
+ )
def test_default_vendor_data(self):
my_returns = MOCK_RETURNS.copy()
- def_op_script = my_returns['sdc:vendor-data']
- del my_returns['sdc:vendor-data']
+ def_op_script = my_returns["sdc:vendor-data"]
+ del my_returns["sdc:vendor-data"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertNotEqual(def_op_script, dsrc.metadata['vendor-data'])
+ self.assertNotEqual(def_op_script, dsrc.metadata["vendor-data"])
# we expect default vendor-data is a boothook
self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook"))
@@ -606,15 +656,19 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['disable_iptables_flag'],
- dsrc.metadata['iptables_disable'])
+ self.assertEqual(
+ MOCK_RETURNS["disable_iptables_flag"],
+ dsrc.metadata["iptables_disable"],
+ )
def test_motd_sys_info(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'],
- dsrc.metadata['motd_sys_info'])
+ self.assertEqual(
+ MOCK_RETURNS["enable_motd_sys_info"],
+ dsrc.metadata["motd_sys_info"],
+ )
def test_default_ephemeral(self):
# Test to make sure that the builtin config has the ephemeral
@@ -625,16 +679,16 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
ret = dsrc.get_data()
self.assertTrue(ret)
- assert 'disk_setup' in cfg
- assert 'fs_setup' in cfg
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
+ assert "disk_setup" in cfg
+ assert "fs_setup" in cfg
+ self.assertIsInstance(cfg["disk_setup"], dict)
+ self.assertIsInstance(cfg["fs_setup"], list)
def test_override_disk_aliases(self):
# Test to make sure that the built-in DS is overriden
builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
- mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}}
+ mydscfg = {"disk_aliases": {"FOO": "/dev/bar"}}
# expect that these values are in builtin, or this is pointless
for k in mydscfg:
@@ -644,25 +698,30 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(mydscfg['disk_aliases']['FOO'],
- dsrc.ds_cfg['disk_aliases']['FOO'])
+ self.assertEqual(
+ mydscfg["disk_aliases"]["FOO"], dsrc.ds_cfg["disk_aliases"]["FOO"]
+ )
- self.assertEqual(dsrc.device_name_to_device('FOO'),
- mydscfg['disk_aliases']['FOO'])
+ self.assertEqual(
+ dsrc.device_name_to_device("FOO"), mydscfg["disk_aliases"]["FOO"]
+ )
def test_reconfig_network_on_boot(self):
# Test to ensure that network is configured from metadata on each boot
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
self.assertSetEqual(
- {EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY},
- dsrc.default_update_events[EventScope.NETWORK]
+ {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ },
+ dsrc.default_update_events[EventScope.NETWORK],
)
class TestIdentifyFile(CiTestCase):
"""Test the 'identify_file' utility."""
+
@skipIf(not which("file"), "command 'file' not available.")
def test_file_happy_path(self):
"""Test file is available and functional on plain text."""
@@ -680,14 +739,16 @@ class TestIdentifyFile(CiTestCase):
self.assertEqual(None, identify_file(fname))
self.assertEqual(
[mock.call(["file", "--brief", "--mime-type", fname])],
- m_subp.call_args_list)
+ m_subp.call_args_list,
+ )
class ShortReader(object):
"""Implements a 'read' interface for bytes provided.
much like io.BytesIO but the 'endbyte' acts as if EOF.
When it is reached a short will be returned."""
- def __init__(self, initial_bytes, endbyte=b'\0'):
+
+ def __init__(self, initial_bytes, endbyte=b"\0"):
self.data = initial_bytes
self.index = 0
self.len = len(self.data)
@@ -700,7 +761,7 @@ class ShortReader(object):
def read(self, size=-1):
"""Read size bytes but not past a null."""
if size == 0 or self.index >= self.len:
- return b''
+ return b""
rsize = size
if size < 0 or size + self.index > self.len:
@@ -711,7 +772,7 @@ class ShortReader(object):
rsize = next_null - self.index + 1
i = self.index
self.index += rsize
- ret = self.data[i:i + rsize]
+ ret = self.data[i : i + rsize]
if len(ret) and ret[-1:] == self.endbyte:
ret = ret[:-1]
return ret
@@ -719,32 +780,34 @@ class ShortReader(object):
class TestJoyentMetadataClient(FilesystemMockingTestCase):
- invalid = b'invalid command\n'
- failure = b'FAILURE\n'
- v2_ok = b'V2_OK\n'
+ invalid = b"invalid command\n"
+ failure = b"FAILURE\n"
+ v2_ok = b"V2_OK\n"
def setUp(self):
super(TestJoyentMetadataClient, self).setUp()
self.serial = mock.MagicMock(spec=serial.Serial)
- self.request_id = 0xabcdef12
- self.metadata_value = 'value'
+ self.request_id = 0xABCDEF12
+ self.metadata_value = "value"
self.response_parts = {
- 'command': 'SUCCESS',
- 'crc': 'b5a9ff00',
- 'length': SUCCESS_LEN + len(b64e(self.metadata_value)),
- 'payload': b64e(self.metadata_value),
- 'request_id': '{0:08x}'.format(self.request_id),
+ "command": "SUCCESS",
+ "crc": "b5a9ff00",
+ "length": SUCCESS_LEN + len(b64e(self.metadata_value)),
+ "payload": b64e(self.metadata_value),
+ "request_id": "{0:08x}".format(self.request_id),
}
def make_response():
- payloadstr = ''
- if 'payload' in self.response_parts:
- payloadstr = ' {0}'.format(self.response_parts['payload'])
- return ('V2 {length} {crc} {request_id} '
- '{command}{payloadstr}\n'.format(
- payloadstr=payloadstr,
- **self.response_parts).encode('ascii'))
+ payloadstr = ""
+ if "payload" in self.response_parts:
+ payloadstr = " {0}".format(self.response_parts["payload"])
+ return (
+ "V2 {length} {crc} {request_id} "
+ "{command}{payloadstr}\n".format(
+ payloadstr=payloadstr, **self.response_parts
+ ).encode("ascii")
+ )
self.metasource_data = None
@@ -758,41 +821,49 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
self.serial.read.side_effect = read_response
self.patched_funcs.enter_context(
- mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint',
- mock.Mock(return_value=self.request_id)))
+ mock.patch(
+ "cloudinit.sources.DataSourceSmartOS.random.randint",
+ mock.Mock(return_value=self.request_id),
+ )
+ )
def _get_client(self):
return DataSourceSmartOS.JoyentMetadataClient(
- fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM)
+ fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM
+ )
def _get_serial_client(self):
self.serial.timeout = 1
- return DataSourceSmartOS.JoyentMetadataSerialClient(None,
- fp=self.serial)
+ return DataSourceSmartOS.JoyentMetadataSerialClient(
+ None, fp=self.serial
+ )
def assertEndsWith(self, haystack, prefix):
- self.assertTrue(haystack.endswith(prefix),
- "{0} does not end with '{1}'".format(
- repr(haystack), prefix))
+ self.assertTrue(
+ haystack.endswith(prefix),
+ "{0} does not end with '{1}'".format(repr(haystack), prefix),
+ )
def assertStartsWith(self, haystack, prefix):
- self.assertTrue(haystack.startswith(prefix),
- "{0} does not start with '{1}'".format(
- repr(haystack), prefix))
+ self.assertTrue(
+ haystack.startswith(prefix),
+ "{0} does not start with '{1}'".format(repr(haystack), prefix),
+ )
def assertNoMoreSideEffects(self, obj):
self.assertRaises(StopIteration, obj)
def test_get_metadata_writes_a_single_line(self):
client = self._get_client()
- client.get('some_key')
+ client.get("some_key")
self.assertEqual(1, self.serial.write.call_count)
written_line = self.serial.write.call_args[0][0]
- self.assertEndsWith(written_line.decode('ascii'),
- b'\n'.decode('ascii'))
- self.assertEqual(1, written_line.count(b'\n'))
+ self.assertEndsWith(
+ written_line.decode("ascii"), b"\n".decode("ascii")
+ )
+ self.assertEqual(1, written_line.count(b"\n"))
- def _get_written_line(self, key='some_key'):
+ def _get_written_line(self, key="some_key"):
client = self._get_client()
client.get(key)
return self.serial.write.call_args[0][0]
@@ -802,76 +873,86 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
def test_get_metadata_line_starts_with_v2(self):
foo = self._get_written_line()
- self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii'))
+ self.assertStartsWith(foo.decode("ascii"), b"V2".decode("ascii"))
def test_get_metadata_uses_get_command(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- self.assertEqual('GET', parts[4])
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ self.assertEqual("GET", parts[4])
def test_get_metadata_base64_encodes_argument(self):
- key = 'my_key'
- parts = self._get_written_line(key).decode('ascii').strip().split(' ')
+ key = "my_key"
+ parts = self._get_written_line(key).decode("ascii").strip().split(" ")
self.assertEqual(b64e(key), parts[5])
def test_get_metadata_calculates_length_correctly(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- expected_length = len(' '.join(parts[3:]))
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ expected_length = len(" ".join(parts[3:]))
self.assertEqual(expected_length, int(parts[1]))
def test_get_metadata_uses_appropriate_request_id(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
request_id = parts[3]
self.assertEqual(8, len(request_id))
self.assertEqual(request_id, request_id.lower())
def test_get_metadata_uses_random_number_for_request_id(self):
line = self._get_written_line()
- request_id = line.decode('ascii').strip().split(' ')[3]
- self.assertEqual('{0:08x}'.format(self.request_id), request_id)
+ request_id = line.decode("ascii").strip().split(" ")[3]
+ self.assertEqual("{0:08x}".format(self.request_id), request_id)
def test_get_metadata_checksums_correctly(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- expected_checksum = '{0:08x}'.format(
- crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff)
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ expected_checksum = "{0:08x}".format(
+ crc32(" ".join(parts[3:]).encode("utf-8")) & 0xFFFFFFFF
+ )
checksum = parts[2]
self.assertEqual(expected_checksum, checksum)
def test_get_metadata_reads_a_line(self):
client = self._get_client()
- client.get('some_key')
+ client.get("some_key")
self.assertEqual(self.metasource_data_len, self.serial.read.call_count)
def test_get_metadata_returns_valid_value(self):
client = self._get_client()
- value = client.get('some_key')
+ value = client.get("some_key")
self.assertEqual(self.metadata_value, value)
def test_get_metadata_throws_exception_for_incorrect_length(self):
- self.response_parts['length'] = 0
+ self.response_parts["length"] = 0
client = self._get_client()
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
def test_get_metadata_throws_exception_for_incorrect_crc(self):
- self.response_parts['crc'] = 'deadbeef'
+ self.response_parts["crc"] = "deadbeef"
client = self._get_client()
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
def test_get_metadata_throws_exception_for_request_id_mismatch(self):
- self.response_parts['request_id'] = 'deadbeef'
+ self.response_parts["request_id"] = "deadbeef"
client = self._get_client()
- client._checksum = lambda _: self.response_parts['crc']
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
+ client._checksum = lambda _: self.response_parts["crc"]
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
def test_get_metadata_returns_None_if_value_not_found(self):
- self.response_parts['payload'] = ''
- self.response_parts['command'] = 'NOTFOUND'
- self.response_parts['length'] = NOTFOUND_LEN
+ self.response_parts["payload"] = ""
+ self.response_parts["command"] = "NOTFOUND"
+ self.response_parts["length"] = NOTFOUND_LEN
client = self._get_client()
- client._checksum = lambda _: self.response_parts['crc']
- self.assertIsNone(client.get('some_key'))
+ client._checksum = lambda _: self.response_parts["crc"]
+ self.assertIsNone(client.get("some_key"))
def test_negotiate(self):
client = self._get_client()
@@ -883,55 +964,58 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
def test_negotiate_short_response(self):
client = self._get_client()
# chopped '\n' from v2_ok.
- reader = ShortReader(self.v2_ok[:-1] + b'\0')
+ reader = ShortReader(self.v2_ok[:-1] + b"\0")
client.fp.read.side_effect = reader.read
- self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException,
- client._negotiate)
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataTimeoutException, client._negotiate
+ )
self.assertTrue(reader.emptied)
def test_negotiate_bad_response(self):
client = self._get_client()
- reader = ShortReader(b'garbage\n' + self.v2_ok)
+ reader = ShortReader(b"garbage\n" + self.v2_ok)
client.fp.read.side_effect = reader.read
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client._negotiate)
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException, client._negotiate
+ )
self.assertEqual(self.v2_ok, client.fp.read())
def test_serial_open_transport(self):
client = self._get_serial_client()
- reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok)
+ reader = ShortReader(b"garbage\0" + self.invalid + self.v2_ok)
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_flush_failure(self):
client = self._get_serial_client()
- reader = ShortReader(b'garbage' + b'\0' + self.failure +
- self.invalid + self.v2_ok)
+ reader = ShortReader(
+ b"garbage" + b"\0" + self.failure + self.invalid + self.v2_ok
+ )
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_flush_many_timeouts(self):
client = self._get_serial_client()
- reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok)
+ reader = ShortReader(b"\0" * 100 + self.invalid + self.v2_ok)
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_list_metadata_returns_list(self):
- parts = ['foo', 'bar']
- value = b64e('\n'.join(parts))
- self.response_parts['payload'] = value
- self.response_parts['crc'] = '40873553'
- self.response_parts['length'] = SUCCESS_LEN + len(value)
+ parts = ["foo", "bar"]
+ value = b64e("\n".join(parts))
+ self.response_parts["payload"] = value
+ self.response_parts["crc"] = "40873553"
+ self.response_parts["length"] = SUCCESS_LEN + len(value)
client = self._get_client()
self.assertEqual(client.list(), parts)
def test_list_metadata_returns_empty_list_if_no_customer_metadata(self):
- del self.response_parts['payload']
- self.response_parts['length'] = SUCCESS_LEN - 1
- self.response_parts['crc'] = '14e563ba'
+ del self.response_parts["payload"]
+ self.response_parts["length"] = SUCCESS_LEN - 1
+ self.response_parts["crc"] = "14e563ba"
client = self._get_client()
self.assertEqual(client.list(), [])
@@ -939,181 +1023,354 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
class TestNetworkConversion(CiTestCase):
def test_convert_simple(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.102/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '192.168.128.93/22'}],
- 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.102/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:f5:e4:f5",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "192.168.128.93/22"}
+ ],
+ "mtu": 8500,
+ "mac_address": "90:b8:d0:a5:ff:cd",
+ },
+ ],
+ }
found = convert_net(SDC_NICS)
self.assertEqual(expected, found)
def test_convert_simple_alt(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '10.210.1.217/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"}
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_ALT)
self.assertEqual(expected, found)
def test_convert_simple_dhcp(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'dhcp4'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [{"type": "dhcp4"}],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_DHCP)
self.assertEqual(expected, found)
def test_convert_simple_multi_ip(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'},
- {'type': 'static',
- 'address': '8.12.42.52/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '10.210.1.217/24'},
- {'type': 'static',
- 'address': '10.210.1.151/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ },
+ {"type": "static", "address": "8.12.42.52/24"},
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"},
+ {"type": "static", "address": "10.210.1.151/24"},
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_MIP)
self.assertEqual(expected, found)
def test_convert_with_dns(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'dhcp4'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'},
- {'type': 'nameserver',
- 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [{"type": "dhcp4"}],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ {
+ "type": "nameserver",
+ "address": ["8.8.8.8", "8.8.8.1"],
+ "search": ["local"],
+ },
+ ],
+ }
found = convert_net(
- network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'],
- dns_domain="local")
+ network_data=SDC_NICS_DHCP,
+ dns_servers=["8.8.8.8", "8.8.8.1"],
+ dns_domain="local",
+ )
self.assertEqual(expected, found)
def test_convert_simple_multi_ipv6(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'address':
- '2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'},
- {'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '10.210.1.217/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "address": (
+ "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64"
+ ),
+ },
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ },
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"}
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_MIP_IPV6)
self.assertEqual(expected, found)
def test_convert_simple_both_ipv4_ipv6(self):
expected = {
- 'version': 1,
- 'config': [
- {'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500,
- 'name': 'net0', 'type': 'physical',
- 'subnets': [{'address': '2001::10/64', 'gateway': '2001::1',
- 'type': 'static'},
- {'address': '8.12.42.51/24',
- 'gateway': '8.12.42.1',
- 'type': 'static'},
- {'address': '2001::11/64', 'type': 'static'},
- {'address': '8.12.42.52/32', 'type': 'static'}]},
- {'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500,
- 'name': 'net1', 'type': 'physical',
- 'subnets': [{'address': '10.210.1.217/24',
- 'type': 'static'}]}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:ae:64:51",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "2001::10/64",
+ "gateway": "2001::1",
+ "type": "static",
+ },
+ {
+ "address": "8.12.42.51/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ },
+ {"address": "2001::11/64", "type": "static"},
+ {"address": "8.12.42.52/32", "type": "static"},
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"address": "10.210.1.217/24", "type": "static"}
+ ],
+ },
+ ],
+ }
found = convert_net(SDC_NICS_IPV4_IPV6)
self.assertEqual(expected, found)
def test_gateways_not_on_all_nics(self):
expected = {
- 'version': 1,
- 'config': [
- {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500,
- 'name': 'net0', 'type': 'physical',
- 'subnets': [{'address': '8.12.42.26/24',
- 'gateway': '8.12.42.1', 'type': 'static'}]},
- {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500,
- 'name': 'net1', 'type': 'physical',
- 'subnets': [{'address': '10.210.1.27/24',
- 'type': 'static'}]}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:d8:82:b4",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "8.12.42.26/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ }
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:0a:51:31",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"address": "10.210.1.27/24", "type": "static"}
+ ],
+ },
+ ],
+ }
found = convert_net(SDC_NICS_SINGLE_GATEWAY)
self.assertEqual(expected, found)
def test_routes_on_all_nics(self):
routes = [
- {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'},
- {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}]
+ {"linklocal": False, "dst": "3.0.0.0/8", "gateway": "8.12.42.3"},
+ {"linklocal": False, "dst": "4.0.0.0/8", "gateway": "10.210.1.4"},
+ ]
expected = {
- 'version': 1,
- 'config': [
- {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500,
- 'name': 'net0', 'type': 'physical',
- 'subnets': [{'address': '8.12.42.26/24',
- 'gateway': '8.12.42.1', 'type': 'static',
- 'routes': [{'network': '3.0.0.0/8',
- 'gateway': '8.12.42.3'},
- {'network': '4.0.0.0/8',
- 'gateway': '10.210.1.4'}]}]},
- {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500,
- 'name': 'net1', 'type': 'physical',
- 'subnets': [{'address': '10.210.1.27/24', 'type': 'static',
- 'routes': [{'network': '3.0.0.0/8',
- 'gateway': '8.12.42.3'},
- {'network': '4.0.0.0/8',
- 'gateway': '10.210.1.4'}]}]}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:d8:82:b4",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "8.12.42.26/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ "routes": [
+ {
+ "network": "3.0.0.0/8",
+ "gateway": "8.12.42.3",
+ },
+ {
+ "network": "4.0.0.0/8",
+ "gateway": "10.210.1.4",
+ },
+ ],
+ }
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:0a:51:31",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "10.210.1.27/24",
+ "type": "static",
+ "routes": [
+ {
+ "network": "3.0.0.0/8",
+ "gateway": "8.12.42.3",
+ },
+ {
+ "network": "4.0.0.0/8",
+ "gateway": "10.210.1.4",
+ },
+ ],
+ }
+ ],
+ },
+ ],
+ }
found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes)
self.maxDiff = None
self.assertEqual(expected, found)
-@unittest.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
- "Only supported on KVM and bhyve guests under SmartOS")
-@unittest.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
- "Requires write access to " + SERIAL_DEVICE)
+@unittest.skipUnless(
+ get_smartos_environ() == SMARTOS_ENV_KVM,
+ "Only supported on KVM and bhyve guests under SmartOS",
+)
+@unittest.skipUnless(
+ os.access(SERIAL_DEVICE, os.W_OK),
+ "Requires write access to " + SERIAL_DEVICE,
+)
@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
class TestSerialConcurrency(CiTestCase):
"""
- This class tests locking on an actual serial port, and as such can only
- be run in a kvm or bhyve guest running on a SmartOS host. A test run on
- a metadata socket will not be valid because a metadata socket ensures
- there is only one session over a connection. In contrast, in the
- absence of proper locking multiple processes opening the same serial
- port can corrupt each others' exchanges with the metadata server.
-
- This takes on the order of 2 to 3 minutes to run.
+ This class tests locking on an actual serial port, and as such can only
+ be run in a kvm or bhyve guest running on a SmartOS host. A test run on
+ a metadata socket will not be valid because a metadata socket ensures
+ there is only one session over a connection. In contrast, in the
+ absence of proper locking multiple processes opening the same serial
+ port can corrupt each others' exchanges with the metadata server.
+
+ This takes on the order of 2 to 3 minutes to run.
"""
- allowed_subp = ['mdata-get']
+
+ allowed_subp = ["mdata-get"]
def setUp(self):
self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop)
@@ -1128,16 +1385,16 @@ class TestSerialConcurrency(CiTestCase):
def start_mdata_loop(self):
"""
- The mdata-get command is repeatedly run in a separate process so
- that it may try to race with metadata operations performed in the
- main test process. Use of mdata-get is better than two processes
- using the protocol implementation in DataSourceSmartOS because we
- are testing to be sure that cloud-init and mdata-get respect each
- others locks.
+ The mdata-get command is repeatedly run in a separate process so
+ that it may try to race with metadata operations performed in the
+ main test process. Use of mdata-get is better than two processes
+ using the protocol implementation in DataSourceSmartOS because we
+ are testing to be sure that cloud-init and mdata-get respect each
+ others locks.
"""
rcs = list(range(0, 256))
while True:
- subp(['mdata-get', 'sdc:routes'], rcs=rcs)
+ subp(["mdata-get", "sdc:routes"], rcs=rcs)
def test_all_keys(self):
self.assertIsNotNone(self.mdata_proc.pid)
@@ -1160,4 +1417,5 @@ class TestSerialConcurrency(CiTestCase):
self.assertIsNone(self.mdata_proc.exitcode)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py
index 1d792066..e1125b65 100644
--- a/tests/unittests/sources/test_upcloud.py
+++ b/tests/unittests/sources/test_upcloud.py
@@ -4,15 +4,15 @@
import json
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit import sources
-from cloudinit.sources.DataSourceUpCloud import DataSourceUpCloud, \
- DataSourceUpCloudLocal
-
-from tests.unittests.helpers import mock, CiTestCase
-
-UC_METADATA = json.loads("""
+from cloudinit import helpers, settings, sources
+from cloudinit.sources.DataSourceUpCloud import (
+ DataSourceUpCloud,
+ DataSourceUpCloudLocal,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+UC_METADATA = json.loads(
+ """
{
"cloud_name": "upcloud",
"instance_id": "00322b68-0096-4042-9406-faad61922128",
@@ -130,14 +130,17 @@ UC_METADATA = json.loads("""
"user_data": "",
"vendor_data": ""
}
-""")
+"""
+)
-UC_METADATA["user_data"] = b"""#cloud-config
+UC_METADATA[
+ "user_data"
+] = b"""#cloud-config
runcmd:
- [touch, /root/cloud-init-worked ]
"""
-MD_URL = 'http://169.254.169.254/metadata/v1.json'
+MD_URL = "http://169.254.169.254/metadata/v1.json"
def _mock_dmi():
@@ -148,25 +151,27 @@ class TestUpCloudMetadata(CiTestCase):
"""
Test reading the meta-data
"""
+
def setUp(self):
super(TestUpCloudMetadata, self).setUp()
self.tmp = self.tmp_dir()
def get_ds(self, get_sysinfo=_mock_dmi):
ds = DataSourceUpCloud(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
if get_sysinfo:
ds._get_sysinfo = get_sysinfo
return ds
- @mock.patch('cloudinit.sources.helpers.upcloud.read_sysinfo')
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_sysinfo")
def test_returns_false_not_on_upcloud(self, m_read_sysinfo):
m_read_sysinfo.return_value = (False, None)
ds = self.get_ds(get_sysinfo=None)
self.assertEqual(False, ds.get_data())
self.assertTrue(m_read_sysinfo.called)
- @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata')
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
def test_metadata(self, mock_readmd):
mock_readmd.return_value = UC_METADATA.copy()
@@ -178,15 +183,17 @@ class TestUpCloudMetadata(CiTestCase):
self.assertTrue(mock_readmd.called)
- self.assertEqual(UC_METADATA.get('user_data'), ds.get_userdata_raw())
- self.assertEqual(UC_METADATA.get('vendor_data'),
- ds.get_vendordata_raw())
- self.assertEqual(UC_METADATA.get('region'), ds.availability_zone)
- self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id())
- self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name)
+ self.assertEqual(UC_METADATA.get("user_data"), ds.get_userdata_raw())
+ self.assertEqual(
+ UC_METADATA.get("vendor_data"), ds.get_vendordata_raw()
+ )
+ self.assertEqual(UC_METADATA.get("region"), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name)
- self.assertEqual(UC_METADATA.get('public_keys'),
- ds.get_public_ssh_keys())
+ self.assertEqual(
+ UC_METADATA.get("public_keys"), ds.get_public_ssh_keys()
+ )
self.assertIsInstance(ds.get_public_ssh_keys(), list)
@@ -201,24 +208,30 @@ class TestUpCloudNetworkSetup(CiTestCase):
def get_ds(self, get_sysinfo=_mock_dmi):
ds = DataSourceUpCloudLocal(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
if get_sysinfo:
ds._get_sysinfo = get_sysinfo
return ds
- @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata')
- @mock.patch('cloudinit.net.find_fallback_nic')
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- def test_network_configured_metadata(self, m_net, m_dhcp,
- m_fallback_nic, mock_readmd):
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ def test_network_configured_metadata(
+ self, m_net, m_dhcp, m_fallback_nic, mock_readmd
+ ):
mock_readmd.return_value = UC_METADATA.copy()
- m_fallback_nic.return_value = 'eth1'
- m_dhcp.return_value = [{
- 'interface': 'eth1', 'fixed-address': '10.6.3.27',
- 'routers': '10.6.0.1', 'subnet-mask': '22',
- 'broadcast-address': '10.6.3.255'}
+ m_fallback_nic.return_value = "eth1"
+ m_dhcp.return_value = [
+ {
+ "interface": "eth1",
+ "fixed-address": "10.6.3.27",
+ "routers": "10.6.0.1",
+ "subnet-mask": "22",
+ "broadcast-address": "10.6.3.255",
+ }
]
ds = self.get_ds()
@@ -227,33 +240,36 @@ class TestUpCloudNetworkSetup(CiTestCase):
self.assertTrue(ret)
self.assertTrue(m_dhcp.called)
- m_dhcp.assert_called_with('eth1', None)
+ m_dhcp.assert_called_with("eth1", None)
m_net.assert_called_once_with(
- broadcast='10.6.3.255', interface='eth1',
- ip='10.6.3.27', prefix_or_mask='22',
- router='10.6.0.1', static_routes=None
+ broadcast="10.6.3.255",
+ interface="eth1",
+ ip="10.6.3.27",
+ prefix_or_mask="22",
+ router="10.6.0.1",
+ static_routes=None,
)
self.assertTrue(mock_readmd.called)
- self.assertEqual(UC_METADATA.get('region'), ds.availability_zone)
- self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id())
- self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name)
+ self.assertEqual(UC_METADATA.get("region"), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name)
- @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata')
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_network_configuration(self, m_get_by_mac, mock_readmd):
mock_readmd.return_value = UC_METADATA.copy()
- raw_ifaces = UC_METADATA.get('network').get('interfaces')
+ raw_ifaces = UC_METADATA.get("network").get("interfaces")
self.assertEqual(4, len(raw_ifaces))
m_get_by_mac.return_value = {
- raw_ifaces[0].get('mac'): 'eth0',
- raw_ifaces[1].get('mac'): 'eth1',
- raw_ifaces[2].get('mac'): 'eth2',
- raw_ifaces[3].get('mac'): 'eth3',
+ raw_ifaces[0].get("mac"): "eth0",
+ raw_ifaces[1].get("mac"): "eth1",
+ raw_ifaces[2].get("mac"): "eth2",
+ raw_ifaces[3].get("mac"): "eth3",
}
ds = self.get_ds()
@@ -266,49 +282,50 @@ class TestUpCloudNetworkSetup(CiTestCase):
netcfg = ds.network_config
- self.assertEqual(1, netcfg.get('version'))
+ self.assertEqual(1, netcfg.get("version"))
- config = netcfg.get('config')
+ config = netcfg.get("config")
self.assertIsInstance(config, list)
self.assertEqual(5, len(config))
- self.assertEqual('physical', config[3].get('type'))
+ self.assertEqual("physical", config[3].get("type"))
- self.assertEqual(raw_ifaces[2].get('mac'), config[2]
- .get('mac_address'))
- self.assertEqual(1, len(config[2].get('subnets')))
- self.assertEqual('ipv6_dhcpv6-stateless', config[2].get('subnets')[0]
- .get('type'))
+ self.assertEqual(
+ raw_ifaces[2].get("mac"), config[2].get("mac_address")
+ )
+ self.assertEqual(1, len(config[2].get("subnets")))
+ self.assertEqual(
+ "ipv6_dhcpv6-stateless", config[2].get("subnets")[0].get("type")
+ )
- self.assertEqual(2, len(config[0].get('subnets')))
- self.assertEqual('static', config[0].get('subnets')[1].get('type'))
+ self.assertEqual(2, len(config[0].get("subnets")))
+ self.assertEqual("static", config[0].get("subnets")[1].get("type"))
dns = config[4]
- self.assertEqual('nameserver', dns.get('type'))
- self.assertEqual(2, len(dns.get('address')))
+ self.assertEqual("nameserver", dns.get("type"))
+ self.assertEqual(2, len(dns.get("address")))
self.assertEqual(
- UC_METADATA.get('network').get('dns')[1],
- dns.get('address')[1]
+ UC_METADATA.get("network").get("dns")[1], dns.get("address")[1]
)
class TestUpCloudDatasourceLoading(CiTestCase):
def test_get_datasource_list_returns_in_local(self):
- deps = (sources.DEP_FILESYSTEM, )
+ deps = (sources.DEP_FILESYSTEM,)
ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
- self.assertEqual(ds_list,
- [DataSourceUpCloudLocal])
+ self.assertEqual(ds_list, [DataSourceUpCloudLocal])
def test_get_datasource_list_returns_in_normal(self):
deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)
ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
- self.assertEqual(ds_list,
- [DataSourceUpCloud])
+ self.assertEqual(ds_list, [DataSourceUpCloud])
def test_list_sources_finds_ds(self):
found = sources.list_sources(
- ['UpCloud'], (sources.DEP_FILESYSTEM, sources.DEP_NETWORK),
- ['cloudinit.sources'])
- self.assertEqual([DataSourceUpCloud],
- found)
+ ["UpCloud"],
+ (sources.DEP_FILESYSTEM, sources.DEP_NETWORK),
+ ["cloudinit.sources"],
+ )
+ self.assertEqual([DataSourceUpCloud], found)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py
index d34d7782..dcdbda89 100644
--- a/tests/unittests/sources/test_vmware.py
+++ b/tests/unittests/sources/test_vmware.py
@@ -10,17 +10,15 @@ import os
import pytest
-from cloudinit import dmi, helpers, safeyaml
-from cloudinit import settings
+from cloudinit import dmi, helpers, safeyaml, settings
from cloudinit.sources import DataSourceVMware
from tests.unittests.helpers import (
- mock,
CiTestCase,
FilesystemMockingTestCase,
+ mock,
populate_dir,
)
-
PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name"
PRODUCT_NAME = "VMware7,1"
PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB"
@@ -61,11 +59,11 @@ runcmd:
@pytest.yield_fixture(autouse=True)
def common_patches():
- with mock.patch('cloudinit.util.platform.platform', return_value='Linux'):
+ with mock.patch("cloudinit.util.platform.platform", return_value="Linux"):
with mock.patch.multiple(
- 'cloudinit.dmi',
+ "cloudinit.dmi",
is_container=mock.Mock(return_value=False),
- is_FreeBSD=mock.Mock(return_value=False)
+ is_FreeBSD=mock.Mock(return_value=False),
):
yield
diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py
index 40594b95..21d5bc17 100644
--- a/tests/unittests/sources/test_vultr.py
+++ b/tests/unittests/sources/test_vultr.py
@@ -7,255 +7,204 @@
import json
-from cloudinit import helpers
-from cloudinit import settings
+from cloudinit import helpers, settings
from cloudinit.sources import DataSourceVultr
from cloudinit.sources.helpers import vultr
-
-from tests.unittests.helpers import mock, CiTestCase
+from tests.unittests.helpers import CiTestCase, mock
# Vultr metadata test data
VULTR_V1_1 = {
- 'bgp': {
- 'ipv4': {
- 'my-address': '',
- 'my-asn': '',
- 'peer-address': '',
- 'peer-asn': ''
+ "bgp": {
+ "ipv4": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ "ipv6": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
},
- 'ipv6': {
- 'my-address': '',
- 'my-asn': '',
- 'peer-address': '',
- 'peer-asn': ''
- }
},
- 'hostname': 'CLOUDINIT_1',
- 'instanceid': '42506325',
- 'interfaces': [
+ "hostname": "CLOUDINIT_1",
+ "instanceid": "42506325",
+ "interfaces": [
{
- 'ipv4': {
- 'additional': [
- ],
- 'address': '108.61.89.242',
- 'gateway': '108.61.89.1',
- 'netmask': '255.255.255.0'
+ "ipv4": {
+ "additional": [],
+ "address": "108.61.89.242",
+ "gateway": "108.61.89.1",
+ "netmask": "255.255.255.0",
},
- 'ipv6': {
- 'additional': [
- ],
- 'address': '2001:19f0:5:56c2:5400:03ff:fe15:c465',
- 'network': '2001:19f0:5:56c2::',
- 'prefix': '64'
+ "ipv6": {
+ "additional": [],
+ "address": "2001:19f0:5:56c2:5400:03ff:fe15:c465",
+ "network": "2001:19f0:5:56c2::",
+ "prefix": "64",
},
- 'mac': '56:00:03:15:c4:65',
- 'network-type': 'public'
+ "mac": "56:00:03:15:c4:65",
+ "network-type": "public",
}
],
- 'public-keys': [
- 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key'
- ],
- 'region': {
- 'regioncode': 'EWR'
- },
- 'user-defined': [
- ],
- 'startup-script': 'echo No configured startup script',
- 'raid1-script': '',
- 'user-data': [
- ],
- 'vendor-data': [
+ "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"],
+ "region": {"regioncode": "EWR"},
+ "user-defined": [],
+ "startup-script": "echo No configured startup script",
+ "raid1-script": "",
+ "user-data": [],
+ "vendor-data": [
{
- 'package_upgrade': 'true',
- 'disable_root': 0,
- 'ssh_pwauth': 1,
- 'chpasswd': {
- 'expire': False,
- 'list': [
- 'root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/'
- ]
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/"],
},
- 'system_info': {
- 'default_user': {
- 'name': 'root'
- }
- }
+ "system_info": {"default_user": {"name": "root"}},
}
- ]
+ ],
}
VULTR_V1_2 = {
- 'bgp': {
- 'ipv4': {
- 'my-address': '',
- 'my-asn': '',
- 'peer-address': '',
- 'peer-asn': ''
+ "bgp": {
+ "ipv4": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ "ipv6": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
},
- 'ipv6': {
- 'my-address': '',
- 'my-asn': '',
- 'peer-address': '',
- 'peer-asn': ''
- }
},
- 'hostname': 'CLOUDINIT_2',
- 'instance-v2-id': '29bea708-2e6e-480a-90ad-0e6b5d5ad62f',
- 'instanceid': '42872224',
- 'interfaces': [
+ "hostname": "CLOUDINIT_2",
+ "instance-v2-id": "29bea708-2e6e-480a-90ad-0e6b5d5ad62f",
+ "instanceid": "42872224",
+ "interfaces": [
{
- 'ipv4': {
- 'additional': [
- ],
- 'address':'45.76.7.171',
- 'gateway':'45.76.6.1',
- 'netmask':'255.255.254.0'
+ "ipv4": {
+ "additional": [],
+ "address": "45.76.7.171",
+ "gateway": "45.76.6.1",
+ "netmask": "255.255.254.0",
},
- 'ipv6':{
- 'additional': [
- ],
- 'address':'2001:19f0:5:28a7:5400:03ff:fe1b:4eca',
- 'network':'2001:19f0:5:28a7::',
- 'prefix':'64'
+ "ipv6": {
+ "additional": [],
+ "address": "2001:19f0:5:28a7:5400:03ff:fe1b:4eca",
+ "network": "2001:19f0:5:28a7::",
+ "prefix": "64",
},
- 'mac':'56:00:03:1b:4e:ca',
- 'network-type':'public'
+ "mac": "56:00:03:1b:4e:ca",
+ "network-type": "public",
},
{
- 'ipv4': {
- 'additional': [
- ],
- 'address':'10.1.112.3',
- 'gateway':'',
- 'netmask':'255.255.240.0'
- },
- 'ipv6':{
- 'additional': [
- ],
- 'network':'',
- 'prefix':''
+ "ipv4": {
+ "additional": [],
+ "address": "10.1.112.3",
+ "gateway": "",
+ "netmask": "255.255.240.0",
},
- 'mac':'5a:00:03:1b:4e:ca',
- 'network-type':'private',
- 'network-v2-id':'fbbe2b5b-b986-4396-87f5-7246660ccb64',
- 'networkid':'net5e7155329d730'
- }
- ],
- 'public-keys': [
- 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key'
- ],
- 'region': {
- 'regioncode': 'EWR'
- },
- 'user-defined': [
- ],
- 'startup-script': 'echo No configured startup script',
- 'user-data': [
+ "ipv6": {"additional": [], "network": "", "prefix": ""},
+ "mac": "5a:00:03:1b:4e:ca",
+ "network-type": "private",
+ "network-v2-id": "fbbe2b5b-b986-4396-87f5-7246660ccb64",
+ "networkid": "net5e7155329d730",
+ },
],
-
- 'vendor-data': [
+ "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"],
+ "region": {"regioncode": "EWR"},
+ "user-defined": [],
+ "startup-script": "echo No configured startup script",
+ "user-data": [],
+ "vendor-data": [
{
- 'package_upgrade': 'true',
- 'disable_root': 0,
- 'ssh_pwauth': 1,
- 'chpasswd': {
- 'expire': False,
- 'list': [
- 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1'
- ]
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"],
},
- 'system_info': {
- 'default_user': {
- 'name': 'root'
- }
- }
+ "system_info": {"default_user": {"name": "root"}},
}
- ]
+ ],
}
-SSH_KEYS_1 = [
- "ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"
-]
+SSH_KEYS_1 = ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"]
# Expected generated objects
# Expected config
EXPECTED_VULTR_CONFIG = {
- 'package_upgrade': 'true',
- 'disable_root': 0,
- 'ssh_pwauth': 1,
- 'chpasswd': {
- 'expire': False,
- 'list': [
- 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1'
- ]
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"],
},
- 'system_info': {
- 'default_user': {
- 'name': 'root'
- }
- }
+ "system_info": {"default_user": {"name": "root"}},
}
# Expected network config object from generator
EXPECTED_VULTR_NETWORK_1 = {
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
+ {"type": "nameserver", "address": ["108.61.10.10"]},
{
- 'type': 'nameserver',
- 'address': ['108.61.10.10']
- },
- {
- 'name': 'eth0',
- 'type': 'physical',
- 'mac_address': '56:00:03:15:c4:65',
- 'accept-ra': 1,
- 'subnets': [
- {'type': 'dhcp', 'control': 'auto'},
- {'type': 'ipv6_slaac', 'control': 'auto'}
+ "name": "eth0",
+ "type": "physical",
+ "mac_address": "56:00:03:15:c4:65",
+ "accept-ra": 1,
+ "subnets": [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
],
- }
- ]
+ },
+ ],
}
EXPECTED_VULTR_NETWORK_2 = {
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
+ {"type": "nameserver", "address": ["108.61.10.10"]},
{
- 'type': 'nameserver',
- 'address': ['108.61.10.10']
- },
- {
- 'name': 'eth0',
- 'type': 'physical',
- 'mac_address': '56:00:03:1b:4e:ca',
- 'accept-ra': 1,
- 'subnets': [
- {'type': 'dhcp', 'control': 'auto'},
- {'type': 'ipv6_slaac', 'control': 'auto'}
+ "name": "eth0",
+ "type": "physical",
+ "mac_address": "56:00:03:1b:4e:ca",
+ "accept-ra": 1,
+ "subnets": [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
],
},
{
- 'name': 'eth1',
- 'type': 'physical',
- 'mac_address': '5a:00:03:1b:4e:ca',
- 'subnets': [
+ "name": "eth1",
+ "type": "physical",
+ "mac_address": "5a:00:03:1b:4e:ca",
+ "subnets": [
{
"type": "static",
"control": "auto",
"address": "10.1.112.3",
- "netmask": "255.255.240.0"
+ "netmask": "255.255.240.0",
}
],
- }
- ]
+ },
+ ],
}
INTERFACE_MAP = {
- '56:00:03:15:c4:65': 'eth0',
- '56:00:03:1b:4e:ca': 'eth0',
- '5a:00:03:1b:4e:ca': 'eth1'
+ "56:00:03:15:c4:65": "eth0",
+ "56:00:03:1b:4e:ca": "eth0",
+ "5a:00:03:1b:4e:ca": "eth1",
}
@@ -264,41 +213,39 @@ class TestDataSourceVultr(CiTestCase):
super(TestDataSourceVultr, self).setUp()
# Stored as a dict to make it easier to maintain
- raw1 = json.dumps(VULTR_V1_1['vendor-data'][0])
- raw2 = json.dumps(VULTR_V1_2['vendor-data'][0])
+ raw1 = json.dumps(VULTR_V1_1["vendor-data"][0])
+ raw2 = json.dumps(VULTR_V1_2["vendor-data"][0])
# Make expected format
- VULTR_V1_1['vendor-data'] = [raw1]
- VULTR_V1_2['vendor-data'] = [raw2]
+ VULTR_V1_1["vendor-data"] = [raw1]
+ VULTR_V1_2["vendor-data"] = [raw2]
self.tmp = self.tmp_dir()
# Test the datasource itself
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
- @mock.patch('cloudinit.sources.helpers.vultr.is_vultr')
- @mock.patch('cloudinit.sources.helpers.vultr.get_metadata')
- def test_datasource(self,
- mock_getmeta,
- mock_isvultr,
- mock_netmap):
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
+ @mock.patch("cloudinit.sources.helpers.vultr.get_metadata")
+ def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap):
mock_getmeta.return_value = VULTR_V1_2
mock_isvultr.return_value = True
mock_netmap.return_value = INTERFACE_MAP
source = DataSourceVultr.DataSourceVultr(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
# Test for failure
self.assertEqual(True, source._get_data())
# Test instance id
- self.assertEqual("42872224", source.metadata['instanceid'])
+ self.assertEqual("42872224", source.metadata["instanceid"])
# Test hostname
- self.assertEqual("CLOUDINIT_2", source.metadata['local-hostname'])
+ self.assertEqual("CLOUDINIT_2", source.metadata["local-hostname"])
# Test ssh keys
- self.assertEqual(SSH_KEYS_1, source.metadata['public-keys'])
+ self.assertEqual(SSH_KEYS_1, source.metadata["public-keys"])
# Test vendor data generation
orig_val = self.maxDiff
@@ -309,7 +256,8 @@ class TestDataSourceVultr(CiTestCase):
# Test vendor config
self.assertEqual(
EXPECTED_VULTR_CONFIG,
- json.loads(vendordata[0].replace("#cloud-config", "")))
+ json.loads(vendordata[0].replace("#cloud-config", "")),
+ )
self.maxDiff = orig_val
@@ -317,21 +265,24 @@ class TestDataSourceVultr(CiTestCase):
self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config)
# Test network config generation
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_network_config(self, mock_netmap):
mock_netmap.return_value = INTERFACE_MAP
- interf = VULTR_V1_1['interfaces']
+ interf = VULTR_V1_1["interfaces"]
- self.assertEqual(EXPECTED_VULTR_NETWORK_1,
- vultr.generate_network_config(interf))
+ self.assertEqual(
+ EXPECTED_VULTR_NETWORK_1, vultr.generate_network_config(interf)
+ )
# Test Private Networking config generation
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_private_network_config(self, mock_netmap):
mock_netmap.return_value = INTERFACE_MAP
- interf = VULTR_V1_2['interfaces']
+ interf = VULTR_V1_2["interfaces"]
+
+ self.assertEqual(
+ EXPECTED_VULTR_NETWORK_2, vultr.generate_network_config(interf)
+ )
- self.assertEqual(EXPECTED_VULTR_NETWORK_2,
- vultr.generate_network_config(interf))
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_custom_script.py b/tests/unittests/sources/vmware/test_custom_script.py
index fcbb9cd5..9b3e079f 100644
--- a/tests/unittests/sources/vmware/test_custom_script.py
+++ b/tests/unittests/sources/vmware/test_custom_script.py
@@ -7,12 +7,13 @@
import os
import stat
+
from cloudinit import util
from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
CustomScriptConstant,
CustomScriptNotFound,
- PreCustomScript,
PostCustomScript,
+ PreCustomScript,
)
from tests.unittests.helpers import CiTestCase, mock
@@ -22,8 +23,7 @@ class TestVmwareCustomScript(CiTestCase):
self.tmpDir = self.tmp_dir()
# Mock the tmpDir as the root dir in VM.
self.execDir = os.path.join(self.tmpDir, ".customization")
- self.execScript = os.path.join(self.execDir,
- ".customize.sh")
+ self.execScript = os.path.join(self.execDir, ".customize.sh")
def test_prepare_custom_script(self):
"""
@@ -36,23 +36,24 @@ class TestVmwareCustomScript(CiTestCase):
preCust = PreCustomScript("random-vmw-test", self.tmpDir)
self.assertEqual("random-vmw-test", preCust.scriptname)
self.assertEqual(self.tmpDir, preCust.directory)
- self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir),
- preCust.scriptpath)
+ self.assertEqual(
+ self.tmp_path("random-vmw-test", self.tmpDir), preCust.scriptpath
+ )
with self.assertRaises(CustomScriptNotFound):
preCust.prepare_script()
# Custom script exists.
custScript = self.tmp_path("test-cust", self.tmpDir)
util.write_file(custScript, "test-CR-strip\r\r")
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_TMP_DIR",
- self.execDir):
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_SCRIPT",
- self.execScript):
- postCust = PostCustomScript("test-cust",
- self.tmpDir,
- self.tmpDir)
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir
+ ):
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript
+ ):
+ postCust = PostCustomScript(
+ "test-cust", self.tmpDir, self.tmpDir
+ )
self.assertEqual("test-cust", postCust.scriptname)
self.assertEqual(self.tmpDir, postCust.directory)
self.assertEqual(custScript, postCust.scriptpath)
@@ -84,26 +85,30 @@ class TestVmwareCustomScript(CiTestCase):
ccScriptDir = self.tmp_dir()
ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh")
markerFile = os.path.join(self.tmpDir, ".markerFile")
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_TMP_DIR",
- self.execDir):
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_SCRIPT",
- self.execScript):
- with mock.patch.object(CustomScriptConstant,
- "POST_CUSTOM_PENDING_MARKER",
- markerFile):
- postCust = PostCustomScript("test-cust",
- self.tmpDir,
- ccScriptDir)
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir
+ ):
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript
+ ):
+ with mock.patch.object(
+ CustomScriptConstant,
+ "POST_CUSTOM_PENDING_MARKER",
+ markerFile,
+ ):
+ postCust = PostCustomScript(
+ "test-cust", self.tmpDir, ccScriptDir
+ )
postCust.execute()
# Check cc_scripts_per_instance and marker file
# are created.
self.assertTrue(os.path.exists(ccScript))
with open(ccScript, "r") as f:
content = f.read()
- self.assertEqual(content,
- "This is the script to run post cust")
+ self.assertEqual(
+ content, "This is the script to run post cust"
+ )
self.assertTrue(os.path.exists(markerFile))
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_guestcust_util.py b/tests/unittests/sources/vmware/test_guestcust_util.py
index 9114f0b9..fc63bcae 100644
--- a/tests/unittests/sources/vmware/test_guestcust_util.py
+++ b/tests/unittests/sources/vmware/test_guestcust_util.py
@@ -21,78 +21,89 @@ class TestGuestCustUtil(CiTestCase):
This test is designed to verify the behavior if vmware-toolbox-cmd
is not installed.
"""
- with mock.patch.object(subp, 'which', return_value=None):
+ with mock.patch.object(subp, "which", return_value=None):
self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'), 'defaultVal')
+ get_tools_config("section", "key", "defaultVal"), "defaultVal"
+ )
def test_get_tools_config_internal_exception(self):
"""
This test is designed to verify the behavior if internal exception
is raised.
"""
- with mock.patch.object(subp, 'which', return_value='/dummy/path'):
- with mock.patch.object(subp, 'subp',
- return_value=('key=value', b''),
- side_effect=subp.ProcessExecutionError(
- "subp failed", exit_code=99)):
+ with mock.patch.object(subp, "which", return_value="/dummy/path"):
+ with mock.patch.object(
+ subp,
+ "subp",
+ return_value=("key=value", b""),
+ side_effect=subp.ProcessExecutionError(
+ "subp failed", exit_code=99
+ ),
+ ):
# verify return value is 'defaultVal', not 'value'.
self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'defaultVal')
+ get_tools_config("section", "key", "defaultVal"),
+ "defaultVal",
+ )
def test_get_tools_config_normal(self):
"""
This test is designed to verify the value could be parsed from
key = value of the given [section]
"""
- with mock.patch.object(subp, 'which', return_value='/dummy/path'):
+ with mock.patch.object(subp, "which", return_value="/dummy/path"):
# value is not blank
- with mock.patch.object(subp, 'subp',
- return_value=('key = value ', b'')):
+ with mock.patch.object(
+ subp, "subp", return_value=("key = value ", b"")
+ ):
self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'value')
+ get_tools_config("section", "key", "defaultVal"), "value"
+ )
# value is blank
- with mock.patch.object(subp, 'subp',
- return_value=('key = ', b'')):
+ with mock.patch.object(subp, "subp", return_value=("key = ", b"")):
self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- '')
+ get_tools_config("section", "key", "defaultVal"), ""
+ )
# value contains =
- with mock.patch.object(subp, 'subp',
- return_value=('key=Bar=Wark', b'')):
+ with mock.patch.object(
+ subp, "subp", return_value=("key=Bar=Wark", b"")
+ ):
self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'Bar=Wark')
+ get_tools_config("section", "key", "defaultVal"),
+ "Bar=Wark",
+ )
# value contains specific characters
- with mock.patch.object(subp, 'subp',
- return_value=('[a] b.c_d=e-f', b'')):
+ with mock.patch.object(
+ subp, "subp", return_value=("[a] b.c_d=e-f", b"")
+ ):
self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'e-f')
+ get_tools_config("section", "key", "defaultVal"), "e-f"
+ )
def test_set_gc_status(self):
"""
This test is designed to verify the behavior of set_gc_status
"""
# config is None, return None
- self.assertEqual(set_gc_status(None, 'Successful'), None)
+ self.assertEqual(set_gc_status(None, "Successful"), None)
# post gc status is NO, return None
cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
conf = Config(cf)
- self.assertEqual(set_gc_status(conf, 'Successful'), None)
+ self.assertEqual(set_gc_status(conf, "Successful"), None)
# post gc status is YES, subp is called to execute command
cf._insertKey("MISC|POST-GC-STATUS", "YES")
conf = Config(cf)
- with mock.patch.object(subp, 'subp',
- return_value=('ok', b'')) as mockobj:
- self.assertEqual(
- set_gc_status(conf, 'Successful'), ('ok', b''))
+ with mock.patch.object(
+ subp, "subp", return_value=("ok", b"")
+ ) as mockobj:
+ self.assertEqual(set_gc_status(conf, "Successful"), ("ok", b""))
mockobj.assert_called_once_with(
- ['vmware-rpctool', 'info-set guestinfo.gc.status Successful'],
- rcs=[0])
+ ["vmware-rpctool", "info-set guestinfo.gc.status Successful"],
+ rcs=[0],
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py
index 1d66ab4a..38d45d0e 100644
--- a/tests/unittests/sources/vmware/test_vmware_config_file.py
+++ b/tests/unittests/sources/vmware/test_vmware_config_file.py
@@ -12,15 +12,19 @@ import sys
import tempfile
import textwrap
-from cloudinit.sources.DataSourceOVF import get_network_config_from_conf
-from cloudinit.sources.DataSourceOVF import read_vmware_imc
+from cloudinit.sources.DataSourceOVF import (
+ get_network_config_from_conf,
+ read_vmware_imc,
+)
from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum
from cloudinit.sources.helpers.vmware.imc.config import Config
from cloudinit.sources.helpers.vmware.imc.config_file import (
ConfigFile as WrappedConfigFile,
)
-from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet
-from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator
+from cloudinit.sources.helpers.vmware.imc.config_nic import (
+ NicConfigurator,
+ gen_subnet,
+)
from tests.unittests.helpers import CiTestCase, cloud_init_project_dir
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
@@ -32,7 +36,6 @@ def ConfigFile(path: str):
class TestVmwareConfigFile(CiTestCase):
-
def test_utility_methods(self):
"""Tests basic utility methods of ConfigFile class"""
cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
@@ -45,12 +48,14 @@ class TestVmwareConfigFile(CiTestCase):
cf._insertKey("BAR", " ")
self.assertEqual(2, len(cf), "insert size")
- self.assertEqual('foo', cf["PASSWORD|-PASS"], "password")
+ self.assertEqual("foo", cf["PASSWORD|-PASS"], "password")
self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
- self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"),
- "keepPassword")
- self.assertFalse(cf.should_remove_current_value("PASSWORD|-PASS"),
- "removePassword")
+ self.assertFalse(
+ cf.should_keep_current_value("PASSWORD|-PASS"), "keepPassword"
+ )
+ self.assertFalse(
+ cf.should_remove_current_value("PASSWORD|-PASS"), "removePassword"
+ )
self.assertFalse("FOO" in cf, "hasFoo")
self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo")
self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo")
@@ -62,17 +67,17 @@ class TestVmwareConfigFile(CiTestCase):
"""Tests instance id for the DatasourceOVF"""
cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
- instance_id_prefix = 'iid-vmware-'
+ instance_id_prefix = "iid-vmware-"
conf = Config(cf)
(md1, _, _) = read_vmware_imc(conf)
self.assertIn(instance_id_prefix, md1["instance-id"])
- self.assertEqual(md1["instance-id"], 'iid-vmware-imc')
+ self.assertEqual(md1["instance-id"], "iid-vmware-imc")
(md2, _, _) = read_vmware_imc(conf)
self.assertIn(instance_id_prefix, md2["instance-id"])
- self.assertEqual(md2["instance-id"], 'iid-vmware-imc')
+ self.assertEqual(md2["instance-id"], "iid-vmware-imc")
self.assertEqual(md2["instance-id"], md1["instance-id"])
@@ -82,36 +87,38 @@ class TestVmwareConfigFile(CiTestCase):
conf = Config(cf)
- self.assertEqual('myhost1', conf.host_name, "hostName")
- self.assertEqual('Africa/Abidjan', conf.timezone, "tz")
+ self.assertEqual("myhost1", conf.host_name, "hostName")
+ self.assertEqual("Africa/Abidjan", conf.timezone, "tz")
self.assertTrue(conf.utc, "utc")
- self.assertEqual(['10.20.145.1', '10.20.145.2'],
- conf.name_servers,
- "dns")
- self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'],
- conf.dns_suffixes,
- "suffixes")
+ self.assertEqual(
+ ["10.20.145.1", "10.20.145.2"], conf.name_servers, "dns"
+ )
+ self.assertEqual(
+ ["eng.vmware.com", "proxy.vmware.com"],
+ conf.dns_suffixes,
+ "suffixes",
+ )
nics = conf.nics
ipv40 = nics[0].staticIpv4
self.assertEqual(2, len(nics), "nics")
- self.assertEqual('NIC1', nics[0].name, "nic0")
- self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0")
+ self.assertEqual("NIC1", nics[0].name, "nic0")
+ self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0")
self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0")
- self.assertEqual('10.20.87.154', ipv40[0].ip, "ipv4Addr0")
- self.assertEqual('255.255.252.0', ipv40[0].netmask, "ipv4Mask0")
+ self.assertEqual("10.20.87.154", ipv40[0].ip, "ipv4Addr0")
+ self.assertEqual("255.255.252.0", ipv40[0].netmask, "ipv4Mask0")
self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0")
- self.assertEqual('10.20.87.253', ipv40[0].gateways[0], "ipv4Gw0_0")
- self.assertEqual('10.20.87.105', ipv40[0].gateways[1], "ipv4Gw0_1")
+ self.assertEqual("10.20.87.253", ipv40[0].gateways[0], "ipv4Gw0_0")
+ self.assertEqual("10.20.87.105", ipv40[0].gateways[1], "ipv4Gw0_1")
self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0")
- self.assertEqual('fc00:10:20:87::154',
- nics[0].staticIpv6[0].ip,
- "ipv6Addr0")
+ self.assertEqual(
+ "fc00:10:20:87::154", nics[0].staticIpv6[0].ip, "ipv6Addr0"
+ )
- self.assertEqual('NIC2', nics[1].name, "nic1")
+ self.assertEqual("NIC2", nics[1].name, "nic1")
self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp")
def test_config_file_dhcp_2nics(self):
@@ -121,8 +128,8 @@ class TestVmwareConfigFile(CiTestCase):
conf = Config(cf)
nics = conf.nics
self.assertEqual(2, len(nics), "nics")
- self.assertEqual('NIC1', nics[0].name, "nic0")
- self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0")
+ self.assertEqual("NIC1", nics[0].name, "nic0")
+ self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0")
self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0")
def test_config_password(self):
@@ -132,7 +139,7 @@ class TestVmwareConfigFile(CiTestCase):
cf._insertKey("PASSWORD|RESET", "no")
conf = Config(cf)
- self.assertEqual('test-password', conf.admin_password, "password")
+ self.assertEqual("test-password", conf.admin_password, "password")
self.assertFalse(conf.reset_password, "do not reset password")
def test_config_reset_passwd(self):
@@ -161,67 +168,66 @@ class TestVmwareConfigFile(CiTestCase):
network_config = get_network_config_from_conf(config, False)
- self.assertEqual(1, network_config.get('version'))
+ self.assertEqual(1, network_config.get("version"))
- config_types = network_config.get('config')
+ config_types = network_config.get("config")
name_servers = None
dns_suffixes = None
for type in config_types:
- if type.get('type') == 'nameserver':
- name_servers = type.get('address')
- dns_suffixes = type.get('search')
+ if type.get("type") == "nameserver":
+ name_servers = type.get("address")
+ dns_suffixes = type.get("search")
break
- self.assertEqual(['10.20.145.1', '10.20.145.2'],
- name_servers,
- "dns")
- self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'],
- dns_suffixes,
- "suffixes")
+ self.assertEqual(["10.20.145.1", "10.20.145.2"], name_servers, "dns")
+ self.assertEqual(
+ ["eng.vmware.com", "proxy.vmware.com"], dns_suffixes, "suffixes"
+ )
def test_gen_subnet(self):
"""Tests if gen_subnet properly calculates network subnet from
- IPv4 address and netmask"""
- ip_subnet_list = [['10.20.87.253', '255.255.252.0', '10.20.84.0'],
- ['10.20.92.105', '255.255.252.0', '10.20.92.0'],
- ['192.168.0.10', '255.255.0.0', '192.168.0.0']]
+ IPv4 address and netmask"""
+ ip_subnet_list = [
+ ["10.20.87.253", "255.255.252.0", "10.20.84.0"],
+ ["10.20.92.105", "255.255.252.0", "10.20.92.0"],
+ ["192.168.0.10", "255.255.0.0", "192.168.0.0"],
+ ]
for entry in ip_subnet_list:
- self.assertEqual(entry[2], gen_subnet(entry[0], entry[1]),
- "Subnet for a specified ip and netmask")
+ self.assertEqual(
+ entry[2],
+ gen_subnet(entry[0], entry[1]),
+ "Subnet for a specified ip and netmask",
+ )
def test_get_config_dns_suffixes(self):
"""Tests if get_network_config_from_conf properly
- generates nameservers and dns settings from a
- specified configuration"""
+ generates nameservers and dns settings from a
+ specified configuration"""
cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
config = Config(cf)
network_config = get_network_config_from_conf(config, False)
- self.assertEqual(1, network_config.get('version'))
+ self.assertEqual(1, network_config.get("version"))
- config_types = network_config.get('config')
+ config_types = network_config.get("config")
name_servers = None
dns_suffixes = None
for type in config_types:
- if type.get('type') == 'nameserver':
- name_servers = type.get('address')
- dns_suffixes = type.get('search')
+ if type.get("type") == "nameserver":
+ name_servers = type.get("address")
+ dns_suffixes = type.get("search")
break
- self.assertEqual([],
- name_servers,
- "dns")
- self.assertEqual(['eng.vmware.com'],
- dns_suffixes,
- "suffixes")
+ self.assertEqual([], name_servers, "dns")
+ self.assertEqual(["eng.vmware.com"], dns_suffixes, "suffixes")
def test_get_nics_list_dhcp(self):
"""Tests if NicConfigurator properly calculates network subnets
- for a configuration with a list of DHCP NICs"""
+ for a configuration with a list of DHCP NICs"""
cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
config = Config(cf)
@@ -231,37 +237,39 @@ class TestVmwareConfigFile(CiTestCase):
self.assertEqual(2, len(nics_cfg_list), "number of config elements")
- nic1 = {'name': 'NIC1'}
- nic2 = {'name': 'NIC2'}
+ nic1 = {"name": "NIC1"}
+ nic2 = {"name": "NIC2"}
for cfg in nics_cfg_list:
- if cfg.get('name') == nic1.get('name'):
+ if cfg.get("name") == nic1.get("name"):
nic1.update(cfg)
- elif cfg.get('name') == nic2.get('name'):
+ elif cfg.get("name") == nic2.get("name"):
nic2.update(cfg)
- self.assertEqual('physical', nic1.get('type'), 'type of NIC1')
- self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1')
- self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'),
- 'mac address of NIC1')
- subnets = nic1.get('subnets')
- self.assertEqual(1, len(subnets), 'number of subnets for NIC1')
+ self.assertEqual("physical", nic1.get("type"), "type of NIC1")
+ self.assertEqual("NIC1", nic1.get("name"), "name of NIC1")
+ self.assertEqual(
+ "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1"
+ )
+ subnets = nic1.get("subnets")
+ self.assertEqual(1, len(subnets), "number of subnets for NIC1")
subnet = subnets[0]
- self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC1')
- self.assertEqual('auto', subnet.get('control'), 'NIC1 Control type')
-
- self.assertEqual('physical', nic2.get('type'), 'type of NIC2')
- self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2')
- self.assertEqual('00:50:56:a6:5a:de', nic2.get('mac_address'),
- 'mac address of NIC2')
- subnets = nic2.get('subnets')
- self.assertEqual(1, len(subnets), 'number of subnets for NIC2')
+ self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC1")
+ self.assertEqual("auto", subnet.get("control"), "NIC1 Control type")
+
+ self.assertEqual("physical", nic2.get("type"), "type of NIC2")
+ self.assertEqual("NIC2", nic2.get("name"), "name of NIC2")
+ self.assertEqual(
+ "00:50:56:a6:5a:de", nic2.get("mac_address"), "mac address of NIC2"
+ )
+ subnets = nic2.get("subnets")
+ self.assertEqual(1, len(subnets), "number of subnets for NIC2")
subnet = subnets[0]
- self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC2')
- self.assertEqual('auto', subnet.get('control'), 'NIC2 Control type')
+ self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC2")
+ self.assertEqual("auto", subnet.get("control"), "NIC2 Control type")
def test_get_nics_list_static(self):
"""Tests if NicConfigurator properly calculates network subnets
- for a configuration with 2 static NICs"""
+ for a configuration with 2 static NICs"""
cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
config = Config(cf)
@@ -271,80 +279,93 @@ class TestVmwareConfigFile(CiTestCase):
self.assertEqual(2, len(nics_cfg_list), "number of elements")
- nic1 = {'name': 'NIC1'}
- nic2 = {'name': 'NIC2'}
+ nic1 = {"name": "NIC1"}
+ nic2 = {"name": "NIC2"}
route_list = []
for cfg in nics_cfg_list:
- cfg_type = cfg.get('type')
- if cfg_type == 'physical':
- if cfg.get('name') == nic1.get('name'):
+ cfg_type = cfg.get("type")
+ if cfg_type == "physical":
+ if cfg.get("name") == nic1.get("name"):
nic1.update(cfg)
- elif cfg.get('name') == nic2.get('name'):
+ elif cfg.get("name") == nic2.get("name"):
nic2.update(cfg)
- self.assertEqual('physical', nic1.get('type'), 'type of NIC1')
- self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1')
- self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'),
- 'mac address of NIC1')
+ self.assertEqual("physical", nic1.get("type"), "type of NIC1")
+ self.assertEqual("NIC1", nic1.get("name"), "name of NIC1")
+ self.assertEqual(
+ "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1"
+ )
- subnets = nic1.get('subnets')
- self.assertEqual(2, len(subnets), 'Number of subnets')
+ subnets = nic1.get("subnets")
+ self.assertEqual(2, len(subnets), "Number of subnets")
static_subnet = []
static6_subnet = []
for subnet in subnets:
- subnet_type = subnet.get('type')
- if subnet_type == 'static':
+ subnet_type = subnet.get("type")
+ if subnet_type == "static":
static_subnet.append(subnet)
- elif subnet_type == 'static6':
+ elif subnet_type == "static6":
static6_subnet.append(subnet)
else:
- self.assertEqual(True, False, 'Unknown type')
- if 'route' in subnet:
- for route in subnet.get('routes'):
+ self.assertEqual(True, False, "Unknown type")
+ if "route" in subnet:
+ for route in subnet.get("routes"):
route_list.append(route)
- self.assertEqual(1, len(static_subnet), 'Number of static subnet')
- self.assertEqual(1, len(static6_subnet), 'Number of static6 subnet')
+ self.assertEqual(1, len(static_subnet), "Number of static subnet")
+ self.assertEqual(1, len(static6_subnet), "Number of static6 subnet")
subnet = static_subnet[0]
- self.assertEqual('10.20.87.154', subnet.get('address'),
- 'IPv4 address of static subnet')
- self.assertEqual('255.255.252.0', subnet.get('netmask'),
- 'NetMask of static subnet')
- self.assertEqual('auto', subnet.get('control'),
- 'control for static subnet')
+ self.assertEqual(
+ "10.20.87.154",
+ subnet.get("address"),
+ "IPv4 address of static subnet",
+ )
+ self.assertEqual(
+ "255.255.252.0", subnet.get("netmask"), "NetMask of static subnet"
+ )
+ self.assertEqual(
+ "auto", subnet.get("control"), "control for static subnet"
+ )
subnet = static6_subnet[0]
- self.assertEqual('fc00:10:20:87::154', subnet.get('address'),
- 'IPv6 address of static subnet')
- self.assertEqual('64', subnet.get('netmask'),
- 'NetMask of static6 subnet')
+ self.assertEqual(
+ "fc00:10:20:87::154",
+ subnet.get("address"),
+ "IPv6 address of static subnet",
+ )
+ self.assertEqual(
+ "64", subnet.get("netmask"), "NetMask of static6 subnet"
+ )
- route_set = set(['10.20.87.253', '10.20.87.105', '192.168.0.10'])
+ route_set = set(["10.20.87.253", "10.20.87.105", "192.168.0.10"])
for route in route_list:
- self.assertEqual(10000, route.get('metric'), 'metric of route')
- gateway = route.get('gateway')
+ self.assertEqual(10000, route.get("metric"), "metric of route")
+ gateway = route.get("gateway")
if gateway in route_set:
route_set.discard(gateway)
else:
- self.assertEqual(True, False, 'invalid gateway %s' % (gateway))
+ self.assertEqual(True, False, "invalid gateway %s" % (gateway))
- self.assertEqual('physical', nic2.get('type'), 'type of NIC2')
- self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2')
- self.assertEqual('00:50:56:a6:ef:7d', nic2.get('mac_address'),
- 'mac address of NIC2')
+ self.assertEqual("physical", nic2.get("type"), "type of NIC2")
+ self.assertEqual("NIC2", nic2.get("name"), "name of NIC2")
+ self.assertEqual(
+ "00:50:56:a6:ef:7d", nic2.get("mac_address"), "mac address of NIC2"
+ )
- subnets = nic2.get('subnets')
- self.assertEqual(1, len(subnets), 'Number of subnets for NIC2')
+ subnets = nic2.get("subnets")
+ self.assertEqual(1, len(subnets), "Number of subnets for NIC2")
subnet = subnets[0]
- self.assertEqual('static', subnet.get('type'), 'Subnet type')
- self.assertEqual('192.168.6.102', subnet.get('address'),
- 'Subnet address')
- self.assertEqual('255.255.0.0', subnet.get('netmask'),
- 'Subnet netmask')
+ self.assertEqual("static", subnet.get("type"), "Subnet type")
+ self.assertEqual(
+ "192.168.6.102", subnet.get("address"), "Subnet address"
+ )
+ self.assertEqual(
+ "255.255.0.0", subnet.get("netmask"), "Subnet netmask"
+ )
def test_custom_script(self):
cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
@@ -385,8 +406,9 @@ class TestVmwareNetConfig(CiTestCase):
def _get_NicConfigurator(self, text):
fp = None
try:
- with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(),
- delete=False) as fp:
+ with tempfile.NamedTemporaryFile(
+ mode="w", dir=self.tmp_dir(), delete=False
+ ) as fp:
fp.write(text)
fp.close()
cfg = Config(ConfigFile(fp.name))
@@ -397,7 +419,8 @@ class TestVmwareNetConfig(CiTestCase):
def test_non_primary_nic_without_gateway(self):
"""A non primary nic set is not required to have a gateway."""
- config = textwrap.dedent("""\
+ config = textwrap.dedent(
+ """\
[NETWORK]
NETWORKING = yes
BOOTPROTO = dhcp
@@ -414,19 +437,32 @@ class TestVmwareNetConfig(CiTestCase):
BOOTPROTO = static
IPADDR = 10.20.87.154
NETMASK = 255.255.252.0
- """)
+ """
+ )
nc = self._get_NicConfigurator(config)
self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:a6:8c:08',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}],
- nc.generate())
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
def test_non_primary_nic_with_gateway(self):
"""A non primary nic set can have a gateway."""
- config = textwrap.dedent("""\
+ config = textwrap.dedent(
+ """\
[NETWORK]
NETWORKING = yes
BOOTPROTO = dhcp
@@ -444,22 +480,40 @@ class TestVmwareNetConfig(CiTestCase):
IPADDR = 10.20.87.154
NETMASK = 255.255.252.0
GATEWAY = 10.20.87.253
- """)
+ """
+ )
nc = self._get_NicConfigurator(config)
self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:a6:8c:08',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '10.20.87.154', 'netmask': '255.255.252.0',
- 'routes':
- [{'type': 'route', 'destination': '10.20.84.0/22',
- 'gateway': '10.20.87.253', 'metric': 10000}]}]}],
- nc.generate())
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ "routes": [
+ {
+ "type": "route",
+ "destination": "10.20.84.0/22",
+ "gateway": "10.20.87.253",
+ "metric": 10000,
+ }
+ ],
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
def test_cust_non_primary_nic_with_gateway_(self):
"""A customer non primary nic set can have a gateway."""
- config = textwrap.dedent("""\
+ config = textwrap.dedent(
+ """\
[NETWORK]
NETWORKING = yes
BOOTPROTO = dhcp
@@ -486,22 +540,40 @@ class TestVmwareNetConfig(CiTestCase):
[DATETIME]
UTC = yes
- """)
+ """
+ )
nc = self._get_NicConfigurator(config)
self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:ac:d1:8a',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '100.115.223.75', 'netmask': '255.255.255.0',
- 'routes':
- [{'type': 'route', 'destination': '100.115.223.0/24',
- 'gateway': '100.115.223.254', 'metric': 10000}]}]}],
- nc.generate())
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:ac:d1:8a",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "100.115.223.75",
+ "netmask": "255.255.255.0",
+ "routes": [
+ {
+ "type": "route",
+ "destination": "100.115.223.0/24",
+ "gateway": "100.115.223.254",
+ "metric": 10000,
+ }
+ ],
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
def test_a_primary_nic_with_gateway(self):
"""A primary nic set can have a gateway."""
- config = textwrap.dedent("""\
+ config = textwrap.dedent(
+ """\
[NETWORK]
NETWORKING = yes
BOOTPROTO = dhcp
@@ -520,16 +592,28 @@ class TestVmwareNetConfig(CiTestCase):
NETMASK = 255.255.252.0
PRIMARY = true
GATEWAY = 10.20.87.253
- """)
+ """
+ )
nc = self._get_NicConfigurator(config)
self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:a6:8c:08',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '10.20.87.154', 'netmask': '255.255.252.0',
- 'gateway': '10.20.87.253'}]}],
- nc.generate())
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ "gateway": "10.20.87.253",
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
def test_meta_data(self):
cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")