Diffstat (limited to 'tests/unittests/sources')
-rw-r--r--  tests/unittests/sources/__init__.py  0
-rw-r--r--  tests/unittests/sources/helpers/test_netlink.py  573
-rw-r--r--  tests/unittests/sources/helpers/test_openstack.py  62
-rw-r--r--  tests/unittests/sources/test_aliyun.py  287
-rw-r--r--  tests/unittests/sources/test_altcloud.py  459
-rw-r--r--  tests/unittests/sources/test_azure.py  4306
-rw-r--r--  tests/unittests/sources/test_azure_helper.py  1609
-rw-r--r--  tests/unittests/sources/test_cloudsigma.py  145
-rw-r--r--  tests/unittests/sources/test_cloudstack.py  205
-rw-r--r--  tests/unittests/sources/test_common.py  123
-rw-r--r--  tests/unittests/sources/test_configdrive.py  1068
-rw-r--r--  tests/unittests/sources/test_digitalocean.py  389
-rw-r--r--  tests/unittests/sources/test_ec2.py  1125
-rw-r--r--  tests/unittests/sources/test_exoscale.py  241
-rw-r--r--  tests/unittests/sources/test_gce.py  416
-rw-r--r--  tests/unittests/sources/test_hetzner.py  164
-rw-r--r--  tests/unittests/sources/test_ibmcloud.py  426
-rw-r--r--  tests/unittests/sources/test_init.py  994
-rw-r--r--  tests/unittests/sources/test_lxd.py  394
-rw-r--r--  tests/unittests/sources/test_maas.py  227
-rw-r--r--  tests/unittests/sources/test_nocloud.py  453
-rw-r--r--  tests/unittests/sources/test_opennebula.py  1085
-rw-r--r--  tests/unittests/sources/test_openstack.py  788
-rw-r--r--  tests/unittests/sources/test_oracle.py  933
-rw-r--r--  tests/unittests/sources/test_ovf.py  1237
-rw-r--r--  tests/unittests/sources/test_rbx.py  241
-rw-r--r--  tests/unittests/sources/test_scaleway.py  526
-rw-r--r--  tests/unittests/sources/test_smartos.py  1421
-rw-r--r--  tests/unittests/sources/test_upcloud.py  331
-rw-r--r--  tests/unittests/sources/test_vmware.py  389
-rw-r--r--  tests/unittests/sources/test_vultr.py  339
-rw-r--r--  tests/unittests/sources/vmware/__init__.py  0
-rw-r--r--  tests/unittests/sources/vmware/test_custom_script.py  114
-rw-r--r--  tests/unittests/sources/vmware/test_guestcust_util.py  109
-rw-r--r--  tests/unittests/sources/vmware/test_vmware_config_file.py  635
35 files changed, 21814 insertions, 0 deletions
diff --git a/tests/unittests/sources/__init__.py b/tests/unittests/sources/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unittests/sources/__init__.py
diff --git a/tests/unittests/sources/helpers/test_netlink.py b/tests/unittests/sources/helpers/test_netlink.py
new file mode 100644
index 00000000..5eabf104
--- /dev/null
+++ b/tests/unittests/sources/helpers/test_netlink.py
@@ -0,0 +1,573 @@
+# Author: Tamilmani Manoharan <tamanoha@microsoft.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import codecs
+import socket
+import struct
+
+from cloudinit.sources.helpers.netlink import (
+ MAX_SIZE,
+ OPER_DORMANT,
+ OPER_DOWN,
+ OPER_LOWERLAYERDOWN,
+ OPER_NOTPRESENT,
+ OPER_TESTING,
+ OPER_UNKNOWN,
+ OPER_UP,
+ RTATTR_START_OFFSET,
+ RTM_DELLINK,
+ RTM_GETLINK,
+ RTM_NEWLINK,
+ RTM_SETLINK,
+ NetlinkCreateSocketError,
+ create_bound_netlink_socket,
+ read_netlink_socket,
+ read_rta_oper_state,
+ unpack_rta_attr,
+ wait_for_media_disconnect_connect,
+ wait_for_nic_attach_event,
+ wait_for_nic_detach_event,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+
+def int_to_bytes(i):
+    r"""Convert an integer to bytes, e.g. 1 to \x01."""
+ hex_value = "{0:x}".format(i)
+ hex_value = "0" * (len(hex_value) % 2) + hex_value
+ return codecs.decode(hex_value, "hex_codec")
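+
+
+# For example: int_to_bytes(1) == b"\x01" and int_to_bytes(256) ==
+# b"\x01\x00"; the hex string is zero-padded to an even number of digits
+# before decoding.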
+
+
+class TestCreateBoundNetlinkSocket(CiTestCase):
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+ def test_socket_error_on_create(self, m_socket):
+        """NetlinkCreateSocketError is raised when socket creation errors."""
+ m_socket.side_effect = socket.error("Fake socket failure")
+ with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr:
+ create_bound_netlink_socket()
+ self.assertEqual(
+ "Exception during netlink socket create: Fake socket failure",
+ str(ctx_mgr.exception),
+ )
+
+
+class TestReadNetlinkSocket(CiTestCase):
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+ @mock.patch("cloudinit.sources.helpers.netlink.select.select")
+ def test_read_netlink_socket(self, m_select, m_socket):
+        """read_netlink_socket is able to receive data"""
+ data = "netlinktest"
+ m_select.return_value = [m_socket], None, None
+ m_socket.recv.return_value = data
+ recv_data = read_netlink_socket(m_socket, 2)
+ m_select.assert_called_with([m_socket], [], [], 2)
+ m_socket.recv.assert_called_with(MAX_SIZE)
+ self.assertIsNotNone(recv_data)
+ self.assertEqual(recv_data, data)
+
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+ @mock.patch("cloudinit.sources.helpers.netlink.select.select")
+ def test_netlink_read_timeout(self, m_select, m_socket):
+        """read_netlink_socket should time out if there is nothing to read"""
+ m_select.return_value = [], None, None
+ data = read_netlink_socket(m_socket, 1)
+ m_select.assert_called_with([m_socket], [], [], 1)
+ self.assertEqual(m_socket.recv.call_count, 0)
+ self.assertIsNone(data)
+
+ def test_read_invalid_socket(self):
+ """read_netlink_socket raises assert error if socket is invalid"""
+ socket = None
+ with self.assertRaises(AssertionError) as context:
+ read_netlink_socket(socket, 1)
+ self.assertTrue("netlink socket is none" in str(context.exception))
+
+
+class TestParseNetlinkMessage(CiTestCase):
+ def test_read_rta_oper_state(self):
+        """read_rta_oper_state parses a netlink message and extracts the
+        interface name and operstate."""
+ ifname = "eth0"
+ bytes = ifname.encode("utf-8")
+ buf = bytearray(48)
+ struct.pack_into(
+ "HH4sHHc",
+ buf,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
+ )
+ interface_state = read_rta_oper_state(buf)
+ self.assertEqual(interface_state.ifname, ifname)
+ self.assertEqual(interface_state.operstate, OPER_DOWN)
+
+ def test_read_none_data(self):
+ """read_rta_oper_state raises assert error if data is none"""
+ data = None
+ with self.assertRaises(AssertionError) as context:
+ read_rta_oper_state(data)
+ self.assertEqual("data is none", str(context.exception))
+
+ def test_read_invalid_rta_operstate_none(self):
+ """read_rta_oper_state returns none if operstate is none"""
+ ifname = "eth0"
+ buf = bytearray(40)
+ bytes = ifname.encode("utf-8")
+ struct.pack_into("HH4s", buf, RTATTR_START_OFFSET, 8, 3, bytes)
+ interface_state = read_rta_oper_state(buf)
+ self.assertIsNone(interface_state)
+
+ def test_read_invalid_rta_ifname_none(self):
+ """read_rta_oper_state returns none if ifname is none"""
+ buf = bytearray(40)
+ struct.pack_into(
+ "HHc", buf, RTATTR_START_OFFSET, 5, 16, int_to_bytes(OPER_DOWN)
+ )
+ interface_state = read_rta_oper_state(buf)
+ self.assertIsNone(interface_state)
+
+ def test_read_invalid_data_len(self):
+ """raise assert error if data size is smaller than required size"""
+ buf = bytearray(32)
+ with self.assertRaises(AssertionError) as context:
+ read_rta_oper_state(buf)
+ self.assertTrue(
+ "length of data is smaller than RTATTR_START_OFFSET"
+ in str(context.exception)
+ )
+
+ def test_unpack_rta_attr_none_data(self):
+ """unpack_rta_attr raises assert error if data is none"""
+ data = None
+ with self.assertRaises(AssertionError) as context:
+ unpack_rta_attr(data, RTATTR_START_OFFSET)
+ self.assertTrue("data is none" in str(context.exception))
+
+ def test_unpack_rta_attr_invalid_offset(self):
+ """unpack_rta_attr raises assert error if offset is invalid"""
+ data = bytearray(48)
+ with self.assertRaises(AssertionError) as context:
+ unpack_rta_attr(data, "offset")
+ self.assertTrue("offset is not integer" in str(context.exception))
+ with self.assertRaises(AssertionError) as context:
+ unpack_rta_attr(data, 31)
+ self.assertTrue(
+ "rta offset is less than expected length" in str(context.exception)
+ )
+
+
+@mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+@mock.patch("cloudinit.sources.helpers.netlink.read_netlink_socket")
+class TestNicAttachDetach(CiTestCase):
+ with_logs = True
+
+ def _media_switch_data(self, ifname, msg_type, operstate):
+ """construct netlink data with specified fields"""
+ if ifname and operstate is not None:
+ data = bytearray(48)
+ bytes = ifname.encode("utf-8")
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
+ elif ifname:
+ data = bytearray(40)
+ bytes = ifname.encode("utf-8")
+ struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes)
+ elif operstate:
+ data = bytearray(40)
+ struct.pack_into(
+ "HHc",
+ data,
+ RTATTR_START_OFFSET,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
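+        # Prepend the 16-byte netlink message header (struct nlmsghdr):
+        # length, type, flags, sequence number and port id.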
+ struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0)
+ return data
+
+ def test_nic_attached_oper_down(self, m_read_netlink_socket, m_socket):
+        """A newly attached nic with oper state down is detected."""
+ ifname = "eth0"
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ m_read_netlink_socket.side_effect = [data_op_down]
+ ifread = wait_for_nic_attach_event(m_socket, [])
+ self.assertEqual(m_read_netlink_socket.call_count, 1)
+ self.assertEqual(ifname, ifread)
+
+ def test_nic_attached_oper_up(self, m_read_netlink_socket, m_socket):
+        """A newly attached nic with oper state up is detected."""
+ ifname = "eth0"
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [data_op_up]
+ ifread = wait_for_nic_attach_event(m_socket, [])
+ self.assertEqual(m_read_netlink_socket.call_count, 1)
+ self.assertEqual(ifname, ifread)
+
+ def test_nic_attach_ignore_existing(self, m_read_netlink_socket, m_socket):
+        """Attach events for interfaces already present are ignored."""
+ data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN)
+ data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN)
+ m_read_netlink_socket.side_effect = [data_eth0, data_eth1]
+ ifread = wait_for_nic_attach_event(m_socket, ["eth0"])
+ self.assertEqual(m_read_netlink_socket.call_count, 2)
+ self.assertEqual("eth1", ifread)
+
+ def test_nic_attach_read_first(self, m_read_netlink_socket, m_socket):
+        """The first event for a new interface is returned immediately."""
+ data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN)
+ data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN)
+ m_read_netlink_socket.side_effect = [data_eth0, data_eth1]
+ ifread = wait_for_nic_attach_event(m_socket, ["eth1"])
+ self.assertEqual(m_read_netlink_socket.call_count, 1)
+ self.assertEqual("eth0", ifread)
+
+ def test_nic_detached(self, m_read_netlink_socket, m_socket):
+ """Test for an existing nic detached"""
+ ifname = "eth0"
+ data_op_down = self._media_switch_data(ifname, RTM_DELLINK, OPER_DOWN)
+ m_read_netlink_socket.side_effect = [data_op_down]
+ ifread = wait_for_nic_detach_event(m_socket)
+ self.assertEqual(m_read_netlink_socket.call_count, 1)
+ self.assertEqual(ifname, ifread)
+
+
+@mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+@mock.patch("cloudinit.sources.helpers.netlink.read_netlink_socket")
+class TestWaitForMediaDisconnectConnect(CiTestCase):
+ with_logs = True
+
+ def _media_switch_data(self, ifname, msg_type, operstate):
+ """construct netlink data with specified fields"""
+ if ifname and operstate is not None:
+ data = bytearray(48)
+ bytes = ifname.encode("utf-8")
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
+ elif ifname:
+ data = bytearray(40)
+ bytes = ifname.encode("utf-8")
+ struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes)
+ elif operstate:
+ data = bytearray(40)
+ struct.pack_into(
+ "HHc",
+ data,
+ RTATTR_START_OFFSET,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
+ struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0)
+ return data
+
+ def test_media_down_up_scenario(self, m_read_netlink_socket, m_socket):
+        """Test the media down -> up sequence for the expected interface."""
+ ifname = "eth0"
+ # construct data for Oper State down
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ # construct data for Oper State up
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [data_op_down, data_op_up]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 2)
+
+ def test_wait_for_media_switch_diff_interface(
+ self, m_read_netlink_socket, m_socket
+ ):
+        """wait_for_media_disconnect_connect ignores unexpected interfaces.
+
+        The first two messages are for other interfaces and the last two
+        are for the expected interface, so the function exits only after
+        receiving the last two messages; the call count for
+        m_read_netlink_socket therefore has to be 4.
+        """
+ other_ifname = "eth1"
+ expected_ifname = "eth0"
+ data_op_down_eth1 = self._media_switch_data(
+ other_ifname, RTM_NEWLINK, OPER_DOWN
+ )
+ data_op_up_eth1 = self._media_switch_data(
+ other_ifname, RTM_NEWLINK, OPER_UP
+ )
+ data_op_down_eth0 = self._media_switch_data(
+ expected_ifname, RTM_NEWLINK, OPER_DOWN
+ )
+ data_op_up_eth0 = self._media_switch_data(
+ expected_ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_down_eth1,
+ data_op_up_eth1,
+ data_op_down_eth0,
+ data_op_up_eth0,
+ ]
+ wait_for_media_disconnect_connect(m_socket, expected_ifname)
+ self.assertIn(
+ "Ignored netlink event on interface %s" % other_ifname,
+ self.logs.getvalue(),
+ )
+ self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+ def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket):
+        """wait_for_media_disconnect_connect ignores GETLINK events.
+
+        The first two messages are oper down and up events of type
+        RTM_GETLINK, which the netlink module reads and discards. The last
+        two are RTM_NEWLINK messages with oper state down and up, so
+        m_read_netlink_socket is called 4 times in total.
+        """
+ ifname = "eth0"
+ data_getlink_down = self._media_switch_data(
+ ifname, RTM_GETLINK, OPER_DOWN
+ )
+ data_getlink_up = self._media_switch_data(ifname, RTM_GETLINK, OPER_UP)
+ data_newlink_down = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
+ data_newlink_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_getlink_down,
+ data_getlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+ def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket):
+        """wait_for_media_disconnect_connect ignores SETLINK events.
+
+        The first two messages are oper down and up events of type
+        RTM_SETLINK, which are read and discarded. The third and fourth
+        messages are RTM_NEWLINK with oper state down and up; the function
+        exits after the fourth message, having seen the down -> up
+        transition, so m_read_netlink_socket is called 4 times and the
+        trailing two RTM_NEWLINK messages are never read.
+        """
+ ifname = "eth0"
+ data_setlink_down = self._media_switch_data(
+ ifname, RTM_SETLINK, OPER_DOWN
+ )
+ data_setlink_up = self._media_switch_data(ifname, RTM_SETLINK, OPER_UP)
+ data_newlink_down = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
+ data_newlink_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_setlink_down,
+ data_setlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+ def test_netlink_invalid_switch_scenario(
+ self, m_read_netlink_socket, m_socket
+ ):
+        """Return only when an UP event is received after a DOWN event."""
+ ifname = "eth0"
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
+ data_op_notpresent = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_NOTPRESENT
+ )
+ data_op_lowerdown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN
+ )
+ data_op_testing = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_TESTING
+ )
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_up,
+ data_op_up,
+ data_op_dormant,
+ data_op_up,
+ data_op_notpresent,
+ data_op_up,
+ data_op_lowerdown,
+ data_op_up,
+ data_op_testing,
+ data_op_up,
+ data_op_unknown,
+ data_op_up,
+ data_op_down,
+ data_op_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 14)
+
+ def test_netlink_valid_inbetween_transitions(
+ self, m_read_netlink_socket, m_socket
+ ):
+ """wait_for_media_disconnect_connect handles in between transitions"""
+ ifname = "eth0"
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_down,
+ data_op_dormant,
+ data_op_unknown,
+ data_op_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+ def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket):
+        """wait_for_media_disconnect_connect should handle invalid operstates.
+
+        The function should not fail and should still return after receiving
+        invalid operstates; it should always wait for a down -> up sequence.
+        """
+ ifname = "eth0"
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7)
+ m_read_netlink_socket.side_effect = [
+ data_op_invalid,
+ data_op_up,
+ data_op_down,
+ data_op_invalid,
+ data_op_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 5)
+
+ def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket):
+        """wait_for_media_disconnect_connect handles a None netlink socket."""
+ socket = None
+ ifname = "eth0"
+ with self.assertRaises(AssertionError) as context:
+ wait_for_media_disconnect_connect(socket, ifname)
+ self.assertTrue("netlink socket is none" in str(context.exception))
+
+ def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket):
+        """wait_for_media_disconnect_connect handles None/empty ifnames."""
+ ifname = None
+ with self.assertRaises(AssertionError) as context:
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertTrue("interface name is none" in str(context.exception))
+ ifname = ""
+ with self.assertRaises(AssertionError) as context:
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertTrue(
+ "interface name cannot be empty" in str(context.exception)
+ )
+
+ def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket):
+ """wait_for_media_disconnect_connect handles invalid rta data"""
+ ifname = "eth0"
+ data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN)
+ data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None)
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_invalid1,
+ data_invalid2,
+ data_op_down,
+ data_op_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+ def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket):
+        """Read multiple messages in a single receive call."""
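+        # Two complete 48-byte netlink messages are packed back to back in
+        # one 96-byte buffer; each starts with a 16-byte header whose
+        # length field is 48, so a single recv yields both events.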
+ ifname = "eth0"
+ bytes = ifname.encode("utf-8")
+ data = bytearray(96)
+ struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
+ )
+ struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0)
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ 48 + RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_UP),
+ )
+ m_read_netlink_socket.return_value = data
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 1)
+
+ def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket):
+        """Read messages split across multiple receive calls."""
+ ifname = "eth0"
+ bytes = ifname.encode("utf-8")
+ data1 = bytearray(112)
+ data2 = bytearray(32)
+ struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
+ struct.pack_into(
+ "HH4sHHc",
+ data1,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
+ )
+ struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
+ struct.pack_into(
+ "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
+ struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0)
+ struct.pack_into(
+ "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
+ m_read_netlink_socket.side_effect = [data1, data2]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 2)
diff --git a/tests/unittests/sources/helpers/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py
new file mode 100644
index 00000000..eb87b1ce
--- /dev/null
+++ b/tests/unittests/sources/helpers/test_openstack.py
@@ -0,0 +1,62 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
+from cloudinit.sources.helpers import openstack
+from tests.unittests import helpers as test_helpers
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestConvertNetJson(test_helpers.CiTestCase):
+ def test_phy_types(self):
+ """Verify the different known physical types are handled."""
+ # network_data.json example from
+ # https://docs.openstack.org/nova/latest/user/metadata.html
+ mac0 = "fa:16:3e:9c:bf:3d"
+ net_json = {
+ "links": [
+ {
+ "ethernet_mac_address": mac0,
+ "id": "tapcd9f6d46-4a",
+ "mtu": None,
+ "type": "bridge",
+ "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc",
+ }
+ ],
+ "networks": [
+ {
+ "id": "network0",
+ "link": "tapcd9f6d46-4a",
+ "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
+ "type": "ipv4_dhcp",
+ }
+ ],
+ "services": [{"address": "8.8.8.8", "type": "dns"}],
+ }
+ macs = {mac0: "eth0"}
+
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "fa:16:3e:9c:bf:3d",
+ "mtu": None,
+ "name": "eth0",
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ },
+ {"address": "8.8.8.8", "type": "nameserver"},
+ ],
+ }
+
+ for t in openstack.KNOWN_PHYSICAL_TYPES:
+ net_json["links"][0]["type"] = t
+ self.assertEqual(
+ expected,
+ openstack.convert_net_json(
+ network_json=net_json, known_macs=macs
+ ),
+ )
diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py
new file mode 100644
index 00000000..8a61d5ee
--- /dev/null
+++ b/tests/unittests/sources/test_aliyun.py
@@ -0,0 +1,287 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import functools
+import os
+from unittest import mock
+
+import httpretty
+
+from cloudinit import helpers
+from cloudinit.sources import DataSourceAliYun as ay
+from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config
+from tests.unittests import helpers as test_helpers
+
+DEFAULT_METADATA = {
+ "instance-id": "aliyun-test-vm-00",
+ "eipv4": "10.0.0.1",
+ "hostname": "test-hostname",
+ "image-id": "m-test",
+ "launch-index": "0",
+ "mac": "00:16:3e:00:00:00",
+ "network-type": "vpc",
+ "private-ipv4": "192.168.0.1",
+ "serial-number": "test-string",
+ "vpc-cidr-block": "192.168.0.0/16",
+ "vpc-id": "test-vpc",
+ "vswitch-id": "test-vpc",
+ "vswitch-cidr-block": "192.168.0.0/16",
+ "zone-id": "test-zone-1",
+ "ntp-conf": {
+ "ntp_servers": [
+ "ntp1.aliyun.com",
+ "ntp2.aliyun.com",
+ "ntp3.aliyun.com",
+ ]
+ },
+ "source-address": [
+ "http://mirrors.aliyun.com",
+ "http://mirrors.aliyuncs.com",
+ ],
+ "public-keys": {
+ "key-pair-1": {"openssh-key": "ssh-rsa AAAAB3..."},
+ "key-pair-2": {"openssh-key": "ssh-rsa AAAAB3..."},
+ },
+}
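+
+# DEFAULT_METADATA mirrors the tree the Aliyun metadata service exposes
+# under http://100.100.100.200/<api-version>/meta-data/ (the address also
+# asserted in the subplatform test below).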
+
+DEFAULT_USERDATA = """\
+#cloud-config
+
+hostname: localhost"""
+
+
+def register_mock_metaserver(base_url, data):
+ def register_helper(register, base_url, body):
+ if isinstance(body, str):
+ register(base_url, body)
+ elif isinstance(body, list):
+ register(base_url.rstrip("/"), "\n".join(body) + "\n")
+ elif isinstance(body, dict):
+ if not body:
+                register(
+                    base_url.rstrip("/") + "/", "not found", status=404
+                )
+ vals = []
+ for k, v in body.items():
+ if isinstance(v, (str, list)):
+ suffix = k.rstrip("/")
+ else:
+ suffix = k.rstrip("/") + "/"
+ vals.append(suffix)
+ url = base_url.rstrip("/") + "/" + suffix
+ register_helper(register, url, v)
+ register(base_url, "\n".join(vals) + "\n")
+
+ register = functools.partial(httpretty.register_uri, httpretty.GET)
+ register_helper(register, base_url, data)
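+
+
+# Example of the layout register_mock_metaserver produces (illustrative):
+# for data {"a": "1", "b": {"c": "2"}} it registers GET bodies at
+# <base>/a and <base>/b/c, plus index listings at <base> and <base>/b/.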
+
+
+class TestAliYunDatasource(test_helpers.HttprettyTestCase):
+ def setUp(self):
+ super(TestAliYunDatasource, self).setUp()
+ cfg = {"datasource": {"AliYun": {"timeout": "1", "max_wait": "1"}}}
+ distro = {}
+ paths = helpers.Paths({"run_dir": self.tmp_dir()})
+ self.ds = ay.DataSourceAliYun(cfg, distro, paths)
+ self.metadata_address = self.ds.metadata_urls[0]
+
+ @property
+ def default_metadata(self):
+ return DEFAULT_METADATA
+
+ @property
+ def default_userdata(self):
+ return DEFAULT_USERDATA
+
+ @property
+ def metadata_url(self):
+ return (
+ os.path.join(
+ self.metadata_address,
+ self.ds.min_metadata_version,
+ "meta-data",
+ )
+ + "/"
+ )
+
+ @property
+ def userdata_url(self):
+ return os.path.join(
+ self.metadata_address, self.ds.min_metadata_version, "user-data"
+ )
+
+ # EC2 provides an instance-identity document which must return 404 here
+ # for this test to pass.
+ @property
+ def default_identity(self):
+ return {}
+
+ @property
+ def identity_url(self):
+ return os.path.join(
+ self.metadata_address,
+ self.ds.min_metadata_version,
+ "dynamic",
+ "instance-identity",
+ )
+
+    def register_default_server(self):
+ register_mock_metaserver(self.metadata_url, self.default_metadata)
+ register_mock_metaserver(self.userdata_url, self.default_userdata)
+ register_mock_metaserver(self.identity_url, self.default_identity)
+
+ def _test_get_data(self):
+ self.assertEqual(self.ds.metadata, self.default_metadata)
+ self.assertEqual(
+ self.ds.userdata_raw, self.default_userdata.encode("utf8")
+ )
+
+ def _test_get_sshkey(self):
+ pub_keys = [
+ v["openssh-key"]
+ for (_, v) in self.default_metadata["public-keys"].items()
+ ]
+ self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys)
+
+ def _test_get_iid(self):
+ self.assertEqual(
+ self.default_metadata["instance-id"], self.ds.get_instance_id()
+ )
+
+ def _test_host_name(self):
+ self.assertEqual(
+ self.default_metadata["hostname"], self.ds.get_hostname()
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
+ def test_with_mock_server(self, m_is_aliyun):
+ m_is_aliyun.return_value = True
+        self.register_default_server()
+ ret = self.ds.get_data()
+ self.assertEqual(True, ret)
+ self.assertEqual(1, m_is_aliyun.call_count)
+ self._test_get_data()
+ self._test_get_sshkey()
+ self._test_get_iid()
+ self._test_host_name()
+ self.assertEqual("aliyun", self.ds.cloud_name)
+ self.assertEqual("ec2", self.ds.platform)
+ self.assertEqual(
+ "metadata (http://100.100.100.200)", self.ds.subplatform
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
+ def test_returns_false_when_not_on_aliyun(self, m_is_aliyun):
+ """If is_aliyun returns false, then get_data should return False."""
+ m_is_aliyun.return_value = False
+        self.register_default_server()
+ ret = self.ds.get_data()
+ self.assertEqual(1, m_is_aliyun.call_count)
+ self.assertEqual(False, ret)
+
+ def test_parse_public_keys(self):
+ public_keys = {}
+ self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+ public_keys = {"key-pair-0": "ssh-key-0"}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys), [public_keys["key-pair-0"]]
+ )
+
+ public_keys = {"key-pair-0": "ssh-key-0", "key-pair-1": "ssh-key-1"}
+ self.assertEqual(
+ set(ay.parse_public_keys(public_keys)),
+ set([public_keys["key-pair-0"], public_keys["key-pair-1"]]),
+ )
+
+ public_keys = {"key-pair-0": ["ssh-key-0", "ssh-key-1"]}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys), public_keys["key-pair-0"]
+ )
+
+ public_keys = {"key-pair-0": {"openssh-key": []}}
+ self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+ public_keys = {"key-pair-0": {"openssh-key": "ssh-key-0"}}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys),
+ [public_keys["key-pair-0"]["openssh-key"]],
+ )
+
+ public_keys = {
+ "key-pair-0": {"openssh-key": ["ssh-key-0", "ssh-key-1"]}
+ }
+ self.assertEqual(
+ ay.parse_public_keys(public_keys),
+ public_keys["key-pair-0"]["openssh-key"],
+ )
+
+ def test_route_metric_calculated_without_device_number(self):
+ """Test that route-metric code works without `device-number`
+
+ `device-number` is part of EC2 metadata, but not supported on aliyun.
+ Attempting to access it will raise a KeyError.
+
+ LP: #1917875
+ """
+ netcfg = convert_ec2_metadata_network_config(
+ {
+ "interfaces": {
+ "macs": {
+ "06:17:04:d7:26:09": {
+ "interface-id": "eni-e44ef49e",
+ },
+ "06:17:04:d7:26:08": {
+ "interface-id": "eni-e44ef49f",
+ },
+ }
+ }
+ },
+ macs_to_nics={
+ "06:17:04:d7:26:09": "eth0",
+ "06:17:04:d7:26:08": "eth1",
+ },
+ )
+
+ met0 = netcfg["ethernets"]["eth0"]["dhcp4-overrides"]["route-metric"]
+ met1 = netcfg["ethernets"]["eth1"]["dhcp4-overrides"]["route-metric"]
+
+ # route-metric numbers should be 100 apart
+ assert 100 == abs(met0 - met1)
+
+
+class TestIsAliYun(test_helpers.CiTestCase):
+ ALIYUN_PRODUCT = "Alibaba Cloud ECS"
+ read_dmi_data_expected = [mock.call("system-product-name")]
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
+ def test_true_on_aliyun_product(self, m_read_dmi_data):
+ """Should return true if the dmi product data has expected value."""
+ m_read_dmi_data.return_value = self.ALIYUN_PRODUCT
+ ret = ay._is_aliyun()
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
+ self.assertEqual(True, ret)
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
+ def test_false_on_empty_string(self, m_read_dmi_data):
+        """Should return false when an empty value is returned."""
+ m_read_dmi_data.return_value = ""
+ ret = ay._is_aliyun()
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
+ self.assertEqual(False, ret)
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
+ def test_false_on_unknown_string(self, m_read_dmi_data):
+ """Should return false on an unrelated string."""
+ m_read_dmi_data.return_value = "cubs win"
+ ret = ay._is_aliyun()
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
+ self.assertEqual(False, ret)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_altcloud.py b/tests/unittests/sources/test_altcloud.py
new file mode 100644
index 00000000..44dfafd9
--- /dev/null
+++ b/tests/unittests/sources/test_altcloud.py
@@ -0,0 +1,459 @@
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joe VLcek <JVLcek@RedHat.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""
+This test file exercises the code in cloudinit/sources/DataSourceAltCloud.py
+"""
+
+import os
+import shutil
+import tempfile
+
+import cloudinit.sources.DataSourceAltCloud as dsac
+from cloudinit import dmi, helpers, subp, util
+from tests.unittests.helpers import CiTestCase, mock
+
+OS_UNAME_ORIG = getattr(os, "uname")
+
+
+def _write_user_data_files(mount_dir, value):
+ """
+    Populate deltacloud_user_data_file and user_data_file with the given
+    user data.
+ """
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
+
+    with open(deltacloud_user_data_file, "w") as udfile:
+        udfile.write(value)
+    os.chmod(deltacloud_user_data_file, 0o664)
+
+    with open(user_data_file, "w") as udfile:
+        udfile.write(value)
+    os.chmod(user_data_file, 0o664)
+
+
+def _remove_user_data_files(mount_dir, dc_file=True, non_dc_file=True):
+ """
+ Remove the test files: deltacloud_user_data_file and
+ user_data_file
+ """
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
+
+    # Ignore any failures removing files that are already gone.
+ if dc_file:
+ try:
+ os.remove(deltacloud_user_data_file)
+ except OSError:
+ pass
+
+ if non_dc_file:
+ try:
+ os.remove(user_data_file)
+ except OSError:
+ pass
+
+
+def _dmi_data(expected):
+ """
+ Spoof the data received over DMI
+ """
+
+ def _data(key):
+ return expected
+
+ return _data
+
+
+class TestGetCloudType(CiTestCase):
+ """Test to exercise method: DataSourceAltCloud.get_cloud_type()"""
+
+ with_logs = True
+
+ def setUp(self):
+ """Set up."""
+ super(TestGetCloudType, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ self.dmi_data = dmi.read_dmi_data
+ # We have a different code path for arm to deal with LP1243287
+ # We have to switch arch to x86_64 to avoid test failure
+ force_arch("x86_64")
+
+ def tearDown(self):
+ # Reset
+ dmi.read_dmi_data = self.dmi_data
+ force_arch()
+
+ def test_cloud_info_file_ioerror(self):
+ """Return UNKNOWN when /etc/sysconfig/cloud-info exists but errors."""
+ self.assertEqual("/etc/sysconfig/cloud-info", dsac.CLOUD_INFO_FILE)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ # Attempting to read the directory generates IOError
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.tmp):
+ self.assertEqual("UNKNOWN", dsrc.get_cloud_type())
+ self.assertIn(
+ "[Errno 21] Is a directory: '%s'" % self.tmp, self.logs.getvalue()
+ )
+
+ def test_cloud_info_file(self):
+ """Return uppercase stripped content from /etc/sysconfig/cloud-info."""
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ cloud_info = self.tmp_path("cloud-info", dir=self.tmp)
+ util.write_file(cloud_info, " OverRiDdeN CloudType ")
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", cloud_info):
+ self.assertEqual("OVERRIDDEN CLOUDTYPE", dsrc.get_cloud_type())
+
+ def test_rhev(self):
+ """
+        Test method get_cloud_type() for RHEVm systems.
+        Forcing read_dmi_data to return 'RHEV'.
+ """
+ dmi.read_dmi_data = _dmi_data("RHEV")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual("RHEV", dsrc.get_cloud_type())
+
+ def test_vsphere(self):
+ """
+        Test method get_cloud_type() for vSphere systems.
+        Forcing read_dmi_data to return 'VMware Virtual Platform'.
+ """
+ dmi.read_dmi_data = _dmi_data("VMware Virtual Platform")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual("VSPHERE", dsrc.get_cloud_type())
+
+ def test_unknown(self):
+ """
+ Test method get_cloud_type() for unknown systems.
+ Forcing read_dmi_data return to match an unrecognized return.
+ """
+ dmi.read_dmi_data = _dmi_data("Unrecognized Platform")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual("UNKNOWN", dsrc.get_cloud_type())
+
+
+class TestGetDataCloudInfoFile(CiTestCase):
+ """
+ Test to exercise method: DataSourceAltCloud.get_data()
+ With a contrived CLOUD_INFO_FILE
+ """
+
+ def setUp(self):
+ """Set up."""
+ self.tmp = self.tmp_dir()
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+ self.cloud_info_file = self.tmp_path("cloud-info", dir=self.tmp)
+
+ def test_rhev(self):
+        """Successful get_data() run, forcing RHEV."""
+
+ util.write_file(self.cloud_info_file, "RHEV")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_rhevm = lambda: True
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
+ self.assertEqual(True, dsrc.get_data())
+ self.assertEqual("altcloud", dsrc.cloud_name)
+ self.assertEqual("altcloud", dsrc.platform_type)
+ self.assertEqual("rhev (/dev/fd0)", dsrc.subplatform)
+
+ def test_vsphere(self):
+        """Successful get_data() run, forcing VSPHERE."""
+
+ util.write_file(self.cloud_info_file, "VSPHERE")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_vsphere = lambda: True
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
+ self.assertEqual(True, dsrc.get_data())
+ self.assertEqual("altcloud", dsrc.cloud_name)
+ self.assertEqual("altcloud", dsrc.platform_type)
+ self.assertEqual("vsphere (unknown)", dsrc.subplatform)
+
+ def test_fail_rhev(self):
+        """Failing get_data() run, forcing RHEV."""
+
+ util.write_file(self.cloud_info_file, "RHEV")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_rhevm = lambda: False
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
+ self.assertEqual(False, dsrc.get_data())
+
+ def test_fail_vsphere(self):
+        """Failing get_data() run, forcing VSPHERE."""
+
+ util.write_file(self.cloud_info_file, "VSPHERE")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_vsphere = lambda: False
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
+ self.assertEqual(False, dsrc.get_data())
+
+ def test_unrecognized(self):
+        """Failing get_data() run, forcing an unrecognized cloud type."""
+
+ util.write_file(self.cloud_info_file, "unrecognized")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
+ self.assertEqual(False, dsrc.get_data())
+
+
+class TestGetDataNoCloudInfoFile(CiTestCase):
+ """
+ Test to exercise method: DataSourceAltCloud.get_data()
+ Without a CLOUD_INFO_FILE
+ """
+
+ def setUp(self):
+ """Set up."""
+ self.tmp = self.tmp_dir()
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+ self.dmi_data = dmi.read_dmi_data
+ dsac.CLOUD_INFO_FILE = "no such file"
+ # We have a different code path for arm to deal with LP1243287
+ # We have to switch arch to x86_64 to avoid test failure
+ force_arch("x86_64")
+
+ def tearDown(self):
+ # Reset
+ dsac.CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
+ dmi.read_dmi_data = self.dmi_data
+ # Return back to original arch
+ force_arch()
+
+ def test_rhev_no_cloud_file(self):
+        """get_data() forcing RHEV with no cloud info file."""
+
+ dmi.read_dmi_data = _dmi_data("RHEV Hypervisor")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_rhevm = lambda: True
+ self.assertEqual(True, dsrc.get_data())
+
+ def test_vsphere_no_cloud_file(self):
+        """get_data() forcing VSPHERE with no cloud info file."""
+
+ dmi.read_dmi_data = _dmi_data("VMware Virtual Platform")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ dsrc.user_data_vsphere = lambda: True
+ self.assertEqual(True, dsrc.get_data())
+
+ def test_failure_no_cloud_file(self):
+        """get_data() fails for an unrecognized platform, no cloud info file."""
+
+ dmi.read_dmi_data = _dmi_data("Unrecognized Platform")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual(False, dsrc.get_data())
+
+
+class TestUserDataRhevm(CiTestCase):
+ """
+ Test to exercise method: DataSourceAltCloud.user_data_rhevm()
+ """
+
+ def setUp(self):
+ """Set up."""
+ self.paths = helpers.Paths({"cloud_dir": "/tmp"})
+ self.mount_dir = self.tmp_dir()
+ _write_user_data_files(self.mount_dir, "test user data")
+ self.add_patch(
+ "cloudinit.sources.DataSourceAltCloud.modprobe_floppy",
+ "m_modprobe_floppy",
+ return_value=None,
+ )
+ self.add_patch(
+ "cloudinit.sources.DataSourceAltCloud.util.udevadm_settle",
+ "m_udevadm_settle",
+ return_value=("", ""),
+ )
+ self.add_patch(
+ "cloudinit.sources.DataSourceAltCloud.util.mount_cb", "m_mount_cb"
+ )
+
+ def test_mount_cb_fails(self):
+ """Test user_data_rhevm() where mount_cb fails."""
+
+ self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual(False, dsrc.user_data_rhevm())
+
+ def test_modprobe_fails(self):
+ """Test user_data_rhevm() where modprobe fails."""
+
+ self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
+ "Failed modprobe"
+ )
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual(False, dsrc.user_data_rhevm())
+
+ def test_no_modprobe_cmd(self):
+ """Test user_data_rhevm() with no modprobe command."""
+
+ self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
+ "No such file or dir"
+ )
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual(False, dsrc.user_data_rhevm())
+
+ def test_udevadm_fails(self):
+ """Test user_data_rhevm() where udevadm fails."""
+
+ self.m_udevadm_settle.side_effect = subp.ProcessExecutionError(
+ "Failed settle."
+ )
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual(False, dsrc.user_data_rhevm())
+
+ def test_no_udevadm_cmd(self):
+ """Test user_data_rhevm() with no udevadm command."""
+
+ self.m_udevadm_settle.side_effect = OSError("No such file or dir")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual(False, dsrc.user_data_rhevm())
+
+
+class TestUserDataVsphere(CiTestCase):
+ """
+ Test to exercise method: DataSourceAltCloud.user_data_vsphere()
+ """
+
+ def setUp(self):
+ """Set up."""
+ self.tmp = self.tmp_dir()
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ self.mount_dir = tempfile.mkdtemp()
+
+ _write_user_data_files(self.mount_dir, "test user data")
+
+ def tearDown(self):
+ # Reset
+
+ _remove_user_data_files(self.mount_dir)
+
+ # Attempt to remove the temp dir ignoring errors
+ try:
+ shutil.rmtree(self.mount_dir)
+ except OSError:
+ pass
+
+ dsac.CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
+
+ @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
+ @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
+ def test_user_data_vsphere_no_cdrom(self, m_mount_cb, m_find_devs_with):
+        """Test user_data_vsphere() where no CDROM device is found."""
+
+        m_find_devs_with.return_value = []
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual(False, dsrc.user_data_vsphere())
+ self.assertEqual(0, m_mount_cb.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
+ @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
+ def test_user_data_vsphere_mcb_fail(self, m_mount_cb, m_find_devs_with):
+ """Test user_data_vsphere() where mount_cb fails."""
+
+ m_find_devs_with.return_value = ["/dev/mock/cdrom"]
+ m_mount_cb.side_effect = util.MountFailedError("Unable To mount")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual(False, dsrc.user_data_vsphere())
+ self.assertEqual(1, m_find_devs_with.call_count)
+ self.assertEqual(1, m_mount_cb.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
+ @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
+ def test_user_data_vsphere_success(self, m_mount_cb, m_find_devs_with):
+        """Test user_data_vsphere() when it succeeds."""
+ m_find_devs_with.return_value = ["/dev/mock/cdrom"]
+ m_mount_cb.return_value = "raw userdata from cdrom"
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ cloud_info = self.tmp_path("cloud-info", dir=self.tmp)
+ util.write_file(cloud_info, "VSPHERE")
+ self.assertEqual(True, dsrc.user_data_vsphere())
+ m_find_devs_with.assert_called_once_with("LABEL=CDROM")
+ m_mount_cb.assert_called_once_with(
+ "/dev/mock/cdrom", dsac.read_user_data_callback
+ )
+ with mock.patch.object(dsrc, "get_cloud_type", return_value="VSPHERE"):
+ self.assertEqual("vsphere (/dev/mock/cdrom)", dsrc.subplatform)
+
+
+class TestReadUserDataCallback(CiTestCase):
+ """
+ Test to exercise method: DataSourceAltCloud.read_user_data_callback()
+ """
+
+ def setUp(self):
+ """Set up."""
+ self.paths = helpers.Paths({"cloud_dir": "/tmp"})
+ self.mount_dir = tempfile.mkdtemp()
+
+ _write_user_data_files(self.mount_dir, "test user data")
+
+ def tearDown(self):
+ # Reset
+
+ _remove_user_data_files(self.mount_dir)
+
+ # Attempt to remove the temp dir ignoring errors
+ try:
+ shutil.rmtree(self.mount_dir)
+ except OSError:
+ pass
+
+ def test_callback_both(self):
+ """Test read_user_data_callback() with both files."""
+
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
+
+ def test_callback_dc(self):
+ """Test read_user_data_callback() with only DC file."""
+
+ _remove_user_data_files(
+ self.mount_dir, dc_file=False, non_dc_file=True
+ )
+
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
+
+ def test_callback_non_dc(self):
+ """Test read_user_data_callback() with only non-DC file."""
+
+ _remove_user_data_files(
+ self.mount_dir, dc_file=True, non_dc_file=False
+ )
+
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
+
+ def test_callback_none(self):
+        """Test read_user_data_callback() when no files are found."""
+
+ _remove_user_data_files(self.mount_dir)
+ self.assertIsNone(dsac.read_user_data_callback(self.mount_dir))
+
+
+def force_arch(arch=None):
+ def _os_uname():
+ return ("LINUX", "NODENAME", "RELEASE", "VERSION", arch)
+
+ if arch:
+ setattr(os, "uname", _os_uname)
+ elif arch is None:
+ setattr(os, "uname", OS_UNAME_ORIG)
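+
+
+# force_arch("x86_64") monkeypatches os.uname for the duration of a test;
+# calling force_arch() with no argument restores the original os.uname
+# saved in OS_UNAME_ORIG.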
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
new file mode 100644
index 00000000..5f956a63
--- /dev/null
+++ b/tests/unittests/sources/test_azure.py
@@ -0,0 +1,4306 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import crypt
+import json
+import os
+import stat
+import xml.etree.ElementTree as ET
+
+import httpretty
+import pytest
+import requests
+import yaml
+
+from cloudinit import distros, helpers, url_helper
+from cloudinit.sources import UNSET
+from cloudinit.sources import DataSourceAzure as dsaz
+from cloudinit.sources import InvalidMetaDataException
+from cloudinit.sources.helpers import netlink
+from cloudinit.util import (
+ MountFailedError,
+ b64e,
+ decode_binary,
+ json_dumps,
+ load_file,
+ load_json,
+ write_file,
+)
+from cloudinit.version import version_string as vs
+from tests.unittests.helpers import (
+ CiTestCase,
+ ExitStack,
+ HttprettyTestCase,
+ mock,
+ populate_dir,
+ resourceLocation,
+ wrap_and_call,
+)
+
+MOCKPATH = "cloudinit.sources.DataSourceAzure."
+
+
+@pytest.fixture
+def azure_ds(paths):
+ """Provide DataSourceAzure instance with mocks for minimal test case."""
+ with mock.patch(MOCKPATH + "_is_platform_viable", return_value=True):
+ yield dsaz.DataSourceAzure(sys_cfg={}, distro=mock.Mock(), paths=paths)
+
+
+@pytest.fixture
+def mock_azure_helper_readurl():
+ with mock.patch(
+ "cloudinit.sources.helpers.azure.url_helper.readurl", autospec=True
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_azure_get_metadata_from_fabric():
+ with mock.patch(
+ MOCKPATH + "get_metadata_from_fabric",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_azure_report_failure_to_fabric():
+ with mock.patch(
+ MOCKPATH + "report_failure_to_fabric",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_dmi_read_dmi_data():
+ def fake_read(key: str) -> str:
+ if key == "system-uuid":
+ return "fake-system-uuid"
+ raise RuntimeError()
+
+ with mock.patch(
+ MOCKPATH + "dmi.read_dmi_data",
+ side_effect=fake_read,
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_net_dhcp_maybe_perform_dhcp_discovery():
+ with mock.patch(
+ "cloudinit.net.dhcp.maybe_perform_dhcp_discovery",
+ return_value=[
+ {
+ "unknown-245": "aa:bb:cc:dd",
+ "interface": "ethBoot0",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ }
+ ],
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_net_dhcp_EphemeralIPv4Network():
+ with mock.patch(
+ "cloudinit.net.dhcp.EphemeralIPv4Network",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_get_interfaces():
+ with mock.patch(MOCKPATH + "net.get_interfaces", return_value=[]) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_get_interface_mac():
+ with mock.patch(
+ MOCKPATH + "net.get_interface_mac",
+ return_value="001122334455",
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_netlink():
+ with mock.patch(
+ MOCKPATH + "netlink",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_os_path_isfile():
+ with mock.patch(MOCKPATH + "os.path.isfile", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_readurl():
+ with mock.patch(MOCKPATH + "readurl", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_subp_subp():
+ with mock.patch(MOCKPATH + "subp.subp", side_effect=[]) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_ensure_dir():
+ with mock.patch(
+ MOCKPATH + "util.ensure_dir",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_find_devs_with():
+ with mock.patch(MOCKPATH + "util.find_devs_with", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_load_file():
+ with mock.patch(
+ MOCKPATH + "util.load_file",
+ autospec=True,
+ return_value=b"",
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_mount_cb():
+ with mock.patch(
+ MOCKPATH + "util.mount_cb",
+ autospec=True,
+ return_value=({}, "", {}, {}),
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_write_file():
+ with mock.patch(
+ MOCKPATH + "util.write_file",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+def construct_valid_ovf_env(
+ data=None, pubkeys=None, userdata=None, platform_settings=None
+):
+ if data is None:
+ data = {"HostName": "FOOHOST"}
+ if pubkeys is None:
+ pubkeys = {}
+
+ content = """<?xml version="1.0" encoding="utf-8"?>
+<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:wa="http://schemas.microsoft.com/windowsazure"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+ <wa:ProvisioningSection><wa:Version>1.0</wa:Version>
+ <LinuxProvisioningConfigurationSet
+ xmlns="http://schemas.microsoft.com/windowsazure"
+ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+ <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
+ """
+ for key, dval in data.items():
+ if isinstance(dval, dict):
+ val = dict(dval).get("text")
+ attrs = " " + " ".join(
+ [
+ "%s='%s'" % (k, v)
+ for k, v in dict(dval).items()
+ if k != "text"
+ ]
+ )
+ else:
+ val = dval
+ attrs = ""
+ content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
+
+ if userdata:
+ content += "<UserData>%s</UserData>\n" % (b64e(userdata))
+
+ if pubkeys:
+ content += "<SSH><PublicKeys>\n"
+ for fp, path, value in pubkeys:
+ content += " <PublicKey>"
+ if fp and path:
+ content += "<Fingerprint>%s</Fingerprint><Path>%s</Path>" % (
+ fp,
+ path,
+ )
+ if value:
+ content += "<Value>%s</Value>" % value
+ content += "</PublicKey>\n"
+ content += "</PublicKeys></SSH>"
+ content += """
+ </LinuxProvisioningConfigurationSet>
+ </wa:ProvisioningSection>
+ <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version>
+ <PlatformSettings xmlns="http://schemas.microsoft.com/windowsazure"
+ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+ <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
+ <ProvisionGuestAgent>false</ProvisionGuestAgent>
+ <GuestAgentPackageName i:nil="true" />"""
+ if platform_settings:
+ for k, v in platform_settings.items():
+ content += "<%s>%s</%s>\n" % (k, v, k)
+ if "PreprovisionedVMType" not in platform_settings:
+ content += """<PreprovisionedVMType i:nil="true" />"""
+ content += """</PlatformSettings></wa:PlatformSettingsSection>
+</Environment>"""
+
+ return content
+
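+# For example, construct_valid_ovf_env() with no arguments returns an
+# <Environment> document whose LinuxProvisioningConfigurationSet contains
+# <HostName>FOOHOST</HostName>; passing userdata=... embeds it
+# base64-encoded in a <UserData> element.
+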
+
+NETWORK_METADATA = {
+ "compute": {
+ "location": "eastus2",
+ "name": "my-hostname",
+ "offer": "UbuntuServer",
+ "osType": "Linux",
+ "placementGroupId": "",
+ "platformFaultDomain": "0",
+ "platformUpdateDomain": "0",
+ "publisher": "Canonical",
+ "resourceGroupName": "srugroup1",
+ "sku": "19.04-DAILY",
+ "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777",
+ "tags": "",
+ "version": "19.04.201906190",
+ "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642",
+ "vmScaleSetName": "",
+ "vmSize": "Standard_DS1_v2",
+ "zone": "",
+ "publicKeys": [{"keyData": "ssh-rsa key1", "path": "path1"}],
+ },
+ "network": {
+ "interface": [
+ {
+ "macAddress": "000D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.0.0"}],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81",
+ }
+ ],
+ },
+ }
+ ]
+ },
+}
+
+SECONDARY_INTERFACE = {
+ "macAddress": "220D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.1.0"}],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.1.5",
+ }
+ ],
+ },
+}
+
+SECONDARY_INTERFACE_NO_IP = {
+ "macAddress": "220D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.1.0"}],
+ "ipAddress": [],
+ },
+}
+
+IMDS_NETWORK_METADATA = {
+ "interface": [
+ {
+ "macAddress": "000D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.0.0"}],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81",
+ }
+ ],
+ },
+ }
+ ]
+}
+
+EXAMPLE_UUID = "d0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8"
+
+
+class TestParseNetworkConfig(CiTestCase):
+
+ maxDiff = None
+ fallback_config = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_single_ipv4_nic_configuration(self, m_driver):
+ """parse_network_config emits dhcp on single nic with ipv4"""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_increases_route_metric_for_non_primary_nics(self, m_driver):
+ """parse_network_config increases route-metric for each nic"""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
+ third_intf = copy.deepcopy(SECONDARY_INTERFACE)
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ imds_data["network"]["interface"].append(third_intf)
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver):
+ """parse_network_config emits matching ipv4 and ipv6 route-metrics."""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24", "2001:dead:beef::2/128"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp4": True,
+ "dhcp6": False,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
+
+ nic1["ipv6"] = {
+ "subnet": [{"address": "2001:dead:beef::16"}],
+ "ipAddress": [
+ {"privateIpAddress": "2001:dead:beef::1"},
+ {"privateIpAddress": "2001:dead:beef::2"},
+ ],
+ }
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
+ third_intf = copy.deepcopy(SECONDARY_INTERFACE)
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ third_intf["ipv6"] = {
+ "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}],
+ "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}],
+ }
+ imds_data["network"]["interface"].append(third_intf)
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver):
+ """parse_network_config emits primary ipv4 as dhcp others are static"""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
+
+ nic1["ipv6"] = {
+ "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
+ "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}],
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver):
+ """parse_network_config emits primary ipv6 as dhcp others are static"""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24", "2001:dead:beef::2/10"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
+
+        # Secondary ipv6 addresses are emitted as static CIDR addresses
+ nic1["ipv6"] = {
+ "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
+ "ipAddress": [
+ {"privateIpAddress": "2001:dead:beef::1"},
+ {"privateIpAddress": "2001:dead:beef::2"},
+ ],
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver",
+ return_value="hv_netvsc",
+ )
+ def test_match_driver_for_netvsc(self, m_driver):
+ """parse_network_config emits driver when using netvsc."""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {
+ "macaddress": "00:0d:3a:04:75:98",
+ "driver": "hv_netvsc",
+ },
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata(
+ self, m_fallback_config, m_driver
+ ):
+ """parse_network_config generates fallback network config when the
+ IMDS instance metadata is corrupted/invalid, such as when
+ network metadata is not present.
+ """
+ imds_metadata_missing_network_metadata = copy.deepcopy(
+ NETWORK_METADATA
+ )
+ del imds_metadata_missing_network_metadata["network"]
+ m_fallback_config.return_value = self.fallback_config
+ self.assertEqual(
+ self.fallback_config,
+ dsaz.parse_network_config(imds_metadata_missing_network_metadata),
+ )
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata(
+ self, m_fallback_config, m_driver
+ ):
+ """parse_network_config generates fallback network config when the
+ IMDS instance metadata is corrupted/invalid, such as when
+ network interface metadata is not present.
+ """
+ imds_metadata_missing_interface_metadata = copy.deepcopy(
+ NETWORK_METADATA
+ )
+ del imds_metadata_missing_interface_metadata["network"]["interface"]
+ m_fallback_config.return_value = self.fallback_config
+ self.assertEqual(
+ self.fallback_config,
+ dsaz.parse_network_config(
+ imds_metadata_missing_interface_metadata
+ ),
+ )
+
+
+class TestGetMetadataFromIMDS(HttprettyTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestGetMetadataFromIMDS, self).setUp()
+        self.instance_md_url = "{}/instance?api-version=2019-06-01".format(
+ dsaz.IMDS_URL
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_metadata_uses_instance_url(self, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(retries=3, md_type=dsaz.MetadataType.ALL)
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version=2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_network_metadata_uses_network_url(self, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ network metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(
+ retries=3, md_type=dsaz.MetadataType.NETWORK
+ )
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance/network?api-version="
+ "2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ def test_get_default_metadata_uses_instance_url(self, m_dhcp, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(retries=3)
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version=2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_metadata_uses_extended_url(self, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(
+ retries=3,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2021-08-01",
+ )
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version="
+ "2021-08-01&extended=true",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_metadata_performs_dhcp_when_network_is_down(self, m_readurl):
+ """Perform DHCP setup when nic is not up."""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(NETWORK_METADATA).encode("utf-8")
+ )
+
+ self.assertEqual(
+ NETWORK_METADATA, dsaz.get_metadata_from_imds(retries=2)
+ )
+
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue(),
+ )
+
+ m_readurl.assert_called_with(
+            self.instance_md_url,
+ exception_cb=mock.ANY,
+ headers={"Metadata": "true"},
+ retries=2,
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ infinite=False,
+ )
+
+ @mock.patch("cloudinit.url_helper.time.sleep")
+ def test_get_metadata_from_imds_empty_when_no_imds_present(self, m_sleep):
+ """Return empty dict when IMDS network metadata is absent."""
+ httpretty.register_uri(
+ httpretty.GET,
+ dsaz.IMDS_URL + "/instance?api-version=2017-12-01",
+ body={},
+ status=404,
+ )
+
+ self.assertEqual({}, dsaz.get_metadata_from_imds(retries=2))
+
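+        # retries=2 means readurl sleeps twice (1s each) between attempts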
+ self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list)
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("requests.Session.request")
+ @mock.patch("cloudinit.url_helper.time.sleep")
+ def test_get_metadata_from_imds_retries_on_timeout(
+ self, m_sleep, m_request
+ ):
+ """Retry IMDS network metadata on timeout errors."""
+
+ self.attempt = 0
+ m_request.side_effect = requests.Timeout("Fake Connection Timeout")
+
+ def retry_callback(request, uri, headers):
+ self.attempt += 1
+ raise requests.Timeout("Fake connection timeout")
+
+ httpretty.register_uri(
+ httpretty.GET,
+ dsaz.IMDS_URL + "instance?api-version=2017-12-01",
+ body=retry_callback,
+ )
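+        # Note: with requests.Session.request mocked above, the httpretty
+        # callback is never reached; the Timeout side_effect drives the
+        # retries asserted below.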
+
+ self.assertEqual({}, dsaz.get_metadata_from_imds(retries=3))
+
+ self.assertEqual([mock.call(1)] * 3, m_sleep.call_args_list)
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue(),
+ )
+
+
+class TestAzureDataSource(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestAzureDataSource, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ # patch cloud_dir, so our 'seed_dir' is guaranteed empty
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+ self.waagent_d = os.path.join(self.tmp, "var", "lib", "waagent")
+
+ self.patches = ExitStack()
+ self.addCleanup(self.patches.close)
+
+ self.patches.enter_context(
+ mock.patch.object(dsaz, "_get_random_seed", return_value="wild")
+ )
+
+ self.m_dhcp = self.patches.enter_context(
+ mock.patch.object(
+ dsaz,
+ "EphemeralDHCPv4",
+ autospec=True,
+ )
+ )
+ self.m_dhcp.return_value.lease = {}
+ self.m_dhcp.return_value.iface = "eth4"
+
+ self.m_get_metadata_from_imds = self.patches.enter_context(
+ mock.patch.object(
+ dsaz,
+ "get_metadata_from_imds",
+ mock.MagicMock(return_value=NETWORK_METADATA),
+ )
+ )
+ self.m_fallback_nic = self.patches.enter_context(
+ mock.patch(
+ "cloudinit.sources.net.find_fallback_nic", return_value="eth9"
+ )
+ )
+ self.m_remove_ubuntu_network_scripts = self.patches.enter_context(
+ mock.patch.object(
+ dsaz,
+ "maybe_remove_ubuntu_network_config_scripts",
+ mock.MagicMock(),
+ )
+ )
+
+ def apply_patches(self, patches):
+ for module, name, new in patches:
+ self.patches.enter_context(mock.patch.object(module, name, new))
+
+ def _get_mockds(self):
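+        # Canned FreeBSD sysctl and camcontrol output, used to exercise
+        # the resource-disk discovery path on FreeBSD.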
+ sysctl_out = (
+ "dev.storvsc.3.%pnpinfo: "
+ "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "
+ "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n"
+ )
+ sysctl_out += (
+ "dev.storvsc.2.%pnpinfo: "
+ "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "
+ "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n"
+ )
+ sysctl_out += (
+ "dev.storvsc.1.%pnpinfo: "
+ "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "
+ "deviceid=00000000-0001-8899-0000-000000000000\n"
+ )
+ camctl_devbus = """
+scbus0 on ata0 bus 0
+scbus1 on ata1 bus 0
+scbus2 on blkvsc0 bus 0
+scbus3 on blkvsc1 bus 0
+scbus4 on storvsc2 bus 0
+scbus5 on storvsc3 bus 0
+scbus-1 on xpt0 bus 0
+ """
+ camctl_dev = """
+<Msft Virtual CD/ROM 1.0> at scbus1 target 0 lun 0 (cd0,pass0)
+<Msft Virtual Disk 1.0> at scbus2 target 0 lun 0 (da0,pass1)
+<Msft Virtual Disk 1.0> at scbus3 target 1 lun 0 (da1,pass2)
+ """
+ self.apply_patches(
+ [
+ (
+ dsaz,
+ "get_dev_storvsc_sysctl",
+ mock.MagicMock(return_value=sysctl_out),
+ ),
+ (
+ dsaz,
+ "get_camcontrol_dev_bus",
+ mock.MagicMock(return_value=camctl_devbus),
+ ),
+ (
+ dsaz,
+ "get_camcontrol_dev",
+ mock.MagicMock(return_value=camctl_dev),
+ ),
+ ]
+ )
+ return dsaz
+
+ def _get_ds(
+ self,
+ data,
+ distro="ubuntu",
+ apply_network=None,
+ instance_id=None,
+ write_ovf_to_data_dir: bool = False,
+ write_ovf_to_seed_dir: bool = True,
+ ):
+ def _wait_for_files(flist, _maxwait=None, _naplen=None):
+ data["waited"] = flist
+ return []
+
+ def _load_possible_azure_ds(seed_dir, cache_dir):
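+            # Mimic dsaz.list_possible_azure_ds(): yield the seed dir,
+            # the default provisioning ISO device, any test-provided
+            # devices ('dsdevs'), then the cache dir.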
+ yield seed_dir
+ yield dsaz.DEFAULT_PROVISIONING_ISO_DEV
+ yield from data.get("dsdevs", [])
+ if cache_dir:
+ yield cache_dir
+
+ seed_dir = os.path.join(self.paths.seed_dir, "azure")
+ if write_ovf_to_seed_dir and data.get("ovfcontent") is not None:
+ populate_dir(seed_dir, {"ovf-env.xml": data["ovfcontent"]})
+
+ if write_ovf_to_data_dir and data.get("ovfcontent") is not None:
+ populate_dir(self.waagent_d, {"ovf-env.xml": data["ovfcontent"]})
+
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ self.m_is_platform_viable = mock.MagicMock(autospec=True)
+ self.m_get_metadata_from_fabric = mock.MagicMock(return_value=[])
+ self.m_report_failure_to_fabric = mock.MagicMock(autospec=True)
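+        # net.get_interfaces()-style (name, mac, driver, device_id) tuples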
+ self.m_get_interfaces = mock.MagicMock(
+ return_value=[
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("eth0", "00:15:5d:69:63:ba", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ )
+ self.m_list_possible_azure_ds = mock.MagicMock(
+ side_effect=_load_possible_azure_ds
+ )
+
+ if instance_id:
+ self.instance_id = instance_id
+ else:
+ self.instance_id = EXAMPLE_UUID
+
+ def _dmi_mocks(key):
+ if key == "system-uuid":
+ return self.instance_id
+ elif key == "chassis-asset-tag":
+ return "7783-7084-3265-9085-8269-3286-77"
+
+ self.apply_patches(
+ [
+ (
+ dsaz,
+ "list_possible_azure_ds",
+ self.m_list_possible_azure_ds,
+ ),
+ (dsaz, "_is_platform_viable", self.m_is_platform_viable),
+ (
+ dsaz,
+ "get_metadata_from_fabric",
+ self.m_get_metadata_from_fabric,
+ ),
+ (
+ dsaz,
+ "report_failure_to_fabric",
+ self.m_report_failure_to_fabric,
+ ),
+ (dsaz, "get_boot_telemetry", mock.MagicMock()),
+ (dsaz, "get_system_info", mock.MagicMock()),
+ (
+ dsaz.net,
+ "get_interface_mac",
+ mock.MagicMock(return_value="00:15:5d:69:63:ba"),
+ ),
+ (
+ dsaz.net,
+ "get_interfaces",
+ self.m_get_interfaces,
+ ),
+ (dsaz.subp, "which", lambda x: True),
+ (
+ dsaz.dmi,
+ "read_dmi_data",
+ mock.MagicMock(side_effect=_dmi_mocks),
+ ),
+ (
+ dsaz.util,
+ "wait_for_files",
+ mock.MagicMock(side_effect=_wait_for_files),
+ ),
+ ]
+ )
+
+ if isinstance(distro, str):
+ distro_cls = distros.fetch(distro)
+ distro = distro_cls(distro, data.get("sys_cfg", {}), self.paths)
+ dsrc = dsaz.DataSourceAzure(
+ data.get("sys_cfg", {}), distro=distro, paths=self.paths
+ )
+ if apply_network is not None:
+ dsrc.ds_cfg["apply_network_config"] = apply_network
+
+ return dsrc
+
+ def _get_and_setup(self, dsrc):
+ ret = dsrc.get_data()
+ if ret:
+ dsrc.setup(True)
+ return ret
+
+ def xml_equals(self, oxml, nxml):
+ """Compare two sets of XML to make sure they are equal"""
+
+ def create_tag_index(xml):
+ et = ET.fromstring(xml)
+ ret = {}
+ for x in et.iter():
+ ret[x.tag] = x
+ return ret
+
+ def tags_exists(x, y):
+ for tag in x.keys():
+ assert tag in y
+ for tag in y.keys():
+ assert tag in x
+
+ def tags_equal(x, y):
+ for x_val in x.values():
+ y_val = y.get(x_val.tag)
+ assert x_val.text == y_val.text
+
+ old_cnt = create_tag_index(oxml)
+ new_cnt = create_tag_index(nxml)
+ tags_exists(old_cnt, new_cnt)
+ tags_equal(old_cnt, new_cnt)
+
+ def xml_notequals(self, oxml, nxml):
+ try:
+ self.xml_equals(oxml, nxml)
+ except AssertionError:
+ return
+ raise AssertionError("XML is the same")
+
+ def test_get_resource_disk(self):
+ ds = self._get_mockds()
+ dev = ds.get_resource_disk_on_freebsd(1)
+ self.assertEqual("da1", dev)
+
+ def test_not_is_platform_viable_seed_should_return_no_datasource(self):
+ """Check seed_dir using _is_platform_viable and return False."""
+        # Simulate a non-matching asset tag via _is_platform_viable=False
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = False
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_report_failure"
+ ) as m_report_failure:
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertFalse(ret)
+ # Assert that for non viable platforms,
+ # there is no communication with the Azure datasource.
+ self.assertEqual(0, m_crawl_metadata.call_count)
+ self.assertEqual(0, m_report_failure.call_count)
+
+ def test_platform_viable_but_no_devs_should_return_no_datasource(self):
+ """For platforms where the Azure platform is viable
+ (which is indicated by the matching asset tag),
+ the absence of any devs at all (devs == candidate sources
+ for crawling Azure datasource) is NOT expected.
+ Report failure to Azure as this is an unexpected fatal error.
+ """
+ data = {}
+ dsrc = self._get_ds(data)
+ with mock.patch.object(dsrc, "_report_failure") as m_report_failure:
+ self.m_is_platform_viable.return_value = True
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertFalse(ret)
+ self.assertEqual(1, m_report_failure.call_count)
+
+ def test_crawl_metadata_exception_returns_no_datasource(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertEqual(1, m_crawl_metadata.call_count)
+ self.assertFalse(ret)
+
+ def test_crawl_metadata_exception_should_report_failure_with_msg(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_report_failure"
+ ) as m_report_failure:
+ m_crawl_metadata.side_effect = Exception
+ dsrc.get_data()
+ self.assertEqual(1, m_crawl_metadata.call_count)
+ m_report_failure.assert_called_once_with(
+ description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
+
+ def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+ dsrc.get_data()
+ self.assertEqual(1, m_crawl_metadata.call_count)
+ self.assertIn(
+ "Could not crawl Azure metadata", self.logs.getvalue()
+ )
+
+ def test_basic_seed_dir(self):
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, "")
+ self.assertEqual(dsrc.metadata["local-hostname"], odata["HostName"])
+ self.assertTrue(
+ os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
+ )
+ self.assertEqual("azure", dsrc.cloud_name)
+ self.assertEqual("azure", dsrc.platform_type)
+ self.assertEqual(
+ "seed-dir (%s/seed/azure)" % self.tmp, dsrc.subplatform
+ )
+
+ def test_data_dir_without_imds_data(self):
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+ dsrc = self._get_ds(
+ data, write_ovf_to_data_dir=True, write_ovf_to_seed_dir=False
+ )
+
+ self.m_get_metadata_from_imds.return_value = {}
+ with mock.patch(MOCKPATH + "util.mount_cb") as m_mount_cb:
+ m_mount_cb.side_effect = [
+ MountFailedError("fail"),
+ ({"local-hostname": "me"}, "ud", {"cfg": ""}, {}),
+ ]
+ ret = dsrc.get_data()
+
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, "")
+ self.assertEqual(dsrc.metadata["local-hostname"], odata["HostName"])
+ self.assertTrue(
+ os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
+ )
+ self.assertEqual("azure", dsrc.cloud_name)
+ self.assertEqual("azure", dsrc.platform_type)
+ self.assertEqual("seed-dir (%s)" % self.waagent_d, dsrc.subplatform)
+
+ def test_basic_dev_file(self):
+ """When a device path is used, present that in subplatform."""
+ data = {"sys_cfg": {}, "dsdevs": ["/dev/cd0"]}
+ dsrc = self._get_ds(data)
+        # DSAzure will attempt to mount /dev/sr0 first, which should
+        # fail with a mount error since /dev/sr0 is not in the list of
+        # devices
+ with mock.patch(MOCKPATH + "util.mount_cb") as m_mount_cb:
+ m_mount_cb.side_effect = [
+ MountFailedError("fail"),
+ ({"local-hostname": "me"}, "ud", {"cfg": ""}, {}),
+ ]
+ self.assertTrue(dsrc.get_data())
+ self.assertEqual(dsrc.userdata_raw, "ud")
+ self.assertEqual(dsrc.metadata["local-hostname"], "me")
+ self.assertEqual("azure", dsrc.cloud_name)
+ self.assertEqual("azure", dsrc.platform_type)
+ self.assertEqual("config-disk (/dev/cd0)", dsrc.subplatform)
+
+ def test_get_data_non_ubuntu_will_not_remove_network_scripts(self):
+ """get_data on non-Ubuntu will not remove ubuntu net scripts."""
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ dsrc = self._get_ds(data, distro="debian")
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_not_called()
+
+ def test_get_data_on_ubuntu_will_remove_network_scripts(self):
+ """get_data will remove ubuntu net scripts on Ubuntu distro."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data, distro="ubuntu")
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_called_once_with()
+
+ def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self):
+ """When apply_network_config false, do not remove scripts on Ubuntu."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": False}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data, distro="ubuntu")
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_not_called()
+
+ def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
+ """Return all structured metadata and cache no class attributes."""
+ yaml_cfg = ""
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserData": {"text": "FOOBAR", "encoding": "plain"},
+ "dscfg": {"text": yaml_cfg, "encoding": "plain"},
+ }
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+ dsrc = self._get_ds(data)
+ expected_cfg = {
+ "PreprovisionedVMType": None,
+ "PreprovisionedVm": False,
+ "datasource": {"Azure": {}},
+ "system_info": {"default_user": {"name": "myuser"}},
+ }
+ expected_metadata = {
+ "azure_data": {
+ "configurationsettype": "LinuxProvisioningConfiguration"
+ },
+ "imds": NETWORK_METADATA,
+ "instance-id": EXAMPLE_UUID,
+ "local-hostname": "myhost",
+ "random_seed": "wild",
+ }
+
+ crawled_metadata = dsrc.crawl_metadata()
+
+ self.assertCountEqual(
+ crawled_metadata.keys(),
+ ["cfg", "files", "metadata", "userdata_raw"],
+ )
+ self.assertEqual(crawled_metadata["cfg"], expected_cfg)
+ self.assertEqual(
+ list(crawled_metadata["files"].keys()), ["ovf-env.xml"]
+ )
+ self.assertIn(
+ b"<HostName>myhost</HostName>",
+ crawled_metadata["files"]["ovf-env.xml"],
+ )
+ self.assertEqual(crawled_metadata["metadata"], expected_metadata)
+ self.assertEqual(crawled_metadata["userdata_raw"], "FOOBAR")
+ self.assertEqual(dsrc.userdata_raw, None)
+ self.assertEqual(dsrc.metadata, {})
+ self.assertEqual(dsrc._metadata_imds, UNSET)
+ self.assertFalse(
+ os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
+ )
+
+ def test_crawl_metadata_raises_invalid_metadata_on_error(self):
+ """crawl_metadata raises an exception on invalid ovf-env.xml."""
+ data = {"ovfcontent": "BOGUS", "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ error_msg = (
+ "BrokenAzureDataSource: Invalid ovf-env.xml:"
+ " syntax error: line 1, column 0"
+ )
+ with self.assertRaises(InvalidMetaDataException) as cm:
+ dsrc.crawl_metadata()
+ self.assertEqual(str(cm.exception), error_msg)
+
+ def test_crawl_metadata_call_imds_once_no_reprovision(self):
+ """If reprovisioning, report ready at the end"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "False"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ dsrc.crawl_metadata()
+ self.assertEqual(1, self.m_get_metadata_from_imds.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
+ def test_crawl_metadata_call_imds_twice_with_reprovision(
+ self, poll_imds_func, m_report_ready, m_write
+ ):
+ """If reprovisioning, imds metadata will be fetched twice"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
+ self.assertEqual(2, self.m_get_metadata_from_imds.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
+ def test_crawl_metadata_on_reprovision_reports_ready(
+ self, poll_imds_func, m_report_ready, m_write
+ ):
+ """If reprovisioning, report ready at the end"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
+ self.assertEqual(1, m_report_ready.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure."
+ "_wait_for_all_nics_ready"
+ )
+ def test_crawl_metadata_waits_for_nic_on_savable_vms(
+ self, detect_nics, poll_imds_func, report_ready_func, m_write
+ ):
+ """If reprovisioning, report ready at the end"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={
+ "PreprovisionedVMType": "Savable",
+ "PreprovisionedVm": "True",
+ }
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
+ self.assertEqual(1, report_ready_func.call_count)
+ self.assertEqual(1, detect_nics.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+ )
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready",
+ return_value=True,
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.readurl")
+ def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
+ self, m_readurl, m_report_ready, m_media_switch, m_write
+ ):
+ """If reprovisioning, report ready using the obtained lease"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+
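+        # DHCP option 245 ('unknown-245') carries the Azure wireserver
+        # endpoint; the hex value "624c3620" decodes to 98.76.54.32.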
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ self.m_dhcp.return_value.obtain_lease.return_value = lease
+ m_media_switch.return_value = None
+
+ reprovision_ovfenv = construct_valid_ovf_env()
+ m_readurl.return_value = url_helper.StringResponse(
+ reprovision_ovfenv.encode("utf-8")
+ )
+
+ dsrc.crawl_metadata()
+
+ assert m_report_ready.mock_calls == [
+ mock.call(),
+ mock.call(pubkey_info=None),
+ ]
+
+ def test_waagent_d_has_0700_perms(self):
+ # we expect /var/lib/waagent to be created 0700
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertTrue(os.path.isdir(self.waagent_d))
+ self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_network_config_set_from_imds(self, m_driver):
+ """Datasource.network_config returns IMDS network data."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ expected_network_config = {
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ }
+ },
+ "version": 2,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(expected_network_config, dsrc.network_config)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_network_config_set_from_imds_route_metric_for_secondary_nic(
+ self, m_driver
+ ):
+ """Datasource.network_config adds route-metric to secondary nics."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ expected_network_config = {
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
+ third_intf = copy.deepcopy(SECONDARY_INTERFACE)
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ imds_data["network"]["interface"].append(third_intf)
+
+ self.m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(expected_network_config, dsrc.network_config)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_network_config_set_from_imds_for_secondary_nic_no_ip(
+ self, m_driver
+ ):
+ """If an IP address is empty then there should no config for it."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ expected_network_config = {
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ }
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE_NO_IP)
+ self.m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(expected_network_config, dsrc.network_config)
+
+ def test_availability_zone_set_from_imds(self):
+ """Datasource.availability returns IMDS platformFaultDomain."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual("0", dsrc.availability_zone)
+
+ def test_region_set_from_imds(self):
+ """Datasource.region returns IMDS region location."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual("eastus2", dsrc.region)
+
+ def test_sys_cfg_set_never_destroy_ntfs(self):
+ sys_cfg = {
+ "datasource": {
+ "Azure": {"never_destroy_ntfs": "user-supplied-value"}
+ }
+ }
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data={}),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data)
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
+ self.assertEqual(
+ dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
+ "user-supplied-value",
+ )
+
+ def test_username_used(self):
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ dsrc.cfg["system_info"]["default_user"]["name"], "myuser"
+ )
+
+ def test_password_given(self):
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": "mypass",
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertIn("default_user", dsrc.cfg["system_info"])
+ defuser = dsrc.cfg["system_info"]["default_user"]
+
+ # default user should be updated username and should not be locked.
+ self.assertEqual(defuser["name"], odata["UserName"])
+ self.assertFalse(defuser["lock_passwd"])
+        # passwd is a crypt-formatted string: $id$salt$encrypted.
+        # Re-hashing the plaintext with everything up to the final '$'
+        # as the salt should reproduce the full hash.
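+        # e.g. (illustrative values): crypt.crypt("mypass", "$6$salt$")
+        # returns the full "$6$salt$<digest>" string for comparison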
+ pos = defuser["passwd"].rfind("$") + 1
+ self.assertEqual(
+ defuser["passwd"],
+ crypt.crypt(odata["UserPassword"], defuser["passwd"][0:pos]),
+ )
+
+ # the same hashed value should also be present in cfg['password']
+ self.assertEqual(defuser["passwd"], dsrc.cfg["password"])
+
+ def test_user_not_locked_if_password_redacted(self):
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": dsaz.DEF_PASSWD_REDACTION,
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertIn("default_user", dsrc.cfg["system_info"])
+ defuser = dsrc.cfg["system_info"]["default_user"]
+
+ # default user should be updated username and should not be locked.
+ self.assertEqual(defuser["name"], odata["UserName"])
+ self.assertIn("lock_passwd", defuser)
+ self.assertFalse(defuser["lock_passwd"])
+
+ def test_userdata_plain(self):
+ mydata = "FOOBAR"
+ odata = {"UserData": {"text": mydata, "encoding": "plain"}}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(decode_binary(dsrc.userdata_raw), mydata)
+
+ def test_userdata_found(self):
+ mydata = "FOOBAR"
+ odata = {"UserData": {"text": b64e(mydata), "encoding": "base64"}}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, mydata.encode("utf-8"))
+
+ def test_default_ephemeral_configs_ephemeral_exists(self):
+ # make sure the ephemeral configs are correct if disk present
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ orig_exists = dsaz.os.path.exists
+
+ def changed_exists(path):
+ return (
+ True if path == dsaz.RESOURCE_DISK_PATH else orig_exists(path)
+ )
+
+ with mock.patch(MOCKPATH + "os.path.exists", new=changed_exists):
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ self.assertEqual(
+ dsrc.device_name_to_device("ephemeral0"),
+ dsaz.RESOURCE_DISK_PATH,
+ )
+ assert "disk_setup" in cfg
+ assert "fs_setup" in cfg
+ self.assertIsInstance(cfg["disk_setup"], dict)
+ self.assertIsInstance(cfg["fs_setup"], list)
+
+ def test_default_ephemeral_configs_ephemeral_does_not_exist(self):
+ # make sure the ephemeral configs are correct if disk not present
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ orig_exists = dsaz.os.path.exists
+
+ def changed_exists(path):
+ return (
+ False if path == dsaz.RESOURCE_DISK_PATH else orig_exists(path)
+ )
+
+ with mock.patch(MOCKPATH + "os.path.exists", new=changed_exists):
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ assert "disk_setup" not in cfg
+ assert "fs_setup" not in cfg
+
+ def test_provide_disk_aliases(self):
+ # Make sure that user can affect disk aliases
+ dscfg = {"disk_aliases": {"ephemeral0": "/dev/sdc"}}
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "dscfg": {"text": b64e(yaml.dump(dscfg)), "encoding": "base64"},
+ }
+ usercfg = {
+ "disk_setup": {
+ "/dev/sdc": {"something": "..."},
+ "ephemeral0": False,
+ }
+ }
+ userdata = "#cloud-config" + yaml.dump(usercfg) + "\n"
+
+ ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata)
+ data = {"ovfcontent": ovfcontent, "sys_cfg": {}}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+ self.assertTrue(cfg)
+
+ def test_userdata_arrives(self):
+ userdata = "This is my user-data"
+ xml = construct_valid_ovf_env(data={}, userdata=userdata)
+ data = {"ovfcontent": xml}
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+
+ self.assertEqual(userdata.encode("us-ascii"), dsrc.userdata_raw)
+
+ def test_password_redacted_in_ovf(self):
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": "mypass",
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+
+ self.assertTrue(ret)
+ ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml")
+
+        # The XML should not be the same since the user password is redacted
+ on_disk_ovf = load_file(ovf_env_path)
+ self.xml_notequals(data["ovfcontent"], on_disk_ovf)
+
+        # Make sure the redacted on-disk password is not used by cloud-init
+ self.assertNotEqual(
+ dsrc.cfg.get("password"), dsaz.DEF_PASSWD_REDACTION
+ )
+
+        # Make sure the password was really redacted in the on-disk OVF
+ et = ET.fromstring(on_disk_ovf)
+ for elem in et.iter():
+ if "UserPassword" in elem.tag:
+ self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text)
+
+ def test_ovf_env_arrives_in_waagent_dir(self):
+ xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
+ dsrc = self._get_ds({"ovfcontent": xml})
+ dsrc.get_data()
+
+ # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir)
+ # we expect that the ovf-env.xml file is copied there.
+ ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml")
+ self.assertTrue(os.path.exists(ovf_env_path))
+ self.xml_equals(xml, load_file(ovf_env_path))
+
+ def test_ovf_can_include_unicode(self):
+ xml = construct_valid_ovf_env(data={})
+ xml = "\ufeff{0}".format(xml)
+ dsrc = self._get_ds({"ovfcontent": xml})
+ dsrc.get_data()
+
+ def test_dsaz_report_ready_returns_true_when_report_succeeds(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ assert dsrc._report_ready() == []
+
+ @mock.patch(MOCKPATH + "report_diagnostic_event")
+ def test_dsaz_report_ready_failure_reports_telemetry(self, m_report_diag):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ self.m_get_metadata_from_fabric.side_effect = Exception("foo")
+
+ with pytest.raises(Exception):
+ dsrc._report_ready()
+
+ assert m_report_diag.mock_calls == [
+ mock.call(
+ "Error communicating with Azure fabric; "
+ "You may experience connectivity issues: foo",
+ logger_func=dsaz.LOG.warning,
+ )
+ ]
+
+ def test_dsaz_report_failure_returns_true_when_report_succeeds(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure())
+ self.assertEqual(1, self.m_report_failure_to_fabric.call_count)
+
+ def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc(
+ self,
+ ):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_ephemeral_dhcp_ctx"
+ ) as m_ephemeral_dhcp_ctx, mock.patch.object(
+ dsrc.distro.networking, "is_up"
+ ) as m_dsrc_distro_networking_is_up:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ # setup mocks to allow using cached ephemeral dhcp lease
+ m_dsrc_distro_networking_is_up.return_value = True
+ test_lease_dhcp_option_245 = "test_lease_dhcp_option_245"
+ test_lease = {"unknown-245": test_lease_dhcp_option_245}
+ m_ephemeral_dhcp_ctx.lease = test_lease
+
+ # We expect 2 calls to report_failure_to_fabric,
+ # because we try 2 different methods of calling report failure.
+ # The different methods are attempted in the following order:
+ # 1. Using cached ephemeral dhcp context to report failure to Azure
+ # 2. Using new ephemeral dhcp to report failure to Azure
+ self.m_report_failure_to_fabric.side_effect = Exception
+ self.assertFalse(dsrc._report_failure())
+ self.assertEqual(2, self.m_report_failure_to_fabric.call_count)
+
+ def test_dsaz_report_failure_description_msg(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ test_msg = "Test report failure description message"
+ self.assertTrue(dsrc._report_failure(description=test_msg))
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ dhcp_opts=mock.ANY, description=test_msg
+ )
+
+ def test_dsaz_report_failure_no_description_msg(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure()) # no description msg
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ dhcp_opts=mock.ANY, description=None
+ )
+
+ def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_wireserver_endpoint", return_value="test-ep"
+ ) as m_wireserver_endpoint:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure())
+
+ # ensure called with cached ephemeral dhcp lease option 245
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ description=mock.ANY, dhcp_opts=m_wireserver_endpoint
+ )
+
+ def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ test_lease_dhcp_option_245 = "test_lease_dhcp_option_245"
+ test_lease = {
+ "unknown-245": test_lease_dhcp_option_245,
+ "interface": "eth0",
+ }
+ self.m_dhcp.return_value.obtain_lease.return_value = test_lease
+
+ self.assertTrue(dsrc._report_failure())
+
+ # ensure called with the newly discovered
+ # ephemeral dhcp lease option 245
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245
+ )
+
+ def test_exception_fetching_fabric_data_doesnt_propagate(self):
+ """Errors communicating with fabric should warn, but return True."""
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ self.m_get_metadata_from_fabric.side_effect = Exception
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
+
+ def test_fabric_data_included_in_metadata(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ self.m_get_metadata_from_fabric.return_value = ["ssh-key-value"]
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
+ self.assertEqual(["ssh-key-value"], dsrc.metadata["public-keys"])
+
+ def test_instance_id_case_insensitive(self):
+ """Return the previous iid when current is a case-insensitive match."""
+ lower_iid = EXAMPLE_UUID.lower()
+ upper_iid = EXAMPLE_UUID.upper()
+ # lowercase current UUID
+ ds = self._get_ds(
+ {"ovfcontent": construct_valid_ovf_env()}, instance_id=lower_iid
+ )
+ # UPPERCASE previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ upper_iid,
+ )
+ ds.get_data()
+ self.assertEqual(upper_iid, ds.metadata["instance-id"])
+
+ # UPPERCASE current UUID
+ ds = self._get_ds(
+ {"ovfcontent": construct_valid_ovf_env()}, instance_id=upper_iid
+ )
+ # lowercase previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ lower_iid,
+ )
+ ds.get_data()
+ self.assertEqual(lower_iid, ds.metadata["instance-id"])
+
+ def test_instance_id_endianness(self):
+ """Return the previous iid when dmi uuid is the byteswapped iid."""
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
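+        # SMBIOS stores the first three UUID fields little-endian, so
+        # DMI uuid d0df4c54-4ecb-4a4b-... byteswaps to
+        # 544cdfd0-cb4e-4b4a-... while the trailing fields are unchanged.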
+ # byte-swapped previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8",
+ )
+ ds.get_data()
+ self.assertEqual(
+ "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8", ds.metadata["instance-id"]
+ )
+ # not byte-swapped previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ "644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8",
+ )
+ ds.get_data()
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
+
+ def test_instance_id_from_dmidecode_used(self):
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ds.get_data()
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
+
+ def test_instance_id_from_dmidecode_used_for_builtin(self):
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ds.get_data()
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
+
+ @mock.patch(MOCKPATH + "util.is_FreeBSD")
+ @mock.patch(MOCKPATH + "_check_freebsd_cdrom")
+ def test_list_possible_azure_ds(self, m_check_fbsd_cdrom, m_is_FreeBSD):
+ """On FreeBSD, possible devs should show /dev/cd0."""
+ m_is_FreeBSD.return_value = True
+ m_check_fbsd_cdrom.return_value = True
+ possible_ds = []
+ for src in dsaz.list_possible_azure_ds("seed_dir", "cache_dir"):
+ possible_ds.append(src)
+ self.assertEqual(
+ possible_ds,
+ [
+ "seed_dir",
+ dsaz.DEFAULT_PROVISIONING_ISO_DEV,
+ "/dev/cd0",
+ "cache_dir",
+ ],
+ )
+ self.assertEqual(
+ [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list
+ )
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_imds_network_config(self, mock_fallback, m_driver):
+ """Network config is generated from IMDS network data when present."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ expected_cfg = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+
+ self.assertEqual(expected_cfg, dsrc.network_config)
+ mock_fallback.assert_not_called()
+
+ @mock.patch("cloudinit.net.get_interface_mac")
+ @mock.patch("cloudinit.net.get_devicelist")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_imds_network_ignored_when_apply_network_config_false(
+ self, mock_fallback, mock_dd, mock_devlist, mock_get_mac
+ ):
+ """When apply_network_config is False, use fallback instead of IMDS."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": False}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ fallback_config = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+ mock_fallback.return_value = fallback_config
+
+ mock_devlist.return_value = ["eth0"]
+ mock_dd.return_value = ["hv_netsvc"]
+ mock_get_mac.return_value = "00:11:22:33:44:55"
+
+ dsrc = self._get_ds(data)
+ self.assertTrue(dsrc.get_data())
+ self.assertEqual(dsrc.network_config, fallback_config)
+
+ @mock.patch("cloudinit.net.get_interface_mac")
+ @mock.patch("cloudinit.net.get_devicelist")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net.generate_fallback_config", autospec=True)
+ def test_fallback_network_config(
+ self, mock_fallback, mock_dd, mock_devlist, mock_get_mac
+ ):
+ """On absent IMDS network data, generate network fallback config."""
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ fallback_config = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+ mock_fallback.return_value = fallback_config
+
+ mock_devlist.return_value = ["eth0"]
+ mock_dd.return_value = ["hv_netsvc"]
+ mock_get_mac.return_value = "00:11:22:33:44:55"
+
+ dsrc = self._get_ds(data)
+ # Represent empty response from network imds
+ self.m_get_metadata_from_imds.return_value = {}
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ netconfig = dsrc.network_config
+ self.assertEqual(netconfig, fallback_config)
+ mock_fallback.assert_called_with(
+ blacklist_drivers=["mlx4_core", "mlx5_core"], config_driver=True
+ )
+
+ @mock.patch(MOCKPATH + "net.get_interfaces", autospec=True)
+ def test_blacklist_through_distro(self, m_net_get_interfaces):
+ """Verify Azure DS updates blacklist drivers in the distro's
+ networking object."""
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsrc = self._get_ds(data, distro=distro)
+ dsrc.get_data()
+ self.assertEqual(
+ distro.networking.blacklist_drivers, dsaz.BLACKLIST_DRIVERS
+ )
+
+ distro.networking.get_interfaces_by_mac()
+ self.m_get_interfaces.assert_called_with(
+ blacklist_drivers=dsaz.BLACKLIST_DRIVERS
+ )
+
+ @mock.patch(
+ "cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates"
+ )
+ def test_get_public_ssh_keys_with_imds(self, m_parse_certificates):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
+ self.assertEqual(ssh_keys, ["ssh-rsa key1"])
+ self.assertEqual(m_parse_certificates.call_count, 0)
+
+ def test_key_without_crlf_valid(self):
+ test_key = "ssh-rsa somerandomkeystuff some comment"
+ assert True is dsaz._key_is_openssh_formatted(test_key)
+
+ def test_key_with_crlf_invalid(self):
+ test_key = "ssh-rsa someran\r\ndomkeystuff some comment"
+ assert False is dsaz._key_is_openssh_formatted(test_key)
+
+ def test_key_endswith_crlf_valid(self):
+ test_key = "ssh-rsa somerandomkeystuff some comment\r\n"
+ assert True is dsaz._key_is_openssh_formatted(test_key)
+
+ @mock.patch(
+ "cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates"
+ )
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_get_public_ssh_keys_with_no_openssh_format(
+ self, m_get_metadata_from_imds, m_parse_certificates
+ ):
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["compute"]["publicKeys"][0]["keyData"] = "no-openssh-format"
+ m_get_metadata_from_imds.return_value = imds_data
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
+ self.assertEqual(ssh_keys, [])
+ self.assertEqual(m_parse_certificates.call_count, 0)
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_get_public_ssh_keys_without_imds(self, m_get_metadata_from_imds):
+ m_get_metadata_from_imds.return_value = dict()
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsaz.get_metadata_from_fabric.return_value = ["key2"]
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
+ self.assertEqual(ssh_keys, ["key2"])
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_imds_api_version_wanted_nonexistent(
+ self, m_get_metadata_from_imds
+ ):
+ def get_metadata_from_imds_side_eff(*args, **kwargs):
+ if kwargs["api_version"] == dsaz.IMDS_VER_WANT:
+ raise url_helper.UrlError("No IMDS version", code=400)
+ return NETWORK_METADATA
+
+ m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertIsNotNone(dsrc.metadata)
+
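+        # One probe at the wanted api-version (retries=0), then fall
+        # back to the minimum supported version with full retries.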
+ assert m_get_metadata_from_imds.mock_calls == [
+ mock.call(
+ retries=0,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2021-08-01",
+ exc_cb=mock.ANY,
+ ),
+ mock.call(
+ retries=10,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2019-06-01",
+ exc_cb=mock.ANY,
+ infinite=False,
+ ),
+ ]
+
+ @mock.patch(
+ MOCKPATH + "get_metadata_from_imds", return_value=NETWORK_METADATA
+ )
+ def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertIsNotNone(dsrc.metadata)
+
+ assert m_get_metadata_from_imds.mock_calls == [
+ mock.call(
+ retries=0,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2021-08-01",
+ exc_cb=mock.ANY,
+ )
+ ]
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_hostname_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(dsrc.metadata["local-hostname"], "hostname1")
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_username_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(
+ dsrc.cfg["system_info"]["default_user"]["name"], "username1"
+ )
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_disable_password_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertTrue(dsrc.metadata["disable_password"])
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_userdata_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ userdata = "userdataImds"
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ imds_data["compute"]["userData"] = b64e(userdata)
+ m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, userdata.encode("utf-8"))
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_userdata_from_imds_with_customdata_from_OVF(
+ self, m_get_metadata_from_imds
+ ):
+ userdataOVF = "userdataOVF"
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserData": {"text": b64e(userdataOVF), "encoding": "base64"},
+ }
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ userdataImds = "userdataImds"
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ imds_data["compute"]["userData"] = b64e(userdataImds)
+ m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, userdataOVF.encode("utf-8"))
+
+
+class TestLoadAzureDsDir(CiTestCase):
+ """Tests for load_azure_ds_dir."""
+
+ def setUp(self):
+ self.source_dir = self.tmp_dir()
+ super(TestLoadAzureDsDir, self).setUp()
+
+ def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self):
+ """load_azure_ds_dir raises an error When ovf-env.xml doesn't exit."""
+ with self.assertRaises(dsaz.NonAzureDataSource) as context_manager:
+ dsaz.load_azure_ds_dir(self.source_dir)
+ self.assertEqual(
+ "No ovf-env file found", str(context_manager.exception)
+ )
+
+ def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self):
+ """load_azure_ds_dir calls read_azure_ovf to parse the xml."""
+ ovf_path = os.path.join(self.source_dir, "ovf-env.xml")
+ with open(ovf_path, "wb") as stream:
+ stream.write(b"invalid xml")
+ with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager:
+ dsaz.load_azure_ds_dir(self.source_dir)
+ self.assertEqual(
+ "Invalid ovf-env.xml: syntax error: line 1, column 0",
+ str(context_manager.exception),
+ )
+
+
+class TestReadAzureOvf(CiTestCase):
+ def test_invalid_xml_raises_non_azure_ds(self):
+ invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
+ self.assertRaises(
+ dsaz.BrokenAzureDataSource, dsaz.read_azure_ovf, invalid_xml
+ )
+
+ def test_load_with_pubkeys(self):
+ mypklist = [{"fingerprint": "fp1", "path": "path1", "value": ""}]
+ pubkeys = [(x["fingerprint"], x["path"], x["value"]) for x in mypklist]
+ content = construct_valid_ovf_env(pubkeys=pubkeys)
+ (_md, _ud, cfg) = dsaz.read_azure_ovf(content)
+ for mypk in mypklist:
+ self.assertIn(mypk, cfg["_pubkeys"])
+
+
+class TestCanDevBeReformatted(CiTestCase):
+ warning_file = "dataloss_warning_readme.txt"
+
+ def _domock(self, mockpath, sattr=None):
+ patcher = mock.patch(mockpath)
+ setattr(self, sattr, patcher.start())
+ self.addCleanup(patcher.stop)
+
+ def patchup(self, devs):
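+ # 'devs' maps device paths to dicts that may carry 'realpath' (a
+ # symlink target), 'fs', 'files' and a nested 'partitions' dict of
+ # the same shape; flatten everything into 'bypath' so the mocked
+ # helpers below can resolve any alias a test hands them.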
+ bypath = {}
+ for path, data in devs.items():
+ bypath[path] = data
+ if "realpath" in data:
+ bypath[data["realpath"]] = data
+ for ppath, pdata in data.get("partitions", {}).items():
+ bypath[ppath] = pdata
+ if "realpath" in data:
+ bypath[pdata["realpath"]] = pdata
+
+ def realpath(d):
+ return bypath[d].get("realpath", d)
+
+ def partitions_on_device(devpath):
+ parts = bypath.get(devpath, {}).get("partitions", {})
+ ret = []
+ for path, data in parts.items():
+ ret.append((data.get("num"), realpath(path)))
+ # return sorted by partition number
+ return sorted(ret, key=lambda d: d[0])
+
+ def mount_cb(device, callback, mtype, update_env_for_mount):
+ self.assertEqual("ntfs", mtype)
+ self.assertEqual("C", update_env_for_mount.get("LANG"))
+ p = self.tmp_dir()
+ for f in bypath.get(device).get("files", []):
+ write_file(os.path.join(p, f), content=f)
+ return callback(p)
+
+ def has_ntfs_fs(device):
+ return bypath.get(device, {}).get("fs") == "ntfs"
+
+ p = MOCKPATH
+ self._domock(p + "_partitions_on_device", "m_partitions_on_device")
+ self._domock(p + "_has_ntfs_filesystem", "m_has_ntfs_filesystem")
+ self._domock(p + "util.mount_cb", "m_mount_cb")
+ self._domock(p + "os.path.realpath", "m_realpath")
+ self._domock(p + "os.path.exists", "m_exists")
+ self._domock(p + "util.SeLinuxGuard", "m_selguard")
+
+ self.m_exists.side_effect = lambda p: p in bypath
+ self.m_realpath.side_effect = realpath
+ self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs
+ self.m_mount_cb.side_effect = mount_cb
+ self.m_partitions_on_device.side_effect = partitions_on_device
+ self.m_selguard.__enter__ = mock.Mock(return_value=False)
+ self.m_selguard.__exit__ = mock.Mock()
+
+ def test_three_partitions_is_false(self):
+ """A disk with 3 partitions can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2},
+ "/dev/sda3": {"num": 3},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("3 or more", msg.lower())
+
+ def test_no_partitions_is_false(self):
+ """A disk with no partitions can not be formatted."""
+ self.patchup({"/dev/sda": {}})
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("not partitioned", msg.lower())
+
+ def test_two_partitions_not_ntfs_false(self):
+ """2 partitions and 2nd not ntfs can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2, "fs": "ext4", "files": []},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("not ntfs", msg.lower())
+
+ def test_two_partitions_ntfs_populated_false(self):
+ """2 partitions and populated ntfs fs on 2nd can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {
+ "num": 2,
+ "fs": "ntfs",
+ "files": ["secret.txt"],
+ },
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("files on it", msg.lower())
+
+ def test_two_partitions_ntfs_empty_is_true(self):
+ """2 partitions and empty ntfs fs on 2nd can be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2, "fs": "ntfs", "files": []},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_one_partition_not_ntfs_false(self):
+ """1 partition witih fs other than ntfs can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "zfs"},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("not ntfs", msg.lower())
+
+ def test_one_partition_ntfs_populated_false(self):
+ """1 mountable ntfs partition with many files can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["file1.txt", "file2.exe"],
+ },
+ }
+ }
+ }
+ )
+ with mock.patch.object(dsaz.LOG, "warning") as warning:
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ wmsg = warning.call_args[0][0]
+ self.assertIn(
+ "looks like you're using NTFS on the ephemeral disk", wmsg
+ )
+ self.assertFalse(value)
+ self.assertIn("files on it", msg.lower())
+
+ def test_one_partition_ntfs_empty_is_true(self):
+ """1 mountable ntfs partition and no files can be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "ntfs", "files": []}
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self):
+ """1 mountable ntfs partition and only warn file can be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["dataloss_warning_readme.txt"],
+ }
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_one_partition_through_realpath_is_true(self):
+ """A symlink to a device with 1 ntfs partition can be formatted."""
+ epath = "/dev/disk/cloud/azure_resource"
+ self.patchup(
+ {
+ epath: {
+ "realpath": "/dev/sdb",
+ "partitions": {
+ epath
+ + "-part1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": [self.warning_file],
+ "realpath": "/dev/sdb1",
+ }
+ },
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(epath, preserve_ntfs=False)
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_three_partition_through_realpath_is_false(self):
+ """A symlink to a device with 3 partitions can not be formatted."""
+ epath = "/dev/disk/cloud/azure_resource"
+ self.patchup(
+ {
+ epath: {
+ "realpath": "/dev/sdb",
+ "partitions": {
+ epath
+ + "-part1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": [self.warning_file],
+ "realpath": "/dev/sdb1",
+ },
+ epath
+ + "-part2": {
+ "num": 2,
+ "fs": "ext3",
+ "realpath": "/dev/sdb2",
+ },
+ epath
+ + "-part3": {
+ "num": 3,
+ "fs": "ext",
+ "realpath": "/dev/sdb3",
+ },
+ },
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(epath, preserve_ntfs=False)
+ self.assertFalse(value)
+ self.assertIn("3 or more", msg.lower())
+
+ def test_ntfs_mount_errors_true(self):
+ """can_dev_be_reformatted does not fail if NTFS is unknown fstype."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "ntfs", "files": []}
+ }
+ }
+ }
+ )
+
+ error_msgs = [
+ "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL
+ "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'", # SLES
+ ]
+
+ for err_msg in error_msgs:
+ self.m_mount_cb.side_effect = MountFailedError(
+ "Failed mounting %s to %s due to: \nUnexpected.\n%s"
+ % ("/dev/sda", "/fake-tmp/dir", err_msg)
+ )
+
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("cannot mount NTFS, assuming", msg)
+
+ def test_never_destroy_ntfs_config_false(self):
+ """Normally formattable situation with never_destroy_ntfs set."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["dataloss_warning_readme.txt"],
+ }
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=True
+ )
+ self.assertFalse(value)
+ self.assertIn(
+ "config says to never destroy NTFS "
+ "(datasource.Azure.never_destroy_ntfs)",
+ msg,
+ )
+
+
+class TestClearCachedData(CiTestCase):
+ def test_clear_cached_attrs_clears_imds(self):
+ """All class attributes are reset to defaults, including imds data."""
+ tmp = self.tmp_dir()
+ paths = helpers.Paths({"cloud_dir": tmp, "run_dir": tmp})
+ dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths)
+ clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds]
+ dsrc.metadata = "md"
+ dsrc.userdata = "ud"
+ dsrc._metadata_imds = "imds"
+ dsrc._dirty_cache = True
+ dsrc.clear_cached_attrs()
+ self.assertEqual(
+ [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], clean_values
+ )
+
+
+class TestAzureNetExists(CiTestCase):
+ def test_azure_net_must_exist_for_legacy_objpkl(self):
+ """DataSourceAzureNet must exist for old obj.pkl files
+ that reference it."""
+ self.assertTrue(hasattr(dsaz, "DataSourceAzureNet"))
+
+
+class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
+ def test_read_azure_ovf_with_true_flag(self):
+ """The read_azure_ovf method should set the PreprovisionedVM
+ cfg flag if the proper setting is present."""
+ content = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertTrue(cfg["PreprovisionedVm"])
+
+ def test_read_azure_ovf_with_false_flag(self):
+ """The read_azure_ovf method should set the PreprovisionedVM
+ cfg flag to false if the proper setting is false."""
+ content = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "False"}
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertFalse(cfg["PreprovisionedVm"])
+
+ def test_read_azure_ovf_without_flag(self):
+ """The read_azure_ovf method should not set the
+ PreprovisionedVM cfg flag."""
+ content = construct_valid_ovf_env()
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertFalse(cfg["PreprovisionedVm"])
+ self.assertEqual(None, cfg["PreprovisionedVMType"])
+
+ def test_read_azure_ovf_with_running_type(self):
+ """The read_azure_ovf method should set PreprovisionedVMType
+ cfg flag to Running."""
+ content = construct_valid_ovf_env(
+ platform_settings={
+ "PreprovisionedVMType": "Running",
+ "PreprovisionedVm": "True",
+ }
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertTrue(cfg["PreprovisionedVm"])
+ self.assertEqual("Running", cfg["PreprovisionedVMType"])
+
+ def test_read_azure_ovf_with_savable_type(self):
+ """The read_azure_ovf method should set PreprovisionedVMType
+ cfg flag to Savable."""
+ content = construct_valid_ovf_env(
+ platform_settings={
+ "PreprovisionedVMType": "Savable",
+ "PreprovisionedVm": "True",
+ }
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertTrue(cfg["PreprovisionedVm"])
+ self.assertEqual("Savable", cfg["PreprovisionedVMType"])
+
+
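+# The PPS (preprovisioning) type may be signalled via the OVF cfg, via
+# IMDS "extended" metadata, or by both; these scenarios cover each
+# source alone as well as both agreeing.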
+@pytest.mark.parametrize(
+ "ovf_cfg,imds_md,pps_type",
+ [
+ (
+ {"PreprovisionedVm": False, "PreprovisionedVMType": None},
+ {},
+ dsaz.PPSType.NONE,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Running"},
+ {},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Savable"},
+ {},
+ dsaz.PPSType.SAVABLE,
+ ),
+ (
+ {"PreprovisionedVm": True},
+ {},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {},
+ {"extended": {"compute": {"ppsType": "None"}}},
+ dsaz.PPSType.NONE,
+ ),
+ (
+ {},
+ {"extended": {"compute": {"ppsType": "Running"}}},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {},
+ {"extended": {"compute": {"ppsType": "Savable"}}},
+ dsaz.PPSType.SAVABLE,
+ ),
+ (
+ {"PreprovisionedVm": False, "PreprovisionedVMType": None},
+ {"extended": {"compute": {"ppsType": "None"}}},
+ dsaz.PPSType.NONE,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Running"},
+ {"extended": {"compute": {"ppsType": "Running"}}},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Savable"},
+ {"extended": {"compute": {"ppsType": "Savable"}}},
+ dsaz.PPSType.SAVABLE,
+ ),
+ (
+ {"PreprovisionedVm": True},
+ {"extended": {"compute": {"ppsType": "Running"}}},
+ dsaz.PPSType.RUNNING,
+ ),
+ ],
+)
+class TestDeterminePPSTypeScenarios:
+ @mock.patch("os.path.isfile", return_value=False)
+ def test_determine_pps_without_reprovision_marker(
+ self, is_file, azure_ds, ovf_cfg, imds_md, pps_type
+ ):
+ assert azure_ds._determine_pps_type(ovf_cfg, imds_md) == pps_type
+
+ @mock.patch("os.path.isfile", return_value=True)
+ def test_determine_pps_with_reprovision_marker(
+ self, is_file, azure_ds, ovf_cfg, imds_md, pps_type
+ ):
+ assert (
+ azure_ds._determine_pps_type(ovf_cfg, imds_md)
+ == dsaz.PPSType.UNKNOWN
+ )
+ assert is_file.mock_calls == [mock.call(dsaz.REPROVISION_MARKER_FILE)]
+
+
+@mock.patch("os.path.isfile", return_value=False)
+class TestReprovision(CiTestCase):
+ def setUp(self):
+ super(TestReprovision, self).setUp()
+ tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", tmp)
+ self.paths = helpers.Paths({"cloud_dir": tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ @mock.patch(MOCKPATH + "DataSourceAzure._poll_imds")
+ def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
+ """_reprovision will poll IMDS."""
+ isfile.return_value = False
+ hostname = "myhost"
+ username = "myuser"
+ odata = {"HostName": hostname, "UserName": username}
+ _poll_imds.return_value = construct_valid_ovf_env(data=odata)
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ dsa._reprovision()
+ _poll_imds.assert_called_with()
+
+
+class TestPreprovisioningHotAttachNics(CiTestCase):
+ def setUp(self):
+ super(TestPreprovisioningHotAttachNics, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", self.tmp)
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_nic_detach_event",
+ autospec=True,
+ )
+ @mock.patch(MOCKPATH + "util.write_file", autospec=True)
+ def test_nic_detach_writes_marker(self, m_writefile, m_detach):
+ """When we detect that a nic gets detached, we write a marker for it"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ nl_sock = mock.MagicMock()
+ dsa._wait_for_nic_detach(nl_sock)
+ m_detach.assert_called_with(nl_sock)
+ self.assertEqual(1, m_detach.call_count)
+ m_writefile.assert_called_with(
+ dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY
+ )
+
+ @mock.patch(MOCKPATH + "util.write_file", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ def test_detect_nic_attach_reports_ready_and_waits_for_detach(
+ self, m_detach, m_report_ready, m_fallback_if, m_writefile
+ ):
+ """Report ready first and then wait for nic detach"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ m_fallback_if.return_value = "Dummy interface"
+ dsa._wait_for_all_nics_ready()
+ self.assertEqual(1, m_report_ready.call_count)
+ self.assertEqual(1, m_detach.call_count)
+ self.assertEqual(1, m_writefile.call_count)
+ m_writefile.assert_called_with(
+ dsaz.REPORTED_READY_MARKER_FILE, mock.ANY
+ )
+
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ def test_detect_nic_attach_skips_report_ready_when_marker_present(
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile
+ ):
+ """Skip reporting ready if we already have a marker file."""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+
+ def isfile(key):
+ return key == dsaz.REPORTED_READY_MARKER_FILE
+
+ m_isfile.side_effect = isfile
+ m_fallback_if.return_value = "Dummy interface"
+ dsa._wait_for_all_nics_ready()
+ self.assertEqual(0, m_report_ready.call_count)
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(1, m_detach.call_count)
+
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ def test_detect_nic_attach_skips_nic_detach_when_marker_present(
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile
+ ):
+ """Skip wait for nic detach if it already happened."""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+
+ m_isfile.return_value = True
+ m_fallback_if.return_value = "Dummy interface"
+ dsa._wait_for_all_nics_ready()
+ self.assertEqual(0, m_report_ready.call_count)
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(0, m_detach.call_count)
+
+ @mock.patch(MOCKPATH + "DataSourceAzure.wait_for_link_up", autospec=True)
+ @mock.patch("cloudinit.sources.helpers.netlink.wait_for_nic_attach_event")
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ @mock.patch("os.path.isfile")
+ def test_wait_for_nic_attach_if_no_fallback_interface(
+ self,
+ m_isfile,
+ m_detach,
+ m_dhcpv4,
+ m_imds,
+ m_fallback_if,
+ m_attach,
+ m_link_up,
+ ):
+ """Wait for nic attach if we do not have a fallback interface"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+
+ m_isfile.return_value = True
+ m_attach.return_value = "eth0"
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+ m_dhcpv4.return_value = dhcp_ctx
+ m_imds.return_value = IMDS_NETWORK_METADATA
+ m_fallback_if.return_value = None
+
+ dsa._wait_for_all_nics_ready()
+
+ self.assertEqual(0, m_detach.call_count)
+ self.assertEqual(1, m_attach.call_count)
+ self.assertEqual(1, m_dhcpv4.call_count)
+ self.assertEqual(1, m_imds.call_count)
+ self.assertEqual(1, m_link_up.call_count)
+ m_link_up.assert_called_with(mock.ANY, "eth0")
+
+ @mock.patch(MOCKPATH + "DataSourceAzure.wait_for_link_up")
+ @mock.patch("cloudinit.sources.helpers.netlink.wait_for_nic_attach_event")
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ @mock.patch(MOCKPATH + "DataSourceAzure.get_imds_data_with_api_fallback")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ @mock.patch("os.path.isfile")
+ def test_wait_for_nic_attach_multinic_attach(
+ self,
+ m_isfile,
+ m_detach,
+ m_dhcpv4,
+ m_imds,
+ m_fallback_if,
+ m_attach,
+ m_link_up,
+ ):
+ """Wait for nic attach if we do not have a fallback interface"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+
+ # Simulate two NICs by adding the same one twice.
+ md = {
+ "interface": [
+ IMDS_NETWORK_METADATA["interface"][0],
+ IMDS_NETWORK_METADATA["interface"][0],
+ ]
+ }
+
+ m_isfile.return_value = True
+ m_attach.side_effect = [
+ "eth0",
+ "eth1",
+ ]
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+ m_dhcpv4.return_value = dhcp_ctx
+ m_imds.side_effect = [md]
+ m_fallback_if.return_value = None
+
+ dsa._wait_for_all_nics_ready()
+
+ self.assertEqual(0, m_detach.call_count)
+ self.assertEqual(2, m_attach.call_count)
+ # DHCP and network metadata calls will only happen on the primary NIC.
+ self.assertEqual(1, m_dhcpv4.call_count)
+ self.assertEqual(1, m_imds.call_count)
+ self.assertEqual(2, m_link_up.call_count)
+
+ @mock.patch("cloudinit.url_helper.time.sleep", autospec=True)
+ @mock.patch("requests.Session.request", autospec=True)
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ def test_check_if_nic_is_primary_retries_on_failures(
+ self, m_dhcpv4, m_request, m_sleep
+ ):
+ """Retry polling for network metadata on all failures except timeout
+ and network unreachable errors"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+
+ # Simulate two NICs by adding the same one twice.
+ md = {
+ "interface": [
+ IMDS_NETWORK_METADATA["interface"][0],
+ IMDS_NETWORK_METADATA["interface"][0],
+ ]
+ }
+
+ m_req = mock.Mock(content=json.dumps(md))
+ m_request.side_effect = [
+ requests.Timeout("Fake connection timeout"),
+ requests.ConnectionError("Fake Network Unreachable"),
+ m_req,
+ ]
+ m_dhcpv4.return_value.lease = lease
+
+ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0")
+ self.assertEqual(True, is_primary)
+ self.assertEqual(2, expected_nic_count)
+ assert len(m_request.mock_calls) == 3
+
+ # Re-run tests to verify max retries.
+ m_request.reset_mock()
+ m_request.side_effect = [
+ requests.Timeout("Fake connection timeout")
+ ] * 6 + [requests.ConnectionError("Fake Network Unreachable")] * 6
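+ # Twelve consecutive failures exceed the retry budget: the poll gives
+ # up after the initial request plus ten retries, hence the eleven
+ # calls asserted below.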
+
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+
+ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1")
+ self.assertEqual(False, is_primary)
+ assert len(m_request.mock_calls) == 11
+
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
+ def test_wait_for_link_up_returns_if_already_up(self, m_is_link_up):
+ """Waiting for link to be up should return immediately if the link is
+ already up."""
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+ m_is_link_up.return_value = True
+
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(1, m_is_link_up.call_count)
+
+ @mock.patch(MOCKPATH + "net.is_up", autospec=True)
+ @mock.patch(MOCKPATH + "util.write_file")
+ @mock.patch("cloudinit.net.read_sys_net", return_value="device-id")
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
+ def test_wait_for_link_up_checks_link_after_sleep(
+ self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up
+ ):
+ """Waiting for link to be up should return immediately if the link is
+ already up."""
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+ m_try_set_link_up.return_value = False
+
+ callcount = 0
+
+ def is_up_mock(key):
+ nonlocal callcount
+ if callcount == 0:
+ callcount += 1
+ return False
+ return True
+
+ m_is_up.side_effect = is_up_mock
+
+ with mock.patch("cloudinit.sources.DataSourceAzure.sleep"):
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(2, m_try_set_link_up.call_count)
+ self.assertEqual(2, m_is_up.call_count)
+
+ @mock.patch(MOCKPATH + "util.write_file")
+ @mock.patch("cloudinit.net.read_sys_net", return_value="device-id")
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
+ def test_wait_for_link_up_writes_to_device_file(
+ self, m_is_link_up, m_read_sys_net, m_writefile
+ ):
+ """Waiting for link to be up should return immediately if the link is
+ already up."""
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+ callcount = 0
+
+ def linkup(key):
+ nonlocal callcount
+ if callcount == 0:
+ callcount += 1
+ return False
+ return True
+
+ m_is_link_up.side_effect = linkup
+
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(2, m_is_link_up.call_count)
+ self.assertEqual(1, m_read_sys_net.call_count)
+ self.assertEqual(2, m_writefile.call_count)
+
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.create_bound_netlink_socket"
+ )
+ def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
+ """Waiting for all nics should raise exception if netlink socket
+ creation fails."""
+
+ m_socket.side_effect = netlink.NetlinkCreateSocketError
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+ self.assertRaises(
+ netlink.NetlinkCreateSocketError, dsa._wait_for_all_nics_ready
+ )
+
+
+@mock.patch("cloudinit.net.find_fallback_nic", return_value="eth9")
+@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+)
+@mock.patch("requests.Session.request")
+@mock.patch(MOCKPATH + "DataSourceAzure._report_ready", return_value=True)
+class TestPreprovisioningPollIMDS(CiTestCase):
+ def setUp(self):
+ super(TestPreprovisioningPollIMDS, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", self.tmp)
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ @mock.patch("time.sleep", mock.MagicMock())
+ def test_poll_imds_re_dhcp_on_timeout(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """The poll_imds will retry DHCP on IMDS timeout."""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ m_dhcp.return_value = [lease]
+ m_media_switch.return_value = None
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+
+ self.tries = 0
+
+ def fake_timeout_once(**kwargs):
+ self.tries += 1
+ if self.tries == 1:
+ raise requests.Timeout("Fake connection timeout")
+ elif self.tries in (2, 3):
+ response = requests.Response()
+ response.status_code = 404 if self.tries == 2 else 410
+ raise requests.exceptions.HTTPError(
+ "fake {}".format(response.status_code), response=response
+ )
+ # Fourth request should succeed and stop retries or re-dhcp
+ return mock.MagicMock(status_code=200, text="good", content="good")
+
+ m_request.side_effect = fake_timeout_once
+
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+
+ assert m_report_ready.mock_calls == [mock.call()]
+
+ self.assertEqual(3, m_dhcp.call_count, "Expected 3 DHCP calls")
+ self.assertEqual(4, self.tries, "Expected 4 total reads from IMDS")
+
+ @mock.patch("os.path.isfile")
+ def test_poll_imds_skips_dhcp_if_ctx_present(
+ self,
+ m_isfile,
+ report_ready_func,
+ fake_resp,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """The poll_imds function should reuse the dhcp ctx if it is already
+ present. This happens when we wait for nic to be hot-attached before
+ polling for reprovisiondata. Note that if this ctx is set when
+ _poll_imds is called, then it is not expected to be waiting for
+ media_disconnect_connect either."""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_isfile.return_value = True
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa._ephemeral_dhcp_ctx = mock.Mock(lease={})
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(0, m_media_switch.call_count)
+
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ def test_poll_imds_does_dhcp_on_retries_if_ctx_present(
+ self,
+ m_ephemeral_dhcpv4,
+ m_isfile,
+ report_ready_func,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """The poll_imds function should reuse the dhcp ctx if it is already
+ present. This happens when we wait for nic to be hot-attached before
+ polling for reprovisiondata. Note that if this ctx is set when
+ _poll_imds is called, then it is not expected to be waiting for
+ media_disconnect_connect either."""
+
+ tries = 0
+
+ def fake_timeout_once(**kwargs):
+ nonlocal tries
+ tries += 1
+ if tries == 1:
+ raise requests.Timeout("Fake connection timeout")
+ return mock.MagicMock(status_code=200, text="good", content="good")
+
+ m_request.side_effect = fake_timeout_once
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_isfile.return_value = True
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ with mock.patch(
+ MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file
+ ), mock.patch.object(dsa, "_ephemeral_dhcp_ctx") as m_dhcp_ctx:
+ m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
+ dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
+ dsa._poll_imds()
+ self.assertEqual(1, m_dhcp_ctx.clean_network.call_count)
+ self.assertEqual(1, m_ephemeral_dhcpv4.call_count)
+ self.assertEqual(0, m_media_switch.call_count)
+ self.assertEqual(2, m_request.call_count)
+
+ def test_does_not_poll_imds_report_ready_when_marker_file_exists(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """poll_imds should not call report ready when the reported ready
+ marker file exists"""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ write_file(report_file, content="dont run report_ready :)")
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ m_media_switch.return_value = None
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+ self.assertEqual(m_report_ready.call_count, 0)
+
+ def test_poll_imds_report_ready_success_writes_marker_file(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """poll_imds should write the report_ready marker file if
+ reporting ready succeeds"""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ m_media_switch.return_value = None
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ self.assertFalse(os.path.exists(report_file))
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+ self.assertEqual(m_report_ready.call_count, 1)
+ self.assertTrue(os.path.exists(report_file))
+
+ def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """poll_imds should write the report_ready marker file if
+ reporting ready succeeds"""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ m_media_switch.return_value = None
+ m_report_ready.side_effect = [Exception("fail")]
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ self.assertFalse(os.path.exists(report_file))
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ self.assertRaises(InvalidMetaDataException, dsa._poll_imds)
+ self.assertEqual(m_report_ready.call_count, 1)
+ self.assertFalse(os.path.exists(report_file))
+
+
+@mock.patch(MOCKPATH + "DataSourceAzure._report_ready", mock.MagicMock())
+@mock.patch(MOCKPATH + "subp.subp", mock.MagicMock())
+@mock.patch(MOCKPATH + "util.write_file", mock.MagicMock())
+@mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+)
+@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network", autospec=True)
+@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch("requests.Session.request")
+class TestAzureDataSourcePreprovisioning(CiTestCase):
+ def setUp(self):
+ super(TestAzureDataSourcePreprovisioning, self).setUp()
+ tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", tmp)
+ self.paths = helpers.Paths({"cloud_dir": tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ def test_poll_imds_returns_ovf_env(
+ self, m_request, m_dhcp, m_net, m_media_switch
+ ):
+ """The _poll_imds method should return the ovf_env.xml."""
+ m_media_switch.return_value = None
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ }
+ ]
+ url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
+ host = "169.254.169.254"
+ full_url = url.format(host)
+ m_request.return_value = mock.MagicMock(
+ status_code=200, text="ovf", content="ovf"
+ )
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ self.assertTrue(len(dsa._poll_imds()) > 0)
+ self.assertEqual(
+ m_request.call_args_list,
+ [
+ mock.call(
+ allow_redirects=True,
+ headers={
+ "Metadata": "true",
+ "User-Agent": "Cloud-Init/%s" % vs(),
+ },
+ method="GET",
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ url=full_url,
+ )
+ ],
+ )
+ self.assertEqual(m_dhcp.call_count, 2)
+ m_net.assert_any_call(
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
+ self.assertEqual(m_net.call_count, 2)
+
+ def test__reprovision_calls__poll_imds(
+ self, m_request, m_dhcp, m_net, m_media_switch
+ ):
+ """The _reprovision method should call poll IMDS."""
+ m_media_switch.return_value = None
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
+ host = "169.254.169.254"
+ full_url = url.format(host)
+ hostname = "myhost"
+ username = "myuser"
+ odata = {"HostName": hostname, "UserName": username}
+ content = construct_valid_ovf_env(data=odata)
+ m_request.return_value = mock.MagicMock(
+ status_code=200, text=content, content=content
+ )
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ md, _ud, cfg, _d = dsa._reprovision()
+ self.assertEqual(md["local-hostname"], hostname)
+ self.assertEqual(cfg["system_info"]["default_user"]["name"], username)
+ self.assertIn(
+ mock.call(
+ allow_redirects=True,
+ headers={
+ "Metadata": "true",
+ "User-Agent": "Cloud-Init/%s" % vs(),
+ },
+ method="GET",
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ url=full_url,
+ ),
+ m_request.call_args_list,
+ )
+ self.assertEqual(m_dhcp.call_count, 2)
+ m_net.assert_any_call(
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
+ self.assertEqual(m_net.call_count, 2)
+
+
+class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestRemoveUbuntuNetworkConfigScripts, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_remove_network_scripts_removes_both_files_and_directories(self):
+ """Any files or directories in paths are removed when present."""
+ file1 = self.tmp_path("file1", dir=self.tmp)
+ subdir = self.tmp_path("sub1", dir=self.tmp)
+ subfile = self.tmp_path("leaf1", dir=subdir)
+ write_file(file1, "file1content")
+ write_file(subfile, "leafcontent")
+ dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1])
+
+ for path in (file1, subdir, subfile):
+ self.assertFalse(
+ os.path.exists(path), "Found unremoved: %s" % path
+ )
+
+ expected_logs = [
+ "INFO: Removing Ubuntu extended network scripts because cloud-init"
+ " updates Azure network configuration on the following events:"
+ " ['boot', 'boot-legacy']",
+ "Recursively deleting %s" % subdir,
+ "Attempting to remove %s" % file1,
+ ]
+ for log in expected_logs:
+ self.assertIn(log, self.logs.getvalue())
+
+ def test_remove_network_scripts_only_attempts_removal_if_path_exists(self):
+ """Any files or directories absent are skipped without error."""
+ dsaz.maybe_remove_ubuntu_network_config_scripts(
+ paths=[
+ self.tmp_path("nodirhere/", dir=self.tmp),
+ self.tmp_path("notfilehere", dir=self.tmp),
+ ]
+ )
+ self.assertNotIn("/not/a", self.logs.getvalue()) # No delete logs
+
+ @mock.patch(MOCKPATH + "os.path.exists")
+ def test_remove_network_scripts_default_removes_stock_scripts(
+ self, m_exists
+ ):
+ """Azure's stock ubuntu image scripts and artifacts are removed."""
+ # Report path absent on all to avoid delete operation
+ m_exists.return_value = False
+ dsaz.maybe_remove_ubuntu_network_config_scripts()
+ calls = m_exists.call_args_list
+ for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS:
+ self.assertIn(mock.call(path), calls)
+
+
+class TestWBIsPlatformViable(CiTestCase):
+ """White box tests for _is_platform_viable."""
+
+ with_logs = True
+
+ @mock.patch(MOCKPATH + "dmi.read_dmi_data")
+ def test_true_on_azure_chassis(self, m_read_dmi_data):
+ """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG."""
+ m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG
+ self.assertTrue(dsaz._is_platform_viable("doesnotmatter"))
+
+ @mock.patch(MOCKPATH + "os.path.exists")
+ @mock.patch(MOCKPATH + "dmi.read_dmi_data")
+ def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist):
+ """Return True if ovf-env.xml exists in known seed dirs."""
+ # Non-matching Azure chassis-asset-tag
+ m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + "X"
+
+ m_exist.return_value = True
+ self.assertTrue(dsaz._is_platform_viable("/some/seed/dir"))
+ m_exist.assert_called_once_with("/some/seed/dir/ovf-env.xml")
+
+ def test_false_on_no_matching_azure_criteria(self):
+ """Report non-azure on unmatched asset tag, ovf-env absent and no dev.
+
+ Return False when the asset tag doesn't match Azure's static
+ AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs
+ and no devices have a label starting with prefix 'rd_rdfe_'.
+ """
+ self.assertFalse(
+ wrap_and_call(
+ MOCKPATH,
+ {
+ "os.path.exists": False,
+ # Non-matching Azure chassis-asset-tag
+ "dmi.read_dmi_data": dsaz.AZURE_CHASSIS_ASSET_TAG + "X",
+ "subp.which": None,
+ },
+ dsaz._is_platform_viable,
+ "doesnotmatter",
+ )
+ )
+ self.assertIn(
+ "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
+ dsaz.AZURE_CHASSIS_ASSET_TAG + "X"
+ ),
+ self.logs.getvalue(),
+ )
+
+
+class TestRandomSeed(CiTestCase):
+ """Test proper handling of random_seed"""
+
+ def test_non_ascii_seed_is_serializable(self):
+ """Pass if a random string from the Azure infrastructure which
+ contains at least one non-Unicode character can be converted to/from
+ JSON without alteration and without throwing an exception.
+ """
+ path = resourceLocation("azure/non_unicode_random_string")
+ result = dsaz._get_random_seed(path)
+
+ obj = {"seed": result}
+ try:
+ serialized = json_dumps(obj)
+ deserialized = load_json(serialized)
+ except UnicodeDecodeError:
+ self.fail("Non-serializable random seed returned")
+
+ self.assertEqual(deserialized["seed"], result)
+
+
+class TestProvisioning:
+ @pytest.fixture(autouse=True)
+ def provisioning_setup(
+ self,
+ azure_ds,
+ mock_azure_get_metadata_from_fabric,
+ mock_azure_report_failure_to_fabric,
+ mock_net_dhcp_maybe_perform_dhcp_discovery,
+ mock_net_dhcp_EphemeralIPv4Network,
+ mock_dmi_read_dmi_data,
+ mock_get_interfaces,
+ mock_get_interface_mac,
+ mock_netlink,
+ mock_os_path_isfile,
+ mock_readurl,
+ mock_subp_subp,
+ mock_util_ensure_dir,
+ mock_util_find_devs_with,
+ mock_util_load_file,
+ mock_util_mount_cb,
+ mock_util_write_file,
+ ):
+ self.azure_ds = azure_ds
+ self.mock_azure_get_metadata_from_fabric = (
+ mock_azure_get_metadata_from_fabric
+ )
+ self.mock_azure_report_failure_to_fabric = (
+ mock_azure_report_failure_to_fabric
+ )
+ self.mock_net_dhcp_maybe_perform_dhcp_discovery = (
+ mock_net_dhcp_maybe_perform_dhcp_discovery
+ )
+ self.mock_net_dhcp_EphemeralIPv4Network = (
+ mock_net_dhcp_EphemeralIPv4Network
+ )
+ self.mock_dmi_read_dmi_data = mock_dmi_read_dmi_data
+ self.mock_get_interfaces = mock_get_interfaces
+ self.mock_get_interface_mac = mock_get_interface_mac
+ self.mock_netlink = mock_netlink
+ self.mock_os_path_isfile = mock_os_path_isfile
+ self.mock_readurl = mock_readurl
+ self.mock_subp_subp = mock_subp_subp
+ self.mock_util_ensure_dir = mock_util_ensure_dir
+ self.mock_util_find_devs_with = mock_util_find_devs_with
+ self.mock_util_load_file = mock_util_load_file
+ self.mock_util_mount_cb = mock_util_mount_cb
+ self.mock_util_write_file = mock_util_write_file
+
+ self.imds_md = {
+ "extended": {"compute": {"ppsType": "None"}},
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "011122334455",
+ },
+ ]
+ },
+ }
+
+ def test_no_pps(self):
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
+ self.mock_os_path_isfile.side_effect = [
+ False, # /var/lib/cloud/data/poll_imds
+ False, # seed/azure/ovf-env.xml
+ False, # /var/lib/cloud/data/poll_imds
+ ]
+
+ self.azure_ds._get_data()
+
+ assert self.mock_os_path_isfile.mock_calls == [
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call(
+ os.path.join(
+ self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
+ )
+ ),
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ ]
+
+ assert self.mock_readurl.mock_calls == [
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ ]
+
+ # Verify DHCP is setup once.
+ assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
+ mock.call(None, dsaz.dhcp_log_cb)
+ ]
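+ # The wireserver endpoint comes straight from the fixture lease's
+ # DHCP option 245 value.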
+ assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._is_ephemeral_networking_up() is False
+
+ # Verify DMI usage.
+ assert self.mock_dmi_read_dmi_data.mock_calls == [
+ mock.call("system-uuid")
+ ]
+ assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
+
+ # Verify IMDS metadata.
+ assert self.azure_ds.metadata["imds"] == self.imds_md
+
+ # Verify reporting ready once.
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev="/dev/sr0",
+ pubkey_info=None,
+ )
+ ]
+
+ # Verify netlink.
+ assert self.mock_netlink.mock_calls == []
+
+ def test_running_pps(self):
+ self.imds_md["extended"]["compute"]["ppsType"] = "Running"
+ ovf_data = {"HostName": "myhost", "UserName": "myuser"}
+
+ nl_sock = mock.MagicMock()
+ self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ mock.MagicMock(
+ contents=construct_valid_ovf_env(data=ovf_data).encode()
+ ),
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
+ self.mock_os_path_isfile.side_effect = [
+ False, # /var/lib/cloud/data/poll_imds
+ False, # seed/azure/ovf-env.xml
+ False, # /var/lib/cloud/data/poll_imds
+ False, # /var/lib/cloud/data/reported_ready
+ ]
+
+ self.azure_ds._get_data()
+
+ assert self.mock_os_path_isfile.mock_calls == [
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call(
+ os.path.join(
+ self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
+ )
+ ),
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ ]
+
+ assert self.mock_readurl.mock_calls == [
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/reprovisiondata?"
+ "api-version=2019-06-01",
+ timeout=2,
+ headers={"Metadata": "true"},
+ exception_cb=mock.ANY,
+ infinite=True,
+ log_req_resp=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ ]
+
+ # Verify DHCP is setup twice.
+ assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
+ mock.call(None, dsaz.dhcp_log_cb),
+ mock.call(None, dsaz.dhcp_log_cb),
+ ]
+ assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._is_ephemeral_networking_up() is False
+
+ # Verify DMI usage.
+ assert self.mock_dmi_read_dmi_data.mock_calls == [
+ mock.call("system-uuid")
+ ]
+ assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
+
+ # Verify IMDS metadata.
+ assert self.azure_ds.metadata["imds"] == self.imds_md
+
+ # Verify reporting ready twice.
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev="/dev/sr0",
+ pubkey_info=None,
+ ),
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev=None,
+ pubkey_info=None,
+ ),
+ ]
+
+ # Verify netlink operations for Running PPS.
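+ # (the trailing __bool__()/close() entries come from the
+ # "if nl_sock: nl_sock.close()" style cleanup in the datasource)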
+ assert self.mock_netlink.mock_calls == [
+ mock.call.create_bound_netlink_socket(),
+ mock.call.wait_for_media_disconnect_connect(mock.ANY, "ethBoot0"),
+ mock.call.create_bound_netlink_socket().__bool__(),
+ mock.call.create_bound_netlink_socket().close(),
+ ]
+
+ def test_savable_pps(self):
+ self.imds_md["extended"]["compute"]["ppsType"] = "Savable"
+ ovf_data = {"HostName": "myhost", "UserName": "myuser"}
+
+ nl_sock = mock.MagicMock()
+ self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock
+ self.mock_netlink.wait_for_nic_detach_event.return_value = "eth9"
+ self.mock_netlink.wait_for_nic_attach_event.return_value = (
+ "ethAttached1"
+ )
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ mock.MagicMock(
+ contents=json.dumps(self.imds_md["network"]).encode()
+ ),
+ mock.MagicMock(
+ contents=construct_valid_ovf_env(data=ovf_data).encode()
+ ),
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
+ self.mock_os_path_isfile.side_effect = [
+ False, # /var/lib/cloud/data/poll_imds
+ False, # seed/azure/ovf-env.xml
+ False, # /var/lib/cloud/data/poll_imds
+ False, # /var/lib/cloud/data/reported_ready
+ False, # /var/lib/cloud/data/reported_ready
+ False, # /var/lib/cloud/data/nic_detached
+ True, # /var/lib/cloud/data/reported_ready
+ ]
+ self.azure_ds._fallback_interface = False
+
+ self.azure_ds._get_data()
+
+ assert self.mock_os_path_isfile.mock_calls == [
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call(
+ os.path.join(
+ self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
+ )
+ ),
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ mock.call("/var/lib/cloud/data/nic_detached"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ ]
+
+ assert self.mock_readurl.mock_calls == [
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/instance/network?"
+ "api-version=2019-06-01",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=mock.ANY,
+ infinite=True,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/reprovisiondata?"
+ "api-version=2019-06-01",
+ timeout=2,
+ headers={"Metadata": "true"},
+ exception_cb=mock.ANY,
+ infinite=True,
+ log_req_resp=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ ]
+
+ # Verify DHCP is setup twice.
+ assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
+ mock.call(None, dsaz.dhcp_log_cb),
+ mock.call("ethAttached1", dsaz.dhcp_log_cb),
+ ]
+ assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._is_ephemeral_networking_up() is False
+
+ # Verify DMI usage.
+ assert self.mock_dmi_read_dmi_data.mock_calls == [
+ mock.call("system-uuid")
+ ]
+ assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
+
+ # Verify IMDS metadata.
+ assert self.azure_ds.metadata["imds"] == self.imds_md
+
+ # Verify reporting ready twice.
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev="/dev/sr0",
+ pubkey_info=None,
+ ),
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev=None,
+ pubkey_info=None,
+ ),
+ ]
+
+ # Verify netlink operations for Savable PPS.
+ assert self.mock_netlink.mock_calls == [
+ mock.call.create_bound_netlink_socket(),
+ mock.call.wait_for_nic_detach_event(nl_sock),
+ mock.call.wait_for_nic_attach_event(nl_sock, ["ethAttached1"]),
+ mock.call.create_bound_netlink_socket().__bool__(),
+ mock.call.create_bound_netlink_socket().close(),
+ ]
+
+
+class TestValidateIMDSMetadata:
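+ # IMDS reports MAC addresses without separators (e.g. "001122aabbcc");
+ # normalize_mac_address() lowercases them and inserts colons so they
+ # can be compared against the kernel's representation.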
+ @pytest.mark.parametrize(
+ "mac,expected",
+ [
+ ("001122aabbcc", "00:11:22:aa:bb:cc"),
+ ("001122AABBCC", "00:11:22:aa:bb:cc"),
+ ("00:11:22:aa:bb:cc", "00:11:22:aa:bb:cc"),
+ ("00:11:22:AA:BB:CC", "00:11:22:aa:bb:cc"),
+ ("pass-through-the-unexpected", "pass-through-the-unexpected"),
+ ("", ""),
+ ],
+ )
+ def test_normalize_scenarios(self, mac, expected):
+ normalized = dsaz.normalize_mac_address(mac)
+ assert normalized == expected
+
+ def test_empty(
+ self, azure_ds, caplog, mock_get_interfaces, mock_get_interface_mac
+ ):
+ imds_md = {}
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata has incomplete configuration: None",
+ ) in caplog.record_tuples
+
+ def test_validates_one_nic(
+ self, azure_ds, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "001122334455",
+ }
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is True
+
+ def test_validates_multiple_nic(
+ self, azure_ds, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "001122334455",
+ },
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "011122334455",
+ },
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is True
+
+ def test_missing_all(
+ self, azure_ds, caplog, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {"network": {"interface": []}}
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata is missing configuration for NICs "
+ "['00:11:22:33:44:55', '01:11:22:33:44:55']: "
+ f"{imds_md['network']!r}",
+ ) in caplog.record_tuples
+
+ def test_missing_primary(
+ self, azure_ds, caplog, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "011122334455",
+ },
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata is missing configuration for NICs "
+ f"['00:11:22:33:44:55']: {imds_md['network']!r}",
+ ) in caplog.record_tuples
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata is missing primary NIC "
+ f"'00:11:22:33:44:55': {imds_md['network']!r}",
+ ) in caplog.record_tuples
+
+ def test_missing_secondary(
+ self, azure_ds, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "001122334455",
+ },
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py
new file mode 100644
index 00000000..98143bc3
--- /dev/null
+++ b/tests/unittests/sources/test_azure_helper.py
@@ -0,0 +1,1609 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import os
+import re
+import unittest
+from textwrap import dedent
+from xml.etree import ElementTree
+from xml.sax.saxutils import escape, unescape
+
+from cloudinit.sources.helpers import azure as azure_helper
+from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim
+from cloudinit.util import load_file
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
+
+GOAL_STATE_TEMPLATE = """\
+<?xml version="1.0" encoding="utf-8"?>
+<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:noNamespaceSchemaLocation="goalstate10.xsd">
+ <Version>2012-11-30</Version>
+ <Incarnation>{incarnation}</Incarnation>
+ <Machine>
+ <ExpectedState>Started</ExpectedState>
+ <StopRolesDeadlineHint>300000</StopRolesDeadlineHint>
+ <LBProbePorts>
+ <Port>16001</Port>
+ </LBProbePorts>
+ <ExpectHealthReport>FALSE</ExpectHealthReport>
+ </Machine>
+ <Container>
+ <ContainerId>{container_id}</ContainerId>
+ <RoleInstanceList>
+ <RoleInstance>
+ <InstanceId>{instance_id}</InstanceId>
+ <State>Started</State>
+ <Configuration>
+ <HostingEnvironmentConfig>
+ http://100.86.192.70:80/...hostingEnvironmentConfig...
+ </HostingEnvironmentConfig>
+ <SharedConfig>http://100.86.192.70:80/..SharedConfig..</SharedConfig>
+ <ExtensionsConfig>
+ http://100.86.192.70:80/...extensionsConfig...
+ </ExtensionsConfig>
+ <FullConfig>http://100.86.192.70:80/...fullConfig...</FullConfig>
+ <Certificates>{certificates_url}</Certificates>
+ <ConfigName>68ce47.0.68ce47.0.utl-trusty--292258.1.xml</ConfigName>
+ </Configuration>
+ </RoleInstance>
+ </RoleInstanceList>
+ </Container>
+</GoalState>
+"""
+
+HEALTH_REPORT_XML_TEMPLATE = """\
+<?xml version="1.0" encoding="utf-8"?>
+<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+ <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>
+ <Container>
+ <ContainerId>{container_id}</ContainerId>
+ <RoleInstanceList>
+ <Role>
+ <InstanceId>{instance_id}</InstanceId>
+ <Health>
+ <State>{health_status}</State>
+ {health_detail_subsection}
+ </Health>
+ </Role>
+ </RoleInstanceList>
+ </Container>
+</Health>
+"""
+
+HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent(
+ """\
+ <Details>
+ <SubStatus>{health_substatus}</SubStatus>
+ <Description>{health_description}</Description>
+ </Details>
+ """
+)
+
+HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
+
+
+class SentinelException(Exception):
+ pass
+
+
+class TestFindEndpoint(CiTestCase):
+ def setUp(self):
+ super(TestFindEndpoint, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.load_file = patches.enter_context(
+ mock.patch.object(azure_helper.util, "load_file")
+ )
+
+ self.dhcp_options = patches.enter_context(
+ mock.patch.object(wa_shim, "_load_dhclient_json")
+ )
+
+ self.networkd_leases = patches.enter_context(
+ mock.patch.object(wa_shim, "_networkd_get_value_from_leases")
+ )
+ self.networkd_leases.return_value = None
+
+ def test_missing_file(self):
+ """wa_shim find_endpoint uses default endpoint if
+ leasefile not found
+ """
+ self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
+
+ def test_missing_special_azure_line(self):
+ """wa_shim find_endpoint uses default endpoint if leasefile is found
+ but does not contain DHCP Option 245 (whose value is the endpoint)
+ """
+ self.load_file.return_value = ""
+ self.dhcp_options.return_value = {"eth0": {"key": "value"}}
+ self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
+
+ @staticmethod
+ def _build_lease_content(encoded_address):
+ endpoint = azure_helper._get_dhcp_endpoint_option_name()
+ return "\n".join(
+ [
+ "lease {",
+ ' interface "eth0";',
+ " option {0} {1};".format(endpoint, encoded_address),
+ "}",
+ ]
+ )
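+
+    # The helper above emits a minimal dhclient-style lease block; the
+    # option name comes from _get_dhcp_endpoint_option_name(), and option
+    # 245 carries the Azure endpoint address.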
+
+ def test_from_dhcp_client(self):
+ self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}
+ self.assertEqual("5.4.3.2", wa_shim.find_endpoint(None))
+
+ def test_latest_lease_used(self):
+ encoded_addresses = ["5:4:3:2", "4:3:2:1"]
+ file_content = "\n".join(
+ [
+ self._build_lease_content(encoded_address)
+ for encoded_address in encoded_addresses
+ ]
+ )
+ self.load_file.return_value = file_content
+ self.assertEqual(
+ encoded_addresses[-1].replace(":", "."),
+ wa_shim.find_endpoint("foobar"),
+ )
+
+
+class TestExtractIpAddressFromLeaseValue(CiTestCase):
+ def test_hex_string(self):
+ ip_address, encoded_address = "98.76.54.32", "62:4c:36:20"
+ self.assertEqual(
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
+
+ def test_hex_string_with_single_character_part(self):
+ ip_address, encoded_address = "4.3.2.1", "4:3:2:1"
+ self.assertEqual(
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
+
+ def test_packed_string(self):
+ ip_address, encoded_address = "98.76.54.32", "bL6 "
+ self.assertEqual(
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
+
+ def test_packed_string_with_escaped_quote(self):
+ ip_address, encoded_address = "100.72.34.108", 'dH\\"l'
+ self.assertEqual(
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
+
+ def test_packed_string_containing_a_colon(self):
+ ip_address, encoded_address = "100.72.58.108", "dH:l"
+ self.assertEqual(
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
+
+
+class TestGoalStateParsing(CiTestCase):
+
+ default_parameters = {
+ "incarnation": 1,
+ "container_id": "MyContainerId",
+ "instance_id": "MyInstanceId",
+ "certificates_url": "MyCertificatesUrl",
+ }
+
+ def _get_formatted_goal_state_xml_string(self, **kwargs):
+ parameters = self.default_parameters.copy()
+ parameters.update(kwargs)
+ xml = GOAL_STATE_TEMPLATE.format(**parameters)
+ if parameters["certificates_url"] is None:
+ new_xml_lines = []
+ for line in xml.splitlines():
+ if "Certificates" in line:
+ continue
+ new_xml_lines.append(line)
+ xml = "\n".join(new_xml_lines)
+ return xml
+
+ def _get_goal_state(self, m_azure_endpoint_client=None, **kwargs):
+ if m_azure_endpoint_client is None:
+ m_azure_endpoint_client = mock.MagicMock()
+ xml = self._get_formatted_goal_state_xml_string(**kwargs)
+ return azure_helper.GoalState(xml, m_azure_endpoint_client)
+
+ def test_incarnation_parsed_correctly(self):
+ incarnation = "123"
+ goal_state = self._get_goal_state(incarnation=incarnation)
+ self.assertEqual(incarnation, goal_state.incarnation)
+
+ def test_container_id_parsed_correctly(self):
+ container_id = "TestContainerId"
+ goal_state = self._get_goal_state(container_id=container_id)
+ self.assertEqual(container_id, goal_state.container_id)
+
+ def test_instance_id_parsed_correctly(self):
+ instance_id = "TestInstanceId"
+ goal_state = self._get_goal_state(instance_id=instance_id)
+ self.assertEqual(instance_id, goal_state.instance_id)
+
+ def test_instance_id_byte_swap(self):
+ """Return true when previous_iid is byteswapped current_iid"""
+ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+ current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8"
+ self.assertTrue(
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
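+        # The first three UUID fields are byte-reversed (D0DF4C54 ->
+        # 544CDFD0, 4ECB -> CB4E, 4A4B -> 4B4A) while the rest stay
+        # unchanged, as in the mixed-endian SMBIOS encoding of system UUIDs.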
+
+ def test_instance_id_no_byte_swap_same_instance_id(self):
+ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+ current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+ self.assertFalse(
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
+
+ def test_instance_id_no_byte_swap_diff_instance_id(self):
+ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+ current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+ self.assertFalse(
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
+
+ def test_certificates_xml_parsed_and_fetched_correctly(self):
+ m_azure_endpoint_client = mock.MagicMock()
+ certificates_url = "TestCertificatesUrl"
+ goal_state = self._get_goal_state(
+ m_azure_endpoint_client=m_azure_endpoint_client,
+ certificates_url=certificates_url,
+ )
+ certificates_xml = goal_state.certificates_xml
+ self.assertEqual(1, m_azure_endpoint_client.get.call_count)
+ self.assertEqual(
+ certificates_url, m_azure_endpoint_client.get.call_args[0][0]
+ )
+ self.assertTrue(
+ m_azure_endpoint_client.get.call_args[1].get("secure", False)
+ )
+ self.assertEqual(
+ m_azure_endpoint_client.get.return_value.contents, certificates_xml
+ )
+
+ def test_missing_certificates_skips_http_get(self):
+ m_azure_endpoint_client = mock.MagicMock()
+ goal_state = self._get_goal_state(
+ m_azure_endpoint_client=m_azure_endpoint_client,
+ certificates_url=None,
+ )
+ certificates_xml = goal_state.certificates_xml
+ self.assertEqual(0, m_azure_endpoint_client.get.call_count)
+ self.assertIsNone(certificates_xml)
+
+ def test_invalid_goal_state_xml_raises_parse_error(self):
+ xml = "random non-xml data"
+ with self.assertRaises(ElementTree.ParseError):
+ azure_helper.GoalState(xml, mock.MagicMock())
+
+ def test_missing_container_id_in_goal_state_xml_raises_exc(self):
+ xml = self._get_formatted_goal_state_xml_string()
+ xml = re.sub("<ContainerId>.*</ContainerId>", "", xml)
+ with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
+ azure_helper.GoalState(xml, mock.MagicMock())
+
+ def test_missing_instance_id_in_goal_state_xml_raises_exc(self):
+ xml = self._get_formatted_goal_state_xml_string()
+ xml = re.sub("<InstanceId>.*</InstanceId>", "", xml)
+ with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
+ azure_helper.GoalState(xml, mock.MagicMock())
+
+ def test_missing_incarnation_in_goal_state_xml_raises_exc(self):
+ xml = self._get_formatted_goal_state_xml_string()
+ xml = re.sub("<Incarnation>.*</Incarnation>", "", xml)
+ with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
+ azure_helper.GoalState(xml, mock.MagicMock())
+
+
+class TestAzureEndpointHttpClient(CiTestCase):
+
+ regular_headers = {
+ "x-ms-agent-name": "WALinuxAgent",
+ "x-ms-version": "2012-11-30",
+ }
+
+ def setUp(self):
+ super(TestAzureEndpointHttpClient, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+ self.m_http_with_retries = patches.enter_context(
+ mock.patch.object(azure_helper, "http_with_retries")
+ )
+
+ def test_non_secure_get(self):
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ url = "MyTestUrl"
+ response = client.get(url, secure=False)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+ self.assertEqual(self.m_http_with_retries.return_value, response)
+ self.assertEqual(
+ mock.call(url, headers=self.regular_headers),
+ self.m_http_with_retries.call_args,
+ )
+
+ def test_non_secure_get_raises_exception(self):
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ url = "MyTestUrl"
+ self.m_http_with_retries.side_effect = SentinelException
+ self.assertRaises(SentinelException, client.get, url, secure=False)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+
+ def test_secure_get(self):
+ url = "MyTestUrl"
+ m_certificate = mock.MagicMock()
+ expected_headers = self.regular_headers.copy()
+ expected_headers.update(
+ {
+ "x-ms-cipher-name": "DES_EDE3_CBC",
+ "x-ms-guest-agent-public-x509-cert": m_certificate,
+ }
+ )
+ client = azure_helper.AzureEndpointHttpClient(m_certificate)
+ response = client.get(url, secure=True)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+ self.assertEqual(self.m_http_with_retries.return_value, response)
+ self.assertEqual(
+ mock.call(url, headers=expected_headers),
+ self.m_http_with_retries.call_args,
+ )
+
+ def test_secure_get_raises_exception(self):
+ url = "MyTestUrl"
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ self.m_http_with_retries.side_effect = SentinelException
+ self.assertRaises(SentinelException, client.get, url, secure=True)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+
+ def test_post(self):
+ m_data = mock.MagicMock()
+ url = "MyTestUrl"
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ response = client.post(url, data=m_data)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+ self.assertEqual(self.m_http_with_retries.return_value, response)
+ self.assertEqual(
+ mock.call(url, data=m_data, headers=self.regular_headers),
+ self.m_http_with_retries.call_args,
+ )
+
+ def test_post_raises_exception(self):
+ m_data = mock.MagicMock()
+ url = "MyTestUrl"
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ self.m_http_with_retries.side_effect = SentinelException
+ self.assertRaises(SentinelException, client.post, url, data=m_data)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+
+ def test_post_with_extra_headers(self):
+ url = "MyTestUrl"
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ extra_headers = {"test": "header"}
+ client.post(url, extra_headers=extra_headers)
+ expected_headers = self.regular_headers.copy()
+ expected_headers.update(extra_headers)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+ self.assertEqual(
+ mock.call(url, data=mock.ANY, headers=expected_headers),
+ self.m_http_with_retries.call_args,
+ )
+
+ def test_post_with_sleep_with_extra_headers_raises_exception(self):
+ m_data = mock.MagicMock()
+ url = "MyTestUrl"
+ extra_headers = {"test": "header"}
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ self.m_http_with_retries.side_effect = SentinelException
+ self.assertRaises(
+ SentinelException,
+ client.post,
+ url,
+ data=m_data,
+ extra_headers=extra_headers,
+ )
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+
+
+class TestAzureHelperHttpWithRetries(CiTestCase):
+
+ with_logs = True
+
+ max_readurl_attempts = 240
+ default_readurl_timeout = 5
+ sleep_duration_between_retries = 5
+ periodic_logging_attempts = 12
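+    # With up to 240 attempts and a 5 second sleep between retries, the
+    # retry loop exercised here allows roughly 20 minutes (240 * 5s)
+    # before giving up.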
+
+ def setUp(self):
+ super(TestAzureHelperHttpWithRetries, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.m_readurl = patches.enter_context(
+ mock.patch.object(
+ azure_helper.url_helper, "readurl", mock.MagicMock()
+ )
+ )
+ self.m_sleep = patches.enter_context(
+ mock.patch.object(azure_helper.time, "sleep", autospec=True)
+ )
+
+ def test_http_with_retries(self):
+ self.m_readurl.return_value = "TestResp"
+ self.assertEqual(
+ azure_helper.http_with_retries("testurl"),
+ self.m_readurl.return_value,
+ )
+ self.assertEqual(self.m_readurl.call_count, 1)
+
+ def test_http_with_retries_propagates_readurl_exc_and_logs_exc(self):
+ self.m_readurl.side_effect = SentinelException
+
+ self.assertRaises(
+ SentinelException, azure_helper.http_with_retries, "testurl"
+ )
+ self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts)
+
+ self.assertIsNotNone(
+ re.search(
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
+ self.assertIsNone(
+ re.search(
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
+
+ def test_http_with_retries_delayed_success_due_to_temporary_readurl_exc(
+ self,
+ ):
+ self.m_readurl.side_effect = [
+ SentinelException
+ ] * self.periodic_logging_attempts + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
+
+ response = azure_helper.http_with_retries("testurl")
+ self.assertEqual(response, self.m_readurl.return_value)
+ self.assertEqual(
+ self.m_readurl.call_count, self.periodic_logging_attempts + 1
+ )
+
+ # Ensure that cloud-init did sleep between each failed request
+ self.assertEqual(
+ self.m_sleep.call_count, self.periodic_logging_attempts
+ )
+ self.m_sleep.assert_called_with(self.sleep_duration_between_retries)
+
+ def test_http_with_retries_long_delay_logs_periodic_failure_msg(self):
+ self.m_readurl.side_effect = [
+ SentinelException
+ ] * self.periodic_logging_attempts + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
+
+ azure_helper.http_with_retries("testurl")
+
+ self.assertEqual(
+ self.m_readurl.call_count, self.periodic_logging_attempts + 1
+ )
+ self.assertIsNotNone(
+ re.search(
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
+ self.assertIsNotNone(
+ re.search(
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
+
+ def test_http_with_retries_short_delay_does_not_log_periodic_failure_msg(
+ self,
+ ):
+ self.m_readurl.side_effect = [SentinelException] * (
+ self.periodic_logging_attempts - 1
+ ) + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
+
+ azure_helper.http_with_retries("testurl")
+ self.assertEqual(
+ self.m_readurl.call_count, self.periodic_logging_attempts
+ )
+
+ self.assertIsNone(
+ re.search(
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
+ self.assertIsNotNone(
+ re.search(
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
+
+ def test_http_with_retries_calls_url_helper_readurl_with_args_kwargs(self):
+ testurl = mock.MagicMock()
+ kwargs = {
+ "headers": mock.MagicMock(),
+ "data": mock.MagicMock(),
+ # timeout kwarg should not be modified or deleted if present
+ "timeout": mock.MagicMock(),
+ }
+ azure_helper.http_with_retries(testurl, **kwargs)
+ self.m_readurl.assert_called_once_with(testurl, **kwargs)
+
+ def test_http_with_retries_adds_timeout_kwarg_if_not_present(self):
+ testurl = mock.MagicMock()
+ kwargs = {"headers": mock.MagicMock(), "data": mock.MagicMock()}
+ expected_kwargs = copy.deepcopy(kwargs)
+ expected_kwargs["timeout"] = self.default_readurl_timeout
+
+ azure_helper.http_with_retries(testurl, **kwargs)
+ self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
+
+ def test_http_with_retries_deletes_retries_kwargs_passed_in(self):
+ """http_with_retries already implements retry logic,
+ so url_helper.readurl should not have retries.
+ http_with_retries should delete kwargs that
+ cause url_helper.readurl to retry.
+ """
+ testurl = mock.MagicMock()
+ kwargs = {
+ "headers": mock.MagicMock(),
+ "data": mock.MagicMock(),
+ "timeout": mock.MagicMock(),
+ "retries": mock.MagicMock(),
+ "infinite": mock.MagicMock(),
+ }
+ expected_kwargs = copy.deepcopy(kwargs)
+ expected_kwargs.pop("retries", None)
+ expected_kwargs.pop("infinite", None)
+
+ azure_helper.http_with_retries(testurl, **kwargs)
+ self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
+ self.assertIn(
+ "retries kwarg passed in for communication with Azure endpoint.",
+ self.logs.getvalue(),
+ )
+ self.assertIn(
+ "infinite kwarg passed in for communication with Azure endpoint.",
+ self.logs.getvalue(),
+ )
+
+
+class TestOpenSSLManager(CiTestCase):
+ def setUp(self):
+ super(TestOpenSSLManager, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.subp = patches.enter_context(
+ mock.patch.object(azure_helper.subp, "subp")
+ )
+        # Python 3 only: patch the built-in open() directly.
+        self.open = patches.enter_context(mock.patch("builtins.open"))
+
+ @mock.patch.object(azure_helper, "cd", mock.MagicMock())
+ @mock.patch.object(azure_helper.temp_utils, "mkdtemp")
+ def test_openssl_manager_creates_a_tmpdir(self, mkdtemp):
+ manager = azure_helper.OpenSSLManager()
+ self.assertEqual(mkdtemp.return_value, manager.tmpdir)
+
+ def test_generate_certificate_uses_tmpdir(self):
+ subp_directory = {}
+
+ def capture_directory(*args, **kwargs):
+ subp_directory["path"] = os.getcwd()
+
+ self.subp.side_effect = capture_directory
+ manager = azure_helper.OpenSSLManager()
+ self.assertEqual(manager.tmpdir, subp_directory["path"])
+ manager.clean_up()
+
+ @mock.patch.object(azure_helper, "cd", mock.MagicMock())
+ @mock.patch.object(azure_helper.temp_utils, "mkdtemp", mock.MagicMock())
+ @mock.patch.object(azure_helper.util, "del_dir")
+ def test_clean_up(self, del_dir):
+ manager = azure_helper.OpenSSLManager()
+ manager.clean_up()
+ self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list)
+
+
+class TestOpenSSLManagerActions(CiTestCase):
+ def setUp(self):
+ super(TestOpenSSLManagerActions, self).setUp()
+
+ self.allowed_subp = True
+
+ def _data_file(self, name):
+ path = "tests/data/azure"
+ return os.path.join(path, name)
+
+ @unittest.skip("todo move to cloud_test")
+ def test_pubkey_extract(self):
+ cert = load_file(self._data_file("pubkey_extract_cert"))
+ good_key = load_file(self._data_file("pubkey_extract_ssh_key"))
+ sslmgr = azure_helper.OpenSSLManager()
+ key = sslmgr._get_ssh_key_from_cert(cert)
+ self.assertEqual(good_key, key)
+
+ good_fingerprint = "073E19D14D1C799224C6A0FD8DDAB6A8BF27D473"
+ fingerprint = sslmgr._get_fingerprint_from_cert(cert)
+ self.assertEqual(good_fingerprint, fingerprint)
+
+ @unittest.skip("todo move to cloud_test")
+ @mock.patch.object(azure_helper.OpenSSLManager, "_decrypt_certs_from_xml")
+ def test_parse_certificates(self, mock_decrypt_certs):
+ """Azure control plane puts private keys as well as certificates
+ into the Certificates XML object. Make sure only the public keys
+ from certs are extracted and that fingerprints are converted to
+ the form specified in the ovf-env.xml file.
+ """
+ cert_contents = load_file(self._data_file("parse_certificates_pem"))
+ fingerprints = load_file(
+ self._data_file("parse_certificates_fingerprints")
+ ).splitlines()
+ mock_decrypt_certs.return_value = cert_contents
+ sslmgr = azure_helper.OpenSSLManager()
+ keys_by_fp = sslmgr.parse_certificates("")
+ for fp in keys_by_fp.keys():
+ self.assertIn(fp, fingerprints)
+ for fp in fingerprints:
+ self.assertIn(fp, keys_by_fp)
+
+
+class TestGoalStateHealthReporter(CiTestCase):
+
+ maxDiff = None
+
+ default_parameters = {
+ "incarnation": 1634,
+ "container_id": "MyContainerId",
+ "instance_id": "MyInstanceId",
+ }
+
+ test_azure_endpoint = "TestEndpoint"
+ test_health_report_url = "http://{0}/machine?comp=health".format(
+ test_azure_endpoint
+ )
+ test_default_headers = {"Content-Type": "text/xml; charset=utf-8"}
+
+ provisioning_success_status = "Ready"
+ provisioning_not_ready_status = "NotReady"
+ provisioning_failure_substatus = "ProvisioningFailed"
+ provisioning_failure_err_description = (
+ "Test error message containing provisioning failure details"
+ )
+
+ def setUp(self):
+ super(TestGoalStateHealthReporter, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ patches.enter_context(
+ mock.patch.object(azure_helper.time, "sleep", mock.MagicMock())
+ )
+ self.read_file_or_url = patches.enter_context(
+ mock.patch.object(azure_helper.url_helper, "read_file_or_url")
+ )
+
+ self.post = patches.enter_context(
+ mock.patch.object(azure_helper.AzureEndpointHttpClient, "post")
+ )
+
+ self.GoalState = patches.enter_context(
+ mock.patch.object(azure_helper, "GoalState")
+ )
+ self.GoalState.return_value.container_id = self.default_parameters[
+ "container_id"
+ ]
+ self.GoalState.return_value.instance_id = self.default_parameters[
+ "instance_id"
+ ]
+ self.GoalState.return_value.incarnation = self.default_parameters[
+ "incarnation"
+ ]
+
+ def _text_from_xpath_in_xroot(self, xroot, xpath):
+ element = xroot.find(xpath)
+ if element is not None:
+ return element.text
+ return None
+
+ def _get_formatted_health_report_xml_string(self, **kwargs):
+ return HEALTH_REPORT_XML_TEMPLATE.format(**kwargs)
+
+ def _get_formatted_health_detail_subsection_xml_string(self, **kwargs):
+ return HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(**kwargs)
+
+ def _get_report_ready_health_document(self):
+ return self._get_formatted_health_report_xml_string(
+ incarnation=escape(str(self.default_parameters["incarnation"])),
+ container_id=escape(self.default_parameters["container_id"]),
+ instance_id=escape(self.default_parameters["instance_id"]),
+ health_status=escape(self.provisioning_success_status),
+ health_detail_subsection="",
+ )
+
+ def _get_report_failure_health_document(self):
+ health_detail_subsection = (
+ self._get_formatted_health_detail_subsection_xml_string(
+ health_substatus=escape(self.provisioning_failure_substatus),
+ health_description=escape(
+ self.provisioning_failure_err_description
+ ),
+ )
+ )
+
+ return self._get_formatted_health_report_xml_string(
+ incarnation=escape(str(self.default_parameters["incarnation"])),
+ container_id=escape(self.default_parameters["container_id"]),
+ instance_id=escape(self.default_parameters["instance_id"]),
+ health_status=escape(self.provisioning_not_ready_status),
+ health_detail_subsection=health_detail_subsection,
+ )
+
+ def test_send_ready_signal_sends_post_request(self):
+ with mock.patch.object(
+ azure_helper.GoalStateHealthReporter, "build_report"
+ ) as m_build_report:
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ client,
+ self.test_azure_endpoint,
+ )
+ reporter.send_ready_signal()
+
+ self.assertEqual(1, self.post.call_count)
+ self.assertEqual(
+ mock.call(
+ self.test_health_report_url,
+ data=m_build_report.return_value,
+ extra_headers=self.test_default_headers,
+ ),
+ self.post.call_args,
+ )
+
+ def test_send_failure_signal_sends_post_request(self):
+ with mock.patch.object(
+ azure_helper.GoalStateHealthReporter, "build_report"
+ ) as m_build_report:
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ client,
+ self.test_azure_endpoint,
+ )
+ reporter.send_failure_signal(
+ description=self.provisioning_failure_err_description
+ )
+
+ self.assertEqual(1, self.post.call_count)
+ self.assertEqual(
+ mock.call(
+ self.test_health_report_url,
+ data=m_build_report.return_value,
+ extra_headers=self.test_default_headers,
+ ),
+ self.post.call_args,
+ )
+
+ def test_build_report_for_ready_signal_health_document(self):
+ health_document = self._get_report_ready_health_document()
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint,
+ )
+ generated_health_document = reporter.build_report(
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_success_status,
+ )
+
+ self.assertEqual(health_document, generated_health_document)
+
+ generated_xroot = ElementTree.fromstring(generated_health_document)
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot, "./GoalStateIncarnation"
+ ),
+ str(self.default_parameters["incarnation"]),
+ )
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot, "./Container/ContainerId"
+ ),
+ str(self.default_parameters["container_id"]),
+ )
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot, "./Container/RoleInstanceList/Role/InstanceId"
+ ),
+ str(self.default_parameters["instance_id"]),
+ )
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ "./Container/RoleInstanceList/Role/Health/State",
+ ),
+ escape(self.provisioning_success_status),
+ )
+ self.assertIsNone(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ "./Container/RoleInstanceList/Role/Health/Details",
+ )
+ )
+ self.assertIsNone(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ "./Container/RoleInstanceList/Role/Health/Details/SubStatus",
+ )
+ )
+ self.assertIsNone(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
+ )
+
+ def test_build_report_for_failure_signal_health_document(self):
+ health_document = self._get_report_failure_health_document()
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint,
+ )
+ generated_health_document = reporter.build_report(
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_not_ready_status,
+ substatus=self.provisioning_failure_substatus,
+ description=self.provisioning_failure_err_description,
+ )
+
+ self.assertEqual(health_document, generated_health_document)
+
+ generated_xroot = ElementTree.fromstring(generated_health_document)
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot, "./GoalStateIncarnation"
+ ),
+ str(self.default_parameters["incarnation"]),
+ )
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot, "./Container/ContainerId"
+ ),
+ self.default_parameters["container_id"],
+ )
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot, "./Container/RoleInstanceList/Role/InstanceId"
+ ),
+ self.default_parameters["instance_id"],
+ )
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ "./Container/RoleInstanceList/Role/Health/State",
+ ),
+ escape(self.provisioning_not_ready_status),
+ )
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ "./Container/RoleInstanceList/Role/Health/Details/SubStatus",
+ ),
+ escape(self.provisioning_failure_substatus),
+ )
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ ),
+ escape(self.provisioning_failure_err_description),
+ )
+
+ def test_send_ready_signal_calls_build_report(self):
+ with mock.patch.object(
+ azure_helper.GoalStateHealthReporter, "build_report"
+ ) as m_build_report:
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint,
+ )
+ reporter.send_ready_signal()
+
+ self.assertEqual(1, m_build_report.call_count)
+ self.assertEqual(
+ mock.call(
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_success_status,
+ ),
+ m_build_report.call_args,
+ )
+
+ def test_send_failure_signal_calls_build_report(self):
+ with mock.patch.object(
+ azure_helper.GoalStateHealthReporter, "build_report"
+ ) as m_build_report:
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint,
+ )
+ reporter.send_failure_signal(
+ description=self.provisioning_failure_err_description
+ )
+
+ self.assertEqual(1, m_build_report.call_count)
+ self.assertEqual(
+ mock.call(
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_not_ready_status,
+ substatus=self.provisioning_failure_substatus,
+ description=self.provisioning_failure_err_description,
+ ),
+ m_build_report.call_args,
+ )
+
+ def test_build_report_escapes_chars(self):
+ incarnation = "jd8'9*&^<'A><A[p&o+\"SD()*&&&LKAJSD23"
+ container_id = "&&<\"><><ds8'9+7&d9a86!@($09asdl;<>"
+ instance_id = "Opo>>>jas'&d;[p&fp\"a<<!!@&&"
+ health_status = "&<897\"6&>&aa'sd!@&!)((*<&>"
+ health_substatus = "&as\"d<<a&s>d<'^@!5&6<7"
+ health_description = '&&&>!#$"&&<as\'1!@$d&>><>&"sd<67<]>>'
+
+ health_detail_subsection = (
+ self._get_formatted_health_detail_subsection_xml_string(
+ health_substatus=escape(health_substatus),
+ health_description=escape(health_description),
+ )
+ )
+ health_document = self._get_formatted_health_report_xml_string(
+ incarnation=escape(incarnation),
+ container_id=escape(container_id),
+ instance_id=escape(instance_id),
+ health_status=escape(health_status),
+ health_detail_subsection=health_detail_subsection,
+ )
+
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint,
+ )
+ generated_health_document = reporter.build_report(
+ incarnation=incarnation,
+ container_id=container_id,
+ instance_id=instance_id,
+ status=health_status,
+ substatus=health_substatus,
+ description=health_description,
+ )
+
+ self.assertEqual(health_document, generated_health_document)
+
+ def test_build_report_conforms_to_length_limits(self):
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint,
+ )
+ long_err_msg = "a9&ea8>>>e as1< d\"q2*&(^%'a=5<" * 100
+ generated_health_document = reporter.build_report(
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_not_ready_status,
+ substatus=self.provisioning_failure_substatus,
+ description=long_err_msg,
+ )
+
+ generated_xroot = ElementTree.fromstring(generated_health_document)
+ generated_health_report_description = self._text_from_xpath_in_xroot(
+ generated_xroot,
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
+ self.assertEqual(
+ len(unescape(generated_health_report_description)),
+ HEALTH_REPORT_DESCRIPTION_TRIM_LEN,
+ )
+
+ def test_trim_description_then_escape_conforms_to_len_limits_worst_case(
+ self,
+ ):
+ """When unescaped characters are XML-escaped, the length increases.
+ Char Escape String
+ < &lt;
+ > &gt;
+ " &quot;
+ ' &apos;
+ & &amp;
+
+    We (step 1) trim the health report's description field and then
+    (step 2) XML-escape it.
+
+    cloud-init's own limit on the description field is
+    HEALTH_REPORT_DESCRIPTION_TRIM_LEN chars; the Azure platform's limit
+    is 4096 chars.
+
+    For worst-case chars, there is a 6x blowup in length when the chars
+    are XML-escaped: ' and " each expand to a six-char entity
+    (&apos; and &quot;).
+
+    Ensure that (1) trimming and then (2) XML-escaping cannot blow past
+    the Azure platform's 4096-char limit on the description field.
+ """
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint,
+ )
+ long_err_msg = "'\"" * 10000
+ generated_health_document = reporter.build_report(
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_not_ready_status,
+ substatus=self.provisioning_failure_substatus,
+ description=long_err_msg,
+ )
+
+ generated_xroot = ElementTree.fromstring(generated_health_document)
+ generated_health_report_description = self._text_from_xpath_in_xroot(
+ generated_xroot,
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
+        # The escaped description string must not exceed the Azure
+        # platform's 4096-char limit.
+ self.assertLessEqual(len(generated_health_report_description), 4096)
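+        # Worst case: 512 trimmed chars, each escaping to a six-char
+        # entity, is 3072 chars -- comfortably under the 4096-char limit.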
+
+
+class TestWALinuxAgentShim(CiTestCase):
+ def setUp(self):
+ super(TestWALinuxAgentShim, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.AzureEndpointHttpClient = patches.enter_context(
+ mock.patch.object(azure_helper, "AzureEndpointHttpClient")
+ )
+ self.find_endpoint = patches.enter_context(
+ mock.patch.object(wa_shim, "find_endpoint")
+ )
+ self.GoalState = patches.enter_context(
+ mock.patch.object(azure_helper, "GoalState")
+ )
+ self.OpenSSLManager = patches.enter_context(
+ mock.patch.object(azure_helper, "OpenSSLManager", autospec=True)
+ )
+ patches.enter_context(
+ mock.patch.object(azure_helper.time, "sleep", mock.MagicMock())
+ )
+
+ self.test_incarnation = "TestIncarnation"
+ self.test_container_id = "TestContainerId"
+ self.test_instance_id = "TestInstanceId"
+ self.GoalState.return_value.incarnation = self.test_incarnation
+ self.GoalState.return_value.container_id = self.test_container_id
+ self.GoalState.return_value.instance_id = self.test_instance_id
+
+ def test_eject_iso_is_called(self):
+ shim = wa_shim()
+ with mock.patch.object(
+ shim, "eject_iso", autospec=True
+ ) as m_eject_iso:
+ shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0")
+ m_eject_iso.assert_called_once_with("/dev/sr0")
+
+ def test_http_client_does_not_use_certificate_for_report_ready(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_fetch_data()
+ self.assertEqual(
+ [mock.call(None)], self.AzureEndpointHttpClient.call_args_list
+ )
+
+ def test_http_client_does_not_use_certificate_for_report_failure(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ self.assertEqual(
+ [mock.call(None)], self.AzureEndpointHttpClient.call_args_list
+ )
+
+ def test_correct_url_used_for_goalstate_during_report_ready(self):
+ self.find_endpoint.return_value = "test_endpoint"
+ shim = wa_shim()
+ shim.register_with_azure_and_fetch_data()
+ m_get = self.AzureEndpointHttpClient.return_value.get
+ self.assertEqual(
+ [mock.call("http://test_endpoint/machine/?comp=goalstate")],
+ m_get.call_args_list,
+ )
+ self.assertEqual(
+ [
+ mock.call(
+ m_get.return_value.contents,
+ self.AzureEndpointHttpClient.return_value,
+ False,
+ )
+ ],
+ self.GoalState.call_args_list,
+ )
+
+ def test_correct_url_used_for_goalstate_during_report_failure(self):
+ self.find_endpoint.return_value = "test_endpoint"
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ m_get = self.AzureEndpointHttpClient.return_value.get
+ self.assertEqual(
+ [mock.call("http://test_endpoint/machine/?comp=goalstate")],
+ m_get.call_args_list,
+ )
+ self.assertEqual(
+ [
+ mock.call(
+ m_get.return_value.contents,
+ self.AzureEndpointHttpClient.return_value,
+ False,
+ )
+ ],
+ self.GoalState.call_args_list,
+ )
+
+ def test_certificates_used_to_determine_public_keys(self):
+        # If register_with_azure_and_fetch_data() isn't passed any info
+        # about the user's public keys, there is no point in even trying
+        # to parse the certificates.
+ shim = wa_shim()
+ mypk = [
+ {"fingerprint": "fp1", "path": "path1"},
+ {"fingerprint": "fp3", "path": "path3", "value": ""},
+ ]
+ certs = {
+ "fp1": "expected-key",
+ "fp2": "should-not-be-found",
+ "fp3": "expected-no-value-key",
+ }
+ sslmgr = self.OpenSSLManager.return_value
+ sslmgr.parse_certificates.return_value = certs
+ data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
+ self.assertEqual(
+ [mock.call(self.GoalState.return_value.certificates_xml)],
+ sslmgr.parse_certificates.call_args_list,
+ )
+ self.assertIn("expected-key", data)
+ self.assertIn("expected-no-value-key", data)
+ self.assertNotIn("should-not-be-found", data)
+
+ def test_absent_certificates_produces_empty_public_keys(self):
+ mypk = [{"fingerprint": "fp1", "path": "path1"}]
+ self.GoalState.return_value.certificates_xml = None
+ shim = wa_shim()
+ data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
+ self.assertEqual([], data)
+
+ def test_correct_url_used_for_report_ready(self):
+ self.find_endpoint.return_value = "test_endpoint"
+ shim = wa_shim()
+ shim.register_with_azure_and_fetch_data()
+ expected_url = "http://test_endpoint/machine?comp=health"
+ self.assertEqual(
+ [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
+ self.AzureEndpointHttpClient.return_value.post.call_args_list,
+ )
+
+ def test_correct_url_used_for_report_failure(self):
+ self.find_endpoint.return_value = "test_endpoint"
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ expected_url = "http://test_endpoint/machine?comp=health"
+ self.assertEqual(
+ [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
+ self.AzureEndpointHttpClient.return_value.post.call_args_list,
+ )
+
+ def test_goal_state_values_used_for_report_ready(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_fetch_data()
+ posted_document = (
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
+ )
+ self.assertIn(self.test_incarnation, posted_document)
+ self.assertIn(self.test_container_id, posted_document)
+ self.assertIn(self.test_instance_id, posted_document)
+
+ def test_goal_state_values_used_for_report_failure(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ posted_document = (
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
+ )
+ self.assertIn(self.test_incarnation, posted_document)
+ self.assertIn(self.test_container_id, posted_document)
+ self.assertIn(self.test_instance_id, posted_document)
+
+ def test_xml_elems_in_report_ready_post(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_fetch_data()
+ health_document = HEALTH_REPORT_XML_TEMPLATE.format(
+ incarnation=escape(self.test_incarnation),
+ container_id=escape(self.test_container_id),
+ instance_id=escape(self.test_instance_id),
+ health_status=escape("Ready"),
+ health_detail_subsection="",
+ )
+ posted_document = (
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
+ )
+ self.assertEqual(health_document, posted_document)
+
+ def test_xml_elems_in_report_failure_post(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ health_document = HEALTH_REPORT_XML_TEMPLATE.format(
+ incarnation=escape(self.test_incarnation),
+ container_id=escape(self.test_container_id),
+ instance_id=escape(self.test_instance_id),
+ health_status=escape("NotReady"),
+ health_detail_subsection=(
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
+ health_substatus=escape("ProvisioningFailed"),
+ health_description=escape("TestDesc"),
+ )
+ ),
+ )
+ posted_document = (
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
+ )
+ self.assertEqual(health_document, posted_document)
+
+ @mock.patch.object(azure_helper, "GoalStateHealthReporter", autospec=True)
+ def test_register_with_azure_and_fetch_data_calls_send_ready_signal(
+ self, m_goal_state_health_reporter
+ ):
+ shim = wa_shim()
+ shim.register_with_azure_and_fetch_data()
+ self.assertEqual(
+ 1,
+ m_goal_state_health_reporter.return_value.send_ready_signal.call_count, # noqa: E501
+ )
+
+ @mock.patch.object(azure_helper, "GoalStateHealthReporter", autospec=True)
+ def test_register_with_azure_and_report_failure_calls_send_failure_signal(
+ self, m_goal_state_health_reporter
+ ):
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ m_goal_state_health_reporter.return_value.send_failure_signal.assert_called_once_with( # noqa: E501
+ description="TestDesc"
+ )
+
+ def test_register_with_azure_and_report_failure_does_not_need_certificates(
+ self,
+ ):
+ shim = wa_shim()
+ with mock.patch.object(
+ shim, "_fetch_goal_state_from_azure", autospec=True
+ ) as m_fetch_goal_state_from_azure:
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ m_fetch_goal_state_from_azure.assert_called_once_with(
+ need_certificate=False
+ )
+
+ def test_clean_up_can_be_called_at_any_time(self):
+ shim = wa_shim()
+ shim.clean_up()
+
+ def test_openssl_manager_not_instantiated_by_shim_report_status(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_fetch_data()
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ shim.clean_up()
+ self.OpenSSLManager.assert_not_called()
+
+ def test_clean_up_after_report_ready(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_fetch_data()
+ shim.clean_up()
+ self.OpenSSLManager.return_value.clean_up.assert_not_called()
+
+ def test_clean_up_after_report_failure(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ shim.clean_up()
+ self.OpenSSLManager.return_value.clean_up.assert_not_called()
+
+ def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self):
+ self.AzureEndpointHttpClient.return_value.get.side_effect = (
+ SentinelException
+ )
+ shim = wa_shim()
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
+
+ def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self):
+ self.AzureEndpointHttpClient.return_value.get.side_effect = (
+ SentinelException
+ )
+ shim = wa_shim()
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )
+
+ def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self):
+ self.GoalState.side_effect = SentinelException
+ shim = wa_shim()
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
+
+ def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc(
+ self,
+ ):
+ self.GoalState.side_effect = SentinelException
+ shim = wa_shim()
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )
+
+ def test_failure_to_send_report_ready_health_doc_bubbles_up(self):
+ self.AzureEndpointHttpClient.return_value.post.side_effect = (
+ SentinelException
+ )
+ shim = wa_shim()
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
+
+ def test_failure_to_send_report_failure_health_doc_bubbles_up(self):
+ self.AzureEndpointHttpClient.return_value.post.side_effect = (
+ SentinelException
+ )
+ shim = wa_shim()
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )
+
+
+class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
+ def setUp(self):
+ super(TestGetMetadataGoalStateXMLAndReportReadyToFabric, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.m_shim = patches.enter_context(
+ mock.patch.object(azure_helper, "WALinuxAgentShim")
+ )
+
+ def test_data_from_shim_returned(self):
+ ret = azure_helper.get_metadata_from_fabric()
+ self.assertEqual(
+ self.m_shim.return_value.register_with_azure_and_fetch_data.return_value, # noqa: E501
+ ret,
+ )
+
+ def test_success_calls_clean_up(self):
+ azure_helper.get_metadata_from_fabric()
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
+
+ def test_failure_in_registration_propagates_exc_and_calls_clean_up(self):
+ self.m_shim.return_value.register_with_azure_and_fetch_data.side_effect = ( # noqa: E501
+ SentinelException
+ )
+ self.assertRaises(
+ SentinelException, azure_helper.get_metadata_from_fabric
+ )
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
+
+ def test_calls_shim_register_with_azure_and_fetch_data(self):
+ m_pubkey_info = mock.MagicMock()
+ azure_helper.get_metadata_from_fabric(
+ pubkey_info=m_pubkey_info, iso_dev="/dev/sr0"
+ )
+ self.assertEqual(
+ 1,
+ self.m_shim.return_value.register_with_azure_and_fetch_data.call_count, # noqa: E501
+ )
+ self.assertEqual(
+ mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info),
+ self.m_shim.return_value.register_with_azure_and_fetch_data.call_args, # noqa: E501
+ )
+
+ def test_instantiates_shim_with_kwargs(self):
+ m_fallback_lease_file = mock.MagicMock()
+ m_dhcp_options = mock.MagicMock()
+ azure_helper.get_metadata_from_fabric(
+ fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options
+ )
+ self.assertEqual(1, self.m_shim.call_count)
+ self.assertEqual(
+ mock.call(
+ fallback_lease_file=m_fallback_lease_file,
+ dhcp_options=m_dhcp_options,
+ ),
+ self.m_shim.call_args,
+ )
+
+
+class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
+ def setUp(self):
+ super(
+ TestGetMetadataGoalStateXMLAndReportFailureToFabric, self
+ ).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.m_shim = patches.enter_context(
+ mock.patch.object(azure_helper, "WALinuxAgentShim")
+ )
+
+ def test_success_calls_clean_up(self):
+ azure_helper.report_failure_to_fabric()
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
+
+ def test_failure_in_shim_report_failure_propagates_exc_and_calls_clean_up(
+ self,
+ ):
+ self.m_shim.return_value.register_with_azure_and_report_failure.side_effect = ( # noqa: E501
+ SentinelException
+ )
+ self.assertRaises(
+ SentinelException, azure_helper.report_failure_to_fabric
+ )
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
+
+ def test_report_failure_to_fabric_with_desc_calls_shim_report_failure(
+ self,
+ ):
+ azure_helper.report_failure_to_fabric(description="TestDesc")
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description="TestDesc"
+ )
+
+ def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure(
+ self,
+ ):
+ azure_helper.report_failure_to_fabric()
+        # The default error-message description should be shown to the
+        # user if no description is passed in.
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description=(
+ azure_helper.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
+ )
+
+ def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure(
+ self,
+ ):
+ azure_helper.report_failure_to_fabric(description="")
+        # The default error-message description should be shown to the
+        # user if an empty description is passed in.
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description=(
+ azure_helper.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
+ )
+
+ def test_instantiates_shim_with_kwargs(self):
+ m_fallback_lease_file = mock.MagicMock()
+ m_dhcp_options = mock.MagicMock()
+ azure_helper.report_failure_to_fabric(
+ fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options
+ )
+ self.m_shim.assert_called_once_with(
+ fallback_lease_file=m_fallback_lease_file,
+ dhcp_options=m_dhcp_options,
+ )
+
+
+class TestExtractIpAddressFromNetworkd(CiTestCase):
+
+ azure_lease = dedent(
+ """\
+ # This is private data. Do not parse.
+ ADDRESS=10.132.0.5
+ NETMASK=255.255.255.255
+ ROUTER=10.132.0.1
+ SERVER_ADDRESS=169.254.169.254
+ NEXT_SERVER=10.132.0.1
+ MTU=1460
+ T1=43200
+ T2=75600
+ LIFETIME=86400
+ DNS=169.254.169.254
+ NTP=169.254.169.254
+ DOMAINNAME=c.ubuntu-foundations.internal
+ DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal
+ HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal
+ ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
+ CLIENTID=ff405663a200020000ab11332859494d7a8b4c
+ OPTION_245=624c3620
+ """
+ )
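+
+    # OPTION_245 above carries the Azure endpoint as raw hex:
+    # 0x62.0x4c.0x36.0x20 decodes to 98.76.54.32.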
+
+ def setUp(self):
+ super(TestExtractIpAddressFromNetworkd, self).setUp()
+ self.lease_d = self.tmp_dir()
+
+ def test_no_valid_leases_is_none(self):
+ """No valid leases should return None."""
+ self.assertIsNone(
+ wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
+
+ def test_option_245_is_found_in_single(self):
+ """A single valid lease with 245 option should return it."""
+ populate_dir(self.lease_d, {"9": self.azure_lease})
+ self.assertEqual(
+ "624c3620", wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
+
+ def test_option_245_not_found_returns_None(self):
+ """A valid lease, but no option 245 should return None."""
+ populate_dir(
+ self.lease_d,
+ {"9": self.azure_lease.replace("OPTION_245", "OPTION_999")},
+ )
+ self.assertIsNone(
+ wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
+
+ def test_multiple_returns_first(self):
+ """Somewhat arbitrarily return the first address when multiple.
+
+ Most important at the moment is that this is consistent behavior
+ rather than changing randomly as in order of a dictionary."""
+ myval = "624c3601"
+ populate_dir(
+ self.lease_d,
+ {
+ "9": self.azure_lease,
+ "2": self.azure_lease.replace("624c3620", myval),
+ },
+ )
+ self.assertEqual(
+ myval, wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py
new file mode 100644
index 00000000..a2f26245
--- /dev/null
+++ b/tests/unittests/sources/test_cloudsigma.py
@@ -0,0 +1,145 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+
+from cloudinit import distros, helpers, sources
+from cloudinit.cs_utils import Cepko
+from cloudinit.sources import DataSourceCloudSigma
+from tests.unittests import helpers as test_helpers
+
+SERVER_CONTEXT = {
+ "cpu": 1000,
+ "cpus_instead_of_cores": False,
+ "global_context": {"some_global_key": "some_global_val"},
+ "mem": 1073741824,
+ "meta": {
+ "ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe",
+ "cloudinit-user-data": "#cloud-config\n\n...",
+ },
+ "name": "test_server",
+ "requirements": [],
+ "smp": 1,
+ "tags": ["much server", "very performance"],
+ "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890",
+ "vnc_password": "9e84d6cb49e46379",
+ "vendor_data": {
+ "location": "zrh",
+ "cloudinit": "#cloud-config\n\n...",
+ },
+}
+
+DS_PATH = "cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma"
+
+
+class CepkoMock(Cepko):
+ def __init__(self, mocked_context):
+ self.result = mocked_context
+
+ def all(self):
+ return self
+
+
+class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
+ def setUp(self):
+ super(DataSourceCloudSigmaTest, self).setUp()
+ self.paths = helpers.Paths({"run_dir": self.tmp_dir()})
+ self.add_patch(
+ DS_PATH + ".is_running_in_cloudsigma",
+ "m_is_container",
+ return_value=True,
+ )
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", cfg={}, paths=self.paths)
+ self.datasource = DataSourceCloudSigma.DataSourceCloudSigma(
+ sys_cfg={}, distro=distro, paths=self.paths
+ )
+ self.datasource.cepko = CepkoMock(SERVER_CONTEXT)
+
+ def test_get_hostname(self):
+ self.datasource.get_data()
+ self.assertEqual("test_server", self.datasource.get_hostname())
+ self.datasource.metadata["name"] = ""
+ self.assertEqual("65b2fb23", self.datasource.get_hostname())
+ utf8_hostname = b"\xd1\x82\xd0\xb5\xd1\x81\xd1\x82".decode("utf-8")
+ self.datasource.metadata["name"] = utf8_hostname
+ self.assertEqual("65b2fb23", self.datasource.get_hostname())
+
+ def test_get_public_ssh_keys(self):
+ self.datasource.get_data()
+ self.assertEqual(
+ [SERVER_CONTEXT["meta"]["ssh_public_key"]],
+ self.datasource.get_public_ssh_keys(),
+ )
+
+ def test_get_instance_id(self):
+ self.datasource.get_data()
+ self.assertEqual(
+ SERVER_CONTEXT["uuid"], self.datasource.get_instance_id()
+ )
+
+ def test_platform(self):
+ """All platform-related attributes are set."""
+ self.datasource.get_data()
+ self.assertEqual(self.datasource.cloud_name, "cloudsigma")
+ self.assertEqual(self.datasource.platform_type, "cloudsigma")
+ self.assertEqual(self.datasource.subplatform, "cepko (/dev/ttyS1)")
+
+ def test_metadata(self):
+ self.datasource.get_data()
+ self.assertEqual(self.datasource.metadata, SERVER_CONTEXT)
+
+ def test_user_data(self):
+ self.datasource.get_data()
+ self.assertEqual(
+ self.datasource.userdata_raw,
+ SERVER_CONTEXT["meta"]["cloudinit-user-data"],
+ )
+
+ def test_encoded_user_data(self):
+ encoded_context = copy.deepcopy(SERVER_CONTEXT)
+ encoded_context["meta"]["base64_fields"] = "cloudinit-user-data"
+ encoded_context["meta"]["cloudinit-user-data"] = "aGkgd29ybGQK"
+ self.datasource.cepko = CepkoMock(encoded_context)
+ self.datasource.get_data()
+
+ self.assertEqual(self.datasource.userdata_raw, b"hi world\n")
+
+ def test_vendor_data(self):
+ self.datasource.get_data()
+ self.assertEqual(
+ self.datasource.vendordata_raw,
+ SERVER_CONTEXT["vendor_data"]["cloudinit"],
+ )
+
+ def test_lack_of_vendor_data(self):
+ stripped_context = copy.deepcopy(SERVER_CONTEXT)
+ del stripped_context["vendor_data"]
+ self.datasource.cepko = CepkoMock(stripped_context)
+ self.datasource.get_data()
+
+ self.assertIsNone(self.datasource.vendordata_raw)
+
+ def test_lack_of_cloudinit_key_in_vendor_data(self):
+ stripped_context = copy.deepcopy(SERVER_CONTEXT)
+ del stripped_context["vendor_data"]["cloudinit"]
+ self.datasource.cepko = CepkoMock(stripped_context)
+ self.datasource.get_data()
+
+ self.assertIsNone(self.datasource.vendordata_raw)
+
+
+class DsLoads(test_helpers.TestCase):
+ def test_get_datasource_list_returns_in_local(self):
+ deps = (sources.DEP_FILESYSTEM,)
+ ds_list = DataSourceCloudSigma.get_datasource_list(deps)
+ self.assertEqual(ds_list, [DataSourceCloudSigma.DataSourceCloudSigma])
+
+ def test_list_sources_finds_ds(self):
+ found = sources.list_sources(
+ ["CloudSigma"], (sources.DEP_FILESYSTEM,), ["cloudinit.sources"]
+ )
+ self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], found)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_cloudstack.py b/tests/unittests/sources/test_cloudstack.py
new file mode 100644
index 00000000..f7c69f91
--- /dev/null
+++ b/tests/unittests/sources/test_cloudstack.py
@@ -0,0 +1,205 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import time
+
+from cloudinit import helpers, util
+from cloudinit.sources.DataSourceCloudStack import (
+ DataSourceCloudStack,
+ get_latest_lease,
+)
+from tests.unittests.helpers import CiTestCase, ExitStack, mock
+
+MOD_PATH = "cloudinit.sources.DataSourceCloudStack"
+DS_PATH = MOD_PATH + ".DataSourceCloudStack"
+
+
+class TestCloudStackPasswordFetching(CiTestCase):
+ def setUp(self):
+ super(TestCloudStackPasswordFetching, self).setUp()
+ self.patches = ExitStack()
+ self.addCleanup(self.patches.close)
+ mod_name = MOD_PATH
+ self.patches.enter_context(mock.patch("{0}.ec2".format(mod_name)))
+ self.patches.enter_context(mock.patch("{0}.uhelp".format(mod_name)))
+ default_gw = "192.201.20.0"
+ get_latest_lease = mock.MagicMock(return_value=None)
+ self.patches.enter_context(
+ mock.patch(mod_name + ".get_latest_lease", get_latest_lease)
+ )
+
+ get_default_gw = mock.MagicMock(return_value=default_gw)
+ self.patches.enter_context(
+ mock.patch(mod_name + ".get_default_gateway", get_default_gw)
+ )
+
+ get_networkd_server_address = mock.MagicMock(return_value=None)
+ self.patches.enter_context(
+ mock.patch(
+ mod_name + ".dhcp.networkd_get_option_from_leases",
+ get_networkd_server_address,
+ )
+ )
+ self.tmp = self.tmp_dir()
+
+ def _set_password_server_response(self, response_string):
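+ """Patch subp so the fake password server replies with
+ response_string; return the mock for call inspection."""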
+ subp = mock.MagicMock(return_value=(response_string, ""))
+ self.patches.enter_context(
+ mock.patch(
+ "cloudinit.sources.DataSourceCloudStack.subp.subp", subp
+ )
+ )
+ return subp
+
+ def test_empty_password_doesnt_create_config(self):
+ self._set_password_server_response("")
+ ds = DataSourceCloudStack(
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds.get_data()
+ self.assertEqual({}, ds.get_config_obj())
+
+ def test_saved_password_doesnt_create_config(self):
+ self._set_password_server_response("saved_password")
+ ds = DataSourceCloudStack(
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds.get_data()
+ self.assertEqual({}, ds.get_config_obj())
+
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
+ def test_password_sets_password(self, m_wait):
+ m_wait.return_value = True
+ password = "SekritSquirrel"
+ self._set_password_server_response(password)
+ ds = DataSourceCloudStack(
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds.get_data()
+ self.assertEqual(password, ds.get_config_obj()["password"])
+
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
+ def test_bad_request_doesnt_stop_ds_from_working(self, m_wait):
+ m_wait.return_value = True
+ self._set_password_server_response("bad_request")
+ ds = DataSourceCloudStack(
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ self.assertTrue(ds.get_data())
+
+ def assertRequestTypesSent(self, subp, expected_request_types):
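+ """Assert that subp was asked to send these DomU_Request types,
+ in this order."""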
+ request_types = []
+ for call in subp.call_args_list:
+ args = call[0][0]
+ for arg in args:
+ if arg.startswith("DomU_Request"):
+ request_types.append(arg.split()[1])
+ self.assertEqual(expected_request_types, request_types)
+
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
+ def test_valid_response_means_password_marked_as_saved(self, m_wait):
+ m_wait.return_value = True
+ password = "SekritSquirrel"
+ subp = self._set_password_server_response(password)
+ ds = DataSourceCloudStack(
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds.get_data()
+ self.assertRequestTypesSent(
+ subp, ["send_my_password", "saved_password"]
+ )
+
+ def _check_password_not_saved_for(self, response_string):
+ subp = self._set_password_server_response(response_string)
+ ds = DataSourceCloudStack(
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ with mock.patch(DS_PATH + ".wait_for_metadata_service") as m_wait:
+ m_wait.return_value = True
+ ds.get_data()
+ self.assertRequestTypesSent(subp, ["send_my_password"])
+
+ def test_password_not_saved_if_empty(self):
+ self._check_password_not_saved_for("")
+
+ def test_password_not_saved_if_already_saved(self):
+ self._check_password_not_saved_for("saved_password")
+
+ def test_password_not_saved_if_bad_request(self):
+ self._check_password_not_saved_for("bad_request")
+
+
+class TestGetLatestLease(CiTestCase):
+ def _populate_dir_list(self, bdir, files):
+ """populate_dir_list([(name, data), (name, data)])
+
+ writes files to bdir, and updates timestamps to ensure
+ that their mtime increases with each file."""
+
+ start = int(time.time())
+ for num, fname in enumerate(reversed(files)):
+ fpath = os.path.sep.join((bdir, fname))
+ util.write_file(fpath, fname.encode())
+ os.utime(fpath, (start - num, start - num))
+
+ def _pop_and_test(self, files, expected):
+ lease_d = self.tmp_dir()
+ self._populate_dir_list(lease_d, files)
+ self.assertEqual(
+ self.tmp_path(expected, lease_d), get_latest_lease(lease_d)
+ )
+
+ def test_skips_dhcpv6_files(self):
+ """files started with dhclient6 should be skipped."""
+ expected = "dhclient.lease"
+ self._pop_and_test([expected, "dhclient6.lease"], expected)
+
+ def test_selects_dhclient_dot_files(self):
+ """files named dhclient.lease or dhclient.leases should be used.
+
+ Ubuntu names files dhclient.eth0.leases, dhclient6.leases, and
+ sometimes dhclient.leases."""
+ self._pop_and_test(["dhclient.lease"], "dhclient.lease")
+ self._pop_and_test(["dhclient.leases"], "dhclient.leases")
+
+ def test_selects_dhclient_dash_files(self):
+ """files named dhclient-lease or dhclient-leases should be used.
+
+ Redhat/Centos names files with dhclient--eth0.lease (centos 7) or
+ dhclient-eth0.leases (centos 6).
+ """
+ self._pop_and_test(["dhclient-eth0.lease"], "dhclient-eth0.lease")
+ self._pop_and_test(["dhclient--eth0.lease"], "dhclient--eth0.lease")
+
+ def test_ignores_by_extension(self):
+ """only .lease or .leases file should be considered."""
+
+ self._pop_and_test(
+ [
+ "dhclient.lease",
+ "dhclient.lease.bk",
+ "dhclient.lease-old",
+ "dhclient.leaselease",
+ ],
+ "dhclient.lease",
+ )
+
+ def test_selects_newest_matching(self):
+ """If multiple files match, the newest written should be used."""
+ lease_d = self.tmp_dir()
+ valid_1 = "dhclient.leases"
+ valid_2 = "dhclient.lease"
+ valid_1_path = self.tmp_path(valid_1, lease_d)
+ valid_2_path = self.tmp_path(valid_2, lease_d)
+
+ self._populate_dir_list(lease_d, [valid_1, valid_2])
+ self.assertEqual(valid_2_path, get_latest_lease(lease_d))
+
+ # now update mtime on valid_2 to be older than valid_1 and re-check.
+ mtime = int(os.path.getmtime(valid_1_path)) - 1
+ os.utime(valid_2_path, (mtime, mtime))
+
+ self.assertEqual(valid_1_path, get_latest_lease(lease_d))
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_common.py b/tests/unittests/sources/test_common.py
new file mode 100644
index 00000000..a5bdb629
--- /dev/null
+++ b/tests/unittests/sources/test_common.py
@@ -0,0 +1,123 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import settings, sources, type_utils
+from cloudinit.sources import DataSource
+from cloudinit.sources import DataSourceAliYun as AliYun
+from cloudinit.sources import DataSourceAltCloud as AltCloud
+from cloudinit.sources import DataSourceAzure as Azure
+from cloudinit.sources import DataSourceBigstep as Bigstep
+from cloudinit.sources import DataSourceCloudSigma as CloudSigma
+from cloudinit.sources import DataSourceCloudStack as CloudStack
+from cloudinit.sources import DataSourceConfigDrive as ConfigDrive
+from cloudinit.sources import DataSourceDigitalOcean as DigitalOcean
+from cloudinit.sources import DataSourceEc2 as Ec2
+from cloudinit.sources import DataSourceExoscale as Exoscale
+from cloudinit.sources import DataSourceGCE as GCE
+from cloudinit.sources import DataSourceHetzner as Hetzner
+from cloudinit.sources import DataSourceIBMCloud as IBMCloud
+from cloudinit.sources import DataSourceLXD as LXD
+from cloudinit.sources import DataSourceMAAS as MAAS
+from cloudinit.sources import DataSourceNoCloud as NoCloud
+from cloudinit.sources import DataSourceNone as DSNone
+from cloudinit.sources import DataSourceOpenNebula as OpenNebula
+from cloudinit.sources import DataSourceOpenStack as OpenStack
+from cloudinit.sources import DataSourceOracle as Oracle
+from cloudinit.sources import DataSourceOVF as OVF
+from cloudinit.sources import DataSourceRbxCloud as RbxCloud
+from cloudinit.sources import DataSourceScaleway as Scaleway
+from cloudinit.sources import DataSourceSmartOS as SmartOS
+from cloudinit.sources import DataSourceUpCloud as UpCloud
+from cloudinit.sources import DataSourceVMware as VMware
+from cloudinit.sources import DataSourceVultr as Vultr
+from tests.unittests import helpers as test_helpers
+
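+# Datasources expected to be discoverable with filesystem-only
+# dependencies (DEFAULT_LOCAL) versus filesystem-plus-network
+# dependencies (DEFAULT_NETWORK); ExpectedDataSources below checks
+# list_sources() against these.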
+DEFAULT_LOCAL = [
+ Azure.DataSourceAzure,
+ CloudSigma.DataSourceCloudSigma,
+ ConfigDrive.DataSourceConfigDrive,
+ DigitalOcean.DataSourceDigitalOcean,
+ GCE.DataSourceGCELocal,
+ Hetzner.DataSourceHetzner,
+ IBMCloud.DataSourceIBMCloud,
+ LXD.DataSourceLXD,
+ NoCloud.DataSourceNoCloud,
+ OpenNebula.DataSourceOpenNebula,
+ Oracle.DataSourceOracle,
+ OVF.DataSourceOVF,
+ SmartOS.DataSourceSmartOS,
+ Vultr.DataSourceVultr,
+ Ec2.DataSourceEc2Local,
+ OpenStack.DataSourceOpenStackLocal,
+ RbxCloud.DataSourceRbxCloud,
+ Scaleway.DataSourceScaleway,
+ UpCloud.DataSourceUpCloudLocal,
+ VMware.DataSourceVMware,
+]
+
+DEFAULT_NETWORK = [
+ AliYun.DataSourceAliYun,
+ AltCloud.DataSourceAltCloud,
+ Bigstep.DataSourceBigstep,
+ CloudStack.DataSourceCloudStack,
+ DSNone.DataSourceNone,
+ Ec2.DataSourceEc2,
+ Exoscale.DataSourceExoscale,
+ GCE.DataSourceGCE,
+ MAAS.DataSourceMAAS,
+ NoCloud.DataSourceNoCloudNet,
+ OpenStack.DataSourceOpenStack,
+ OVF.DataSourceOVFNet,
+ UpCloud.DataSourceUpCloud,
+ VMware.DataSourceVMware,
+]
+
+
+class ExpectedDataSources(test_helpers.TestCase):
+ builtin_list = settings.CFG_BUILTIN["datasource_list"]
+ deps_local = [sources.DEP_FILESYSTEM]
+ deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
+ pkg_list = [type_utils.obj_name(sources)]
+
+ def test_expected_default_local_sources_found(self):
+ found = sources.list_sources(
+ self.builtin_list, self.deps_local, self.pkg_list
+ )
+ self.assertEqual(set(DEFAULT_LOCAL), set(found))
+
+ def test_expected_default_network_sources_found(self):
+ found = sources.list_sources(
+ self.builtin_list, self.deps_network, self.pkg_list
+ )
+ self.assertEqual(set(DEFAULT_NETWORK), set(found))
+
+ def test_expected_nondefault_network_sources_found(self):
+ found = sources.list_sources(
+ ["AliYun"], self.deps_network, self.pkg_list
+ )
+ self.assertEqual(set([AliYun.DataSourceAliYun]), set(found))
+
+
+class TestDataSourceInvariants(test_helpers.TestCase):
+ def test_data_sources_have_valid_network_config_sources(self):
+ for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
+ for cfg_src in ds.network_config_sources:
+ fail_msg = (
+ "{} has an invalid network_config_sources entry:"
+ " {}".format(str(ds), cfg_src)
+ )
+ self.assertTrue(
+ hasattr(sources.NetworkConfigSource, cfg_src), fail_msg
+ )
+
+ def test_expected_dsname_defined(self):
+ for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
+ fail_msg = (
+ "{} has an invalid / missing dsname property: {}".format(
+ str(ds), str(ds.dsname)
+ )
+ )
+ self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg)
+ self.assertIsNotNone(ds.dsname)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_configdrive.py b/tests/unittests/sources/test_configdrive.py
new file mode 100644
index 00000000..1fc40a0e
--- /dev/null
+++ b/tests/unittests/sources/test_configdrive.py
@@ -0,0 +1,1068 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+import os
+from copy import copy, deepcopy
+
+from cloudinit import helpers, settings, util
+from cloudinit.net import eni, network_state
+from cloudinit.sources import DataSourceConfigDrive as ds
+from cloudinit.sources.helpers import openstack
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
+
+PUBKEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n"
+EC2_META = {
+ "ami-id": "ami-00000001",
+ "ami-launch-index": 0,
+ "ami-manifest-path": "FIXME",
+ "block-device-mapping": {
+ "ami": "sda1",
+ "ephemeral0": "sda2",
+ "root": "/dev/sda1",
+ "swap": "sda3",
+ },
+ "hostname": "sm-foo-test.novalocal",
+ "instance-action": "none",
+ "instance-id": "i-00000001",
+ "instance-type": "m1.tiny",
+ "local-hostname": "sm-foo-test.novalocal",
+ "local-ipv4": None,
+ "placement": {"availability-zone": "nova"},
+ "public-hostname": "sm-foo-test.novalocal",
+ "public-ipv4": "",
+ "public-keys": {"0": {"openssh-key": PUBKEY}},
+ "reservation-id": "r-iru5qm4m",
+ "security-groups": ["default"],
+}
+USER_DATA = b"#!/bin/sh\necho This is user data\n"
+OSTACK_META = {
+ "availability_zone": "nova",
+ "files": [
+ {"content_path": "/content/0000", "path": "/etc/foo.cfg"},
+ {"content_path": "/content/0001", "path": "/etc/bar/bar.cfg"},
+ ],
+ "hostname": "sm-foo-test.novalocal",
+ "meta": {"dsmode": "local", "my-meta": "my-value"},
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
+}
+
+CONTENT_0 = b"This is contents of /etc/foo.cfg\n"
+CONTENT_1 = b"# this is /etc/bar/bar.cfg\n"
+NETWORK_DATA = {
+ "services": [
+ {"type": "dns", "address": "199.204.44.24"},
+ {"type": "dns", "address": "199.204.47.54"},
+ ],
+ "links": [
+ {
+ "vif_id": "2ecc7709-b3f7-4448-9580-e1ec32d75bbd",
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2ecc7709-b3",
+ },
+ {
+ "vif_id": "2f88d109-5b57-40e6-af32-2472df09dc33",
+ "ethernet_mac_address": "fa:16:3e:d4:57:ad",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2f88d109-5b",
+ },
+ {
+ "vif_id": "1a5382f8-04c5-4d75-ab98-d666c1ef52cc",
+ "ethernet_mac_address": "fa:16:3e:05:30:fe",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap1a5382f8-04",
+ "name": "nic0",
+ },
+ ],
+ "networks": [
+ {
+ "link": "tap2ecc7709-b3",
+ "type": "ipv4_dhcp",
+ "network_id": "6d6357ac-0f70-4afa-8bd7-c274cc4ea235",
+ "id": "network0",
+ },
+ {
+ "link": "tap2f88d109-5b",
+ "type": "ipv4_dhcp",
+ "network_id": "d227a9b3-6960-4d94-8976-ee5788b44f54",
+ "id": "network1",
+ },
+ {
+ "link": "tap1a5382f8-04",
+ "type": "ipv4_dhcp",
+ "network_id": "dab2ba57-cae2-4311-a5ed-010b263891f5",
+ "id": "network2",
+ },
+ ],
+}
+
+NETWORK_DATA_2 = {
+ "services": [
+ {"type": "dns", "address": "1.1.1.191"},
+ {"type": "dns", "address": "1.1.1.4"},
+ ],
+ "networks": [
+ {
+ "network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a",
+ "type": "ipv4",
+ "netmask": "255.255.255.248",
+ "link": "eth0",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "2.2.2.9",
+ }
+ ],
+ "ip_address": "2.2.2.10",
+ "id": "network0-ipv4",
+ },
+ {
+ "network_id": "ca447c83-6409-499b-aaef-6ad1ae995348",
+ "type": "ipv4",
+ "netmask": "255.255.255.224",
+ "link": "eth1",
+ "routes": [],
+ "ip_address": "3.3.3.24",
+ "id": "network1-ipv4",
+ },
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:dd:50:9a",
+ "mtu": 1500,
+ "type": "vif",
+ "id": "eth0",
+ "vif_id": "vif-foo1",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:a8:14:69",
+ "mtu": 1500,
+ "type": "vif",
+ "id": "eth1",
+ "vif_id": "vif-foo2",
+ },
+ ],
+}
+
+# This network data has a 'tap' or null type for a link.
+NETWORK_DATA_3 = {
+ "services": [
+ {"type": "dns", "address": "172.16.36.11"},
+ {"type": "dns", "address": "172.16.36.12"},
+ ],
+ "networks": [
+ {
+ "network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+ "type": "ipv4",
+ "netmask": "255.255.255.128",
+ "link": "tap77a0dc5b-72",
+ "ip_address": "172.17.48.18",
+ "id": "network0",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.17.48.1",
+ }
+ ],
+ },
+ {
+ "network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+ "type": "ipv6",
+ "netmask": "ffff:ffff:ffff:ffff::",
+ "link": "tap77a0dc5b-72",
+ "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d",
+ "id": "network1",
+ "routes": [
+ {
+ "netmask": "::",
+ "network": "::",
+ "gateway": "fdb8:52d0:9d14::1",
+ }
+ ],
+ },
+ {
+ "network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54",
+ "type": "ipv4",
+ "netmask": "255.255.255.128",
+ "link": "tap7d6b7bec-93",
+ "ip_address": "172.16.48.13",
+ "id": "network2",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.16.48.1",
+ },
+ {
+ "netmask": "255.255.0.0",
+ "network": "172.16.0.0",
+ "gateway": "172.16.48.1",
+ },
+ ],
+ },
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:dd:50:9a",
+ "mtu": None,
+ "type": "tap",
+ "id": "tap77a0dc5b-72",
+ "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:a8:14:69",
+ "mtu": None,
+ "type": None,
+ "id": "tap7d6b7bec-93",
+ "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5",
+ },
+ ],
+}
+
+BOND_MAC = "fa:16:3e:b3:72:36"
+NETWORK_DATA_BOND = {
+ "services": [
+ {"type": "dns", "address": "1.1.1.191"},
+ {"type": "dns", "address": "1.1.1.4"},
+ ],
+ "networks": [
+ {
+ "id": "network2-ipv4",
+ "ip_address": "2.2.2.13",
+ "link": "vlan2",
+ "netmask": "255.255.255.248",
+ "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "2.2.2.9",
+ }
+ ],
+ },
+ {
+ "id": "network3-ipv4",
+ "ip_address": "10.0.1.5",
+ "link": "vlan3",
+ "netmask": "255.255.255.248",
+ "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "255.255.255.255",
+ "network": "192.168.1.0",
+ "gateway": "10.0.1.1",
+ }
+ ],
+ },
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "0c:c4:7a:34:6e:3c",
+ "id": "eth0",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "ethernet_mac_address": "0c:c4:7a:34:6e:3d",
+ "id": "eth1",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "bond_links": ["eth0", "eth1"],
+ "bond_miimon": 100,
+ "bond_mode": "4",
+ "bond_xmit_hash_policy": "layer3+4",
+ "ethernet_mac_address": BOND_MAC,
+ "id": "bond0",
+ "type": "bond",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:b3:72:30",
+ "id": "vlan2",
+ "type": "vlan",
+ "vlan_id": 602,
+ "vlan_link": "bond0",
+ "vlan_mac_address": "fa:16:3e:b3:72:30",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:66:ab:a6",
+ "id": "vlan3",
+ "type": "vlan",
+ "vlan_id": 612,
+ "vlan_link": "bond0",
+ "vlan_mac_address": "fa:16:3e:66:ab:a6",
+ },
+ ],
+}
+
+NETWORK_DATA_VLAN = {
+ "services": [{"type": "dns", "address": "1.1.1.191"}],
+ "networks": [
+ {
+ "id": "network1-ipv4",
+ "ip_address": "10.0.1.5",
+ "link": "vlan1",
+ "netmask": "255.255.255.248",
+ "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "255.255.255.255",
+ "network": "192.168.1.0",
+ "gateway": "10.0.1.1",
+ }
+ ],
+ }
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "id": "eth0",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:b3:72:30",
+ "id": "vlan1",
+ "type": "vlan",
+ "vlan_id": 602,
+ "vlan_link": "eth0",
+ "vlan_mac_address": "fa:16:3e:b3:72:30",
+ },
+ ],
+}
+
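+# Fixed mac-to-interface-name mapping passed to convert_net_json() as
+# known_macs so that interface naming is deterministic in these tests.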
+KNOWN_MACS = {
+ "fa:16:3e:69:b0:58": "enp0s1",
+ "fa:16:3e:d4:57:ad": "enp0s2",
+ "fa:16:3e:dd:50:9a": "foo1",
+ "fa:16:3e:a8:14:69": "foo2",
+ "fa:16:3e:ed:9a:59": "foo3",
+ "0c:c4:7a:34:6e:3d": "oeth1",
+ "0c:c4:7a:34:6e:3c": "oeth0",
+}
+
+CFG_DRIVE_FILES_V2 = {
+ "ec2/2009-04-04/meta-data.json": json.dumps(EC2_META),
+ "ec2/2009-04-04/user-data": USER_DATA,
+ "ec2/latest/meta-data.json": json.dumps(EC2_META),
+ "ec2/latest/user-data": USER_DATA,
+ "openstack/2012-08-10/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/2012-08-10/user_data": USER_DATA,
+ "openstack/content/0000": CONTENT_0,
+ "openstack/content/0001": CONTENT_1,
+ "openstack/latest/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/latest/user_data": USER_DATA,
+ "openstack/latest/network_data.json": json.dumps(NETWORK_DATA),
+ "openstack/2015-10-15/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/2015-10-15/user_data": USER_DATA,
+ "openstack/2015-10-15/network_data.json": json.dumps(NETWORK_DATA),
+}
+
+M_PATH = "cloudinit.sources.DataSourceConfigDrive."
+
+
+class TestConfigDriveDataSource(CiTestCase):
+ def setUp(self):
+ super(TestConfigDriveDataSource, self).setUp()
+ self.add_patch(
+ M_PATH + "util.find_devs_with", "m_find_devs_with", return_value=[]
+ )
+ self.tmp = self.tmp_dir()
+
+ def test_ec2_metadata(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ found = ds.read_config_drive(self.tmp)
+ self.assertTrue("ec2-metadata" in found)
+ ec2_md = found["ec2-metadata"]
+ self.assertEqual(EC2_META, ec2_md)
+
+ def test_dev_os_remap(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ cfg_ds.metadata = found["metadata"]
+ name_tests = {
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
+ }
+ for name, dev_name in name_tests.items():
+ with ExitStack() as mocks:
+ provided_name = dev_name[len("/dev/") :]
+ provided_name = "s" + provided_name[1:]
+ find_mock = mocks.enter_context(
+ mock.patch.object(
+ util, "find_devs_with", return_value=[provided_name]
+ )
+ )
+ # We want os.path.exists() to return False on its first call,
+ # and True on its second call. We use a handy generator as
+ # the mock side effect for this. The mocked function returns
+ # what the side effect returns.
+
+ def exists_side_effect():
+ yield False
+ yield True
+
+ exists_mock = mocks.enter_context(
+ mock.patch.object(
+ os.path, "exists", side_effect=exists_side_effect()
+ )
+ )
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+
+ find_mock.assert_called_once_with(mock.ANY)
+ self.assertEqual(exists_mock.call_count, 2)
+
+ def test_dev_os_map(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ os_md = found["metadata"]
+ cfg_ds.metadata = os_md
+ name_tests = {
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
+ }
+ for name, dev_name in name_tests.items():
+ with ExitStack() as mocks:
+ find_mock = mocks.enter_context(
+ mock.patch.object(
+ util, "find_devs_with", return_value=[dev_name]
+ )
+ )
+ exists_mock = mocks.enter_context(
+ mock.patch.object(os.path, "exists", return_value=True)
+ )
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+
+ find_mock.assert_called_once_with(mock.ANY)
+ exists_mock.assert_called_once_with(mock.ANY)
+
+ def test_dev_ec2_remap(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ ec2_md = found["ec2-metadata"]
+ os_md = found["metadata"]
+ cfg_ds.ec2_metadata = ec2_md
+ cfg_ds.metadata = os_md
+ name_tests = {
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
+ None: None,
+ "bob": None,
+ "root2k": None,
+ }
+ for name, dev_name in name_tests.items():
+ # We want os.path.exists() to return False on its first call,
+ # and True on its second call. We use a handy generator as
+ # the mock side effect for this. The mocked function returns
+ # what the side effect returns.
+ def exists_side_effect():
+ yield False
+ yield True
+
+ with mock.patch.object(
+ os.path, "exists", side_effect=exists_side_effect()
+ ):
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+ # We don't assert the call count for os.path.exists() because
+ # not all of the entries in name_tests results in two calls to
+ # that function. Specifically, 'root2k' doesn't seem to call
+ # it at all.
+
+ def test_dev_ec2_map(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ ec2_md = found["ec2-metadata"]
+ os_md = found["metadata"]
+ cfg_ds.ec2_metadata = ec2_md
+ cfg_ds.metadata = os_md
+ name_tests = {
+ "ami": "/dev/sda1",
+ "root": "/dev/sda1",
+ "ephemeral0": "/dev/sda2",
+ "swap": "/dev/sda3",
+ None: None,
+ "bob": None,
+ "root2k": None,
+ }
+ for name, dev_name in name_tests.items():
+ with mock.patch.object(os.path, "exists", return_value=True):
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+
+ def test_dir_valid(self):
+ """Verify a dir is read as such."""
+
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+
+ found = ds.read_config_drive(self.tmp)
+
+ expected_md = copy(OSTACK_META)
+ expected_md["instance-id"] = expected_md["uuid"]
+ expected_md["local-hostname"] = expected_md["hostname"]
+
+ self.assertEqual(USER_DATA, found["userdata"])
+ self.assertEqual(expected_md, found["metadata"])
+ self.assertEqual(NETWORK_DATA, found["networkdata"])
+ self.assertEqual(found["files"]["/etc/foo.cfg"], CONTENT_0)
+ self.assertEqual(found["files"]["/etc/bar/bar.cfg"], CONTENT_1)
+
+ def test_seed_dir_valid_extra(self):
+ """Verify extra files do not affect datasource validity."""
+
+ data = copy(CFG_DRIVE_FILES_V2)
+ data["myfoofile.txt"] = "myfoocontent"
+ data["openstack/latest/random-file.txt"] = "random-content"
+
+ populate_dir(self.tmp, data)
+
+ found = ds.read_config_drive(self.tmp)
+
+ expected_md = copy(OSTACK_META)
+ expected_md["instance-id"] = expected_md["uuid"]
+ expected_md["local-hostname"] = expected_md["hostname"]
+
+ self.assertEqual(expected_md, found["metadata"])
+
+ def test_seed_dir_bad_json_metadata(self):
+ """Verify that bad json in metadata raises BrokenConfigDriveDir."""
+ data = copy(CFG_DRIVE_FILES_V2)
+
+ data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}"
+ data["openstack/2015-10-15/meta_data.json"] = "non-json garbage {}"
+ data["openstack/latest/meta_data.json"] = "non-json garbage {}"
+
+ populate_dir(self.tmp, data)
+
+ self.assertRaises(
+ openstack.BrokenMetadata, ds.read_config_drive, self.tmp
+ )
+
+ def test_seed_dir_no_configdrive(self):
+ """Verify that no metadata raises NonConfigDriveDir."""
+
+ my_d = os.path.join(self.tmp, "non-configdrive")
+ data = copy(CFG_DRIVE_FILES_V2)
+ data["myfoofile.txt"] = "myfoocontent"
+ data["openstack/latest/random-file.txt"] = "random-content"
+ data["content/foo"] = "foocontent"
+
+ self.assertRaises(openstack.NonReadable, ds.read_config_drive, my_d)
+
+ def test_seed_dir_missing(self):
+ """Verify that missing seed_dir raises NonConfigDriveDir."""
+ my_d = os.path.join(self.tmp, "nonexistantdirectory")
+ self.assertRaises(openstack.NonReadable, ds.read_config_drive, my_d)
+
+ def test_find_candidates(self):
+ devs_with_answers = {}
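+ # maps a find_devs_with() criteria string to the devices to report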
+
+ def my_devs_with(*args, **kwargs):
+ criteria = args[0] if len(args) else kwargs.pop("criteria", None)
+ return devs_with_answers.get(criteria, [])
+
+ def my_is_partition(dev):
+ return dev[-1] in "0123456789" and not dev.startswith("sr")
+
+ try:
+ orig_find_devs_with = util.find_devs_with
+ util.find_devs_with = my_devs_with
+
+ orig_is_partition = util.is_partition
+ util.is_partition = my_is_partition
+
+ devs_with_answers = {
+ "TYPE=vfat": [],
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=config-2": ["/dev/vdb"],
+ }
+ self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
+
+ # add a vfat item
+ # zdd reverse sorts after vdb, but config-2 label is preferred
+ devs_with_answers["TYPE=vfat"] = ["/dev/zdd"]
+ self.assertEqual(
+ ["/dev/vdb", "/dev/zdd"], ds.find_candidate_devs()
+ )
+
+ # verify that partitions are considered, that have correct label.
+ devs_with_answers = {
+ "TYPE=vfat": ["/dev/sda1"],
+ "TYPE=iso9660": [],
+ "LABEL=config-2": ["/dev/vdb3"],
+ }
+ self.assertEqual(["/dev/vdb3"], ds.find_candidate_devs())
+
+ # Verify that uppercase labels are also found.
+ devs_with_answers = {
+ "TYPE=vfat": [],
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=CONFIG-2": ["/dev/vdb"],
+ }
+ self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
+
+ finally:
+ util.find_devs_with = orig_find_devs_with
+ util.is_partition = orig_is_partition
+
+ @mock.patch(M_PATH + "on_first_boot")
+ def test_pubkeys_v2(self, on_first_boot):
+ """Verify that public-keys work in config-drive-v2."""
+ myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+ self.assertEqual(
+ myds.get_public_ssh_keys(), [OSTACK_META["public_keys"]["mykey"]]
+ )
+ self.assertEqual("configdrive", myds.cloud_name)
+ self.assertEqual("openstack", myds.platform)
+ self.assertEqual("seed-dir (%s/seed)" % self.tmp, myds.subplatform)
+
+ def test_subplatform_config_drive_when_starts_with_dev(self):
+ """subplatform reports config-drive when source starts with /dev/."""
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ with mock.patch(M_PATH + "find_candidate_devs") as m_find_devs:
+ with mock.patch(M_PATH + "util.mount_cb"):
+ with mock.patch(M_PATH + "on_first_boot"):
+ m_find_devs.return_value = ["/dev/anything"]
+ self.assertEqual(True, cfg_ds.get_data())
+ self.assertEqual("config-disk (/dev/anything)", cfg_ds.subplatform)
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestNetJson(CiTestCase):
+ def setUp(self):
+ super(TestNetJson, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.maxDiff = None
+
+ @mock.patch(M_PATH + "on_first_boot")
+ def test_network_data_is_found(self, on_first_boot):
+ """Verify that network_data is present in ds in config-drive-v2."""
+ myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+ self.assertIsNotNone(myds.network_json)
+
+ @mock.patch(M_PATH + "on_first_boot")
+ def test_network_config_is_converted(self, on_first_boot):
+ """Verify that network_data is converted and present on ds object."""
+ myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+ network_config = openstack.convert_net_json(
+ NETWORK_DATA, known_macs=KNOWN_MACS
+ )
+ self.assertEqual(myds.network_config, network_config)
+
+ def test_network_config_conversion_dhcp6(self):
+ """Test some ipv6 input network json and check the expected
+ conversions."""
+ in_data = {
+ "links": [
+ {
+ "vif_id": "2ecc7709-b3f7-4448-9580-e1ec32d75bbd",
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2ecc7709-b3",
+ },
+ {
+ "vif_id": "2f88d109-5b57-40e6-af32-2472df09dc33",
+ "ethernet_mac_address": "fa:16:3e:d4:57:ad",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2f88d109-5b",
+ },
+ ],
+ "networks": [
+ {
+ "link": "tap2ecc7709-b3",
+ "type": "ipv6_dhcpv6-stateless",
+ "network_id": "6d6357ac-0f70-4afa-8bd7-c274cc4ea235",
+ "id": "network0",
+ },
+ {
+ "link": "tap2f88d109-5b",
+ "type": "ipv6_dhcpv6-stateful",
+ "network_id": "d227a9b3-6960-4d94-8976-ee5788b44f54",
+ "id": "network1",
+ },
+ ],
+ }
+ out_data = {
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "fa:16:3e:69:b0:58",
+ "mtu": None,
+ "name": "enp0s1",
+ "subnets": [{"type": "ipv6_dhcpv6-stateless"}],
+ "type": "physical",
+ },
+ {
+ "mac_address": "fa:16:3e:d4:57:ad",
+ "mtu": None,
+ "name": "enp0s2",
+ "subnets": [{"type": "ipv6_dhcpv6-stateful"}],
+ "type": "physical",
+ "accept-ra": True,
+ },
+ ],
+ }
+ conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS)
+ self.assertEqual(out_data, conv_data)
+
+ def test_network_config_conversions(self):
+ """Tests a bunch of input network json and checks the
+ expected conversions."""
+ in_datas = [
+ NETWORK_DATA,
+ {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [
+ {
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ }
+ ],
+ "links": [
+ {
+ "type": "bridge",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "id": "tap1a81968a-79",
+ "mtu": None,
+ }
+ ],
+ },
+ ]
+ out_datas = [
+ {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:69:b0:58",
+ "name": "enp0s1",
+ "mtu": None,
+ },
+ {
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:d4:57:ad",
+ "name": "enp0s2",
+ "mtu": None,
+ },
+ {
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:05:30:fe",
+ "name": "nic0",
+ "mtu": None,
+ },
+ {
+ "type": "nameserver",
+ "address": "199.204.44.24",
+ },
+ {
+ "type": "nameserver",
+ "address": "199.204.47.54",
+ },
+ ],
+ },
+ {
+ "version": 1,
+ "config": [
+ {
+ "name": "foo3",
+ "mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None,
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "172.19.1.34",
+ "netmask": "255.255.252.0",
+ "type": "static",
+ "ipv4": True,
+ "routes": [
+ {
+ "gateway": "172.19.3.254",
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ }
+ ],
+ }
+ ],
+ },
+ {
+ "type": "nameserver",
+ "address": "172.19.0.12",
+ },
+ ],
+ },
+ ]
+ for in_data, out_data in zip(in_datas, out_datas):
+ conv_data = openstack.convert_net_json(
+ in_data, known_macs=KNOWN_MACS
+ )
+ self.assertEqual(out_data, conv_data)
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestConvertNetworkData(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestConvertNetworkData, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def _getnames_in_config(self, ncfg):
+ return set(
+ [n["name"] for n in ncfg["config"] if n["type"] == "physical"]
+ )
+
+ def test_conversion_fills_names(self):
+ ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS)
+ expected = set(["nic0", "enp0s1", "enp0s2"])
+ found = self._getnames_in_config(ncfg)
+ self.assertEqual(found, expected)
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac):
+ macs = KNOWN_MACS.copy()
+ macs.update(
+ {"fa:16:3e:05:30:fe": "foonic1", "fa:16:3e:69:b0:58": "ens1"}
+ )
+ get_interfaces_by_mac.return_value = macs
+
+ ncfg = openstack.convert_net_json(NETWORK_DATA)
+ expected = set(["nic0", "ens1", "enp0s2"])
+ found = self._getnames_in_config(ncfg)
+ self.assertEqual(found, expected)
+
+ def test_convert_raises_value_error_on_missing_name(self):
+ macs = {"aa:aa:aa:aa:aa:00": "ens1"}
+ self.assertRaises(
+ ValueError,
+ openstack.convert_net_json,
+ NETWORK_DATA,
+ known_macs=macs,
+ )
+
+ def test_conversion_with_route(self):
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_2, known_macs=KNOWN_MACS
+ )
+ # not the best test, but see that we get a route in the
+ # network config and that it gets rendered to an ENI file
+ routes = []
+ for n in ncfg["config"]:
+ for s in n.get("subnets", []):
+ routes.extend(s.get("routes", []))
+ self.assertIn(
+ {"network": "0.0.0.0", "netmask": "0.0.0.0", "gateway": "2.2.2.9"},
+ routes,
+ )
+ eni_renderer = eni.Renderer()
+ eni_renderer.render_network_state(
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
+ eni_rendering = f.read()
+ self.assertIn("route add default gw 2.2.2.9", eni_rendering)
+
+ def test_conversion_with_tap(self):
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_3, known_macs=KNOWN_MACS
+ )
+ physicals = set()
+ for i in ncfg["config"]:
+ if i.get("type") == "physical":
+ physicals.add(i["name"])
+ self.assertEqual(physicals, set(("foo1", "foo2")))
+
+ def test_bond_conversion(self):
+ # light testing of bond conversion and eni rendering of bond
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_BOND, known_macs=KNOWN_MACS
+ )
+ eni_renderer = eni.Renderer()
+
+ eni_renderer.render_network_state(
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
+ eni_rendering = f.read()
+
+ # Verify there are expected interfaces in the net config.
+ interfaces = sorted(
+ [
+ i["name"]
+ for i in ncfg["config"]
+ if i["type"] in ("vlan", "bond", "physical")
+ ]
+ )
+ self.assertEqual(
+ sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]),
+ interfaces,
+ )
+
+ words = eni_rendering.split()
+ # 'eth0' and 'eth1' are the ids. Because their mac addresses
+ # map to other names, we should not see them in the ENI.
+ self.assertNotIn("eth0", words)
+ self.assertNotIn("eth1", words)
+
+ # oeth0 and oeth1 are the interface names for eni.
+ # bond0 will be generated for the bond. Each should be auto.
+ self.assertIn("auto oeth0", eni_rendering)
+ self.assertIn("auto oeth1", eni_rendering)
+ self.assertIn("auto bond0", eni_rendering)
+ # The bond should have the given mac address
+ pos = eni_rendering.find("auto bond0")
+ self.assertIn(BOND_MAC, eni_rendering[pos:])
+
+ def test_vlan(self):
+ # light testing of vlan config conversion and eni rendering
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_VLAN, known_macs=KNOWN_MACS
+ )
+ eni_renderer = eni.Renderer()
+ eni_renderer.render_network_state(
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
+ eni_rendering = f.read()
+
+ self.assertIn("iface enp0s1", eni_rendering)
+ self.assertIn("address 10.0.1.5", eni_rendering)
+ self.assertIn("auto enp0s1.602", eni_rendering)
+
+ def test_mac_addrs_can_be_upper_case(self):
+ # input mac addresses on rackspace may be upper case
+ my_netdata = deepcopy(NETWORK_DATA)
+ for link in my_netdata["links"]:
+ link["ethernet_mac_address"] = link["ethernet_mac_address"].upper()
+
+ ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
+ config_name2mac = {}
+ for n in ncfg["config"]:
+ if n["type"] == "physical":
+ config_name2mac[n["name"]] = n["mac_address"]
+
+ expected = {
+ "nic0": "fa:16:3e:05:30:fe",
+ "enp0s1": "fa:16:3e:69:b0:58",
+ "enp0s2": "fa:16:3e:d4:57:ad",
+ }
+ self.assertEqual(expected, config_name2mac)
+
+ def test_unknown_device_types_accepted(self):
+ # If we don't recognise a link, we should treat it as physical for a
+ # best-effort boot
+ my_netdata = deepcopy(NETWORK_DATA)
+ my_netdata["links"][0]["type"] = "my-special-link-type"
+
+ ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
+ config_name2mac = {}
+ for n in ncfg["config"]:
+ if n["type"] == "physical":
+ config_name2mac[n["name"]] = n["mac_address"]
+
+ expected = {
+ "nic0": "fa:16:3e:05:30:fe",
+ "enp0s1": "fa:16:3e:69:b0:58",
+ "enp0s2": "fa:16:3e:d4:57:ad",
+ }
+ self.assertEqual(expected, config_name2mac)
+
+ # We should, however, warn the user that we don't recognise the type
+ self.assertIn(
+ "Unknown network_data link type (my-special-link-type)",
+ self.logs.getvalue(),
+ )
+
+
+def cfg_ds_from_dir(base_d, files=None):
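+ """Build a DataSourceConfigDrive seeded from base_d/seed.
+
+ A run dir is created for Paths, 'files' (if given) are written into
+ the seed dir, and get_data() must succeed or RuntimeError is raised.
+ """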
+ run = os.path.join(base_d, "run")
+ os.mkdir(run)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": run})
+ )
+ cfg_ds.seed_dir = os.path.join(base_d, "seed")
+ if files:
+ populate_dir(cfg_ds.seed_dir, files)
+ cfg_ds.known_macs = KNOWN_MACS.copy()
+ if not cfg_ds.get_data():
+ raise RuntimeError(
+ "Data source did not extract itself from seed directory %s"
+ % cfg_ds.seed_dir
+ )
+ return cfg_ds
+
+
+def populate_ds_from_read_config(cfg_ds, source, results):
+ """Patch the DataSourceConfigDrive from the results of
+ read_config_drive_dir hopefully in line with what it would have
+ if cfg_ds.get_data had been successfully called"""
+ cfg_ds.source = source
+ cfg_ds.metadata = results.get("metadata")
+ cfg_ds.ec2_metadata = results.get("ec2-metadata")
+ cfg_ds.userdata_raw = results.get("userdata")
+ cfg_ds.version = results.get("version")
+ cfg_ds.network_json = results.get("networkdata")
+ cfg_ds._network_config = openstack.convert_net_json(
+ cfg_ds.network_json, known_macs=KNOWN_MACS
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py
new file mode 100644
index 00000000..f3e6224e
--- /dev/null
+++ b/tests/unittests/sources/test_digitalocean.py
@@ -0,0 +1,389 @@
+# Copyright (C) 2014 Neal Shrader
+#
+# Author: Neal Shrader <neal@digitalocean.com>
+# Author: Ben Howard <bh@digitalocean.com>
+# Author: Scott Moser <smoser@ubuntu.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import helpers, settings
+from cloudinit.sources import DataSourceDigitalOcean
+from cloudinit.sources.helpers import digitalocean
+from tests.unittests.helpers import CiTestCase, mock
+
+DO_MULTIPLE_KEYS = [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co",
+]
+DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co"
+
+# the following JSON was taken from a droplet (that's why it's a string)
+DO_META = json.loads(
+ """
+{
+ "droplet_id": "22532410",
+ "hostname": "utl-96268",
+ "vendor_data": "vendordata goes here",
+ "user_data": "userdata goes here",
+ "public_keys": "",
+ "auth_key": "authorization_key",
+ "region": "nyc3",
+ "interfaces": {
+ "private": [
+ {
+ "ipv4": {
+ "ip_address": "10.132.6.205",
+ "netmask": "255.255.0.0",
+ "gateway": "10.132.0.1"
+ },
+ "mac": "04:01:57:d1:9e:02",
+ "type": "private"
+ }
+ ],
+ "public": [
+ {
+ "ipv4": {
+ "ip_address": "192.0.0.20",
+ "netmask": "255.255.255.0",
+ "gateway": "104.236.0.1"
+ },
+ "ipv6": {
+ "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000",
+ "cidr": 64,
+ "gateway": "2604:A880:0800:0000:0000:0000:0000:0001"
+ },
+ "anchor_ipv4": {
+ "ip_address": "10.0.0.5",
+ "netmask": "255.255.0.0",
+ "gateway": "10.0.0.1"
+ },
+ "mac": "04:01:57:d1:9e:01",
+ "type": "public"
+ }
+ ]
+ },
+ "floating_ip": {
+ "ipv4": {
+ "active": false
+ }
+ },
+ "dns": {
+ "nameservers": [
+ "2001:4860:4860::8844",
+ "2001:4860:4860::8888",
+ "8.8.8.8"
+ ]
+ }
+}
+"""
+)
+
+# This has no private interface
+DO_META_2 = {
+ "droplet_id": 27223699,
+ "hostname": "smtest1",
+ "vendor_data": "\n".join(
+ [
+ '"Content-Type: multipart/mixed; '
+ 'boundary="===============8645434374073493512=="',
+ "MIME-Version: 1.0",
+ "",
+ "--===============8645434374073493512==",
+ "MIME-Version: 1.0"
+ 'Content-Type: text/cloud-config; charset="us-ascii"'
+ "Content-Transfer-Encoding: 7bit"
+ 'Content-Disposition: attachment; filename="cloud-config"'
+ "",
+ "#cloud-config",
+ "disable_root: false",
+ "manage_etc_hosts: true",
+ "",
+ "",
+ "--===============8645434374073493512==",
+ ]
+ ),
+ "public_keys": ["ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"],
+ "auth_key": "88888888888888888888888888888888",
+ "region": "nyc3",
+ "interfaces": {
+ "public": [
+ {
+ "ipv4": {
+ "ip_address": "45.55.249.133",
+ "netmask": "255.255.192.0",
+ "gateway": "45.55.192.1",
+ },
+ "anchor_ipv4": {
+ "ip_address": "10.17.0.5",
+ "netmask": "255.255.0.0",
+ "gateway": "10.17.0.1",
+ },
+ "mac": "ae:cc:08:7c:88:00",
+ "type": "public",
+ }
+ ]
+ },
+ "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}},
+ "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
+ "tags": None,
+}
+
+DO_META["public_keys"] = DO_SINGLE_KEY
+
+MD_URL = "http://169.254.169.254/metadata/v1.json"
+
+
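+# Pretend read_sysinfo() detected a droplet. Note that DO_META keeps
+# its id under 'droplet_id', so the second element here is None.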
+def _mock_dmi():
+ return (True, DO_META.get("id"))
+
+
+class TestDataSourceDigitalOcean(CiTestCase):
+ """
+ Test reading the meta-data
+ """
+
+ def setUp(self):
+ super(TestDataSourceDigitalOcean, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds.use_ip4LL = False
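+ # keep the datasource off the IPv4 link-local networking path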
+ if get_sysinfo is not None:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_sysinfo")
+ def test_returns_false_not_on_docean(self, m_read_sysinfo):
+ m_read_sysinfo.return_value = (False, None)
+ ds = self.get_ds(get_sysinfo=None)
+ self.assertEqual(False, ds.get_data())
+ self.assertTrue(m_read_sysinfo.called)
+
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata")
+ def test_metadata(self, mock_readmd):
+ mock_readmd.return_value = DO_META.copy()
+
+ ds = self.get_ds()
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(DO_META.get("user_data"), ds.get_userdata_raw())
+ self.assertEqual(DO_META.get("vendor_data"), ds.get_vendordata_raw())
+ self.assertEqual(DO_META.get("region"), ds.availability_zone)
+ self.assertEqual(DO_META.get("droplet_id"), ds.get_instance_id())
+ self.assertEqual(DO_META.get("hostname"), ds.get_hostname())
+
+ # Single key
+ self.assertEqual(
+ [DO_META.get("public_keys")], ds.get_public_ssh_keys()
+ )
+
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata")
+ def test_multiple_ssh_keys(self, mock_readmd):
+ metadata = DO_META.copy()
+ metadata["public_keys"] = DO_MULTIPLE_KEYS
+ mock_readmd.return_value = metadata.copy()
+
+ ds = self.get_ds()
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ # Multiple keys
+ self.assertEqual(metadata["public_keys"], ds.get_public_ssh_keys())
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+class TestNetworkConvert(CiTestCase):
+ def _get_networking(self):
+ self.m_get_by_mac.return_value = {
+ "04:01:57:d1:9e:01": "ens1",
+ "04:01:57:d1:9e:02": "ens2",
+ "b8:ae:ed:75:5f:9a": "enp0s25",
+ "ae:cc:08:7c:88:00": "meta2p1",
+ }
+ netcfg = digitalocean.convert_network_configuration(
+ DO_META["interfaces"], DO_META["dns"]["nameservers"]
+ )
+ self.assertIn("config", netcfg)
+ return netcfg
+
+ def setUp(self):
+ super(TestNetworkConvert, self).setUp()
+ self.add_patch("cloudinit.net.get_interfaces_by_mac", "m_get_by_mac")
+
+ def test_networking_defined(self):
+ netcfg = self._get_networking()
+ self.assertIsNotNone(netcfg)
+ dns_defined = False
+
+ for part in netcfg.get("config"):
+ n_type = part.get("type")
+ print("testing part ", n_type, "\n", json.dumps(part, indent=3))
+
+ if n_type == "nameserver":
+ n_address = part.get("address")
+ self.assertIsNotNone(n_address)
+ self.assertEqual(len(n_address), 3)
+
+ dns_resolvers = DO_META["dns"]["nameservers"]
+ for x in n_address:
+ self.assertIn(x, dns_resolvers)
+ dns_defined = True
+
+ else:
+ n_subnets = part.get("type")
+ n_name = part.get("name")
+ n_mac = part.get("mac_address")
+
+ self.assertIsNotNone(n_type)
+ self.assertIsNotNone(n_subnets)
+ self.assertIsNotNone(n_name)
+ self.assertIsNotNone(n_mac)
+
+ self.assertTrue(dns_defined)
+
+ def _get_nic_definition(self, int_type, expected_name):
+ """helper function to return if_type (i.e. public) and the expected
+ name used by cloud-init (i.e eth0)"""
+ netcfg = self._get_networking()
+ meta_def = (DO_META.get("interfaces")).get(int_type)[0]
+
+ self.assertEqual(int_type, meta_def.get("type"))
+
+ for nic_def in netcfg.get("config"):
+ print(nic_def)
+ if nic_def.get("name") == expected_name:
+ return nic_def, meta_def
+
+ def _get_match_subn(self, subnets, ip_addr):
+ """get the matching subnet definition based on ip address"""
+ for subn in subnets:
+ address = subn.get("address")
+ self.assertIsNotNone(address)
+
+ # a plain equality check won't work because ipv6 addresses are
+ # in cidr notation, e.g. fe00::1/64
+ if ip_addr in address:
+ print(json.dumps(subn, indent=3))
+ return subn
+
+ def test_correct_gateways_defined(self):
+ """test to make sure the eth0 ipv4 and ipv6 gateways are defined"""
+ netcfg = self._get_networking()
+ gateways = []
+ for nic_def in netcfg.get("config"):
+ if nic_def.get("type") != "physical":
+ continue
+ for subn in nic_def.get("subnets"):
+ if "gateway" in subn:
+ gateways.append(subn.get("gateway"))
+
+ # we should have two gateways, one ipv4 and one ipv6
+ self.assertEqual(len(gateways), 2)
+
+ # make sure the ipv4 gateway is there
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("ipv4")
+ self.assertIn(ipv4_def.get("gateway"), gateways)
+
+ # make sure the ipv6 gateway is there
+ ipv6_def = meta_def.get("ipv6")
+ self.assertIn(ipv6_def.get("gateway"), gateways)
+
+ def test_public_interface_defined(self):
+ """test that the public interface is defined as eth0"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ self.assertEqual("eth0", nic_def.get("name"))
+ self.assertEqual(meta_def.get("mac"), nic_def.get("mac_address"))
+ self.assertEqual("physical", nic_def.get("type"))
+
+ def test_private_interface_defined(self):
+ """test that the private interface is defined as eth1"""
+ (nic_def, meta_def) = self._get_nic_definition("private", "eth1")
+ self.assertEqual("eth1", nic_def.get("name"))
+ self.assertEqual(meta_def.get("mac"), nic_def.get("mac_address"))
+ self.assertEqual("physical", nic_def.get("type"))
+
+ def test_public_interface_ipv6(self):
+ """test public ipv6 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv6_def = meta_def.get("ipv6")
+ self.assertIsNotNone(ipv6_def)
+
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv6_def.get("ip_address")
+ )
+
+ cidr_notated_address = "{0}/{1}".format(
+ ipv6_def.get("ip_address"), ipv6_def.get("cidr")
+ )
+
+ self.assertEqual(cidr_notated_address, subn_def.get("address"))
+ self.assertEqual(ipv6_def.get("gateway"), subn_def.get("gateway"))
+
+ def test_public_interface_ipv4(self):
+ """test public ipv4 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("ipv4")
+ self.assertIsNotNone(ipv4_def)
+
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv4_def.get("ip_address")
+ )
+
+ self.assertEqual(ipv4_def.get("netmask"), subn_def.get("netmask"))
+ self.assertEqual(ipv4_def.get("gateway"), subn_def.get("gateway"))
+
+ def test_public_interface_anchor_ipv4(self):
+ """test public ipv4 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("anchor_ipv4")
+ self.assertIsNotNone(ipv4_def)
+
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv4_def.get("ip_address")
+ )
+
+ self.assertEqual(ipv4_def.get("netmask"), subn_def.get("netmask"))
+ self.assertNotIn("gateway", subn_def)
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_convert_without_private(self, m_get_by_mac):
+ m_get_by_mac.return_value = {
+ "b8:ae:ed:75:5f:9a": "enp0s25",
+ "ae:cc:08:7c:88:00": "meta2p1",
+ }
+ netcfg = digitalocean.convert_network_configuration(
+ DO_META_2["interfaces"], DO_META_2["dns"]["nameservers"]
+ )
+
+ # print(netcfg)
+ byname = {}
+ for i in netcfg["config"]:
+ if "name" in i:
+ if i["name"] in byname:
+ raise ValueError(
+ "name '%s' in config twice: %s" % (i["name"], netcfg)
+ )
+ byname[i["name"]] = i
+ self.assertTrue("eth0" in byname)
+ self.assertTrue("subnets" in byname["eth0"])
+ eth0 = byname["eth0"]
+ self.assertEqual(
+ sorted(["45.55.249.133", "10.17.0.5"]),
+ sorted([i["address"] for i in eth0["subnets"]]),
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py
new file mode 100644
index 00000000..b376660d
--- /dev/null
+++ b/tests/unittests/sources/test_ec2.py
@@ -0,0 +1,1125 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import json
+from unittest import mock
+
+import httpretty
+import requests
+
+from cloudinit import helpers
+from cloudinit.sources import DataSourceEc2 as ec2
+from tests.unittests import helpers as test_helpers
+
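+# Fixture for the IMDS 'dynamic' tree's instance-identity document.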
+DYNAMIC_METADATA = {
+ "instance-identity": {
+ "document": json.dumps(
+ {
+ "devpayProductCodes": None,
+ "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"],
+ "availabilityZone": "us-west-2b",
+ "privateIp": "10.158.112.84",
+ "version": "2017-09-30",
+ "instanceId": "my-identity-id",
+ "billingProducts": None,
+ "instanceType": "t2.micro",
+ "accountId": "123456789012",
+ "imageId": "ami-5fb8c835",
+ "pendingTime": "2016-11-19T16:32:11Z",
+ "architecture": "x86_64",
+ "kernelId": None,
+ "ramdiskId": None,
+ "region": "us-west-2",
+ }
+ )
+ }
+}
+
+
+# collected from api version 2016-09-02/ with
+# python3 -c 'import json
+# from cloudinit.ec2_utils import get_instance_metadata as gm
+# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))'
+# Note that the MAC addresses have been modified to sort in the opposite order
+# to the device-number attribute, to test LP: #1876312
+DEFAULT_METADATA = {
+ "ami-id": "ami-8b92b4ee",
+ "ami-launch-index": "0",
+ "ami-manifest-path": "(unknown)",
+ "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"},
+ "hostname": "ip-172-31-31-158.us-east-2.compute.internal",
+ "instance-action": "none",
+ "instance-id": "i-0a33f80f09c96477f",
+ "instance-type": "t2.small",
+ "local-hostname": "ip-172-3-3-15.us-east-2.compute.internal",
+ "local-ipv4": "172.3.3.15",
+ "mac": "06:17:04:d7:26:09",
+ "metrics": {"vhostmd": '<?xml version="1.0" encoding="UTF-8"?>'},
+ "network": {
+ "interfaces": {
+ "macs": {
+ "06:17:04:d7:26:09": {
+ "device-number": "0",
+ "interface-id": "eni-e44ef49e",
+ "ipv4-associations": {"13.59.77.202": "172.3.3.15"},
+ "ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc",
+ "local-hostname": (
+ "ip-172-3-3-15.us-east-2.compute.internal"
+ ),
+ "local-ipv4s": "172.3.3.15",
+ "mac": "06:17:04:d7:26:09",
+ "owner-id": "950047163771",
+ "public-hostname": (
+ "ec2-13-59-77-202.us-east-2.compute.amazonaws.com"
+ ),
+ "public-ipv4s": "13.59.77.202",
+ "security-group-ids": "sg-5a61d333",
+ "security-groups": "wide-open",
+ "subnet-id": "subnet-20b8565b",
+ "subnet-ipv4-cidr-block": "172.31.16.0/20",
+ "subnet-ipv6-cidr-blocks": "2600:1f16:aeb:b20b::/64",
+ "vpc-id": "vpc-87e72bee",
+ "vpc-ipv4-cidr-block": "172.31.0.0/16",
+ "vpc-ipv4-cidr-blocks": "172.31.0.0/16",
+ "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56",
+ },
+ "06:17:04:d7:26:08": {
+ "device-number": "1", # Only IPv4 local config
+ "interface-id": "eni-e44ef49f",
+ "ipv4-associations": {"": "172.3.3.16"},
+ "ipv6s": "", # No IPv6 config
+ "local-hostname": (
+ "ip-172-3-3-16.us-east-2.compute.internal"
+ ),
+ "local-ipv4s": "172.3.3.16",
+ "mac": "06:17:04:d7:26:08",
+ "owner-id": "950047163771",
+ "public-hostname": (
+ "ec2-172-3-3-16.us-east-2.compute.amazonaws.com"
+ ),
+ "public-ipv4s": "", # No public ipv4 config
+ "security-group-ids": "sg-5a61d333",
+ "security-groups": "wide-open",
+ "subnet-id": "subnet-20b8565b",
+ "subnet-ipv4-cidr-block": "172.31.16.0/20",
+ "subnet-ipv6-cidr-blocks": "",
+ "vpc-id": "vpc-87e72bee",
+ "vpc-ipv4-cidr-block": "172.31.0.0/16",
+ "vpc-ipv4-cidr-blocks": "172.31.0.0/16",
+ "vpc-ipv6-cidr-blocks": "",
+ },
+ }
+ }
+ },
+ "placement": {"availability-zone": "us-east-2b"},
+ "profile": "default-hvm",
+ "public-hostname": "ec2-13-59-77-202.us-east-2.compute.amazonaws.com",
+ "public-ipv4": "13.59.77.202",
+ "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]},
+ "reservation-id": "r-01efbc9996bac1bd6",
+ "security-groups": "my-wide-open",
+ "services": {"domain": "amazonaws.com", "partition": "aws"},
+}
+
+# collected from api version 2018-09-24/ with
+# python3 -c 'import json
+# from cloudinit.ec2_utils import get_instance_metadata as gm
+# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))'
+
+NIC1_MD_IPV4_IPV6_MULTI_IP = {
+ "device-number": "0",
+ "interface-id": "eni-0d6335689899ce9cc",
+ "ipv4-associations": {"18.218.219.181": "172.31.44.13"},
+ "ipv6s": [
+ "2600:1f16:292:100:c187:593c:4349:136",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9",
+ "2600:1f16:292:100:f152:2222:3333:4444",
+ ],
+ "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+ "local-ipv4s": ["172.31.44.13", "172.31.45.70"],
+ "mac": "0a:07:84:3d:6e:38",
+ "owner-id": "329910648901",
+ "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com",
+ "public-ipv4s": "18.218.219.181",
+ "security-group-ids": "sg-0c387755222ba8d2e",
+ "security-groups": "launch-wizard-4",
+ "subnet-id": "subnet-9d7ba0d1",
+ "subnet-ipv4-cidr-block": "172.31.32.0/20",
+ "subnet_ipv6_cidr_blocks": "2600:1f16:292:100::/64",
+ "vpc-id": "vpc-a07f62c8",
+ "vpc-ipv4-cidr-block": "172.31.0.0/16",
+ "vpc-ipv4-cidr-blocks": "172.31.0.0/16",
+ "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56",
+}
+
+NIC2_MD = {
+ "device-number": "1",
+ "interface-id": "eni-043cdce36ded5e79f",
+ "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal",
+ "local-ipv4s": "172.31.47.221",
+ "mac": "0a:75:69:92:e2:16",
+ "owner-id": "329910648901",
+ "security-group-ids": "sg-0d68fef37d8cc9b77",
+ "security-groups": "launch-wizard-17",
+ "subnet-id": "subnet-9d7ba0d1",
+ "subnet-ipv4-cidr-block": "172.31.32.0/20",
+ "vpc-id": "vpc-a07f62c8",
+ "vpc-ipv4-cidr-block": "172.31.0.0/16",
+ "vpc-ipv4-cidr-blocks": "172.31.0.0/16",
+}
+
+SECONDARY_IP_METADATA_2018_09_24 = {
+ "ami-id": "ami-0986c2ac728528ac2",
+ "ami-launch-index": "0",
+ "ami-manifest-path": "(unknown)",
+ "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"},
+ "events": {"maintenance": {"history": "[]", "scheduled": "[]"}},
+ "hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+ "identity-credentials": {
+ "ec2": {
+ "info": {
+ "AccountId": "329910648901",
+ "Code": "Success",
+ "LastUpdated": "2019-07-06T14:22:56Z",
+ }
+ }
+ },
+ "instance-action": "none",
+ "instance-id": "i-069e01e8cc43732f8",
+ "instance-type": "t2.micro",
+ "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+ "local-ipv4": "172.31.44.13",
+ "mac": "0a:07:84:3d:6e:38",
+ "metrics": {"vhostmd": '<?xml version="1.0" encoding="UTF-8"?>'},
+ "network": {
+ "interfaces": {
+ "macs": {
+ "0a:07:84:3d:6e:38": NIC1_MD_IPV4_IPV6_MULTI_IP,
+ }
+ }
+ },
+ "placement": {"availability-zone": "us-east-2c"},
+ "profile": "default-hvm",
+ "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com",
+ "public-ipv4": "18.218.219.181",
+ "public-keys": {"yourkeyname,e": ["ssh-rsa AAAAW...DZ yourkeyname"]},
+ "reservation-id": "r-09b4917135cdd33be",
+ "security-groups": "launch-wizard-4",
+ "services": {"domain": "amazonaws.com", "partition": "aws"},
+}
+
+M_PATH_NET = "cloudinit.sources.DataSourceEc2.net."
+
+
+def _register_ssh_keys(rfunc, base_url, keys_data):
+ """handle ssh key inconsistencies.
+
+ public-keys in the ec2 metadata is inconsistently formated compared
+ to other entries.
+ Given keys_data of {name1: pubkey1, name2: pubkey2}
+
+ This registers the following urls:
+ base_url 0={name1}\n1={name2} # (for each name)
+ base_url/ 0={name1}\n1={name2} # (for each name)
+ base_url/0 openssh-key
+ base_url/0/ openssh-key
+ base_url/0/openssh-key {pubkey1}
+ base_url/0/openssh-key/ {pubkey1}
+ ...
+ """
+
+ base_url = base_url.rstrip("/")
+ odd_index = "\n".join(
+ ["{0}={1}".format(n, name) for n, name in enumerate(sorted(keys_data))]
+ )
+
+ rfunc(base_url, odd_index)
+ rfunc(base_url + "/", odd_index)
+
+ for n, name in enumerate(sorted(keys_data)):
+ val = keys_data[name]
+ if isinstance(val, list):
+ val = "\n".join(val)
+ burl = base_url + "/%s" % n
+ rfunc(burl, "openssh-key")
+ rfunc(burl + "/", "openssh-key")
+ rfunc(burl + "/%s/openssh-key" % name, val)
+ rfunc(burl + "/%s/openssh-key/" % name, val)
+
+
+def register_mock_metaserver(base_url, data):
+ """Register with httpretty a ec2 metadata like service serving 'data'.
+
+ If given a dictionary, it will populate urls under base_url for
+ that dictionary. For example, input of
+ {"instance-id": "i-abc", "mac": "00:16:3e:00:00:00"}
+ populates
+ base_url with 'instance-id\nmac'
+ base_url/ with 'instance-id\nmac'
+ base_url/instance-id with i-abc
+ base_url/mac with 00:16:3e:00:00:00
+ In the index, references to lists or dictionaries have a trailing /.
+ """
+
+ def register_helper(register, base_url, body):
+ if not isinstance(base_url, str):
+ register(base_url, body)
+ return
+ base_url = base_url.rstrip("/")
+ if isinstance(body, str):
+ register(base_url, body)
+ elif isinstance(body, list):
+ register(base_url, "\n".join(body) + "\n")
+ register(base_url + "/", "\n".join(body) + "\n")
+ elif isinstance(body, dict):
+ vals = []
+ for k, v in body.items():
+ if k == "public-keys":
+ _register_ssh_keys(register, base_url + "/public-keys/", v)
+ continue
+ suffix = k.rstrip("/")
+ if not isinstance(v, (str, list)):
+ suffix += "/"
+ vals.append(suffix)
+ url = base_url + "/" + suffix
+ register_helper(register, url, v)
+ register(base_url, "\n".join(vals) + "\n")
+ register(base_url + "/", "\n".join(vals) + "\n")
+ elif body is None:
+ register(base_url, "not found", status=404)
+
+ def myreg(*argc, **kwargs):
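+        # The IMDSv2 token route is the only PUT; every other metadata
+        # path is registered as a plain GET.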
+ url = argc[0]
+ method = httpretty.PUT if ec2.API_TOKEN_ROUTE in url else httpretty.GET
+ return httpretty.register_uri(method, *argc, **kwargs)
+
+ register_helper(myreg, base_url, data)
+
+
+class TestEc2(test_helpers.HttprettyTestCase):
+ with_logs = True
+ maxDiff = None
+
+ valid_platform_data = {
+ "uuid": "ec212f79-87d1-2f1d-588f-d86dc0fd5412",
+ "uuid_source": "dmi",
+ "serial": "ec212f79-87d1-2f1d-588f-d86dc0fd5412",
+ }
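+    # The leading "ec2" on the DMI uuid/serial above is what identifies
+    # the platform data as AWS.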
+
+ def setUp(self):
+ super(TestEc2, self).setUp()
+ self.datasource = ec2.DataSourceEc2
+ self.metadata_addr = self.datasource.metadata_urls[0]
+ self.tmp = self.tmp_dir()
+
+ def data_url(self, version, data_item="meta-data"):
+ """Return a metadata url based on the version provided."""
+ return "/".join([self.metadata_addr, version, data_item])
+
+ def _patch_add_cleanup(self, mpath, *args, **kwargs):
+ p = mock.patch(mpath, *args, **kwargs)
+ p.start()
+ self.addCleanup(p.stop)
+
+ def _setup_ds(self, sys_cfg, platform_data, md, md_version=None):
+ self.uris = []
+ distro = {}
+ paths = helpers.Paths({"run_dir": self.tmp})
+ if sys_cfg is None:
+ sys_cfg = {}
+ ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths)
+ if not md_version:
+ md_version = ds.min_metadata_version
+ if platform_data is not None:
+ self._patch_add_cleanup(
+ "cloudinit.sources.DataSourceEc2._collect_platform_data",
+ return_value=platform_data,
+ )
+
+ if md:
+ all_versions = [
+ ds.min_metadata_version
+ ] + ds.extended_metadata_versions
+ token_url = self.data_url("latest", data_item="api/token")
+ register_mock_metaserver(token_url, "API-TOKEN")
+ for version in all_versions:
+ metadata_url = self.data_url(version) + "/"
+ if version == md_version:
+ # Register all metadata for desired version
+ register_mock_metaserver(
+ metadata_url, md.get("md", DEFAULT_METADATA)
+ )
+ userdata_url = self.data_url(
+ version, data_item="user-data"
+ )
+ register_mock_metaserver(userdata_url, md.get("ud", ""))
+ identity_url = self.data_url(
+ version, data_item="dynamic/instance-identity"
+ )
+ register_mock_metaserver(
+ identity_url, md.get("id", DYNAMIC_METADATA)
+ )
+ else:
+ instance_id_url = metadata_url + "instance-id"
+ if version == ds.min_metadata_version:
+ # Add min_metadata_version service availability check
+ register_mock_metaserver(
+ instance_id_url, DEFAULT_METADATA["instance-id"]
+ )
+ else:
+ # Register 404s for all unrequested extended versions
+ register_mock_metaserver(instance_id_url, None)
+ return ds
+
+ def test_network_config_property_returns_version_2_network_data(self):
+ """network_config property returns network version 2 for metadata"""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
+ with mock.patch(find_fallback_path) as m_find_fallback:
+ m_find_fallback.return_value = "eth9"
+ ds.get_data()
+
+ mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": "06:17:04:d7:26:09"},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
+ with mock.patch(patch_path) as m_get_interfaces_by_mac:
+ with mock.patch(find_fallback_path) as m_find_fallback:
+ with mock.patch(get_interface_mac_path) as m_get_mac:
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
+ m_get_mac.return_value = mac1
+ self.assertEqual(expected, ds.network_config)
+
+ def test_network_config_property_set_dhcp4(self):
+ """network_config property configures dhcp4 on nics with local-ipv4s.
+
+ Only one device is configured based on get_interfaces_by_mac even when
+ multiple MACs exist in metadata.
+ """
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
+ with mock.patch(find_fallback_path) as m_find_fallback:
+ m_find_fallback.return_value = "eth9"
+ ds.get_data()
+
+ mac1 = "06:17:04:d7:26:08" # IPv4 only in DEFAULT_METADATA
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1.lower()},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
+ with mock.patch(patch_path) as m_get_interfaces_by_mac:
+ with mock.patch(find_fallback_path) as m_find_fallback:
+ with mock.patch(get_interface_mac_path) as m_get_mac:
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
+ m_get_mac.return_value = mac1
+ self.assertEqual(expected, ds.network_config)
+
+ def test_network_config_property_secondary_private_ips(self):
+ """network_config property configures any secondary ipv4 addresses.
+
+ Only one device is configured based on get_interfaces_by_mac even when
+ multiple MACs exist in metadata.
+ """
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": SECONDARY_IP_METADATA_2018_09_24},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
+ with mock.patch(find_fallback_path) as m_find_fallback:
+ m_find_fallback.return_value = "eth9"
+ ds.get_data()
+
+ mac1 = "0a:07:84:3d:6e:38" # 1 secondary IPv4 and 2 secondary IPv6
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1},
+ "set-name": "eth9",
+ "addresses": [
+ "172.31.45.70/20",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
+ with mock.patch(patch_path) as m_get_interfaces_by_mac:
+ with mock.patch(find_fallback_path) as m_find_fallback:
+ with mock.patch(get_interface_mac_path) as m_get_mac:
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
+ m_get_mac.return_value = mac1
+ self.assertEqual(expected, ds.network_config)
+
+ def test_network_config_property_is_cached_in_datasource(self):
+ """network_config property is cached in DataSourceEc2."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ ds._network_config = {"cached": "data"}
+ self.assertEqual({"cached": "data"}, ds.network_config)
+
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp):
+ """Refresh the network_config Ec2 cache if network key is absent.
+
+ This catches an upgrade issue where obj.pkl contained stale metadata
+ which lacked newly required network key.
+ """
+ old_metadata = copy.deepcopy(DEFAULT_METADATA)
+ old_metadata.pop("network")
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": old_metadata},
+ )
+ self.assertTrue(ds.get_data())
+ # Provide new revision of metadata that contains network data
+ register_mock_metaserver(
+ "http://169.254.169.254/2009-04-04/meta-data/", DEFAULT_METADATA
+ )
+ mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA
+ get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac"
+ ds.fallback_nic = "eth9"
+ with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ nc = ds.network_config # Will re-crawl network metadata
+ self.assertIsNotNone(nc)
+ self.assertIn(
+ "Refreshing stale metadata from prior to upgrade",
+ self.logs.getvalue(),
+ )
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ self.assertEqual(expected, ds.network_config)
+
+ def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self):
+ """get_instance-id gets DataSourceEc2Local.identity if not present.
+
+ This handles an upgrade case where the old pickled datasource didn't
+ set up self.identity, but 'systemctl cloud-init init' runs
+ get_instance_id which traces on missing self.identity. lp:1748354.
+ """
+ self.datasource = ec2.DataSourceEc2Local
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ # Mock 404s on all versions except latest
+ all_versions = [
+ ds.min_metadata_version
+ ] + ds.extended_metadata_versions
+ for ver in all_versions[:-1]:
+ register_mock_metaserver(
+ "http://169.254.169.254/{0}/meta-data/instance-id".format(ver),
+ None,
+ )
+ ds.metadata_address = "http://169.254.169.254"
+ register_mock_metaserver(
+ "{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]),
+ DEFAULT_METADATA,
+ )
+ # Register dynamic/instance-identity document which we now read.
+ register_mock_metaserver(
+ "{0}/{1}/dynamic/".format(ds.metadata_address, all_versions[-1]),
+ DYNAMIC_METADATA,
+ )
+ ds._cloud_name = ec2.CloudNames.AWS
+ # Setup cached metadata on the Datasource
+ ds.metadata = DEFAULT_METADATA
+ self.assertEqual("my-identity-id", ds.get_instance_id())
+
+ def test_classic_instance_true(self):
+ """If no vpc-id in metadata, is_classic_instance must return true."""
+ md_copy = copy.deepcopy(DEFAULT_METADATA)
+ ifaces_md = md_copy.get("network", {}).get("interfaces", {})
+ for _mac, mac_data in ifaces_md.get("macs", {}).items():
+ if "vpc-id" in mac_data:
+ del mac_data["vpc-id"]
+
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": md_copy},
+ )
+ self.assertTrue(ds.get_data())
+ self.assertTrue(ds.is_classic_instance())
+
+ def test_classic_instance_false(self):
+ """If vpc-id in metadata, is_classic_instance must return false."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ self.assertTrue(ds.get_data())
+ self.assertFalse(ds.is_classic_instance())
+
+ def test_aws_inaccessible_imds_service_fails_with_retries(self):
+ """Inaccessibility of http://169.254.169.254 are retried."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md=None,
+ )
+
+ conn_error = requests.exceptions.ConnectionError(
+ "[Errno 113] no route to host"
+ )
+
+ mock_success = mock.MagicMock(contents=b"fakesuccess")
+ mock_success.ok.return_value = True
+
+ with mock.patch("cloudinit.url_helper.readurl") as m_readurl:
+ m_readurl.side_effect = (conn_error, conn_error, mock_success)
+ with mock.patch("cloudinit.url_helper.time.sleep"):
+ self.assertTrue(ds.wait_for_metadata_service())
+
+        # All three attempts (two failures, then success) hit the token URL
+ self.assertEqual(3, len(m_readurl.call_args_list))
+ for readurl_call in m_readurl.call_args_list:
+ self.assertIn("latest/api/token", readurl_call[0][0])
+
+ def test_aws_token_403_fails_without_retries(self):
+ """Verify that 403s fetching AWS tokens are not retried."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md=None,
+ )
+ token_url = self.data_url("latest", data_item="api/token")
+ httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403)
+ self.assertFalse(ds.get_data())
+ # Just one /latest/api/token request
+ logs = self.logs.getvalue()
+ failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0'
+ expected_logs = [
+ "WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is"
+ " disabled. Aborting.",
+ "WARNING: IMDS's HTTP endpoint is probably disabled",
+ failed_put_log,
+ ]
+ for log in expected_logs:
+ self.assertIn(log, logs)
+ self.assertEqual(
+ 1,
+ len(
+ [line for line in logs.splitlines() if failed_put_log in line]
+ ),
+ )
+
+ def test_aws_token_redacted(self):
+ """Verify that aws tokens are redacted when logged."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ self.assertTrue(ds.get_data())
+ all_logs = self.logs.getvalue().splitlines()
+ REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'"
+ REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'"
+ logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log]
+ logs_with_redacted = [log for log in all_logs if REDACT_TOK in log]
+ logs_with_token = [log for log in all_logs if "API-TOKEN" in log]
+ self.assertEqual(1, len(logs_with_redacted_ttl))
+ self.assertEqual(81, len(logs_with_redacted))
+ self.assertEqual(0, len(logs_with_token))
+
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_valid_platform_with_strict_true(self, m_dhcp):
+ """Valid platform data should return true with strict_id true."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual("aws", ds.cloud_name)
+ self.assertEqual("ec2", ds.platform_type)
+ self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform)
+
+ def test_valid_platform_with_strict_false(self):
+ """Valid platform data should return true with strict_id false."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ def test_unknown_platform_with_strict_true(self):
+ """Unknown platform data with strict_id true should return False."""
+ uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
+ ds = self._setup_ds(
+ platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""},
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ ret = ds.get_data()
+ self.assertFalse(ret)
+
+ def test_unknown_platform_with_strict_false(self):
+ """Unknown platform data with strict_id false should return True."""
+ uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
+ ds = self._setup_ds(
+ platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""},
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ def test_ec2_local_returns_false_on_non_aws(self):
+ """DataSourceEc2Local returns False when platform is not AWS."""
+ self.datasource = ec2.DataSourceEc2Local
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ platform_attrs = [
+ attr
+ for attr in ec2.CloudNames.__dict__.keys()
+ if not attr.startswith("__")
+ ]
+ for attr_name in platform_attrs:
+ platform_name = getattr(ec2.CloudNames, attr_name)
+ if platform_name != "aws":
+ ds._cloud_name = platform_name
+ ret = ds.get_data()
+ self.assertEqual("ec2", ds.platform_type)
+ self.assertFalse(ret)
+ message = (
+ "Local Ec2 mode only supported on ('aws',),"
+ " not {0}".format(platform_name)
+ )
+ self.assertIn(message, self.logs.getvalue())
+
+ @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
+ def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd):
+ """DataSourceEc2Local returns False on BSD.
+
+        FreeBSD doesn't support running dhclient with -sf in a sandbox.
+ """
+ m_is_freebsd.return_value = True
+ self.datasource = ec2.DataSourceEc2Local
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ ret = ds.get_data()
+ self.assertFalse(ret)
+ self.assertIn(
+ "FreeBSD doesn't support running dhclient with -sf",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
+ def test_ec2_local_performs_dhcp_on_non_bsd(
+ self, m_is_bsd, m_dhcp, m_fallback_nic, m_net
+ ):
+ """Ec2Local returns True for valid platform data on non-BSD with dhcp.
+
+ DataSourceEc2Local will setup initial IPv4 network via dhcp discovery.
+        Then the metadata service is crawled for more network config info.
+ When the platform data is valid, return True.
+ """
+
+ m_fallback_nic.return_value = "eth9"
+ m_is_bsd.return_value = False
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.2.255",
+ }
+ ]
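+        # This fake DHCP lease is what EphemeralIPv4Network should be
+        # handed; the expected values are asserted against m_net below.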
+ self.datasource = ec2.DataSourceEc2Local
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ m_dhcp.assert_called_once_with("eth9", None)
+ m_net.assert_called_once_with(
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
+ self.assertIn("Crawl of metadata service took", self.logs.getvalue())
+
+
+class TestGetSecondaryAddresses(test_helpers.CiTestCase):
+
+ mac = "06:17:04:d7:26:ff"
+ with_logs = True
+
+ def test_md_with_no_secondary_addresses(self):
+ """Empty list is returned when nic metadata contains no secondary ip"""
+ self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac))
+
+ def test_md_with_secondary_v4_and_v6_addresses(self):
+ """All secondary addresses are returned from nic metadata"""
+ self.assertEqual(
+ [
+ "172.31.45.70/20",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac),
+ )
+
+ def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self):
+ """Any invalid subnet-ipv(4|6)-cidr-block values use defaults"""
+ invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP)
+ invalid_cidr_md["subnet-ipv4-cidr-block"] = "something-unexpected"
+ invalid_cidr_md["subnet-ipv6-cidr-block"] = "not/sure/what/this/is"
+ self.assertEqual(
+ [
+ "172.31.45.70/24",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ ec2.get_secondary_addresses(invalid_cidr_md, self.mac),
+ )
+ expected_logs = [
+ "WARNING: Could not parse subnet-ipv4-cidr-block"
+ " something-unexpected for mac 06:17:04:d7:26:ff."
+ " ipv4 network config prefix defaults to /24",
+ "WARNING: Could not parse subnet-ipv6-cidr-block"
+ " not/sure/what/this/is for mac 06:17:04:d7:26:ff."
+ " ipv6 network config prefix defaults to /128",
+ ]
+ logs = self.logs.getvalue()
+ for log in expected_logs:
+ self.assertIn(log, logs)
+
+
+class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
+ def setUp(self):
+ super(TestConvertEc2MetadataNetworkConfig, self).setUp()
+ self.mac1 = "06:17:04:d7:26:09"
+ interface_dict = copy.deepcopy(
+ DEFAULT_METADATA["network"]["interfaces"]["macs"][self.mac1]
+ )
+ # These tests are written assuming the base interface doesn't have IPv6
+ interface_dict.pop("ipv6s")
+ self.network_metadata = {
+ "interfaces": {"macs": {self.mac1: interface_dict}}
+ }
+
+ def test_convert_ec2_metadata_network_config_skips_absent_macs(self):
+ """Any mac absent from metadata is skipped by network config."""
+        macs_to_nics = {self.mac1: "eth9", "DE:AD:BE:EF:FF:FF": "virtualnic2"}
+
+ # DE:AD:BE:EF:FF:FF represented by OS but not in metadata
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
+ self.assertEqual(
+ expected,
+ ec2.convert_ec2_metadata_network_config(
+ self.network_metadata, macs_to_nics
+ ),
+ )
+
+ def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self):
+ """Config dhcp6 when ipv6s is in metadata for a mac."""
+ macs_to_nics = {self.mac1: "eth9"}
+ network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s")
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ self.assertEqual(
+ expected,
+ ec2.convert_ec2_metadata_network_config(
+ network_metadata_ipv6, macs_to_nics
+ ),
+ )
+
+ def test_convert_ec2_metadata_network_config_local_only_dhcp4(self):
+ """Config dhcp4 when there are no public addresses in public-ipv4s."""
+ macs_to_nics = {self.mac1: "eth9"}
+ network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["local-ipv4s"] = "172.3.3.15"
+ nic1_metadata.pop("public-ipv4s")
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
+ self.assertEqual(
+ expected,
+ ec2.convert_ec2_metadata_network_config(
+ network_metadata_ipv6, macs_to_nics
+ ),
+ )
+
+ def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self):
+ """Config dhcp4 on fallback_nic when there are no ipv4 addresses."""
+ macs_to_nics = {self.mac1: "eth9"}
+ network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["public-ipv4s"] = ""
+
+ # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config.
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
+ self.assertEqual(
+ expected,
+ ec2.convert_ec2_metadata_network_config(
+ network_metadata_ipv6, macs_to_nics, fallback_nic="eth9"
+ ),
+ )
+
+ def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self):
+ """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4."""
+ macs_to_nics = {self.mac1: "eth9"}
+ network_metadata_both = copy.deepcopy(self.network_metadata)
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s")
+ nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ self.assertEqual(
+ expected,
+ ec2.convert_ec2_metadata_network_config(
+ network_metadata_both, macs_to_nics
+ ),
+ )
+
+ def test_convert_ec2_metadata_network_config_handles_multiple_nics(self):
+ """DHCP route-metric increases on secondary NICs for IPv4 and IPv6."""
+ mac2 = "06:17:04:d7:26:08"
+ macs_to_nics = {self.mac1: "eth9", mac2: "eth10"}
+ network_metadata_both = copy.deepcopy(self.network_metadata)
+ # Add 2nd nic info
+ network_metadata_both["interfaces"]["macs"][mac2] = NIC2_MD
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg
+ nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ },
+ "eth10": {
+ "match": {"macaddress": mac2},
+ "set-name": "eth10",
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ "dhcp6": False,
+ },
+ },
+ }
+ self.assertEqual(
+ expected,
+ ec2.convert_ec2_metadata_network_config(
+ network_metadata_both, macs_to_nics
+ ),
+ )
+
+ def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self):
+ """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists."""
+ macs_to_nics = {self.mac1: "eth9"}
+ network_metadata_both = copy.deepcopy(self.network_metadata)
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ self.assertEqual(
+ expected,
+ ec2.convert_ec2_metadata_network_config(
+ network_metadata_both, macs_to_nics
+ ),
+ )
+
+ def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self):
+ """Convert Ec2 Metadata calls get_interfaces_by_mac by default."""
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ with mock.patch(patch_path) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.return_value = {self.mac1: "eth9"}
+ self.assertEqual(
+ expected,
+ ec2.convert_ec2_metadata_network_config(self.network_metadata),
+ )
+
+
+class TestIdentifyPlatform(test_helpers.CiTestCase):
+ def collmock(self, **kwargs):
+ """return non-special _collect_platform_data updated with changes."""
+ unspecial = {
+ "asset_tag": "3857-0037-2746-7462-1818-3997-77",
+ "serial": "H23-C4J3JV-R6",
+ "uuid": "81c7e555-6471-4833-9551-1ab366c4cfd2",
+ "uuid_source": "dmi",
+ "vendor": "tothecloud",
+ }
+ unspecial.update(**kwargs)
+ return unspecial
+
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
+ def test_identify_zstack(self, m_collect):
+ """zstack should be identified if chassis-asset-tag
+ ends in .zstack.io
+ """
+ m_collect.return_value = self.collmock(asset_tag="123456.zstack.io")
+ self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform())
+
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
+ def test_identify_zstack_full_domain_only(self, m_collect):
+ """zstack asset-tag matching should match only on
+ full domain boundary.
+ """
+ m_collect.return_value = self.collmock(asset_tag="123456.buzzstack.io")
+ self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
+ def test_identify_e24cloud(self, m_collect):
+ """e24cloud identified if vendor is e24cloud"""
+ m_collect.return_value = self.collmock(vendor="e24cloud")
+ self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform())
+
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
+ def test_identify_e24cloud_negative(self, m_collect):
+ """e24cloud identified if vendor is e24cloud"""
+ m_collect.return_value = self.collmock(vendor="e24cloudyday")
+ self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_exoscale.py b/tests/unittests/sources/test_exoscale.py
new file mode 100644
index 00000000..591256d8
--- /dev/null
+++ b/tests/unittests/sources/test_exoscale.py
@@ -0,0 +1,241 @@
+# Author: Mathieu Corbin <mathieu.corbin@exoscale.com>
+# Author: Christopher Glass <christopher.glass@exoscale.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+import os
+
+import httpretty
+import requests
+
+from cloudinit import helpers, util
+from cloudinit.sources.DataSourceExoscale import (
+ API_VERSION,
+ METADATA_URL,
+ PASSWORD_SERVER_PORT,
+ DataSourceExoscale,
+ get_password,
+ read_metadata,
+)
+from tests.unittests.helpers import HttprettyTestCase, mock
+
+TEST_PASSWORD_URL = "{}:{}/{}/".format(
+ METADATA_URL, PASSWORD_SERVER_PORT, API_VERSION
+)
+
+TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, API_VERSION)
+
+TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, API_VERSION)
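+# The password server is reached on its own port (PASSWORD_SERVER_PORT),
+# unlike the metadata and user-data endpoints above.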
+
+
+@httpretty.activate
+class TestDatasourceExoscale(HttprettyTestCase):
+ def setUp(self):
+ super(TestDatasourceExoscale, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.password_url = TEST_PASSWORD_URL
+ self.metadata_url = TEST_METADATA_URL
+ self.userdata_url = TEST_USERDATA_URL
+
+ def test_password_saved(self):
+ """The password is not set when it is not found
+ in the metadata service."""
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body="saved_password"
+ )
+ self.assertFalse(get_password())
+
+ def test_password_empty(self):
+ """No password is set if the metadata service returns
+ an empty string."""
+ httpretty.register_uri(httpretty.GET, self.password_url, body="")
+ self.assertFalse(get_password())
+
+ def test_password(self):
+ """The password is set to what is found in the metadata
+ service."""
+ expected_password = "p@ssw0rd"
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_password
+ )
+ password = get_password()
+ self.assertEqual(expected_password, password)
+
+ def test_activate_removes_set_passwords_semaphore(self):
+ """Allow set_passwords to run every boot by removing the semaphore."""
+ path = helpers.Paths({"cloud_dir": self.tmp})
+ sem_dir = self.tmp_path("instance/sem", dir=self.tmp)
+ util.ensure_dir(sem_dir)
+ sem_file = os.path.join(sem_dir, "config_set_passwords")
+ with open(sem_file, "w") as stream:
+ stream.write("")
+ ds = DataSourceExoscale({}, None, path)
+ ds.activate(None, None)
+ self.assertFalse(os.path.exists(sem_file))
+
+ def test_get_data(self):
+ """The datasource conforms to expected behavior when supplied
+ full test data."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: True
+ expected_password = "p@ssw0rd"
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_password
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+ self.assertTrue(ds._get_data())
+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
+ self.assertEqual(
+ ds.get_config_obj(),
+ {
+ "ssh_pwauth": True,
+ "password": expected_password,
+ "chpasswd": {
+ "expire": False,
+ },
+ },
+ )
+
+ def test_get_data_saved_password(self):
+ """The datasource conforms to expected behavior when saved_password is
+ returned by the password server."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: True
+ expected_answer = "saved_password"
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_answer
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+ self.assertTrue(ds._get_data())
+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
+ self.assertEqual(ds.get_config_obj(), {})
+
+ def test_get_data_no_password(self):
+ """The datasource conforms to expected behavior when no password is
+ returned by the password server."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: True
+ expected_answer = ""
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_answer
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+ self.assertTrue(ds._get_data())
+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
+ self.assertEqual(ds.get_config_obj(), {})
+
+ @mock.patch("cloudinit.sources.DataSourceExoscale.get_password")
+ def test_read_metadata_when_password_server_unreachable(self, m_password):
+ """The read_metadata function returns partial results in case the
+ password server (only) is unreachable."""
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+
+ m_password.side_effect = requests.Timeout("Fake Connection Timeout")
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+
+ result = read_metadata()
+
+ self.assertIsNone(result.get("password"))
+ self.assertEqual(
+ result.get("user-data").decode("utf-8"), expected_userdata
+ )
+
+ def test_non_viable_platform(self):
+ """The datasource fails fast when the platform is not viable."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: False
+ self.assertFalse(ds._get_data())
diff --git a/tests/unittests/sources/test_gce.py b/tests/unittests/sources/test_gce.py
new file mode 100644
index 00000000..e030931b
--- /dev/null
+++ b/tests/unittests/sources/test_gce.py
@@ -0,0 +1,416 @@
+# Copyright (C) 2014 Vaidas Jablonskis
+#
+# Author: Vaidas Jablonskis <jablonskis@gmail.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import datetime
+import json
+import re
+from base64 import b64decode, b64encode
+from unittest import mock
+from urllib.parse import urlparse
+
+import httpretty
+
+from cloudinit import distros, helpers, settings
+from cloudinit.sources import DataSourceGCE
+from tests.unittests import helpers as test_helpers
+
+GCE_META = {
+ "instance/id": "123",
+ "instance/zone": "foo/bar",
+ "instance/hostname": "server.project-foo.local",
+}
+
+GCE_META_PARTIAL = {
+ "instance/id": "1234",
+ "instance/hostname": "server.project-bar.local",
+ "instance/zone": "bar/baz",
+}
+
+GCE_META_ENCODING = {
+ "instance/id": "12345",
+ "instance/hostname": "server.project-baz.local",
+ "instance/zone": "baz/bang",
+ "instance/attributes": {
+ "user-data": b64encode(b"#!/bin/echo baz\n").decode("utf-8"),
+ "user-data-encoding": "base64",
+ },
+}
+
+GCE_USER_DATA_TEXT = {
+ "instance/id": "12345",
+ "instance/hostname": "server.project-baz.local",
+ "instance/zone": "baz/bang",
+ "instance/attributes": {
+ "user-data": "#!/bin/sh\necho hi mom\ntouch /run/up-now\n",
+ },
+}
+
+HEADERS = {"Metadata-Flavor": "Google"}
+MD_URL_RE = re.compile(
+ r"http://metadata.google.internal/computeMetadata/v1/.*"
+)
+GUEST_ATTRIBUTES_URL = (
+ "http://metadata.google.internal/computeMetadata/"
+ "v1/instance/guest-attributes/hostkeys/"
+)
+
+
+def _set_mock_metadata(gce_meta=None):
+ if gce_meta is None:
+ gce_meta = GCE_META
+
+ def _request_callback(method, uri, headers):
+ url_path = urlparse(uri).path
+ if url_path.startswith("/computeMetadata/v1/"):
+ path = url_path.split("/computeMetadata/v1/")[1:][0]
+ recursive = path.endswith("/")
+ path = path.rstrip("/")
+ else:
+ path = None
+ if path in gce_meta:
+ response = gce_meta.get(path)
+ if recursive:
+ response = json.dumps(response)
+ return (200, headers, response)
+ else:
+ return (404, headers, "")
+
+ # reset is needed. https://github.com/gabrielfalcao/HTTPretty/issues/316
+ httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback)
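+    # Illustrative behavior: with the default GCE_META, a GET of
+    # .../computeMetadata/v1/instance/id returns "123", while a
+    # trailing-slash path whose value is a dict (e.g. instance/attributes/
+    # in GCE_META_ENCODING) is served recursively as a JSON document.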
+
+
+@httpretty.activate
+class TestDataSourceGCE(test_helpers.HttprettyTestCase):
+ def _make_distro(self, dtype, def_user=None):
+ cfg = dict(settings.CFG_BUILTIN)
+ cfg["system_info"]["distro"] = dtype
+ paths = helpers.Paths(cfg["system_info"]["paths"])
+ distro_cls = distros.fetch(dtype)
+ if def_user:
+ cfg["system_info"]["default_user"] = def_user.copy()
+ distro = distro_cls(dtype, cfg["system_info"], paths)
+ return distro
+
+ def setUp(self):
+ tmp = self.tmp_dir()
+ self.ds = DataSourceGCE.DataSourceGCE(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp})
+ )
+        ppatch = mock.patch(
+ "cloudinit.sources.DataSourceGCE.platform_reports_gce"
+ )
+ self.m_platform_reports_gce = ppatch.start()
+ self.m_platform_reports_gce.return_value = True
+ self.addCleanup(ppatch.stop)
+ self.add_patch("time.sleep", "m_sleep") # just to speed up tests
+ super(TestDataSourceGCE, self).setUp()
+
+ def test_connection(self):
+ _set_mock_metadata()
+ success = self.ds.get_data()
+ self.assertTrue(success)
+
+ req_header = httpretty.last_request().headers
+ for header_name, expected_value in HEADERS.items():
+ self.assertEqual(expected_value, req_header.get(header_name))
+
+ def test_metadata(self):
+ # UnicodeDecodeError if set to ds.userdata instead of userdata_raw
+ meta = GCE_META.copy()
+ meta["instance/attributes/user-data"] = b"/bin/echo \xff\n"
+
+ _set_mock_metadata()
+ self.ds.get_data()
+
+ shostname = GCE_META.get("instance/hostname").split(".")[0]
+ self.assertEqual(shostname, self.ds.get_hostname())
+
+ self.assertEqual(
+ GCE_META.get("instance/id"), self.ds.get_instance_id()
+ )
+
+ self.assertEqual(
+ GCE_META.get("instance/attributes/user-data"),
+ self.ds.get_userdata_raw(),
+ )
+
+ # test partial metadata (missing user-data in particular)
+ def test_metadata_partial(self):
+ _set_mock_metadata(GCE_META_PARTIAL)
+ self.ds.get_data()
+
+ self.assertEqual(
+ GCE_META_PARTIAL.get("instance/id"), self.ds.get_instance_id()
+ )
+
+ shostname = GCE_META_PARTIAL.get("instance/hostname").split(".")[0]
+ self.assertEqual(shostname, self.ds.get_hostname())
+
+ def test_userdata_no_encoding(self):
+ """check that user-data is read."""
+ _set_mock_metadata(GCE_USER_DATA_TEXT)
+ self.ds.get_data()
+ self.assertEqual(
+ GCE_USER_DATA_TEXT["instance/attributes"]["user-data"].encode(),
+ self.ds.get_userdata_raw(),
+ )
+
+ def test_metadata_encoding(self):
+ """user-data is base64 encoded if user-data-encoding is 'base64'."""
+ _set_mock_metadata(GCE_META_ENCODING)
+ self.ds.get_data()
+
+ instance_data = GCE_META_ENCODING.get("instance/attributes")
+ decoded = b64decode(instance_data.get("user-data"))
+ self.assertEqual(decoded, self.ds.get_userdata_raw())
+
+ def test_missing_required_keys_return_false(self):
+ for required_key in [
+ "instance/id",
+ "instance/zone",
+ "instance/hostname",
+ ]:
+ meta = GCE_META_PARTIAL.copy()
+ del meta[required_key]
+ _set_mock_metadata(meta)
+ self.assertEqual(False, self.ds.get_data())
+ httpretty.reset()
+
+ def test_no_ssh_keys_metadata(self):
+ _set_mock_metadata()
+ self.ds.get_data()
+ self.assertEqual([], self.ds.get_public_ssh_keys())
+
+ def test_cloudinit_ssh_keys(self):
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
+ project_attributes = {
+ "sshKeys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(0)),
+ "user:{0}".format(invalid_key.format(0)),
+ ]
+ ),
+ "ssh-keys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(1)),
+ "user:{0}".format(invalid_key.format(1)),
+ ]
+ ),
+ }
+ instance_attributes = {
+ "ssh-keys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(2)),
+ "user:{0}".format(invalid_key.format(2)),
+ ]
+ ),
+ "block-project-ssh-keys": "False",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ self.ds.get_data()
+
+ expected = [valid_key.format(key) for key in range(3)]
+ self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
+
+ @mock.patch("cloudinit.sources.DataSourceGCE.ug_util")
+ def test_default_user_ssh_keys(self, mock_ug_util):
+ mock_ug_util.normalize_users_groups.return_value = None, None
+ mock_ug_util.extract_default.return_value = "ubuntu", None
+ ubuntu_ds = DataSourceGCE.DataSourceGCE(
+ settings.CFG_BUILTIN,
+ self._make_distro("ubuntu"),
+ helpers.Paths({"run_dir": self.tmp_dir()}),
+ )
+
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
+ project_attributes = {
+ "sshKeys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(0)),
+ "user:{0}".format(invalid_key.format(0)),
+ ]
+ ),
+ "ssh-keys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(1)),
+ "user:{0}".format(invalid_key.format(1)),
+ ]
+ ),
+ }
+ instance_attributes = {
+ "ssh-keys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(2)),
+ "user:{0}".format(invalid_key.format(2)),
+ ]
+ ),
+ "block-project-ssh-keys": "False",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ ubuntu_ds.get_data()
+
+ expected = [valid_key.format(key) for key in range(3)]
+ self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys()))
+
+ def test_instance_ssh_keys_override(self):
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
+ project_attributes = {
+ "sshKeys": "cloudinit:{0}".format(invalid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(invalid_key.format(1)),
+ }
+ instance_attributes = {
+ "sshKeys": "cloudinit:{0}".format(valid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(valid_key.format(1)),
+ "block-project-ssh-keys": "False",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ self.ds.get_data()
+
+ expected = [valid_key.format(key) for key in range(2)]
+ self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
+
+ def test_block_project_ssh_keys_override(self):
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
+ project_attributes = {
+ "sshKeys": "cloudinit:{0}".format(invalid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(invalid_key.format(1)),
+ }
+ instance_attributes = {
+ "ssh-keys": "cloudinit:{0}".format(valid_key.format(0)),
+ "block-project-ssh-keys": "True",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ self.ds.get_data()
+
+ expected = [valid_key.format(0)]
+ self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
+
+ def test_only_last_part_of_zone_used_for_availability_zone(self):
+ _set_mock_metadata()
+ r = self.ds.get_data()
+ self.assertEqual(True, r)
+ self.assertEqual("bar", self.ds.availability_zone)
+
+ @mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher")
+ def test_get_data_returns_false_if_not_on_gce(self, m_fetcher):
+ self.m_platform_reports_gce.return_value = False
+ ret = self.ds.get_data()
+ self.assertEqual(False, ret)
+ m_fetcher.assert_not_called()
+
+ def test_has_expired(self):
+ def _get_timestamp(days):
+ format_str = "%Y-%m-%dT%H:%M:%S+0000"
+ today = datetime.datetime.now()
+ timestamp = today + datetime.timedelta(days=days)
+ return timestamp.strftime(format_str)
+
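+        # Only keys tagged "google-ssh" whose trailing JSON carries a
+        # parseable, past "expireOn" timestamp are expected to be expired.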
+ past = _get_timestamp(-1)
+ future = _get_timestamp(1)
+ ssh_keys = {
+ None: False,
+ "": False,
+ "Invalid": False,
+ "user:ssh-rsa key user@domain.com": False,
+ 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False,
+ "user:ssh-rsa key google-ssh": False,
+ "user:ssh-rsa key google-ssh {invalid:json}": False,
+ 'user:ssh-rsa key google-ssh {"userName":"user"}': False,
+ 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False,
+ 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False,
+ 'user:xyz key google-ssh {"expireOn":"%s"}' % past: True,
+ }
+
+ for key, expired in ssh_keys.items():
+ self.assertEqual(DataSourceGCE._has_expired(key), expired)
+
+ def test_parse_public_keys_non_ascii(self):
+ public_key_data = [
+ "cloudinit:rsa ssh-ke%s invalid" % chr(165),
+ "use%sname:rsa ssh-key" % chr(174),
+ "cloudinit:test 1",
+ "default:test 2",
+ "user:test 3",
+ ]
+ expected = ["test 1", "test 2"]
+ found = DataSourceGCE._parse_public_keys(
+ public_key_data, default_user="default"
+ )
+ self.assertEqual(sorted(found), sorted(expected))
+
+ @mock.patch("cloudinit.url_helper.readurl")
+ def test_publish_host_keys(self, m_readurl):
+ hostkeys = [("ssh-rsa", "asdfasdf"), ("ssh-ed25519", "qwerqwer")]
+ readurl_expected_calls = [
+ mock.call(
+ check_status=False,
+ data=b"asdfasdf",
+ headers=HEADERS,
+ request_method="PUT",
+ url="%s%s" % (GUEST_ATTRIBUTES_URL, "ssh-rsa"),
+ ),
+ mock.call(
+ check_status=False,
+ data=b"qwerqwer",
+ headers=HEADERS,
+ request_method="PUT",
+ url="%s%s" % (GUEST_ATTRIBUTES_URL, "ssh-ed25519"),
+ ),
+ ]
+ self.ds.publish_host_keys(hostkeys)
+ m_readurl.assert_has_calls(readurl_expected_calls, any_order=True)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4",
+ autospec=True,
+ )
+ @mock.patch(
+ "cloudinit.sources.DataSourceGCE.DataSourceGCELocal.fallback_interface"
+ )
+ def test_local_datasource_uses_ephemeral_dhcp(self, _m_fallback, m_dhcp):
+ _set_mock_metadata()
+ ds = DataSourceGCE.DataSourceGCELocal(
+ sys_cfg={}, distro=None, paths=None
+ )
+ ds._get_data()
+ assert m_dhcp.call_count == 1
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4",
+ autospec=True,
+ )
+ def test_datasource_doesnt_use_ephemeral_dhcp(self, m_dhcp):
+ _set_mock_metadata()
+ ds = DataSourceGCE.DataSourceGCE(sys_cfg={}, distro=None, paths=None)
+ ds._get_data()
+ assert m_dhcp.call_count == 0
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_hetzner.py b/tests/unittests/sources/test_hetzner.py
new file mode 100644
index 00000000..f80ed45f
--- /dev/null
+++ b/tests/unittests/sources/test_hetzner.py
@@ -0,0 +1,164 @@
+# Copyright (C) 2018 Jonas Keidel
+#
+# Author: Jonas Keidel <jonas.keidel@hetzner.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+
+import pytest
+
+import cloudinit.sources.helpers.hetzner as hc_helper
+from cloudinit import helpers, settings, util
+from cloudinit.sources import DataSourceHetzner
+from tests.unittests.helpers import CiTestCase, mock
+
+METADATA = util.load_yaml(
+ """
+hostname: cloudinit-test
+instance-id: 123456
+local-ipv4: ''
+network-config:
+ config:
+ - mac_address: 96:00:00:08:19:da
+ name: eth0
+ subnets:
+ - dns_nameservers:
+ - 185.12.64.1
+ - 185.12.64.2
+ ipv4: true
+ type: dhcp
+ - address: 2a01:4f8:beef:beef::1/64
+ dns_nameservers:
+ - 2a01:4ff:ff00::add:2
+ - 2a01:4ff:ff00::add:1
+ gateway: fe80::1
+ ipv6: true
+ type: physical
+ version: 1
+network-sysconfig: "DEVICE='eth0'\nTYPE=Ethernet\nBOOTPROTO=dhcp\n\
+ ONBOOT='yes'\nHWADDR=96:00:00:08:19:da\n\
+ IPV6INIT=yes\nIPV6ADDR=2a01:4f8:beef:beef::1/64\n\
+ IPV6_DEFAULTGW=fe80::1%eth0\nIPV6_AUTOCONF=no\n\
+ DNS1=185.12.64.1\nDNS2=185.12.64.2\n"
+public-ipv4: 192.168.0.2
+public-keys:
+- ssh-ed25519 \
+ AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh \
+ test-key@workstation
+vendor_data: "test"
+"""
+)
+
+USERDATA = b"""#cloud-config
+runcmd:
+- [touch, /root/cloud-init-worked ]
+"""
+
+
+class TestDataSourceHetzner(CiTestCase):
+ """
+ Test reading the meta-data
+ """
+
+ def setUp(self):
+ super(TestDataSourceHetzner, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self):
+ ds = DataSourceHetzner.DataSourceHetzner(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ return ds
+
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.sources.DataSourceHetzner.EphemeralDHCPv4")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata")
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_userdata")
+ @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data")
+ def test_read_data(
+ self,
+ m_get_hcloud_data,
+ m_usermd,
+ m_readmd,
+ m_fallback_nic,
+ m_net,
+ m_dhcp,
+ ):
+ m_get_hcloud_data.return_value = (
+ True,
+ str(METADATA.get("instance-id")),
+ )
+ m_readmd.return_value = METADATA.copy()
+ m_usermd.return_value = USERDATA
+ m_fallback_nic.return_value = "eth0"
+ m_dhcp.return_value = [
+ {
+ "interface": "eth0",
+ "fixed-address": "192.168.0.2",
+ "routers": "192.168.0.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.0.255",
+ }
+ ]
+
+ ds = self.get_ds()
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ m_net.assert_called_once_with(
+ iface="eth0",
+ connectivity_url_data={
+ "url": "http://169.254.169.254/hetzner/v1/metadata/instance-id"
+ },
+ )
+
+ self.assertTrue(m_readmd.called)
+
+ self.assertEqual(METADATA.get("hostname"), ds.get_hostname())
+
+ self.assertEqual(METADATA.get("public-keys"), ds.get_public_ssh_keys())
+
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+ self.assertEqual(ds.get_userdata_raw(), USERDATA)
+ self.assertEqual(ds.get_vendordata_raw(), METADATA.get("vendor_data"))
+
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data")
+ def test_not_on_hetzner_returns_false(
+ self, m_get_hcloud_data, m_find_fallback, m_read_md
+ ):
+ """If helper 'get_hcloud_data' returns False,
+ return False from get_data."""
+ m_get_hcloud_data.return_value = (False, None)
+ ds = self.get_ds()
+ ret = ds.get_data()
+
+ self.assertFalse(ret)
+        # These are white-box checks to ensure it did not search.
+ m_find_fallback.assert_not_called()
+ m_read_md.assert_not_called()
+
+
+class TestMaybeB64Decode:
+ """Test the maybe_b64decode helper function."""
+
+ @pytest.mark.parametrize("invalid_input", (str("not bytes"), int(4)))
+ def test_raises_error_on_non_bytes(self, invalid_input):
+ """maybe_b64decode should raise error if data is not bytes."""
+ with pytest.raises(TypeError):
+ hc_helper.maybe_b64decode(invalid_input)
+
+ @pytest.mark.parametrize(
+ "in_data,expected",
+ [
+ # If data is not b64 encoded, then return value should be the same.
+ (b"this is my data", b"this is my data"),
+ # If data is b64 encoded, then return value should be decoded.
+ (base64.b64encode(b"data"), b"data"),
+ ],
+ )
+ def test_happy_path(self, in_data, expected):
+ assert expected == hc_helper.maybe_b64decode(in_data)
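+
+    # For reference, a minimal sketch of the semantics exercised above,
+    # assuming the helper validates strictly and falls back to raw bytes:
+    #
+    #     try:
+    #         return base64.b64decode(data, validate=True)
+    #     except (binascii.Error, ValueError):
+    #         return data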
diff --git a/tests/unittests/sources/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py
new file mode 100644
index 00000000..17a8be64
--- /dev/null
+++ b/tests/unittests/sources/test_ibmcloud.py
@@ -0,0 +1,426 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import copy
+import json
+from textwrap import dedent
+
+from cloudinit import util
+from cloudinit.helpers import Paths
+from cloudinit.sources import DataSourceIBMCloud as ibm
+from tests.unittests import helpers as test_helpers
+
+mock = test_helpers.mock
+
+D_PATH = "cloudinit.sources.DataSourceIBMCloud."
+
+
+@mock.patch(D_PATH + "_is_xen", return_value=True)
+@mock.patch(D_PATH + "_is_ibm_provisioning")
+@mock.patch(D_PATH + "util.blkid")
+class TestGetIBMPlatform(test_helpers.CiTestCase):
+ """Test the get_ibm_platform helper."""
+
+ blkid_base = {
+ "/dev/xvda1": {
+ "DEVNAME": "/dev/xvda1",
+ "LABEL": "cloudimg-bootfs",
+ "TYPE": "ext3",
+ },
+ "/dev/xvda2": {
+ "DEVNAME": "/dev/xvda2",
+ "LABEL": "cloudimg-rootfs",
+ "TYPE": "ext4",
+ },
+ }
+
+ blkid_metadata_disk = {
+ "/dev/xvdh1": {
+ "DEVNAME": "/dev/xvdh1",
+ "LABEL": "METADATA",
+ "TYPE": "vfat",
+ "SEC_TYPE": "msdos",
+ "UUID": "681B-8C5D",
+ "PARTUUID": "3d631e09-01",
+ },
+ }
+
+ blkid_oscode_disk = {
+ "/dev/xvdh": {
+ "DEVNAME": "/dev/xvdh",
+ "LABEL": "config-2",
+ "TYPE": "vfat",
+ "SEC_TYPE": "msdos",
+ "UUID": ibm.IBM_CONFIG_UUID,
+ }
+ }
+
+ def setUp(self):
+ self.blkid_metadata = copy.deepcopy(self.blkid_base)
+ self.blkid_metadata.update(copy.deepcopy(self.blkid_metadata_disk))
+
+ self.blkid_oscode = copy.deepcopy(self.blkid_base)
+ self.blkid_oscode.update(copy.deepcopy(self.blkid_oscode_disk))
+
+ def test_id_template_live_metadata(self, m_blkid, m_is_prov, _m_xen):
+ """identify TEMPLATE_LIVE_METADATA."""
+ m_blkid.return_value = self.blkid_metadata
+ m_is_prov.return_value = False
+ self.assertEqual(
+ (ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"),
+ ibm.get_ibm_platform(),
+ )
+
+ def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen):
+ """identify TEMPLATE_PROVISIONING_METADATA."""
+ m_blkid.return_value = self.blkid_metadata
+ m_is_prov.return_value = True
+ self.assertEqual(
+ (ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"),
+ ibm.get_ibm_platform(),
+ )
+
+ def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen):
+ """identify TEMPLATE_PROVISIONING_NODATA."""
+ m_blkid.return_value = self.blkid_base
+ m_is_prov.return_value = True
+ self.assertEqual(
+ (ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None),
+ ibm.get_ibm_platform(),
+ )
+
+ def test_id_os_code(self, m_blkid, m_is_prov, _m_xen):
+ """Identify OS_CODE."""
+ m_blkid.return_value = self.blkid_oscode
+ m_is_prov.return_value = False
+ self.assertEqual(
+ (ibm.Platforms.OS_CODE, "/dev/xvdh"), ibm.get_ibm_platform()
+ )
+
+ def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen):
+ """Test against false positive on openstack with non-ibm UUID."""
+ blkid = self.blkid_oscode
+ blkid["/dev/xvdh"]["UUID"] = "9999-9999"
+ m_blkid.return_value = blkid
+ m_is_prov.return_value = False
+ self.assertEqual((None, None), ibm.get_ibm_platform())
+
+
+@mock.patch(D_PATH + "_read_system_uuid", return_value=None)
+@mock.patch(D_PATH + "get_ibm_platform")
+class TestReadMD(test_helpers.CiTestCase):
+ """Test the read_datasource helper."""
+
+ template_md = {
+ "files": [],
+ "network_config": {"content_path": "/content/interfaces"},
+ "hostname": "ci-fond-ram",
+ "name": "ci-fond-ram",
+ "domain": "testing.ci.cloud-init.org",
+ "meta": {"dsmode": "net"},
+ "uuid": "8e636730-9f5d-c4a5-327c-d7123c46e82f",
+ "public_keys": {"1091307": "ssh-rsa AAAAB3NzaC1...Hw== ci-pubkey"},
+ }
+
+ oscode_md = {
+ "hostname": "ci-grand-gannet.testing.ci.cloud-init.org",
+ "name": "ci-grand-gannet",
+ "uuid": "2f266908-8e6c-4818-9b5c-42e9cc66a785",
+ "random_seed": "bm90LXJhbmRvbQo=",
+ "crypt_key": "ssh-rsa AAAAB3NzaC1yc2..n6z/",
+ "configuration_token": "eyJhbGciOi..M3ZA",
+ "public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"},
+ }
+
+ content_interfaces = dedent(
+ """\
+ auto lo
+ iface lo inet loopback
+
+ auto eth0
+ allow-hotplug eth0
+ iface eth0 inet static
+ address 10.82.43.5
+ netmask 255.255.255.192
+ """
+ )
+
+ userdata = b"#!/bin/sh\necho hi mom\n"
+ # meta.js file gets json encoded userdata as a list.
+ meta_js = '["#!/bin/sh\necho hi mom\n"]'
+ vendor_data = {
+ "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"
+ }
+
+ network_data = {
+ "links": [
+ {
+ "id": "interface_29402281",
+ "name": "eth0",
+ "mtu": None,
+ "type": "phy",
+ "ethernet_mac_address": "06:00:f1:bd:da:25",
+ },
+ {
+ "id": "interface_29402279",
+ "name": "eth1",
+ "mtu": None,
+ "type": "phy",
+ "ethernet_mac_address": "06:98:5e:d0:7f:86",
+ },
+ ],
+ "networks": [
+ {
+ "id": "network_109887563",
+ "link": "interface_29402281",
+ "type": "ipv4",
+ "ip_address": "10.82.43.2",
+ "netmask": "255.255.255.192",
+ "routes": [
+ {
+ "network": "10.0.0.0",
+ "netmask": "255.0.0.0",
+ "gateway": "10.82.43.1",
+ },
+ {
+ "network": "161.26.0.0",
+ "netmask": "255.255.0.0",
+ "gateway": "10.82.43.1",
+ },
+ ],
+ },
+ {
+ "id": "network_109887551",
+ "link": "interface_29402279",
+ "type": "ipv4",
+ "ip_address": "108.168.194.252",
+ "netmask": "255.255.255.248",
+ "routes": [
+ {
+ "network": "0.0.0.0",
+ "netmask": "0.0.0.0",
+ "gateway": "108.168.194.249",
+ }
+ ],
+ },
+ ],
+ "services": [
+ {"type": "dns", "address": "10.0.80.11"},
+ {"type": "dns", "address": "10.0.80.12"},
+ ],
+ }
+
+ sysuuid = "7f79ebf5-d791-43c3-a723-854e8389d59f"
+
+ def _get_expected_metadata(self, os_md):
+ """return expected 'metadata' for data loaded from meta_data.json."""
+ os_md = copy.deepcopy(os_md)
+ renames = (
+ ("hostname", "local-hostname"),
+ ("uuid", "instance-id"),
+ ("public_keys", "public-keys"),
+ )
+ ret = {}
+ for osname, mdname in renames:
+ if osname in os_md:
+ ret[mdname] = os_md[osname]
+ if "random_seed" in os_md:
+ ret["random_seed"] = base64.b64decode(os_md["random_seed"])
+
+ return ret
+
+ def test_provisioning_md(self, m_platform, m_sysuuid):
+ """Provisioning env with a metadata disk should return None."""
+ m_platform.return_value = (
+ ibm.Platforms.TEMPLATE_PROVISIONING_METADATA,
+ "/dev/xvdh",
+ )
+ self.assertIsNone(ibm.read_md())
+
+ def test_provisioning_no_metadata(self, m_platform, m_sysuuid):
+ """Provisioning env with no metadata disk should return None."""
+ m_platform.return_value = (
+ ibm.Platforms.TEMPLATE_PROVISIONING_NODATA,
+ None,
+ )
+ self.assertIsNone(ibm.read_md())
+
+ def test_provisioning_not_ibm(self, m_platform, m_sysuuid):
+ """Provisioning env but not identified as IBM should return None."""
+ m_platform.return_value = (None, None)
+ self.assertIsNone(ibm.read_md())
+
+ def test_template_live(self, m_platform, m_sysuuid):
+ """Template live environment should be identified."""
+ tmpdir = self.tmp_dir()
+ m_platform.return_value = (
+ ibm.Platforms.TEMPLATE_LIVE_METADATA,
+ tmpdir,
+ )
+ m_sysuuid.return_value = self.sysuuid
+
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(
+ self.template_md
+ ),
+ "openstack/latest/user_data": self.userdata,
+ "openstack/content/interfaces": self.content_interfaces,
+ "meta.js": self.meta_js,
+ },
+ )
+
+ ret = ibm.read_md()
+ self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertEqual(self.userdata, ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.template_md), ret["metadata"]
+ )
+ self.assertEqual(self.sysuuid, ret["system-uuid"])
+
+ def test_os_code_live(self, m_platform, m_sysuuid):
+ """Verify an os_code metadata path."""
+ tmpdir = self.tmp_dir()
+ m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir)
+ netdata = json.dumps(self.network_data)
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(self.oscode_md),
+ "openstack/latest/user_data": self.userdata,
+ "openstack/latest/vendor_data.json": json.dumps(
+ self.vendor_data
+ ),
+ "openstack/latest/network_data.json": netdata,
+ },
+ )
+
+ ret = ibm.read_md()
+ self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertEqual(self.userdata, ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.oscode_md), ret["metadata"]
+ )
+
+ def test_os_code_live_no_userdata(self, m_platform, m_sysuuid):
+ """Verify os_code without user-data."""
+ tmpdir = self.tmp_dir()
+ m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir)
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(self.oscode_md),
+ "openstack/latest/vendor_data.json": json.dumps(
+ self.vendor_data
+ ),
+ },
+ )
+
+ ret = ibm.read_md()
+ self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertIsNone(ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.oscode_md), ret["metadata"]
+ )
+
+
+class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
+ """Test the _is_ibm_provisioning method."""
+
+ inst_log = "/root/swinstall.log"
+ prov_cfg = "/root/provisioningConfiguration.cfg"
+ boot_ref = "/proc/1/environ"
+ with_logs = True
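+
+    # The tests below exercise mtime-based detection: roughly, provisioning
+    # is inferred when the provisioning config exists and the install log
+    # is either absent or newer than the boot reference (/proc/1/environ).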
+
+ def _call_with_root(self, rootd):
+ self.reRoot(rootd)
+ return ibm._is_ibm_provisioning()
+
+ def test_no_config(self):
+ """No provisioning config means not provisioning."""
+ self.assertFalse(self._call_with_root(self.tmp_dir()))
+
+ def test_config_only(self):
+ """A provisioning config without a log means provisioning."""
+ rootd = self.tmp_dir()
+ test_helpers.populate_dir(rootd, {self.prov_cfg: "key=value"})
+ self.assertTrue(self._call_with_root(rootd))
+
+ def test_config_with_old_log(self):
+ """A config with a log from previous boot is not provisioning."""
+ rootd = self.tmp_dir()
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", -30),
+ self.boot_ref: ("PWD=/", 0),
+ }
+ test_helpers.populate_dir_with_ts(rootd, data)
+ self.assertFalse(self._call_with_root(rootd=rootd))
+ self.assertIn("from previous boot", self.logs.getvalue())
+
+ def test_config_with_new_log(self):
+ """A config with a log from this boot is provisioning."""
+ rootd = self.tmp_dir()
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", 30),
+ self.boot_ref: ("PWD=/", 0),
+ }
+ test_helpers.populate_dir_with_ts(rootd, data)
+ self.assertTrue(self._call_with_root(rootd=rootd))
+ self.assertIn("from current boot", self.logs.getvalue())
+
+ def test_config_and_log_no_reference(self):
+ """If the config and log existed, but no reference, assume not."""
+ rootd = self.tmp_dir()
+ test_helpers.populate_dir(
+ rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"}
+ )
+ self.assertFalse(self._call_with_root(rootd=rootd))
+ self.assertIn("no reference file", self.logs.getvalue())
+
+
+class TestDataSourceIBMCloud(test_helpers.CiTestCase):
+ def setUp(self):
+ super(TestDataSourceIBMCloud, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.cloud_dir = self.tmp_path("cloud", dir=self.tmp)
+ util.ensure_dir(self.cloud_dir)
+ paths = Paths({"run_dir": self.tmp, "cloud_dir": self.cloud_dir})
+ self.ds = ibm.DataSourceIBMCloud(sys_cfg={}, distro=None, paths=paths)
+
+ def test_get_data_false(self):
+ """When read_md returns None, get_data returns False."""
+ with mock.patch(D_PATH + "read_md", return_value=None):
+ self.assertFalse(self.ds.get_data())
+
+ def test_get_data_processes_read_md(self):
+ """get_data processes and caches content returned by read_md."""
+ md = {
+ "metadata": {},
+ "networkdata": "net",
+ "platform": "plat",
+ "source": "src",
+ "system-uuid": "uuid",
+ "userdata": "ud",
+ "vendordata": "vd",
+ }
+ with mock.patch(D_PATH + "read_md", return_value=md):
+ self.assertTrue(self.ds.get_data())
+ self.assertEqual("src", self.ds.source)
+ self.assertEqual("plat", self.ds.platform)
+ self.assertEqual({}, self.ds.metadata)
+ self.assertEqual("ud", self.ds.userdata_raw)
+ self.assertEqual("net", self.ds.network_json)
+ self.assertEqual("vd", self.ds.vendordata_pure)
+ self.assertEqual("uuid", self.ds.system_uuid)
+ self.assertEqual("ibmcloud", self.ds.cloud_name)
+ self.assertEqual("ibmcloud", self.ds.platform_type)
+ self.assertEqual("plat (src)", self.ds.subplatform)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py
new file mode 100644
index 00000000..ce8fc970
--- /dev/null
+++ b/tests/unittests/sources/test_init.py
@@ -0,0 +1,994 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import inspect
+import os
+import stat
+
+from cloudinit import importer, util
+from cloudinit.event import EventScope, EventType
+from cloudinit.helpers import Paths
+from cloudinit.sources import (
+ EXPERIMENTAL_TEXT,
+ INSTANCE_JSON_FILE,
+ INSTANCE_JSON_SENSITIVE_FILE,
+ METADATA_UNKNOWN,
+ REDACT_SENSITIVE_VALUE,
+ UNSET,
+ DataSource,
+ canonical_cloud_id,
+ redact_sensitive_keys,
+)
+from cloudinit.user_data import UserDataProcessor
+from tests.unittests.helpers import CiTestCase, mock
+
+
+class DataSourceTestSubclassNet(DataSource):
+
+ dsname = "MyTestSubclass"
+ url_max_wait = 55
+
+ def __init__(
+ self,
+ sys_cfg,
+ distro,
+ paths,
+ custom_metadata=None,
+ custom_userdata=None,
+ get_data_retval=True,
+ ):
+ super(DataSourceTestSubclassNet, self).__init__(sys_cfg, distro, paths)
+ self._custom_userdata = custom_userdata
+ self._custom_metadata = custom_metadata
+ self._get_data_retval = get_data_retval
+
+ def _get_cloud_name(self):
+ return "SubclassCloudName"
+
+ def _get_data(self):
+ if self._custom_metadata:
+ self.metadata = self._custom_metadata
+ else:
+ self.metadata = {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ }
+ if self._custom_userdata:
+ self.userdata_raw = self._custom_userdata
+ else:
+ self.userdata_raw = "userdata_raw"
+ self.vendordata_raw = "vendordata_raw"
+ return self._get_data_retval
+
+
+class InvalidDataSourceTestSubclassNet(DataSource):
+ pass
+
+
+class TestDataSource(CiTestCase):
+
+ with_logs = True
+ maxDiff = None
+
+ def setUp(self):
+ super(TestDataSource, self).setUp()
+ self.sys_cfg = {"datasource": {"_undef": {"key1": False}}}
+ self.distro = "distrotest" # generally should be a Distro object
+ self.paths = Paths({})
+ self.datasource = DataSource(self.sys_cfg, self.distro, self.paths)
+
+ def test_datasource_init(self):
+ """DataSource initializes metadata attributes, ds_cfg and ud_proc."""
+ self.assertEqual(self.paths, self.datasource.paths)
+ self.assertEqual(self.sys_cfg, self.datasource.sys_cfg)
+ self.assertEqual(self.distro, self.datasource.distro)
+ self.assertIsNone(self.datasource.userdata)
+ self.assertEqual({}, self.datasource.metadata)
+ self.assertIsNone(self.datasource.userdata_raw)
+ self.assertIsNone(self.datasource.vendordata)
+ self.assertIsNone(self.datasource.vendordata_raw)
+ self.assertEqual({"key1": False}, self.datasource.ds_cfg)
+ self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor)
+
+ def test_datasource_init_gets_ds_cfg_using_dsname(self):
+ """Init uses DataSource.dsname for sourcing ds_cfg."""
+ sys_cfg = {"datasource": {"MyTestSubclass": {"key2": False}}}
+ distro = "distrotest" # generally should be a Distro object
+ datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
+ self.assertEqual({"key2": False}, datasource.ds_cfg)
+
+ def test_str_is_classname(self):
+ """The string representation of the datasource is the classname."""
+ self.assertEqual("DataSource", str(self.datasource))
+ self.assertEqual(
+ "DataSourceTestSubclassNet",
+ str(DataSourceTestSubclassNet("", "", self.paths)),
+ )
+
+ def test_datasource_get_url_params_defaults(self):
+ """get_url_params default url config settings for the datasource."""
+ params = self.datasource.get_url_params()
+ self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait)
+ self.assertEqual(params.timeout_seconds, self.datasource.url_timeout)
+ self.assertEqual(params.num_retries, self.datasource.url_retries)
+ self.assertEqual(
+ params.sec_between_retries, self.datasource.url_sec_between_retries
+ )
+
+ def test_datasource_get_url_params_subclassed(self):
+ """Subclasses can override get_url_params defaults."""
+ sys_cfg = {"datasource": {"MyTestSubclass": {"key2": False}}}
+ distro = "distrotest" # generally should be a Distro object
+ datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
+ expected = (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ )
+ url_params = datasource.get_url_params()
+ self.assertNotEqual(self.datasource.get_url_params(), url_params)
+ self.assertEqual(expected, url_params)
+
+ def test_datasource_get_url_params_ds_config_override(self):
+ """Datasource configuration options can override url param defaults."""
+ sys_cfg = {
+ "datasource": {
+ "MyTestSubclass": {
+ "max_wait": "1",
+ "timeout": "2",
+ "retries": "3",
+ "sec_between_retries": 4,
+ }
+ }
+ }
+ datasource = DataSourceTestSubclassNet(
+ sys_cfg, self.distro, self.paths
+ )
+ expected = (1, 2, 3, 4)
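+        # The string values in the datasource config above are expected
+        # to be coerced to ints by get_url_params.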
+ url_params = datasource.get_url_params()
+ self.assertNotEqual(
+ (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ ),
+ url_params,
+ )
+ self.assertEqual(expected, url_params)
+
+ def test_datasource_get_url_params_is_zero_or_greater(self):
+ """get_url_params ignores timeouts with a value below 0."""
+ # Set an override that is below 0 which gets ignored.
+ sys_cfg = {"datasource": {"_undef": {"timeout": "-1"}}}
+ datasource = DataSource(sys_cfg, self.distro, self.paths)
+ (
+ _max_wait,
+ timeout,
+ _retries,
+ _sec_between_retries,
+ ) = datasource.get_url_params()
+ self.assertEqual(0, timeout)
+
+ def test_datasource_get_url_uses_defaults_on_errors(self):
+ """On invalid system config values for url_params defaults are used."""
+ # All invalid values should be logged
+ sys_cfg = {
+ "datasource": {
+ "_undef": {
+ "max_wait": "nope",
+ "timeout": "bug",
+ "retries": "nonint",
+ }
+ }
+ }
+ datasource = DataSource(sys_cfg, self.distro, self.paths)
+ url_params = datasource.get_url_params()
+ expected = (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ )
+ self.assertEqual(expected, url_params)
+ logs = self.logs.getvalue()
+ expected_logs = [
+ "Config max_wait 'nope' is not an int, using default '-1'",
+ "Config timeout 'bug' is not an int, using default '10'",
+ "Config retries 'nonint' is not an int, using default '5'",
+ ]
+ for log in expected_logs:
+ self.assertIn(log, logs)
+
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ def test_fallback_interface_is_discovered(self, m_get_fallback_nic):
+ """The fallback_interface is discovered via find_fallback_nic."""
+ m_get_fallback_nic.return_value = "nic9"
+ self.assertEqual("nic9", self.datasource.fallback_interface)
+
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic):
+ """Log a warning when fallback_interface can not discover the nic."""
+ self.datasource._cloud_name = "MySupahCloud"
+ m_get_fallback_nic.return_value = None # Couldn't discover nic
+ self.assertIsNone(self.datasource.fallback_interface)
+ self.assertEqual(
+ "WARNING: Did not find a fallback interface on MySupahCloud.\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic):
+ """The fallback_interface is cached and won't be rediscovered."""
+ self.datasource._fallback_interface = "nic10"
+ self.assertEqual("nic10", self.datasource.fallback_interface)
+ m_get_fallback_nic.assert_not_called()
+
+ def test__get_data_unimplemented(self):
+ """Raise an error when _get_data is not implemented."""
+ with self.assertRaises(NotImplementedError) as context_manager:
+ self.datasource.get_data()
+ self.assertIn(
+ "Subclasses of DataSource must implement _get_data",
+ str(context_manager.exception),
+ )
+ datasource2 = InvalidDataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, self.paths
+ )
+ with self.assertRaises(NotImplementedError) as context_manager:
+ datasource2.get_data()
+ self.assertIn(
+ "Subclasses of DataSource must implement _get_data",
+ str(context_manager.exception),
+ )
+
+ def test_get_data_calls_subclass__get_data(self):
+ """Datasource.get_data uses the subclass' version of _get_data."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertTrue(datasource.get_data())
+ self.assertEqual(
+ {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ },
+ datasource.metadata,
+ )
+ self.assertEqual("userdata_raw", datasource.userdata_raw)
+ self.assertEqual("vendordata_raw", datasource.vendordata_raw)
+
+ def test_get_hostname_strips_local_hostname_without_domain(self):
+ """Datasource.get_hostname strips metadata local-hostname of domain."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertTrue(datasource.get_data())
+ self.assertEqual(
+ "test-subclass-hostname", datasource.metadata["local-hostname"]
+ )
+ self.assertEqual("test-subclass-hostname", datasource.get_hostname())
+ datasource.metadata["local-hostname"] = "hostname.my.domain.com"
+ self.assertEqual("hostname", datasource.get_hostname())
+
+ def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self):
+ """Datasource.get_hostname with fqdn set gets qualified hostname."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertTrue(datasource.get_data())
+ datasource.metadata["local-hostname"] = "hostname.my.domain.com"
+ self.assertEqual(
+ "hostname.my.domain.com", datasource.get_hostname(fqdn=True)
+ )
+
+ def test_get_hostname_without_metadata_uses_system_hostname(self):
+ """Datasource.gethostname runs util.get_hostname when no metadata."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertEqual({}, datasource.metadata)
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
+ with mock.patch(mock_fqdn) as m_fqdn:
+ m_gethost.return_value = "systemhostname.domain.com"
+                m_fqdn.return_value = None  # No matching fqdn in /etc/hosts
+ self.assertEqual("systemhostname", datasource.get_hostname())
+ self.assertEqual(
+ "systemhostname.domain.com",
+ datasource.get_hostname(fqdn=True),
+ )
+
+ def test_get_hostname_without_metadata_returns_none(self):
+ """Datasource.gethostname returns None when metadata_only and no MD."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertEqual({}, datasource.metadata)
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
+ with mock.patch(mock_fqdn) as m_fqdn:
+ self.assertIsNone(datasource.get_hostname(metadata_only=True))
+ self.assertIsNone(
+ datasource.get_hostname(fqdn=True, metadata_only=True)
+ )
+ self.assertEqual([], m_gethost.call_args_list)
+ self.assertEqual([], m_fqdn.call_args_list)
+
+ def test_get_hostname_without_metadata_prefers_etc_hosts(self):
+ """Datasource.gethostname prefers /etc/hosts to util.get_hostname."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertEqual({}, datasource.metadata)
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
+ with mock.patch(mock_fqdn) as m_fqdn:
+ m_gethost.return_value = "systemhostname.domain.com"
+ m_fqdn.return_value = "fqdnhostname.domain.com"
+ self.assertEqual("fqdnhostname", datasource.get_hostname())
+ self.assertEqual(
+ "fqdnhostname.domain.com",
+ datasource.get_hostname(fqdn=True),
+ )
+
+ def test_get_data_does_not_write_instance_data_on_failure(self):
+ """get_data does not write INSTANCE_JSON_FILE on get_data False."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ get_data_retval=False,
+ )
+ self.assertFalse(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ self.assertFalse(
+ os.path.exists(json_file), "Found unexpected file %s" % json_file
+ )
+
+ def test_get_data_writes_json_instance_data_on_success(self):
+ """get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ sys_info = {
+ "python": "3.7",
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id",
+ return_value="canonical_cloud_id",
+ ):
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ expected = {
+ "base64_encoded_keys": [],
+ "merged_cfg": REDACT_SENSITIVE_VALUE,
+ "sensitive_keys": ["merged_cfg"],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud_id": "canonical_cloud_id",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "kernel_release": "5.4.0-24-generic",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "subplatform": "unknown",
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ },
+ },
+ }
+ self.assertEqual(expected, util.load_json(content))
+ file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+ self.assertEqual(expected, util.load_json(content))
+
+ def test_get_data_writes_redacted_public_json_instance_data(self):
+ """get_data writes redacted content to public INSTANCE_JSON_FILE."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ )
+ self.assertCountEqual(
+ (
+ "merged_cfg",
+ "security-credentials",
+ ),
+ datasource.sensitive_metadata_keys,
+ )
+ sys_info = {
+ "python": "3.7",
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ redacted = util.load_json(util.load_file(json_file))
+ expected = {
+ "base64_encoded_keys": [],
+ "merged_cfg": REDACT_SENSITIVE_VALUE,
+ "sensitive_keys": [
+ "ds/meta_data/some/security-credentials",
+ "merged_cfg",
+ ],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "kernel_release": "5.4.0-24-generic",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "subplatform": "unknown",
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {"security-credentials": REDACT_SENSITIVE_VALUE},
+ },
+ },
+ }
+ self.assertCountEqual(expected, redacted)
+ file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+
+ def test_get_data_writes_json_instance_data_sensitive(self):
+ """
+ get_data writes unmodified data to sensitive file as root-readonly.
+ """
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ )
+ sys_info = {
+ "python": "3.7",
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
+
+ self.assertCountEqual(
+ (
+ "merged_cfg",
+ "security-credentials",
+ ),
+ datasource.sensitive_metadata_keys,
+ )
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id",
+ return_value="canonical-cloud-id",
+ ):
+ datasource.get_data()
+ sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+ content = util.load_file(sensitive_json_file)
+ expected = {
+ "base64_encoded_keys": [],
+ "merged_cfg": {
+ "_doc": (
+ "Merged cloud-init system config from "
+ "/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/"
+ ),
+ "datasource": {"_undef": {"key1": False}},
+ },
+ "sensitive_keys": [
+ "ds/meta_data/some/security-credentials",
+ "merged_cfg",
+ ],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud_id": "canonical-cloud-id",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "kernel_release": "5.4.0-24-generic",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "subplatform": "unknown",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ },
+ }
+ self.assertCountEqual(expected, util.load_json(content))
+ file_stat = os.stat(sensitive_json_file)
+ self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
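+        # Contrast with the public INSTANCE_JSON_FILE, which is 0o644.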
+ self.assertEqual(expected, util.load_json(content))
+
+ def test_get_data_handles_redacted_unserializable_content(self):
+ """get_data warns unserializable content in INSTANCE_JSON_FILE."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={"key1": "val1", "key2": {"key2.1": self.paths}},
+ )
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ expected_metadata = {
+ "key1": "val1",
+ "key2": {
+ "key2.1": (
+ "Warning: redacted unserializable type <class"
+ " 'cloudinit.helpers.Paths'>"
+ )
+ },
+ }
+ instance_json = util.load_json(content)
+ self.assertEqual(expected_metadata, instance_json["ds"]["meta_data"])
+
+ def test_persist_instance_data_writes_ec2_metadata_when_set(self):
+ """When ec2_metadata class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ datasource.ec2_metadata = UNSET
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn("ec2_metadata", instance_data["ds"])
+ datasource.ec2_metadata = {"ec2stuff": "is good"}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {"ec2stuff": "is good"}, instance_data["ds"]["ec2_metadata"]
+ )
+
+ def test_persist_instance_data_writes_canonical_cloud_id_and_symlink(self):
+ """canonical-cloud-id class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ cloud_id_link = os.path.join(tmp, "cloud-id")
+ cloud_id_file = os.path.join(tmp, "cloud-id-my-cloud")
+ cloud_id2_file = os.path.join(tmp, "cloud-id-my-cloud2")
+ for filename in (cloud_id_file, cloud_id_link, cloud_id2_file):
+ self.assertFalse(
+ os.path.exists(filename), "Unexpected link found {filename}"
+ )
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id", return_value="my-cloud"
+ ):
+ datasource.get_data()
+ self.assertEqual("my-cloud\n", util.load_file(cloud_id_link))
+ # A symlink with the generic /run/cloud-init/cloud-id link is present
+ self.assertTrue(util.is_link(cloud_id_link))
+ # When cloud-id changes, symlink and content change
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id", return_value="my-cloud2"
+ ):
+ datasource.persist_instance_data()
+ self.assertEqual("my-cloud2\n", util.load_file(cloud_id2_file))
+ # Previous cloud-id-<cloud-type> file removed
+ self.assertFalse(os.path.exists(cloud_id_file))
+ # Generic link persisted which contains canonical-cloud-id as content
+ self.assertTrue(util.is_link(cloud_id_link))
+ self.assertEqual("my-cloud2\n", util.load_file(cloud_id_link))
+
+ def test_persist_instance_data_writes_network_json_when_set(self):
+ """When network_data.json class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn("network_json", instance_data["ds"])
+ datasource.network_json = {"network_json": "is good"}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {"network_json": "is good"}, instance_data["ds"]["network_json"]
+ )
+
+ def test_get_data_base64encodes_unserializable_bytes(self):
+ """On py3, get_data base64encodes any unserializable content."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={"key1": "val1", "key2": {"key2.1": b"\x123"}},
+ )
+ self.assertTrue(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ instance_json = util.load_json(content)
+ self.assertCountEqual(
+ ["ds/meta_data/key2/key2.1"], instance_json["base64_encoded_keys"]
+ )
+ self.assertEqual(
+ {"key1": "val1", "key2": {"key2.1": "EjM="}},
+ instance_json["ds"]["meta_data"],
+ )
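+        # "EjM=" is the base64 encoding of b"\x123" (the bytes 0x12 0x33).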
+
+ def test_get_hostname_subclass_support(self):
+ """Validate get_hostname signature on all subclasses of DataSource."""
+ base_args = inspect.getfullargspec(DataSource.get_hostname)
+ # Import all DataSource subclasses so we can inspect them.
+ modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
+ for _loc, name in modules.items():
+ mod_locs, _ = importer.find_module(name, ["cloudinit.sources"], [])
+ if mod_locs:
+ importer.import_module(mod_locs[0])
+ for child in DataSource.__subclasses__():
+ if "Test" in child.dsname:
+ continue
+ self.assertEqual(
+ base_args,
+ inspect.getfullargspec(child.get_hostname),
+ "%s does not implement DataSource.get_hostname params" % child,
+ )
+ for grandchild in child.__subclasses__():
+ self.assertEqual(
+ base_args,
+ inspect.getfullargspec(grandchild.get_hostname),
+ "%s does not implement DataSource.get_hostname params"
+ % grandchild,
+ )
+
+ def test_clear_cached_attrs_resets_cached_attr_class_attributes(self):
+ """Class attributes listed in cached_attr_defaults are reset."""
+ count = 0
+ # Setup values for all cached class attributes
+ for attr, value in self.datasource.cached_attr_defaults:
+ setattr(self.datasource, attr, count)
+ count += 1
+ self.datasource._dirty_cache = True
+ self.datasource.clear_cached_attrs()
+ for attr, value in self.datasource.cached_attr_defaults:
+ self.assertEqual(value, getattr(self.datasource, attr))
+
+ def test_clear_cached_attrs_noops_on_clean_cache(self):
+ """Class attributes listed in cached_attr_defaults are reset."""
+ count = 0
+ # Setup values for all cached class attributes
+ for attr, _ in self.datasource.cached_attr_defaults:
+ setattr(self.datasource, attr, count)
+ count += 1
+ self.datasource._dirty_cache = False # Fake clean cache
+ self.datasource.clear_cached_attrs()
+ count = 0
+ for attr, _ in self.datasource.cached_attr_defaults:
+ self.assertEqual(count, getattr(self.datasource, attr))
+ count += 1
+
+ def test_clear_cached_attrs_skips_non_attr_class_attributes(self):
+ """Skip any cached_attr_defaults which aren't class attributes."""
+ self.datasource._dirty_cache = True
+ self.datasource.clear_cached_attrs()
+ for attr in ("ec2_metadata", "network_json"):
+ self.assertFalse(hasattr(self.datasource, attr))
+
+ def test_clear_cached_attrs_of_custom_attrs(self):
+ """Custom attr_values can be passed to clear_cached_attrs."""
+ self.datasource._dirty_cache = True
+ cached_attr_name = self.datasource.cached_attr_defaults[0][0]
+ setattr(self.datasource, cached_attr_name, "himom")
+ self.datasource.myattr = "orig"
+ self.datasource.clear_cached_attrs(
+ attr_defaults=(("myattr", "updated"),)
+ )
+ self.assertEqual("himom", getattr(self.datasource, cached_attr_name))
+ self.assertEqual("updated", self.datasource.myattr)
+
+ @mock.patch.dict(
+ DataSource.default_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ @mock.patch.dict(
+ DataSource.supported_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_update_metadata_only_acts_on_supported_update_events(self):
+ """update_metadata_if_supported wont get_data on unsupported events."""
+ self.assertEqual(
+ {EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])},
+ self.datasource.default_update_events,
+ )
+
+ def fake_get_data():
+ raise Exception("get_data should not be called")
+
+ self.datasource.get_data = fake_get_data
+ self.assertFalse(
+ self.datasource.update_metadata_if_supported(
+ source_event_types=[EventType.BOOT]
+ )
+ )
+
+ @mock.patch.dict(
+ DataSource.supported_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_update_metadata_returns_true_on_supported_update_event(self):
+ """update_metadata_if_supported returns get_data on supported events"""
+
+ def fake_get_data():
+ return True
+
+ self.datasource.get_data = fake_get_data
+ self.datasource._network_config = "something"
+ self.datasource._dirty_cache = True
+ self.assertTrue(
+ self.datasource.update_metadata_if_supported(
+ source_event_types=[
+ EventType.BOOT,
+ EventType.BOOT_NEW_INSTANCE,
+ ]
+ )
+ )
+ self.assertEqual(UNSET, self.datasource._network_config)
+
+ self.assertIn(
+ "DEBUG: Update datasource metadata and network config due to"
+ " events: boot-new-instance",
+ self.logs.getvalue(),
+ )
+
+
+class TestRedactSensitiveData(CiTestCase):
+ def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
+ """When sensitive_keys is absent or empty from metadata do nothing."""
+ md = {"my": "data"}
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value="redacted")
+ )
+ md["sensitive_keys"] = []
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value="redacted")
+ )
+
+ def test_redact_sensitive_data_redacts_exact_match_name(self):
+ """Only exact matched sensitive_keys are redacted from metadata."""
+ md = {
+ "sensitive_keys": ["md/secure"],
+ "md": {"secure": "s3kr1t", "insecure": "publik"},
+ }
+ secure_md = copy.deepcopy(md)
+ secure_md["md"]["secure"] = "redacted"
+ self.assertEqual(
+ secure_md, redact_sensitive_keys(md, redact_value="redacted")
+ )
+
+ def test_redact_sensitive_data_does_redacts_with_default_string(self):
+ """When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
+ md = {
+ "sensitive_keys": ["md/secure"],
+ "md": {"secure": "s3kr1t", "insecure": "publik"},
+ }
+ secure_md = copy.deepcopy(md)
+ secure_md["md"]["secure"] = "redacted for non-root user"
+ self.assertEqual(secure_md, redact_sensitive_keys(md))
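+
+    # Note: entries in 'sensitive_keys' are '/'-delimited paths into the
+    # metadata dict (e.g. "md/secure"), so only exact path matches are
+    # redacted.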
+
+
+class TestCanonicalCloudID(CiTestCase):
+ def test_cloud_id_returns_platform_on_unknowns(self):
+ """When region and cloud_name are unknown, return platform."""
+ self.assertEqual(
+ "platform",
+ canonical_cloud_id(
+ cloud_name=METADATA_UNKNOWN,
+ region=METADATA_UNKNOWN,
+ platform="platform",
+ ),
+ )
+
+ def test_cloud_id_returns_platform_on_none(self):
+ """When region and cloud_name are unknown, return platform."""
+ self.assertEqual(
+ "platform",
+ canonical_cloud_id(
+ cloud_name=None, region=None, platform="platform"
+ ),
+ )
+
+ def test_cloud_id_returns_cloud_name_on_unknown_region(self):
+ """When region is unknown, return cloud_name."""
+ for region in (None, METADATA_UNKNOWN):
+ self.assertEqual(
+ "cloudname",
+ canonical_cloud_id(
+ cloud_name="cloudname", region=region, platform="platform"
+ ),
+ )
+
+ def test_cloud_id_returns_platform_on_unknown_cloud_name(self):
+ """When region is set but cloud_name is unknown return cloud_name."""
+ self.assertEqual(
+ "platform",
+ canonical_cloud_id(
+ cloud_name=METADATA_UNKNOWN,
+ region="region",
+ platform="platform",
+ ),
+ )
+
+ def test_cloud_id_aws_based_on_region_and_cloud_name(self):
+ """When cloud_name is aws, return proper cloud-id based on region."""
+ self.assertEqual(
+ "aws-china",
+ canonical_cloud_id(
+ cloud_name="aws", region="cn-north-1", platform="platform"
+ ),
+ )
+ self.assertEqual(
+ "aws",
+ canonical_cloud_id(
+ cloud_name="aws", region="us-east-1", platform="platform"
+ ),
+ )
+ self.assertEqual(
+ "aws-gov",
+ canonical_cloud_id(
+ cloud_name="aws", region="us-gov-1", platform="platform"
+ ),
+ )
+        self.assertEqual(  # Overridden non-aws cloud_name is returned
+ "!aws",
+ canonical_cloud_id(
+ cloud_name="!aws", region="us-gov-1", platform="platform"
+ ),
+ )
+
+ def test_cloud_id_azure_based_on_region_and_cloud_name(self):
+ """Report cloud-id when cloud_name is azure and region is in china."""
+ self.assertEqual(
+ "azure-china",
+ canonical_cloud_id(
+ cloud_name="azure", region="chinaeast", platform="platform"
+ ),
+ )
+ self.assertEqual(
+ "azure",
+ canonical_cloud_id(
+ cloud_name="azure", region="!chinaeast", platform="platform"
+ ),
+ )
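+
+    # Taken together: region-specific ids (aws-china, aws-gov, azure-china)
+    # take precedence, then cloud_name, then platform as a final fallback.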
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py
new file mode 100644
index 00000000..e11c3746
--- /dev/null
+++ b/tests/unittests/sources/test_lxd.py
@@ -0,0 +1,394 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+import re
+import stat
+from collections import namedtuple
+from copy import deepcopy
+from unittest import mock
+
+import pytest
+import yaml
+
+from cloudinit.sources import UNSET
+from cloudinit.sources import DataSourceLXD as lxd
+from cloudinit.sources import InvalidMetaDataException
+
+DS_PATH = "cloudinit.sources.DataSourceLXD."
+
+
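+# Minimal stand-in for os.lstat() results; only st_mode is consulted here.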
+LStatResponse = namedtuple("lstatresponse", "st_mode")
+
+
+NETWORK_V1 = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "subnets": [{"type": "dhcp", "control": "auto"}],
+ }
+ ],
+}
+
+
+def _add_network_v1_device(devname) -> dict:
+ """Helper to inject device name into default network v1 config."""
+ network_cfg = deepcopy(NETWORK_V1)
+ network_cfg["config"][0]["name"] = devname
+ return network_cfg
+
+
+LXD_V1_METADATA = {
+ "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "network-config": NETWORK_V1,
+ "user-data": "#cloud-config\npackages: [sl]\n",
+ "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n",
+ "config": {
+ "user.user-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "user.vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n",
+ "user.network-config": yaml.safe_dump(NETWORK_V1),
+ },
+}
+
+
+@pytest.fixture
+def lxd_metadata():
+ return LXD_V1_METADATA
+
+
+@pytest.fixture
+def lxd_ds(request, paths, lxd_metadata):
+ """
+ Return an instantiated DataSourceLXD.
+
+ This also performs the mocking required for the default test case:
+ * ``is_platform_viable`` returns True,
+ * ``read_metadata`` returns ``LXD_V1_METADATA``
+
+ (This uses the paths fixture for the required helpers.Paths object)
+ """
+ with mock.patch(DS_PATH + "is_platform_viable", return_value=True):
+ with mock.patch(DS_PATH + "read_metadata", return_value=lxd_metadata):
+ yield lxd.DataSourceLXD(
+ sys_cfg={}, distro=mock.Mock(), paths=paths
+ )
+
+
+class TestGenerateFallbackNetworkConfig:
+ @pytest.mark.parametrize(
+ "uname_machine,systemd_detect_virt,expected",
+ (
+            # A systemd_detect_virt of None means systemd-detect-virt is
+            # absent; the default NETWORK_V1 config is returned.
+ ({}, None, NETWORK_V1),
+ ({}, None, NETWORK_V1),
+ ("anything", "lxc\n", NETWORK_V1),
+ # `uname -m` on kvm determines devname
+ ("x86_64", "kvm\n", _add_network_v1_device("enp5s0")),
+ ("ppc64le", "kvm\n", _add_network_v1_device("enp0s5")),
+ ("s390x", "kvm\n", _add_network_v1_device("enc9")),
+ ),
+ )
+ @mock.patch(DS_PATH + "util.system_info")
+ @mock.patch(DS_PATH + "subp.subp")
+ @mock.patch(DS_PATH + "subp.which")
+ def test_net_v2_based_on_network_mode_virt_type_and_uname_machine(
+ self,
+ m_which,
+ m_subp,
+ m_system_info,
+ uname_machine,
+ systemd_detect_virt,
+ expected,
+ ):
+ """Return network config v2 based on uname -m, systemd-detect-virt."""
+ if systemd_detect_virt is None:
+ m_which.return_value = None
+ m_system_info.return_value = {"uname": ["", "", "", "", uname_machine]}
+ m_subp.return_value = (systemd_detect_virt, "")
+ assert expected == lxd.generate_fallback_network_config()
+ if systemd_detect_virt is None:
+ assert 0 == m_subp.call_count
+ assert 0 == m_system_info.call_count
+ else:
+ assert [
+ mock.call(["systemd-detect-virt"])
+ ] == m_subp.call_args_list
+ if systemd_detect_virt != "kvm\n":
+ assert 0 == m_system_info.call_count
+ else:
+ assert 1 == m_system_info.call_count
+
+
+class TestDataSourceLXD:
+ def test_platform_info(self, lxd_ds):
+ assert "LXD" == lxd_ds.dsname
+ assert "lxd" == lxd_ds.cloud_name
+ assert "lxd" == lxd_ds.platform_type
+
+ def test_subplatform(self, lxd_ds):
+ assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == lxd_ds.subplatform
+
+ def test__get_data(self, lxd_ds):
+ """get_data calls read_metadata, setting appropiate instance attrs."""
+ assert UNSET == lxd_ds._crawled_metadata
+ assert UNSET == lxd_ds._network_config
+ assert None is lxd_ds.userdata_raw
+ assert True is lxd_ds._get_data()
+ assert LXD_V1_METADATA == lxd_ds._crawled_metadata
+        # network-config is the parsed dict, not the raw YAML string
+ assert NETWORK_V1 == lxd_ds._network_config
+ # Any user-data and vendor-data are saved as raw
+ assert LXD_V1_METADATA["user-data"] == lxd_ds.userdata_raw
+ assert LXD_V1_METADATA["vendor-data"] == lxd_ds.vendordata_raw
+
+
+class TestIsPlatformViable:
+ @pytest.mark.parametrize(
+ "exists,lstat_mode,expected",
+ (
+ (False, None, False),
+ (True, stat.S_IFREG, False),
+ (True, stat.S_IFSOCK, True),
+ ),
+ )
+ @mock.patch(DS_PATH + "os.lstat")
+ @mock.patch(DS_PATH + "os.path.exists")
+ def test_expected_viable(
+ self, m_exists, m_lstat, exists, lstat_mode, expected
+ ):
+ """Return True only when LXD_SOCKET_PATH exists and is a socket."""
+ m_exists.return_value = exists
+ m_lstat.return_value = LStatResponse(lstat_mode)
+ assert expected is lxd.is_platform_viable()
+ m_exists.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)])
+ if exists:
+ m_lstat.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)])
+ else:
+ assert 0 == m_lstat.call_count
+
+
+class TestReadMetadata:
+ @pytest.mark.parametrize(
+ "url_responses,expected,logs",
+ (
+ ( # Assert non-JSON format from config route
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": "[NOT_JSON",
+ },
+ InvalidMetaDataException(
+ "Unable to determine cloud-init config from"
+ " http://lxd/1.0/config. Expected JSON but found:"
+ " [NOT_JSON"
+ ),
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ ],
+ ),
+ ( # Assert success on just meta-data
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": "[]",
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {},
+ "meta-data": "local-hostname: md\n",
+ },
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ ],
+ ),
+ ( # Assert 404s for config routes log skipping
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": (
+ '["/1.0/config/user.custom1",'
+ ' "/1.0/config/user.meta-data",'
+ ' "/1.0/config/user.network-config",'
+ ' "/1.0/config/user.user-data",'
+ ' "/1.0/config/user.vendor-data"]'
+ ),
+ "http://lxd/1.0/config/user.custom1": "custom1",
+ "http://lxd/1.0/config/user.meta-data": "", # 404
+ "http://lxd/1.0/config/user.network-config": "net-config",
+ "http://lxd/1.0/config/user.user-data": "", # 404
+ "http://lxd/1.0/config/user.vendor-data": "", # 404
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {
+ "user.custom1": "custom1", # Not promoted
+ "user.network-config": "net-config",
+ },
+ "meta-data": "local-hostname: md\n",
+ "network-config": "net-config",
+ },
+ [
+ "Skipping http://lxd/1.0/config/user.vendor-data on"
+ " [HTTP:404]",
+ "Skipping http://lxd/1.0/config/user.meta-data on"
+ " [HTTP:404]",
+ "Skipping http://lxd/1.0/config/user.user-data on"
+ " [HTTP:404]",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/user.network-config",
+ ],
+ ),
+ ( # Assert all CONFIG_KEY_ALIASES promoted to top-level keys
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": (
+ '["/1.0/config/user.custom1",'
+ ' "/1.0/config/user.meta-data",'
+ ' "/1.0/config/user.network-config",'
+ ' "/1.0/config/user.user-data",'
+ ' "/1.0/config/user.vendor-data"]'
+ ),
+ "http://lxd/1.0/config/user.custom1": "custom1",
+ "http://lxd/1.0/config/user.meta-data": "meta-data",
+ "http://lxd/1.0/config/user.network-config": "net-config",
+ "http://lxd/1.0/config/user.user-data": "user-data",
+ "http://lxd/1.0/config/user.vendor-data": "vendor-data",
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {
+ "user.custom1": "custom1", # Not promoted
+ "user.meta-data": "meta-data",
+ "user.network-config": "net-config",
+ "user.user-data": "user-data",
+ "user.vendor-data": "vendor-data",
+ },
+ "meta-data": "local-hostname: md\n",
+ "network-config": "net-config",
+ "user-data": "user-data",
+ "vendor-data": "vendor-data",
+ },
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/user.network-config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data",
+ ],
+ ),
+            ( # Assert cloud-init.* config key values preferred over user.*
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": (
+ '["/1.0/config/user.meta-data",'
+ ' "/1.0/config/user.network-config",'
+ ' "/1.0/config/user.user-data",'
+ ' "/1.0/config/user.vendor-data",'
+ ' "/1.0/config/cloud-init.network-config",'
+ ' "/1.0/config/cloud-init.user-data",'
+ ' "/1.0/config/cloud-init.vendor-data"]'
+ ),
+ "http://lxd/1.0/config/user.meta-data": "user.meta-data",
+ "http://lxd/1.0/config/user.network-config": (
+ "user.network-config"
+ ),
+ "http://lxd/1.0/config/user.user-data": "user.user-data",
+ "http://lxd/1.0/config/user.vendor-data": (
+ "user.vendor-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.meta-data": (
+ "cloud-init.meta-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.network-config": (
+ "cloud-init.network-config"
+ ),
+ "http://lxd/1.0/config/cloud-init.user-data": (
+ "cloud-init.user-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.vendor-data": (
+ "cloud-init.vendor-data"
+ ),
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {
+ "user.meta-data": "user.meta-data",
+ "user.network-config": "user.network-config",
+ "user.user-data": "user.user-data",
+ "user.vendor-data": "user.vendor-data",
+ "cloud-init.network-config": (
+ "cloud-init.network-config"
+ ),
+ "cloud-init.user-data": "cloud-init.user-data",
+ "cloud-init.vendor-data": "cloud-init.vendor-data",
+ },
+ "meta-data": "local-hostname: md\n",
+ "network-config": "cloud-init.network-config",
+ "user-data": "cloud-init.user-data",
+ "vendor-data": "cloud-init.vendor-data",
+ },
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/user.network-config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/cloud-init.network-config",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/cloud-init.user-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/cloud-init.vendor-data",
+ "Ignoring LXD config user.user-data in favor of"
+ " cloud-init.user-data value.",
+ "Ignoring LXD config user.network-config in favor of"
+ " cloud-init.network-config value.",
+ "Ignoring LXD config user.vendor-data in favor of"
+ " cloud-init.vendor-data value.",
+ ],
+ ),
+ ),
+ )
+ @mock.patch.object(lxd.requests.Session, "get")
+ def test_read_metadata_handles_unexpected_content_or_http_status(
+ self, session_get, url_responses, expected, logs, caplog
+ ):
+ """read_metadata handles valid and invalid content and status codes."""
+
+ def fake_get(url):
+ """Mock Response json, ok, status_code, text from url_responses."""
+ m_resp = mock.MagicMock()
+ content = url_responses.get(url, "")
+ m_resp.json.side_effect = lambda: json.loads(content)
+ if content:
+ mock_ok = mock.PropertyMock(return_value=True)
+ mock_status_code = mock.PropertyMock(return_value=200)
+ else:
+ mock_ok = mock.PropertyMock(return_value=False)
+ mock_status_code = mock.PropertyMock(return_value=404)
+ type(m_resp).ok = mock_ok
+ type(m_resp).status_code = mock_status_code
+ mock_text = mock.PropertyMock(return_value=content)
+ type(m_resp).text = mock_text
+ return m_resp
+
+ session_get.side_effect = fake_get
+
+ if isinstance(expected, Exception):
+ with pytest.raises(type(expected), match=re.escape(str(expected))):
+ lxd.read_metadata()
+ else:
+ assert expected == lxd.read_metadata()
+ caplogs = caplog.text
+ for log in logs:
+ assert log in caplogs
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_maas.py b/tests/unittests/sources/test_maas.py
new file mode 100644
index 00000000..e95ba374
--- /dev/null
+++ b/tests/unittests/sources/test_maas.py
@@ -0,0 +1,227 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import shutil
+import tempfile
+from copy import copy
+from unittest import mock
+
+import yaml
+
+from cloudinit import url_helper
+from cloudinit.sources import DataSourceMAAS
+from tests.unittests.helpers import CiTestCase, populate_dir
+
+
+class TestMAASDataSource(CiTestCase):
+ def setUp(self):
+ super(TestMAASDataSource, self).setUp()
+ # Make a temp directory for tests to use.
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def test_seed_dir_valid(self):
+ """Verify a valid seeddir is read as such."""
+
+ userdata = b"valid01-userdata"
+ data = {
+ "meta-data/instance-id": "i-valid01",
+ "meta-data/local-hostname": "valid01-hostname",
+ "user-data": userdata,
+ "public-keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname",
+ }
+
+ my_d = os.path.join(self.tmp, "valid")
+ populate_dir(my_d, data)
+
+ ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)
+
+ self.assertEqual(userdata, ud)
+ for key in ("instance-id", "local-hostname"):
+ self.assertEqual(data["meta-data/" + key], md[key])
+
+ # verify that 'userdata' is not returned as part of the metadata
+ self.assertFalse(("user-data" in md))
+ self.assertIsNone(vd)
+
+ def test_seed_dir_valid_extra(self):
+ """Verify extra files do not affect seed_dir validity."""
+
+ userdata = b"valid-extra-userdata"
+ data = {
+ "meta-data/instance-id": "i-valid-extra",
+ "meta-data/local-hostname": "valid-extra-hostname",
+ "user-data": userdata,
+ "foo": "bar",
+ }
+
+ my_d = os.path.join(self.tmp, "valid_extra")
+ populate_dir(my_d, data)
+
+ ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d)
+
+ self.assertEqual(userdata, ud)
+ for key in ("instance-id", "local-hostname"):
+ self.assertEqual(data["meta-data/" + key], md[key])
+
+ # additional files should not just appear as keys in metadata at
+ # the moment
+ self.assertNotIn("foo", md)
+
+ def test_seed_dir_invalid(self):
+ """Verify that invalid seed_dir raises MAASSeedDirMalformed."""
+
+ valid = {
+ "instance-id": "i-instanceid",
+ "local-hostname": "test-hostname",
+ "user-data": "",
+ }
+
+ my_based = os.path.join(self.tmp, "valid_extra")
+
+ # missing 'local-hostname'
+ my_d = "%s-01" % my_based
+ invalid_data = copy(valid)
+ del invalid_data["local-hostname"]
+ populate_dir(my_d, invalid_data)
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirMalformed,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
+
+ # missing 'instance-id'
+ my_d = "%s-02" % my_based
+ invalid_data = copy(valid)
+ del invalid_data["instance-id"]
+ populate_dir(my_d, invalid_data)
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirMalformed,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
+
+ def test_seed_dir_none(self):
+ """Verify that empty seed_dir raises MAASSeedDirNone."""
+
+ my_d = os.path.join(self.tmp, "valid_empty")
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirNone,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
+
+ def test_seed_dir_missing(self):
+ """Verify that missing seed_dir raises MAASSeedDirNone."""
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirNone,
+ DataSourceMAAS.read_maas_seed_dir,
+ os.path.join(self.tmp, "nonexistantdirectory"),
+ )
+
+ def mock_read_maas_seed_url(self, data, seed, version="19991231"):
+ """mock up readurl to appear as a web server at seed has provided data.
+ return what read_maas_seed_url returns."""
+
+ def my_readurl(*args, **kwargs):
+ if len(args):
+ url = args[0]
+ else:
+ url = kwargs["url"]
+ prefix = "%s/%s/" % (seed, version)
+ if not url.startswith(prefix):
+ raise ValueError("unexpected call %s" % url)
+
+ short = url[len(prefix) :]
+ if short not in data:
+ raise url_helper.UrlError("not found", code=404, url=url)
+ return url_helper.StringResponse(data[short])
+
+ # Now do the actual call of the code under test.
+ with mock.patch("cloudinit.url_helper.readurl") as mock_readurl:
+ mock_readurl.side_effect = my_readurl
+ return DataSourceMAAS.read_maas_seed_url(seed, version=version)
+
+ def test_seed_url_valid(self):
+ """Verify that valid seed_url is read as such."""
+ valid = {
+ "meta-data/instance-id": "i-instanceid",
+ "meta-data/local-hostname": "test-hostname",
+ "meta-data/public-keys": "test-hostname",
+ "meta-data/vendor-data": b"my-vendordata",
+ "user-data": b"foodata",
+ }
+ my_seed = "http://example.com/xmeta"
+ my_ver = "1999-99-99"
+ ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver)
+
+ self.assertEqual(valid["meta-data/instance-id"], md["instance-id"])
+ self.assertEqual(
+ valid["meta-data/local-hostname"], md["local-hostname"]
+ )
+ self.assertEqual(valid["meta-data/public-keys"], md["public-keys"])
+ self.assertEqual(valid["user-data"], ud)
+ # vendor-data is yaml, which decodes to a string
+ self.assertEqual(valid["meta-data/vendor-data"].decode(), vd)
+
+ def test_seed_url_vendor_data_dict(self):
+ expected_vd = {"key1": "value1"}
+ valid = {
+ "meta-data/instance-id": "i-instanceid",
+ "meta-data/local-hostname": "test-hostname",
+ "meta-data/vendor-data": yaml.safe_dump(expected_vd).encode(),
+ }
+ _ud, md, vd = self.mock_read_maas_seed_url(
+ valid, "http://example.com/foo"
+ )
+
+ self.assertEqual(valid["meta-data/instance-id"], md["instance-id"])
+ self.assertEqual(expected_vd, vd)
+
+
+@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper")
+class TestGetOauthHelper(CiTestCase):
+ base_cfg = {
+ "consumer_key": "FAKE_CONSUMER_KEY",
+ "token_key": "FAKE_TOKEN_KEY",
+ "token_secret": "FAKE_TOKEN_SECRET",
+ "consumer_secret": None,
+ }
+
+ def test_all_required(self, m_helper):
+ """Valid config as expected."""
+ DataSourceMAAS.get_oauth_helper(self.base_cfg.copy())
+ m_helper.assert_has_calls([mock.call(**self.base_cfg)])
+
+ def test_other_fields_not_passed_through(self, m_helper):
+ """Only relevant fields are passed through."""
+ mycfg = self.base_cfg.copy()
+ mycfg["unrelated_field"] = "unrelated"
+ DataSourceMAAS.get_oauth_helper(mycfg)
+ m_helper.assert_has_calls([mock.call(**self.base_cfg)])
+
+
+class TestGetIdHash(CiTestCase):
+ v1_cfg = {
+ "consumer_key": "CKEY",
+ "token_key": "TKEY",
+ "token_secret": "TSEC",
+ }
+ v1_id = (
+ "v1:403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392"
+ )
+
+ def test_v1_expected(self):
+ """Test v1 id generated as expected working behavior from config."""
+ result = DataSourceMAAS.get_id_from_ds_cfg(self.v1_cfg.copy())
+ self.assertEqual(self.v1_id, result)
+
+ def test_v1_extra_fields_are_ignored(self):
+ """Test v1 id ignores unused entries in config."""
+ cfg = self.v1_cfg.copy()
+ cfg["consumer_secret"] = "BOO"
+ cfg["unrelated"] = "HI MOM"
+ result = DataSourceMAAS.get_id_from_ds_cfg(cfg)
+ self.assertEqual(self.v1_id, result)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_nocloud.py b/tests/unittests/sources/test_nocloud.py
new file mode 100644
index 00000000..1f6b722d
--- /dev/null
+++ b/tests/unittests/sources/test_nocloud.py
@@ -0,0 +1,453 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import textwrap
+
+import yaml
+
+from cloudinit import dmi, helpers, util
+from cloudinit.sources.DataSourceNoCloud import DataSourceNoCloud as dsNoCloud
+from cloudinit.sources.DataSourceNoCloud import (
+ _maybe_remove_top_network,
+ parse_cmdline_data,
+)
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
+
+
+@mock.patch("cloudinit.sources.DataSourceNoCloud.util.is_lxd")
+class TestNoCloudDataSource(CiTestCase):
+ def setUp(self):
+ super(TestNoCloudDataSource, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+
+ self.cmdline = "root=TESTCMDLINE"
+
+ self.mocks = ExitStack()
+ self.addCleanup(self.mocks.close)
+
+ self.mocks.enter_context(
+ mock.patch.object(util, "get_cmdline", return_value=self.cmdline)
+ )
+ self.mocks.enter_context(
+ mock.patch.object(dmi, "read_dmi_data", return_value=None)
+ )
+
+ def _test_fs_config_is_read(self, fs_label, fs_label_to_search):
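+ """Assert NoCloud reads meta-data from a vfat device matching
+ fs_label."""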
+ vfat_device = "device-1"
+
+ def m_mount_cb(device, callback, mtype):
+ if device == vfat_device:
+ return {"meta-data": yaml.dump({"instance-id": "IID"})}
+ else:
+ return {}
+
+ def m_find_devs_with(query="", path=""):
+ if "TYPE=vfat" == query:
+ return [vfat_device]
+ elif "LABEL={}".format(fs_label) == query:
+ return [vfat_device]
+ else:
+ return []
+
+ self.mocks.enter_context(
+ mock.patch.object(
+ util, "find_devs_with", side_effect=m_find_devs_with
+ )
+ )
+ self.mocks.enter_context(
+ mock.patch.object(util, "mount_cb", side_effect=m_mount_cb)
+ )
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": fs_label_to_search}}}
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+
+ self.assertEqual(dsrc.metadata.get("instance-id"), "IID")
+ self.assertTrue(ret)
+
+ def test_nocloud_seed_dir_on_lxd(self, m_is_lxd):
+ md = {"instance-id": "IID", "dsmode": "local"}
+ ud = b"USER_DATA_HERE"
+ seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
+ populate_dir(
+ seed_dir, {"user-data": ud, "meta-data": yaml.safe_dump(md)}
+ )
+
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
+
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertEqual(dsrc.userdata_raw, ud)
+ self.assertEqual(dsrc.metadata, md)
+ self.assertEqual(dsrc.platform_type, "lxd")
+ self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir)
+ self.assertTrue(ret)
+
+ def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd):
+ """Non-lxd environments will list nocloud as the platform."""
+ m_is_lxd.return_value = False
+ md = {"instance-id": "IID", "dsmode": "local"}
+ seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
+ populate_dir(
+ seed_dir, {"user-data": "", "meta-data": yaml.safe_dump(md)}
+ )
+
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
+
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ self.assertTrue(dsrc.get_data())
+ self.assertEqual(dsrc.platform_type, "nocloud")
+ self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir)
+
+ def test_fs_label(self, m_is_lxd):
+ # find_devs_with should not be called if fs_label is None
+ class PseudoException(Exception):
+ pass
+
+ self.mocks.enter_context(
+ mock.patch.object(
+ util, "find_devs_with", side_effect=PseudoException
+ )
+ )
+
+ # by default, NoCloud should search for filesystems by label
+ sys_cfg = {"datasource": {"NoCloud": {}}}
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ self.assertRaises(PseudoException, dsrc.get_data)
+
+ # but disabling searching should just end up with None found
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertFalse(ret)
+
+ def test_fs_config_lowercase_label(self, m_is_lxd):
+ self._test_fs_config_is_read("cidata", "cidata")
+
+ def test_fs_config_uppercase_label(self, m_is_lxd):
+ self._test_fs_config_is_read("CIDATA", "cidata")
+
+ def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd):
+ self._test_fs_config_is_read("cidata", "CIDATA")
+
+ def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd):
+ self._test_fs_config_is_read("CIDATA", "CIDATA")
+
+ def test_no_datasource_expected(self, m_is_lxd):
+ # no source should be found when there is no cmdline or config
+ # seed and fs_label=None
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
+
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ self.assertFalse(dsrc.get_data())
+
+ def test_seed_in_config(self, m_is_lxd):
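+ """Seed data embedded in the datasource config is used directly."""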
+ data = {
+ "fs_label": None,
+ "meta-data": yaml.safe_dump({"instance-id": "IID"}),
+ "user-data": b"USER_DATA_RAW",
+ }
+
+ sys_cfg = {"datasource": {"NoCloud": data}}
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW")
+ self.assertEqual(dsrc.metadata.get("instance-id"), "IID")
+ self.assertTrue(ret)
+
+ def test_nocloud_seed_with_vendordata(self, m_is_lxd):
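+ """A vendor-data file in the seed dir is read into vendordata_raw."""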
+ md = {"instance-id": "IID", "dsmode": "local"}
+ ud = b"USER_DATA_HERE"
+ vd = b"THIS IS MY VENDOR_DATA"
+
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {
+ "user-data": ud,
+ "meta-data": yaml.safe_dump(md),
+ "vendor-data": vd,
+ },
+ )
+
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
+
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertEqual(dsrc.userdata_raw, ud)
+ self.assertEqual(dsrc.metadata, md)
+ self.assertEqual(dsrc.vendordata_raw, vd)
+ self.assertTrue(ret)
+
+ def test_nocloud_no_vendordata(self, m_is_lxd):
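+ """Without a vendor-data seed file, vendordata stays unset."""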
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {"user-data": b"ud", "meta-data": "instance-id: IID\n"},
+ )
+
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
+
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertEqual(dsrc.userdata_raw, b"ud")
+ self.assertFalse(dsrc.vendordata)
+ self.assertTrue(ret)
+
+ def test_metadata_network_interfaces(self, m_is_lxd):
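+ """ENI-style meta-data network-interfaces appear in network_config."""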
+ gateway = "103.225.10.1"
+ md = {
+ "instance-id": "i-abcd",
+ "local-hostname": "hostname1",
+ "network-interfaces": textwrap.dedent(
+ """\
+ auto eth0
+ iface eth0 inet static
+ hwaddr 00:16:3e:70:e1:04
+ address 103.225.10.12
+ netmask 255.255.255.0
+ gateway """
+ + gateway
+ + """
+ dns-servers 8.8.8.8"""
+ ),
+ }
+
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {"user-data": b"ud", "meta-data": yaml.dump(md) + "\n"},
+ )
+
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
+
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ # very simple check just for the strings above
+ self.assertIn(gateway, str(dsrc.network_config))
+
+ def test_metadata_network_config(self, m_is_lxd):
+ # network-config needs to get into network_config
+ netconf = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {
+ "user-data": b"ud",
+ "meta-data": "instance-id: IID\n",
+ "network-config": yaml.dump(netconf) + "\n",
+ },
+ )
+
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
+
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(netconf, dsrc.network_config)
+
+ def test_metadata_network_config_with_toplevel_network(self, m_is_lxd):
+ """network-config may have 'network' top level key."""
+ netconf = {"config": "disabled"}
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {
+ "user-data": b"ud",
+ "meta-data": "instance-id: IID\n",
+ "network-config": yaml.dump({"network": netconf}) + "\n",
+ },
+ )
+
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
+
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(netconf, dsrc.network_config)
+
+ def test_metadata_network_config_over_interfaces(self, m_is_lxd):
+ # network-config should override meta-data/network-interfaces
+ gateway = "103.225.10.1"
+ md = {
+ "instance-id": "i-abcd",
+ "local-hostname": "hostname1",
+ "network-interfaces": textwrap.dedent(
+ """\
+ auto eth0
+ iface eth0 inet static
+ hwaddr 00:16:3e:70:e1:04
+ address 103.225.10.12
+ netmask 255.255.255.0
+ gateway """
+ + gateway
+ + """
+ dns-servers 8.8.8.8"""
+ ),
+ }
+
+ netconf = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {
+ "user-data": b"ud",
+ "meta-data": yaml.dump(md) + "\n",
+ "network-config": yaml.dump(netconf) + "\n",
+ },
+ )
+
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
+
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(netconf, dsrc.network_config)
+ self.assertNotIn(gateway, str(dsrc.network_config))
+
+ @mock.patch("cloudinit.util.blkid")
+ def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid):
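+ """On FreeBSD, _get_devices uses find_devs_with_freebsd, not blkid."""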
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {"user-data": b"ud", "meta-data": "instance-id: IID\n"},
+ )
+
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
+
+ self.mocks.enter_context(
+ mock.patch.object(util, "is_FreeBSD", return_value=True)
+ )
+
+ def _mfind_devs_with_freebsd(
+ criteria=None,
+ oformat="device",
+ tag=None,
+ no_cache=False,
+ path=None,
+ ):
+ if not criteria:
+ return ["/dev/msdosfs/foo", "/dev/iso9660/foo"]
+ if criteria.startswith("LABEL="):
+ return ["/dev/msdosfs/foo", "/dev/iso9660/foo"]
+ elif criteria == "TYPE=vfat":
+ return ["/dev/msdosfs/foo"]
+ elif criteria == "TYPE=iso9660":
+ return ["/dev/iso9660/foo"]
+ return []
+
+ self.mocks.enter_context(
+ mock.patch.object(
+ util,
+ "find_devs_with_freebsd",
+ side_effect=_mfind_devs_with_freebsd,
+ )
+ )
+
+ dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc._get_devices("foo")
+ self.assertEqual(["/dev/msdosfs/foo", "/dev/iso9660/foo"], ret)
+ fake_blkid.assert_not_called()
+
+
+class TestParseCommandLineData(CiTestCase):
+ def test_parse_cmdline_data_valid(self):
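+ """Each valid ds=nocloud cmdline populates fill and returns True."""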
+ ds_id = "ds=nocloud"
+ pairs = (
+ ("root=/dev/sda1 %(ds_id)s", {}),
+ ("%(ds_id)s; root=/dev/foo", {}),
+ ("%(ds_id)s", {}),
+ ("%(ds_id)s;", {}),
+ ("%(ds_id)s;s=SEED", {"seedfrom": "SEED"}),
+ (
+ "%(ds_id)s;seedfrom=SEED;local-hostname=xhost",
+ {"seedfrom": "SEED", "local-hostname": "xhost"},
+ ),
+ ("%(ds_id)s;h=xhost", {"local-hostname": "xhost"}),
+ (
+ "%(ds_id)s;h=xhost;i=IID",
+ {"local-hostname": "xhost", "instance-id": "IID"},
+ ),
+ )
+
+ for (fmt, expected) in pairs:
+ fill = {}
+ cmdline = fmt % {"ds_id": ds_id}
+ ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline)
+ self.assertEqual(expected, fill)
+ self.assertTrue(ret)
+
+ def test_parse_cmdline_data_none(self):
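+ """Cmdlines not matching ds_id leave fill empty and return False."""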
+ ds_id = "ds=foo"
+ cmdlines = (
+ "root=/dev/sda1 ro",
+ "console=/dev/ttyS0 root=/dev/foo",
+ "",
+ "ds=foocloud",
+ "ds=foo-net",
+ "ds=nocloud;s=SEED",
+ )
+
+ for cmdline in cmdlines:
+ fill = {}
+ ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline)
+ self.assertEqual(fill, {})
+ self.assertFalse(ret)
+
+
+class TestMaybeRemoveToplevelNetwork(CiTestCase):
+ """test _maybe_remove_top_network function."""
+
+ basecfg = [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ]
+
+ def test_should_remove_safely(self):
+ mcfg = {"config": self.basecfg, "version": 1}
+ self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg}))
+
+ def test_no_remove_if_other_keys(self):
+ """should not shift if other keys at top level."""
+ mcfg = {
+ "network": {"config": self.basecfg, "version": 1},
+ "unknown_keyname": "keyval",
+ }
+ self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
+
+ def test_no_remove_if_non_dict(self):
+ """should not shift if not a dict."""
+ mcfg = {"network": '"content here'}
+ self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
+
+ def test_no_remove_if_missing_config_or_version(self):
+ """should not shift unless network entry has config and version."""
+ mcfg = {"network": {"config": self.basecfg}}
+ self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
+
+ mcfg = {"network": {"version": 1}}
+ self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
+
+ def test_remove_with_config_disabled(self):
+ """network/config=disabled should be shifted."""
+ mcfg = {"config": "disabled"}
+ self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg}))
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_opennebula.py b/tests/unittests/sources/test_opennebula.py
new file mode 100644
index 00000000..e05c4749
--- /dev/null
+++ b/tests/unittests/sources/test_opennebula.py
@@ -0,0 +1,1085 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import pwd
+import unittest
+
+import pytest
+
+from cloudinit import helpers, util
+from cloudinit.sources import DataSourceOpenNebula as ds
+from tests.unittests.helpers import CiTestCase, mock, populate_dir
+
+TEST_VARS = {
+ "VAR1": "single",
+ "VAR2": "double word",
+ "VAR3": "multi\nline\n",
+ "VAR4": "'single'",
+ "VAR5": "'double word'",
+ "VAR6": "'multi\nline\n'",
+ "VAR7": "single\\t",
+ "VAR8": "double\\tword",
+ "VAR9": "multi\\t\nline\n",
+ "VAR10": "\\", # expect '\'
+ "VAR11": "'", # expect '
+ "VAR12": "$", # expect $
+}
+
+INVALID_CONTEXT = ";"
+USER_DATA = "#cloud-config\napt_upgrade: true"
+SSH_KEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i"
+HOSTNAME = "foo.example.com"
+PUBLIC_IP = "10.0.0.3"
+MACADDR = "02:00:0a:12:01:01"
+IP_BY_MACADDR = "10.18.1.1"
+IP4_PREFIX = "24"
+IP6_GLOBAL = "2001:db8:1:0:400:c0ff:fea8:1ba"
+IP6_ULA = "fd01:dead:beaf:0:400:c0ff:fea8:1ba"
+IP6_GW = "2001:db8:1::ffff"
+IP6_PREFIX = "48"
+
+DS_PATH = "cloudinit.sources.DataSourceOpenNebula"
+
+
+class TestOpenNebulaDataSource(CiTestCase):
+ parsed_user = None
+ allowed_subp = ["bash"]
+
+ def setUp(self):
+ super(TestOpenNebulaDataSource, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+
+ # defaults for a few tests
+ self.ds = ds.DataSourceOpenNebula
+ self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula")
+ self.sys_cfg = {"datasource": {"OpenNebula": {"dsmode": "local"}}}
+
+ # We don't want 'sudo' called in tests, so we patch switch_user_cmd.
+ def my_switch_user_cmd(user):
+ self.parsed_user = user
+ return []
+
+ self.switch_user_cmd_real = ds.switch_user_cmd
+ ds.switch_user_cmd = my_switch_user_cmd
+
+ def tearDown(self):
+ ds.switch_user_cmd = self.switch_user_cmd_real
+ super(TestOpenNebulaDataSource, self).tearDown()
+
+ def test_get_data_non_contextdisk(self):
+ orig_find_devs_with = util.find_devs_with
+ try:
+ # don't try to look up CDs
+ util.find_devs_with = lambda n: []
+ dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertFalse(ret)
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+ def test_get_data_broken_contextdisk(self):
+ orig_find_devs_with = util.find_devs_with
+ try:
+ # don't try to look up CDs
+ util.find_devs_with = lambda n: []
+ populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT})
+ dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
+ self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+ def test_get_data_invalid_identity(self):
+ orig_find_devs_with = util.find_devs_with
+ try:
+ # generate non-existing system user name
+ sys_cfg = self.sys_cfg
+ invalid_user = "invalid"
+ while not sys_cfg["datasource"]["OpenNebula"].get("parseuser"):
+ try:
+ pwd.getpwnam(invalid_user)
+ invalid_user += "X"
+ except KeyError:
+ sys_cfg["datasource"]["OpenNebula"][
+ "parseuser"
+ ] = invalid_user
+
+ # don't try to look up CDs
+ util.find_devs_with = lambda n: []
+ populate_context_dir(self.seed_dir, {"KEY1": "val1"})
+ dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+ def test_get_data(self):
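+ """get_data succeeds on a populated seed dir and reports platform
+ details."""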
+ orig_find_devs_with = util.find_devs_with
+ try:
+ # don't try to look up CDs
+ util.find_devs_with = lambda n: []
+ populate_context_dir(self.seed_dir, {"KEY1": "val1"})
+ dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ finally:
+ util.find_devs_with = orig_find_devs_with
+ self.assertEqual("opennebula", dsrc.cloud_name)
+ self.assertEqual("opennebula", dsrc.platform_type)
+ self.assertEqual(
+ "seed-dir (%s/seed/opennebula)" % self.tmp, dsrc.subplatform
+ )
+
+ def test_seed_dir_non_contextdisk(self):
+ self.assertRaises(
+ ds.NonContextDiskDir,
+ ds.read_context_disk_dir,
+ self.seed_dir,
+ mock.Mock(),
+ )
+
+ def test_seed_dir_empty1_context(self):
+ populate_dir(self.seed_dir, {"context.sh": ""})
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertIsNone(results["userdata"])
+ self.assertEqual(results["metadata"], {})
+
+ def test_seed_dir_empty2_context(self):
+ populate_context_dir(self.seed_dir, {})
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertIsNone(results["userdata"])
+ self.assertEqual(results["metadata"], {})
+
+ def test_seed_dir_broken_context(self):
+ populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT})
+
+ self.assertRaises(
+ ds.BrokenContextDiskDir,
+ ds.read_context_disk_dir,
+ self.seed_dir,
+ mock.Mock(),
+ )
+
+ def test_context_parser(self):
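+ """context.sh variables are parsed verbatim into metadata."""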
+ populate_context_dir(self.seed_dir, TEST_VARS)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertTrue("metadata" in results)
+ self.assertEqual(TEST_VARS, results["metadata"])
+
+ def test_ssh_key(self):
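+ """SSH_KEY and SSH_PUBLIC_KEY lines become metadata public-keys."""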
+ public_keys = ["first key", "second key"]
+ for c in range(4):
+ for k in ("SSH_KEY", "SSH_PUBLIC_KEY"):
+ my_d = os.path.join(self.tmp, "%s-%i" % (k, c))
+ populate_context_dir(my_d, {k: "\n".join(public_keys)})
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
+
+ self.assertTrue("metadata" in results)
+ self.assertTrue("public-keys" in results["metadata"])
+ self.assertEqual(
+ public_keys, results["metadata"]["public-keys"]
+ )
+
+ public_keys.append(SSH_KEY % (c + 1,))
+
+ def test_user_data_plain(self):
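+ """USER_DATA/USERDATA is used verbatim when no encoding is set."""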
+ for k in ("USER_DATA", "USERDATA"):
+ my_d = os.path.join(self.tmp, k)
+ populate_context_dir(my_d, {k: USER_DATA, "USERDATA_ENCODING": ""})
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
+
+ self.assertTrue("userdata" in results)
+ self.assertEqual(USER_DATA, results["userdata"])
+
+ def test_user_data_encoding_required_for_decode(self):
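+ """base64 user data stays encoded unless USERDATA_ENCODING is set."""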
+ b64userdata = util.b64e(USER_DATA)
+ for k in ("USER_DATA", "USERDATA"):
+ my_d = os.path.join(self.tmp, k)
+ populate_context_dir(my_d, {k: b64userdata})
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
+
+ self.assertTrue("userdata" in results)
+ self.assertEqual(b64userdata, results["userdata"])
+
+ def test_user_data_base64_encoding(self):
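+ """USERDATA_ENCODING=base64 triggers decoding of user data."""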
+ for k in ("USER_DATA", "USERDATA"):
+ my_d = os.path.join(self.tmp, k)
+ populate_context_dir(
+ my_d, {k: util.b64e(USER_DATA), "USERDATA_ENCODING": "base64"}
+ )
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
+
+ self.assertTrue("userdata" in results)
+ self.assertEqual(USER_DATA, results["userdata"])
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_hostname(self, m_get_phys_by_mac):
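+ """Any of several context keys may provide local-hostname."""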
+ for dev in ("eth0", "ens3"):
+ m_get_phys_by_mac.return_value = {MACADDR: dev}
+ for k in (
+ "SET_HOSTNAME",
+ "HOSTNAME",
+ "PUBLIC_IP",
+ "IP_PUBLIC",
+ "ETH0_IP",
+ ):
+ my_d = os.path.join(self.tmp, k)
+ populate_context_dir(my_d, {k: PUBLIC_IP})
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
+
+ self.assertTrue("metadata" in results)
+ self.assertTrue("local-hostname" in results["metadata"])
+ self.assertEqual(
+ PUBLIC_IP, results["metadata"]["local-hostname"]
+ )
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_network_interfaces(self, m_get_phys_by_mac):
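+ """ETH0_* context variables yield the expected interface addresses."""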
+ for dev in ("eth0", "ens3"):
+ m_get_phys_by_mac.return_value = {MACADDR: dev}
+
+ # without ETH0_MAC
+ # (as in older OpenNebula releases?)
+ populate_context_dir(self.seed_dir, {"ETH0_IP": IP_BY_MACADDR})
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertTrue("network-interfaces" in results)
+ self.assertTrue(
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
+
+ # ETH0_IP and ETH0_MAC
+ populate_context_dir(
+ self.seed_dir, {"ETH0_IP": IP_BY_MACADDR, "ETH0_MAC": MACADDR}
+ )
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertTrue("network-interfaces" in results)
+ self.assertTrue(
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
+
+ # ETH0_IP with empty string and ETH0_MAC,
+ # as when the Virtual Network contains
+ # "AR = [ TYPE = ETHER ]"
+ populate_context_dir(
+ self.seed_dir, {"ETH0_IP": "", "ETH0_MAC": MACADDR}
+ )
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertTrue("network-interfaces" in results)
+ self.assertTrue(
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
+
+ # ETH0_MASK
+ populate_context_dir(
+ self.seed_dir,
+ {
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_MAC": MACADDR,
+ "ETH0_MASK": "255.255.0.0",
+ },
+ )
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertTrue("network-interfaces" in results)
+ self.assertTrue(
+ IP_BY_MACADDR + "/16"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
+
+ # ETH0_MASK with empty string
+ populate_context_dir(
+ self.seed_dir,
+ {
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_MAC": MACADDR,
+ "ETH0_MASK": "",
+ },
+ )
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertTrue("network-interfaces" in results)
+ self.assertTrue(
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
+
+ # ETH0_IP6
+ populate_context_dir(
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_MAC": MACADDR,
+ },
+ )
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertTrue("network-interfaces" in results)
+ self.assertTrue(
+ IP6_GLOBAL + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
+
+ # ETH0_IP6_ULA
+ populate_context_dir(
+ self.seed_dir,
+ {
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": MACADDR,
+ },
+ )
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertTrue("network-interfaces" in results)
+ self.assertTrue(
+ IP6_ULA + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
+
+ # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH
+ populate_context_dir(
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_MAC": MACADDR,
+ },
+ )
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertTrue("network-interfaces" in results)
+ self.assertTrue(
+ IP6_GLOBAL + "/" + IP6_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
+
+ # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH with empty string
+ populate_context_dir(
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_MAC": MACADDR,
+ },
+ )
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
+
+ self.assertTrue("network-interfaces" in results)
+ self.assertTrue(
+ IP6_GLOBAL + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
+
+ def test_find_candidates(self):
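+ """find_candidate_devs collects devices by label and filesystem
+ type."""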
+ def my_devs_with(criteria):
+ return {
+ "LABEL=CONTEXT": ["/dev/sdb"],
+ "LABEL=CDROM": ["/dev/sr0"],
+ "TYPE=iso9660": ["/dev/vdb"],
+ }.get(criteria, [])
+
+ orig_find_devs_with = util.find_devs_with
+ try:
+ util.find_devs_with = my_devs_with
+ self.assertEqual(
+ ["/dev/sdb", "/dev/sr0", "/dev/vdb"], ds.find_candidate_devs()
+ )
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+
+@mock.patch(DS_PATH + ".net.get_interfaces_by_mac", mock.Mock(return_value={}))
+class TestOpenNebulaNetwork(unittest.TestCase):
+
+ system_nics = ("eth0", "ens3")
+
+ def test_context_devname(self):
+ """Verify context_devname correctly returns mac and name."""
+ context = {
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH1_MAC": "02:00:0a:12:0f:0f",
+ }
+ expected = {
+ "02:00:0a:12:01:01": "ETH0",
+ "02:00:0a:12:0f:0f": "ETH1",
+ }
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ self.assertEqual(expected, net.context_devname)
+
+ def test_get_nameservers(self):
+ """
+ Verify get_nameservers('device') correctly returns DNS server addresses
+ and search domains.
+ """
+ context = {
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
+ }
+ expected = {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ }
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_nameservers("eth0")
+ self.assertEqual(expected, val)
+
+ def test_get_mtu(self):
+ """Verify get_mtu('device') correctly returns MTU size."""
+ context = {"ETH0_MTU": "1280"}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_mtu("eth0")
+ self.assertEqual("1280", val)
+
+ def test_get_ip(self):
+ """Verify get_ip('device') correctly returns IPv4 address."""
+ context = {"ETH0_IP": PUBLIC_IP}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_ip("eth0", MACADDR)
+ self.assertEqual(PUBLIC_IP, val)
+
+ def test_get_ip_emptystring(self):
+ """
+ Verify get_ip('device') correctly returns IPv4 address.
+ It returns the IP address derived from the MAC address if ETH0_IP
+ is an empty string.
+ """
+ context = {"ETH0_IP": ""}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_ip("eth0", MACADDR)
+ self.assertEqual(IP_BY_MACADDR, val)
+
+ def test_get_ip6(self):
+ """
+ Verify get_ip6('device') correctly returns IPv6 address.
+ In this case, the IPv6 address is given by ETH0_IP6.
+ """
+ context = {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_ULA": "",
+ }
+ expected = [IP6_GLOBAL]
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_ip6("eth0")
+ self.assertEqual(expected, val)
+
+ def test_get_ip6_ula(self):
+ """
+ Verify get_ip6('device') correctly returns IPv6 address.
+ In this case, the IPv6 address is given by ETH0_IP6_ULA.
+ """
+ context = {
+ "ETH0_IP6": "",
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
+ expected = [IP6_ULA]
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_ip6("eth0")
+ self.assertEqual(expected, val)
+
+ def test_get_ip6_dual(self):
+ """
+ Verify get_ip6('device') correctly returns IPv6 address.
+ In this case, IPv6 addresses are given by ETH0_IP6 and ETH0_IP6_ULA.
+ """
+ context = {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
+ expected = [IP6_GLOBAL, IP6_ULA]
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_ip6("eth0")
+ self.assertEqual(expected, val)
+
+ def test_get_ip6_prefix(self):
+ """
+ Verify get_ip6_prefix('device') correctly returns IPv6 prefix.
+ """
+ context = {"ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_ip6_prefix("eth0")
+ self.assertEqual(IP6_PREFIX, val)
+
+ def test_get_ip6_prefix_emptystring(self):
+ """
+ Verify get_ip6_prefix('device') correctly returns IPv6 prefix.
+ It returns the default value '64' if ETH0_IP6_PREFIX_LENGTH is an
+ empty string.
+ """
+ context = {"ETH0_IP6_PREFIX_LENGTH": ""}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_ip6_prefix("eth0")
+ self.assertEqual("64", val)
+
+ def test_get_gateway(self):
+ """
+ Verify get_gateway('device') correctly returns IPv4 default gateway
+ address.
+ """
+ context = {"ETH0_GATEWAY": "1.2.3.5"}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_gateway("eth0")
+ self.assertEqual("1.2.3.5", val)
+
+ def test_get_gateway6(self):
+ """
+ Verify get_gateway6('device') correctly returns IPv6 default gateway
+ address.
+ """
+ for k in ("GATEWAY6", "IP6_GATEWAY"):
+ context = {"ETH0_" + k: IP6_GW}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_gateway6("eth0")
+ self.assertEqual(IP6_GW, val)
+
+ def test_get_mask(self):
+ """
+ Verify get_mask('device') correctly returns IPv4 subnet mask.
+ """
+ context = {"ETH0_MASK": "255.255.0.0"}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_mask("eth0")
+ self.assertEqual("255.255.0.0", val)
+
+ def test_get_mask_emptystring(self):
+ """
+ Verify get_mask('device') correctly returns IPv4 subnet mask.
+ It returns the default value '255.255.255.0' if ETH0_MASK is an
+ empty string.
+ """
+ context = {"ETH0_MASK": ""}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_mask("eth0")
+ self.assertEqual("255.255.255.0", val)
+
+ def test_get_network(self):
+ """
+ Verify get_network('device') correctly returns IPv4 network address.
+ """
+ context = {"ETH0_NETWORK": "1.2.3.0"}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_network("eth0", MACADDR)
+ self.assertEqual("1.2.3.0", val)
+
+ def test_get_network_emptystring(self):
+ """
+ Verify get_network('device') correctly returns IPv4 network address.
+ It returns the network address derived from the MAC address if
+ ETH0_NETWORK is an empty string.
+ """
+ context = {"ETH0_NETWORK": ""}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_network("eth0", MACADDR)
+ self.assertEqual("10.18.1.0", val)
+
+ def test_get_field(self):
+ """
+ Verify get_field('device', 'name') returns *context* value.
+ """
+ context = {"ETH9_DUMMY": "DUMMY_VALUE"}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_field("eth9", "dummy")
+ self.assertEqual("DUMMY_VALUE", val)
+
+ def test_get_field_withdefaultvalue(self):
+ """
+ Verify get_field('device', 'name', 'default value') returns *context*
+ value.
+ """
+ context = {"ETH9_DUMMY": "DUMMY_VALUE"}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_field("eth9", "dummy", "DEFAULT_VALUE")
+ self.assertEqual("DUMMY_VALUE", val)
+
+ def test_get_field_withdefaultvalue_emptycontext(self):
+ """
+ Verify get_field('device', 'name', 'default value') returns *default*
+ value if context value is empty string.
+ """
+ context = {"ETH9_DUMMY": ""}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_field("eth9", "dummy", "DEFAULT_VALUE")
+ self.assertEqual("DEFAULT_VALUE", val)
+
+ def test_get_field_emptycontext(self):
+ """
+ Verify get_field('device', 'name') returns None if context value is
+ empty string.
+ """
+ context = {"ETH9_DUMMY": ""}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_field("eth9", "dummy")
+ self.assertIsNone(val)
+
+ def test_get_field_nonecontext(self):
+ """
+ Verify get_field('device', 'name') returns None if context value is
+ None.
+ """
+ context = {"ETH9_DUMMY": None}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_field("eth9", "dummy")
+ self.assertIsNone(val)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_gateway(self, m_get_phys_by_mac):
+ """Test rendering with/without IPv4 gateway"""
+ self.maxDiff = None
+ # empty ETH0_GATEWAY
+ context = {
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY": "",
+ }
+ for nic in self.system_nics:
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_GATEWAY
+ context = {
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY": "1.2.3.5",
+ }
+ for nic in self.system_nics:
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "gateway4": "1.2.3.5",
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_gateway6(self, m_get_phys_by_mac):
+ """Test rendering with/without IPv6 gateway"""
+ self.maxDiff = None
+ # empty ETH0_GATEWAY6
+ context = {
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY6": "",
+ }
+ for nic in self.system_nics:
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_GATEWAY6
+ context = {
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY6": IP6_GW,
+ }
+ for nic in self.system_nics:
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "gateway6": IP6_GW,
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_ipv6address(self, m_get_phys_by_mac):
+ """Test rendering with/without IPv6 address"""
+ self.maxDiff = None
+ # empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
+ context = {
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_IP6": "",
+ "ETH0_IP6_ULA": "",
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ }
+ for nic in self.system_nics:
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
+ context = {
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
+ for nic in self.system_nics:
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "match": {"macaddress": MACADDR},
+ "addresses": [
+ IP_BY_MACADDR + "/" + IP4_PREFIX,
+ IP6_GLOBAL + "/" + IP6_PREFIX,
+ IP6_ULA + "/" + IP6_PREFIX,
+ ],
+ }
+ },
+ }
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_dns(self, m_get_phys_by_mac):
+ """Test rendering with/without DNS server, search domain"""
+ self.maxDiff = None
+ # empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
+ context = {
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "DNS": "",
+ "ETH0_DNS": "",
+ "ETH0_SEARCH_DOMAIN": "",
+ }
+ for nic in self.system_nics:
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
+ context = {
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
+ }
+ for nic in self.system_nics:
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ },
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_mtu(self, m_get_phys_by_mac):
+ """Test rendering with/without MTU"""
+ self.maxDiff = None
+ # empty ETH0_MTU
+ context = {
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MTU": "",
+ }
+ for nic in self.system_nics:
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_MTU
+ context = {
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MTU": "1280",
+ }
+ for nic in self.system_nics:
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "mtu": "1280",
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_eth0(self, m_get_phys_by_mac):
+ for nic in self.system_nics:
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork({}, mock.Mock())
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
+
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_distro_passed_through(self, m_get_physical_nics_by_mac):
+ ds.OpenNebulaNetwork({}, mock.sentinel.distro)
+ self.assertEqual(
+ [mock.call(mock.sentinel.distro)],
+ m_get_physical_nics_by_mac.call_args_list,
+ )
+
+ def test_eth0_override(self):
+ self.maxDiff = None
+ context = {
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": "",
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_IP6": "",
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_IP6_ULA": "",
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "",
+ }
+ for nic in self.system_nics:
+ net = ds.OpenNebulaNetwork(
+ context, mock.Mock(), system_nics_by_mac={MACADDR: nic}
+ )
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/16"],
+ "gateway4": "1.2.3.5",
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"]
+ },
+ }
+ },
+ }
+
+ self.assertEqual(expected, net.gen_conf())
+
+ def test_eth0_v4v6_override(self):
+ self.maxDiff = None
+ context = {
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": IP6_GW,
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "1280",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
+ }
+ for nic in self.system_nics:
+ net = ds.OpenNebulaNetwork(
+ context, mock.Mock(), system_nics_by_mac={MACADDR: nic}
+ )
+
+ expected = {
+ "version": 2,
+ "ethernets": {
+ nic: {
+ "match": {"macaddress": MACADDR},
+ "addresses": [
+ IP_BY_MACADDR + "/16",
+ IP6_GLOBAL + "/" + IP6_PREFIX,
+ IP6_ULA + "/" + IP6_PREFIX,
+ ],
+ "gateway4": "1.2.3.5",
+ "gateway6": IP6_GW,
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ },
+ "mtu": "1280",
+ }
+ },
+ }
+
+ self.assertEqual(expected, net.gen_conf())
+
+ def test_multiple_nics(self):
+ """Test rendering multiple nics with names that differ from context."""
+ self.maxDiff = None
+ MAC_1 = "02:00:0a:12:01:01"
+ MAC_2 = "02:00:0a:12:01:02"
+ context = {
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": IP6_GW,
+ "ETH0_IP": "10.18.1.1",
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": MAC_2,
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "1280",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "example.com",
+ "ETH3_DNS": "10.3.1.2",
+ "ETH3_GATEWAY": "10.3.0.1",
+ "ETH3_GATEWAY6": "",
+ "ETH3_IP": "10.3.1.3",
+ "ETH3_IP6": "",
+ "ETH3_IP6_PREFIX_LENGTH": "",
+ "ETH3_IP6_ULA": "",
+ "ETH3_MAC": MAC_1,
+ "ETH3_MASK": "255.255.0.0",
+ "ETH3_MTU": "",
+ "ETH3_NETWORK": "10.3.0.0",
+ "ETH3_SEARCH_DOMAIN": "third.example.com third.example.org",
+ }
+ net = ds.OpenNebulaNetwork(
+ context,
+ mock.Mock(),
+ system_nics_by_mac={MAC_1: "enp0s25", MAC_2: "enp1s2"},
+ )
+
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "enp1s2": {
+ "match": {"macaddress": MAC_2},
+ "addresses": [
+ "10.18.1.1/16",
+ IP6_GLOBAL + "/64",
+ IP6_ULA + "/64",
+ ],
+ "gateway4": "1.2.3.5",
+ "gateway6": IP6_GW,
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com"],
+ },
+ "mtu": "1280",
+ },
+ "enp0s25": {
+ "match": {"macaddress": MAC_1},
+ "addresses": ["10.3.1.3/16"],
+ "gateway4": "10.3.0.1",
+ "nameservers": {
+ "addresses": ["10.3.1.2", "1.2.3.8"],
+ "search": ["third.example.com", "third.example.org"],
+ },
+ },
+ },
+ }
+
+ self.assertEqual(expected, net.gen_conf())
+
+
+class TestParseShellConfig:
+ @pytest.mark.allow_subp_for("bash")
+ def test_no_seconds(self):
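+ """Bash-internal variables like SECONDS are excluded from results."""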
+ cfg = "\n".join(["foo=bar", "SECONDS=2", "xx=foo"])
+ # we could test 'sleep 2', but that would make the test run slower.
+ ret = ds.parse_shell_config(cfg)
+ assert ret == {"foo": "bar", "xx": "foo"}
+
+
+class TestGetPhysicalNicsByMac:
+ @pytest.mark.parametrize(
+ "interfaces_by_mac,physical_devs,expected_return",
+ [
+ # No interfaces => empty return
+ ({}, [], {}),
+ # Only virtual interface => empty return
+ ({"mac1": "virtual0"}, [], {}),
+ # Only physical interface => it is returned
+ ({"mac2": "physical0"}, ["physical0"], {"mac2": "physical0"}),
+ # Combination of physical and virtual => only physical returned
+ (
+ {"mac3": "physical1", "mac4": "virtual1"},
+ ["physical1"],
+ {"mac3": "physical1"},
+ ),
+ ],
+ )
+ def test(self, interfaces_by_mac, physical_devs, expected_return):
+ distro = mock.Mock()
+ distro.networking.is_physical.side_effect = (
+ lambda devname: devname in physical_devs
+ )
+ with mock.patch(
+ DS_PATH + ".net.get_interfaces_by_mac",
+ return_value=interfaces_by_mac,
+ ):
+ assert expected_return == ds.get_physical_nics_by_mac(distro)
+
+
+def populate_context_dir(path, variables):
+ data = "# Context variables generated by OpenNebula\n"
+ for k, v in variables.items():
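+ # escape embedded single quotes (' -> '\'') so values survive
+ # the shell single-quoting applied below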
+ data += "%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))
+ populate_dir(path, {"context.sh": data})
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py
new file mode 100644
index 00000000..c111bbcd
--- /dev/null
+++ b/tests/unittests/sources/test_openstack.py
@@ -0,0 +1,788 @@
+# Copyright (C) 2014 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import json
+import re
+from io import StringIO
+from urllib.parse import urlparse
+
+import httpretty as hp
+
+from cloudinit import helpers, settings, util
+from cloudinit.sources import UNSET, BrokenMetadata
+from cloudinit.sources import DataSourceOpenStack as ds
+from cloudinit.sources import convert_vendordata
+from cloudinit.sources.helpers import openstack
+from tests.unittests import helpers as test_helpers
+
+BASE_URL = "http://169.254.169.254"
+PUBKEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n"
+EC2_META = {
+ "ami-id": "ami-00000001",
+ "ami-launch-index": "0",
+ "ami-manifest-path": "FIXME",
+ "hostname": "sm-foo-test.novalocal",
+ "instance-action": "none",
+ "instance-id": "i-00000001",
+ "instance-type": "m1.tiny",
+ "local-hostname": "sm-foo-test.novalocal",
+ "local-ipv4": "0.0.0.0",
+ "public-hostname": "sm-foo-test.novalocal",
+ "public-ipv4": "0.0.0.1",
+ "reservation-id": "r-iru5qm4m",
+}
+USER_DATA = b"#!/bin/sh\necho This is user data\n"
+VENDOR_DATA = {
+ "magic": "",
+}
+VENDOR_DATA2 = {"static": {}}
+OSTACK_META = {
+ "availability_zone": "nova",
+ "files": [
+ {"content_path": "/content/0000", "path": "/etc/foo.cfg"},
+ {"content_path": "/content/0001", "path": "/etc/bar/bar.cfg"},
+ ],
+ "hostname": "sm-foo-test.novalocal",
+ "meta": {"dsmode": "local", "my-meta": "my-value"},
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
+}
+CONTENT_0 = b"This is contents of /etc/foo.cfg\n"
+CONTENT_1 = b"# this is /etc/bar/bar.cfg\n"
+OS_FILES = {
+ "openstack/content/0000": CONTENT_0,
+ "openstack/content/0001": CONTENT_1,
+ "openstack/latest/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/latest/network_data.json": json.dumps(
+ {"links": [], "networks": [], "services": []}
+ ),
+ "openstack/latest/user_data": USER_DATA,
+ "openstack/latest/vendor_data.json": json.dumps(VENDOR_DATA),
+ "openstack/latest/vendor_data2.json": json.dumps(VENDOR_DATA2),
+}
+EC2_FILES = {
+ "latest/user-data": USER_DATA,
+}
+EC2_VERSIONS = [
+ "latest",
+]
+
+MOCK_PATH = "cloudinit.sources.DataSourceOpenStack."
+
+
+# TODO _register_uris should leverage test_ec2.register_mock_metaserver.
+def _register_uris(version, ec2_files, ec2_meta, os_files):
+ """Registers a set of url patterns into httpretty that will mimic the
+ same data returned by the openstack metadata service (and ec2 service)."""
+
+ def match_ec2_url(uri, headers):
+ path = uri.path.strip("/")
+ if len(path) == 0:
+ return (200, headers, "\n".join(EC2_VERSIONS))
+ path = uri.path.lstrip("/")
+ if path in ec2_files:
+ return (200, headers, ec2_files.get(path))
+ if path == "latest/meta-data/":
+ buf = StringIO()
+ for (k, v) in ec2_meta.items():
+ if isinstance(v, (list, tuple)):
+ buf.write("%s/" % (k))
+ else:
+ buf.write("%s" % (k))
+ buf.write("\n")
+ return (200, headers, buf.getvalue())
+ if path.startswith("latest/meta-data/"):
+ value = None
+ pieces = path.split("/")
+ if path.endswith("/"):
+ pieces = pieces[2:-1]
+ value = util.get_cfg_by_path(ec2_meta, pieces)
+ else:
+ pieces = pieces[2:]
+ value = util.get_cfg_by_path(ec2_meta, pieces)
+ if value is not None:
+ return (200, headers, str(value))
+ return (404, headers, "")
+
+ def match_os_uri(uri, headers):
+ path = uri.path.strip("/")
+ if path == "openstack":
+ return (200, headers, "\n".join([openstack.OS_LATEST]))
+ path = uri.path.lstrip("/")
+ if path in os_files:
+ return (200, headers, os_files.get(path))
+ return (404, headers, "")
+
+ def get_request_callback(method, uri, headers):
+ uri = urlparse(uri)
+ path = uri.path.lstrip("/").split("/")
+ if path[0] == "openstack":
+ return match_os_uri(uri, headers)
+ return match_ec2_url(uri, headers)
+
+ hp.register_uri(
+ hp.GET,
+ re.compile(r"http://169.254.169.254/.*"),
+ body=get_request_callback,
+ )
+
+
+def _read_metadata_service():
+ return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1)
+
+
+class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
+
+ with_logs = True
+ VERSION = "latest"
+
+ def setUp(self):
+ super(TestOpenStackDataSource, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_successful(self):
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ f = _read_metadata_service()
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertEqual(2, len(f["files"]))
+ self.assertEqual(USER_DATA, f.get("userdata"))
+ self.assertEqual(EC2_META, f.get("ec2-metadata"))
+ self.assertEqual(2, f.get("version"))
+ metadata = f["metadata"]
+ self.assertEqual("nova", metadata.get("availability_zone"))
+ self.assertEqual("sm-foo-test.novalocal", metadata.get("hostname"))
+ self.assertEqual(
+ "sm-foo-test.novalocal", metadata.get("local-hostname")
+ )
+ self.assertEqual("sm-foo-test", metadata.get("name"))
+ self.assertEqual(
+ "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("uuid")
+ )
+ self.assertEqual(
+ "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("instance-id")
+ )
+
+ def test_no_ec2(self):
+ _register_uris(self.VERSION, {}, {}, OS_FILES)
+ f = _read_metadata_service()
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertEqual(USER_DATA, f.get("userdata"))
+ self.assertEqual({}, f.get("ec2-metadata"))
+ self.assertEqual(2, f.get("version"))
+
+ def test_bad_metadata(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(openstack.NonReadable, _read_metadata_service)
+
+ def test_bad_uuid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ os_meta = copy.deepcopy(OSTACK_META)
+ os_meta.pop("uuid")
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = json.dumps(os_meta)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ def test_userdata_empty(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("user_data"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ f = _read_metadata_service()
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("userdata"))
+
+ def test_vendordata_empty(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data.json"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ f = _read_metadata_service()
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("vendordata"))
+
+ def test_vendordata2_empty(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data2.json"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ f = _read_metadata_service()
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("vendordata2"))
+
+ def test_vendordata_invalid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ def test_vendordata2_invalid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data2.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ def test_metadata_invalid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_datasource(self, m_dhcp):
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertTrue(found)
+ self.assertEqual(2, ds_os.version)
+ md = dict(ds_os.metadata)
+ md.pop("instance-id", None)
+ md.pop("local-hostname", None)
+ self.assertEqual(OSTACK_META, md)
+ self.assertEqual(EC2_META, ds_os.ec2_metadata)
+ self.assertEqual(USER_DATA, ds_os.userdata_raw)
+ self.assertEqual(2, len(ds_os.files))
+ self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure)
+ self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure)
+ self.assertIsNone(ds_os.vendordata_raw)
+ m_dhcp.assert_not_called()
+
+ @hp.activate
+ @test_helpers.mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_local_datasource(self, m_dhcp, m_net):
+ """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data."""
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ ds_os_local = ds.DataSourceOpenStackLocal(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os_local._fallback_interface = "eth9" # Monkey patch for dhcp
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.2.255",
+ }
+ ]
+
+ self.assertIsNone(ds_os_local.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os_local.get_data()
+ self.assertTrue(found)
+ self.assertEqual(2, ds_os_local.version)
+ md = dict(ds_os_local.metadata)
+ md.pop("instance-id", None)
+ md.pop("local-hostname", None)
+ self.assertEqual(OSTACK_META, md)
+ self.assertEqual(EC2_META, ds_os_local.ec2_metadata)
+ self.assertEqual(USER_DATA, ds_os_local.userdata_raw)
+ self.assertEqual(2, len(ds_os_local.files))
+ self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
+ self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure)
+ self.assertIsNone(ds_os_local.vendordata_raw)
+ m_dhcp.assert_called_with("eth9", None)
+
+ def test_bad_datasource_meta(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertFalse(found)
+ self.assertIsNone(ds_os.version)
+ self.assertIn(
+ "InvalidMetaDataException: Broken metadata address"
+ " http://169.254.169.25",
+ self.logs.getvalue(),
+ )
+
+ def test_no_datasource(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files.pop(k)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os.ds_cfg = {
+ "max_wait": 0,
+ "timeout": 0,
+ }
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertFalse(found)
+ self.assertIsNone(ds_os.version)
+
+ def test_network_config_disabled_by_datasource_config(self):
+ """The network_config can be disabled from datasource config."""
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os.ds_cfg = {"apply_network_config": False}
+ sample_json = {
+ "links": [{"ethernet_mac_address": "mymac"}],
+ "networks": [],
+ "services": [],
+ }
+ ds_os.network_json = sample_json # Ignore this content from metadata
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ self.assertIsNone(ds_os.network_config)
+ m_convert_json.assert_not_called()
+
+ def test_network_config_from_network_json(self):
+ """The datasource gets network_config from network_data.json."""
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
+ example_cfg = {"version": 1, "config": []}
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ sample_json = {
+ "links": [{"ethernet_mac_address": "mymac"}],
+ "networks": [],
+ "services": [],
+ }
+ ds_os.network_json = sample_json
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ m_convert_json.return_value = example_cfg
+ self.assertEqual(example_cfg, ds_os.network_config)
+ self.assertIn(
+ "network config provided via network_json", self.logs.getvalue()
+ )
+ m_convert_json.assert_called_with(sample_json, known_macs=None)
+
+ def test_network_config_cached(self):
+ """The datasource caches the network_config property."""
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
+ example_cfg = {"version": 1, "config": []}
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os._network_config = example_cfg
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ self.assertEqual(example_cfg, ds_os.network_config)
+ m_convert_json.assert_not_called()
+
+ def test_disabled_datasource(self):
+ os_files = copy.deepcopy(OS_FILES)
+ os_meta = copy.deepcopy(OSTACK_META)
+ os_meta["meta"] = {
+ "dsmode": "disabled",
+ }
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = json.dumps(os_meta)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os.ds_cfg = {
+ "max_wait": 0,
+ "timeout": 0,
+ }
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertFalse(found)
+ self.assertIsNone(ds_os.version)
+
+ @hp.activate
+ def test_wb__crawl_metadata_does_not_persist(self):
+ """_crawl_metadata returns current metadata and does not cache."""
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ crawled_data = ds_os._crawl_metadata()
+ self.assertEqual(UNSET, ds_os.ec2_metadata)
+ self.assertIsNone(ds_os.userdata_raw)
+ self.assertEqual(0, len(ds_os.files))
+ self.assertIsNone(ds_os.vendordata_raw)
+ self.assertEqual(
+ [
+ "dsmode",
+ "ec2-metadata",
+ "files",
+ "metadata",
+ "networkdata",
+ "userdata",
+ "vendordata",
+ "vendordata2",
+ "version",
+ ],
+ sorted(crawled_data.keys()),
+ )
+ self.assertEqual("local", crawled_data["dsmode"])
+ self.assertEqual(EC2_META, crawled_data["ec2-metadata"])
+ self.assertEqual(2, len(crawled_data["files"]))
+ md = copy.deepcopy(crawled_data["metadata"])
+ md.pop("instance-id")
+ md.pop("local-hostname")
+ self.assertEqual(OSTACK_META, md)
+ self.assertEqual(
+ json.loads(OS_FILES["openstack/latest/network_data.json"]),
+ crawled_data["networkdata"],
+ )
+ self.assertEqual(USER_DATA, crawled_data["userdata"])
+ self.assertEqual(VENDOR_DATA, crawled_data["vendordata"])
+ self.assertEqual(VENDOR_DATA2, crawled_data["vendordata2"])
+ self.assertEqual(2, crawled_data["version"])
+
+
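+# convert_vendordata contract, as exercised below: strings and lists pass
+# through unchanged; a dict must carry its payload under the "cloud-init"
+# key (string or list), and a dict-valued "cloud-init" raises ValueError.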
+class TestVendorDataLoading(test_helpers.TestCase):
+ def cvj(self, data):
+ return convert_vendordata(data)
+
+ def test_vd_load_none(self):
+        # non-existent vendor-data should return None
+ self.assertIsNone(self.cvj(None))
+
+ def test_vd_load_string(self):
+ self.assertEqual(self.cvj("foobar"), "foobar")
+
+ def test_vd_load_list(self):
+ data = [{"foo": "bar"}, "mystring", list(["another", "list"])]
+ self.assertEqual(self.cvj(data), data)
+
+ def test_vd_load_dict_no_ci(self):
+ self.assertIsNone(self.cvj({"foo": "bar"}))
+
+ def test_vd_load_dict_ci_dict(self):
+ self.assertRaises(
+ ValueError, self.cvj, {"foo": "bar", "cloud-init": {"x": 1}}
+ )
+
+ def test_vd_load_dict_ci_string(self):
+ data = {"foo": "bar", "cloud-init": "VENDOR_DATA"}
+ self.assertEqual(self.cvj(data), data["cloud-init"])
+
+ def test_vd_load_dict_ci_list(self):
+ data = {"foo": "bar", "cloud-init": ["VD_1", "VD_2"]}
+ self.assertEqual(self.cvj(data), data["cloud-init"])
+
+
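+# As exercised by the tests below, detect_openstack() returns True off x86
+# (DMI is inconclusive there), then checks the DMI system-product-name and
+# chassis-asset-tag values, and finally falls back to looking for a nova
+# product_name in /proc/1/environ.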
+@test_helpers.mock.patch(MOCK_PATH + "util.is_x86")
+class TestDetectOpenStack(test_helpers.CiTestCase):
+ def test_detect_openstack_non_intel_x86(self, m_is_x86):
+ """Return True on non-intel platforms because dmi isn't conclusive."""
+ m_is_x86.return_value = False
+ self.assertTrue(
+ ds.detect_openstack(), "Expected detect_openstack == True"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env")
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_not_detect_openstack_intel_x86_ec2(
+ self, m_dmi, m_proc_env, m_is_x86
+ ):
+ """Return False on EC2 platforms."""
+ m_is_x86.return_value = True
+ # No product_name in proc/1/environ
+ m_proc_env.return_value = {"HOME": "/"}
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish' on EC2
+ if dmi_key == "chassis-asset-tag":
+ return "" # Empty string on EC2
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertFalse(
+ ds.detect_openstack(), "Expected detect_openstack == False on EC2"
+ )
+ m_proc_env.assert_called_with(1)
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_intel_product_name_compute(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack compute and nova instances."""
+ m_is_x86.return_value = True
+ openstack_product_names = ["OpenStack Nova", "OpenStack Compute"]
+
+ for product_name in openstack_product_names:
+ m_dmi.return_value = product_name
+ self.assertTrue(
+ ds.detect_openstack(), "Failed to detect_openstack"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_opentelekomcloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack reporting OpenTelekomCloud asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish' on OpenTelekomCloud
+ if dmi_key == "chassis-asset-tag":
+ return "OpenTelekomCloud"
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on OpenTelekomCloud",
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_sapccloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack reporting SAP CCloud VM asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "VMware Virtual Platform" # SAP CCloud uses VMware
+ if dmi_key == "chassis-asset-tag":
+ return "SAP CCloud VM"
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on SAP CCloud VM",
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_oraclecloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack reporting Oracle cloud asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "Standard PC (i440FX + PIIX, 1996)" # No match
+ if dmi_key == "chassis-asset-tag":
+ return "OracleCloud.com"
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(accept_oracle=True),
+ "Expected detect_openstack == True on OracleCloud.com",
+ )
+ self.assertFalse(
+ ds.detect_openstack(accept_oracle=False),
+ "Expected detect_openstack == False.",
+ )
+
+ def _test_detect_openstack_nova_compute_chassis_asset_tag(
+ self, m_dmi, m_is_x86, chassis_tag
+ ):
+ """Return True on OpenStack reporting generic asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "Generic OpenStack Platform"
+ if dmi_key == "chassis-asset-tag":
+ return chassis_tag
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on Generic OpenStack Platform",
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi, m_is_x86):
+ self._test_detect_openstack_nova_compute_chassis_asset_tag(
+ m_dmi, m_is_x86, "OpenStack Nova"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi, m_is_x86):
+ self._test_detect_openstack_nova_compute_chassis_asset_tag(
+ m_dmi, m_is_x86, "OpenStack Compute"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env")
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_by_proc_1_environ(
+ self, m_dmi, m_proc_env, m_is_x86
+ ):
+ """Return True when nova product_name specified in /proc/1/environ."""
+ m_is_x86.return_value = True
+ # Nova product_name in proc/1/environ
+ m_proc_env.return_value = {
+ "HOME": "/",
+ "product_name": "OpenStack Nova",
+ }
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish'
+ if dmi_key == "chassis-asset-tag":
+ return "" # Nothin 'openstackish'
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on OpenTelekomCloud",
+ )
+ m_proc_env.assert_called_with(1)
+
+
+class TestMetadataReader(test_helpers.HttprettyTestCase):
+ """Test the MetadataReader."""
+
+ burl = "http://169.254.169.254/"
+ md_base = {
+ "availability_zone": "myaz1",
+ "hostname": "sm-foo-test.novalocal",
+ "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}],
+ "launch_index": 0,
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "project_id": "6a103f813b774b9fb15a4fcd36e1c056",
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
+ }
+
+ def register(self, path, body=None, status=200):
+ content = body if not isinstance(body, str) else body.encode("utf-8")
+ hp.register_uri(
+ hp.GET, self.burl + "openstack" + path, status=status, body=content
+ )
+
+ def register_versions(self, versions):
+ self.register("", "\n".join(versions))
+ self.register("/", "\n".join(versions))
+
+ def register_version(self, version, data):
+ content = "\n".join(sorted(data.keys()))
+ self.register(version, content)
+ self.register(version + "/", content)
+ for path, content in data.items():
+            self.register("/%s/%s" % (version, path), content)
+ if "user_data" not in data:
+ self.register("/%s/user_data" % version, "nodata", status=404)
+
+ def test__find_working_version(self):
+ """Test a working version ignores unsupported."""
+ unsup = "2016-11-09"
+ self.register_versions(
+ [
+ openstack.OS_FOLSOM,
+ openstack.OS_LIBERTY,
+ unsup,
+ openstack.OS_LATEST,
+ ]
+ )
+ self.assertEqual(
+ openstack.OS_LIBERTY,
+ openstack.MetadataReader(self.burl)._find_working_version(),
+ )
+
+ def test__find_working_version_uses_latest(self):
+ """'latest' should be used if no supported versions."""
+ unsup1, unsup2 = ("2016-11-09", "2017-06-06")
+ self.register_versions([unsup1, unsup2, openstack.OS_LATEST])
+ self.assertEqual(
+ openstack.OS_LATEST,
+ openstack.MetadataReader(self.burl)._find_working_version(),
+ )
+
+ def test_read_v2_os_ocata(self):
+ """Validate return value of read_v2 for os_ocata data."""
+ md = copy.deepcopy(self.md_base)
+ md["devices"] = []
+ network_data = {"links": [], "networks": [], "services": []}
+ vendor_data = {}
+ vendor_data2 = {"static": {}}
+
+ data = {
+ "meta_data.json": json.dumps(md),
+ "network_data.json": json.dumps(network_data),
+ "vendor_data.json": json.dumps(vendor_data),
+ "vendor_data2.json": json.dumps(vendor_data2),
+ }
+
+ self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST])
+ self.register_version(openstack.OS_OCATA, data)
+
+ mock_read_ec2 = test_helpers.mock.MagicMock(
+ return_value={"instance-id": "unused-ec2"}
+ )
+ expected_md = copy.deepcopy(md)
+ expected_md.update(
+ {"instance-id": md["uuid"], "local-hostname": md["hostname"]}
+ )
+ expected = {
+ "userdata": "", # Annoying, no user-data results in empty string.
+ "version": 2,
+ "metadata": expected_md,
+ "vendordata": vendor_data,
+ "vendordata2": vendor_data2,
+ "networkdata": network_data,
+ "ec2-metadata": mock_read_ec2.return_value,
+ "files": {},
+ }
+ reader = openstack.MetadataReader(self.burl)
+ reader._read_ec2_metadata = mock_read_ec2
+ self.assertEqual(expected, reader.read_v2())
+ self.assertEqual(1, mock_read_ec2.call_count)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py
new file mode 100644
index 00000000..b3e6f10c
--- /dev/null
+++ b/tests/unittests/sources/test_oracle.py
@@ -0,0 +1,933 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import copy
+import json
+from contextlib import ExitStack
+from unittest import mock
+
+import pytest
+
+from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources import NetworkConfigSource
+from cloudinit.sources.DataSourceOracle import OpcMetadata
+from cloudinit.url_helper import UrlError
+from tests.unittests import helpers as test_helpers
+
+DS_PATH = "cloudinit.sources.DataSourceOracle"
+
+# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Bare Metal
+# machine with a secondary VNIC attached (vnicId truncated for line length)
+# with a secondary VNIC attached (vnicId truncated for Python line length)
+OPC_BM_SECONDARY_VNIC_RESPONSE = """\
+[ {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtyvcucqkhdqmgjszebxe4hrb!!TRUNCATED||",
+ "privateIp" : "10.0.0.8",
+ "vlanTag" : 0,
+ "macAddr" : "90:e2:ba:d4:f1:68",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24",
+ "nicIndex" : 0
+}, {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtfmkxjdy2sqidndiwrsg63zf!!TRUNCATED||",
+ "privateIp" : "10.0.4.5",
+ "vlanTag" : 1,
+ "macAddr" : "02:00:17:05:CF:51",
+ "virtualRouterIp" : "10.0.4.1",
+ "subnetCidrBlock" : "10.0.4.0/24",
+ "nicIndex" : 0
+} ]"""
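+# Note: the "nicIndex" key above only appears on bare metal instances; the
+# datasource keys on its presence to skip secondary VNIC configuration there
+# (see test_bare_metal_machine_skipped below).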
+
+# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Virtual Machine
+# with a secondary VNIC attached
+OPC_VM_SECONDARY_VNIC_RESPONSE = """\
+[ {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtch72z5pd76cc2636qeqh7z_truncated",
+ "privateIp" : "10.0.0.230",
+ "vlanTag" : 1039,
+ "macAddr" : "02:00:17:05:D1:DB",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24"
+}, {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljt4iew3gwmvrwrhhf3bp5drj_truncated",
+ "privateIp" : "10.0.0.231",
+ "vlanTag" : 1041,
+ "macAddr" : "00:00:17:02:2B:B1",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24"
+} ]"""
+
+
+# Fetched with `curl http://169.254.169.254/opc/v1/instance/` (and then
+# truncated for line length)
+OPC_V2_METADATA = """\
+{
+ "availabilityDomain" : "qIZq:PHX-AD-1",
+ "faultDomain" : "FAULT-DOMAIN-2",
+ "compartmentId" : "ocid1.tenancy.oc1..aaaaaaaao7f7cccogqrg5emjxkxmTRUNCATED",
+ "displayName" : "instance-20200320-1400",
+ "hostname" : "instance-20200320-1400",
+ "id" : "ocid1.instance.oc1.phx.anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
+ "image" : "ocid1.image.oc1.phx.aaaaaaaagmkn4gdhvvx24kiahh2b2qchsicTRUNCATED",
+ "metadata" : {
+ "ssh_authorized_keys" : "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
+ "user_data" : "IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"
+ },
+ "region" : "phx",
+ "canonicalRegionName" : "us-phoenix-1",
+ "ociAdName" : "phx-ad-3",
+ "shape" : "VM.Standard2.1",
+ "state" : "Running",
+ "timeCreated" : 1584727285318,
+ "agentConfig" : {
+ "monitoringDisabled" : true,
+ "managementDisabled" : true
+ }
+}"""
+
+# Just a small, meaningless change to differentiate the two metadata payloads
+OPC_V1_METADATA = OPC_V2_METADATA.replace("ocid1.instance", "ocid2.instance")
+
+
+@pytest.fixture
+def metadata_version():
+ return 2
+
+
+@pytest.fixture
+def oracle_ds(request, fixture_utils, paths, metadata_version):
+ """
+ Return an instantiated DataSourceOracle.
+
+ This also performs the mocking required for the default test case:
+ * ``_read_system_uuid`` returns something,
+ * ``_is_platform_viable`` returns True,
+ * ``_is_iscsi_root`` returns True (the simpler code path),
+    * ``read_opc_metadata`` returns parsed ``OPC_V2_METADATA``
+
+ (This uses the paths fixture for the required helpers.Paths object, and the
+ fixture_utils fixture for fetching markers.)
+ """
+ sys_cfg = fixture_utils.closest_marker_first_arg_or(
+ request, "ds_sys_cfg", mock.MagicMock()
+ )
+ metadata = OpcMetadata(metadata_version, json.loads(OPC_V2_METADATA), None)
+ with mock.patch(DS_PATH + "._read_system_uuid", return_value="someuuid"):
+ with mock.patch(DS_PATH + "._is_platform_viable", return_value=True):
+ with mock.patch(DS_PATH + "._is_iscsi_root", return_value=True):
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ return_value=metadata,
+ ):
+ yield oracle.DataSourceOracle(
+ sys_cfg=sys_cfg,
+ distro=mock.Mock(),
+ paths=paths,
+ )
+
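+# Tests override the default sys_cfg via a marker, e.g.:
+#     @pytest.mark.ds_sys_cfg({"datasource": {"Oracle": {...}}})
+# fixture_utils pulls the closest marker's first argument into the oracle_ds
+# fixture above.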
+
+class TestDataSourceOracle:
+ def test_platform_info(self, oracle_ds):
+ assert "oracle" == oracle_ds.cloud_name
+ assert "oracle" == oracle_ds.platform_type
+
+ def test_subplatform_before_fetch(self, oracle_ds):
+ assert "unknown" == oracle_ds.subplatform
+
+ def test_platform_info_after_fetch(self, oracle_ds):
+ oracle_ds._get_data()
+ assert (
+ "metadata (http://169.254.169.254/opc/v2/)"
+ == oracle_ds.subplatform
+ )
+
+ @pytest.mark.parametrize("metadata_version", [1])
+ def test_v1_platform_info_after_fetch(self, oracle_ds):
+ oracle_ds._get_data()
+ assert (
+ "metadata (http://169.254.169.254/opc/v1/)"
+ == oracle_ds.subplatform
+ )
+
+ def test_secondary_nics_disabled_by_default(self, oracle_ds):
+ assert not oracle_ds.ds_cfg["configure_secondary_nics"]
+
+ @pytest.mark.ds_sys_cfg(
+ {"datasource": {"Oracle": {"configure_secondary_nics": True}}}
+ )
+ def test_sys_cfg_can_enable_configure_secondary_nics(self, oracle_ds):
+ assert oracle_ds.ds_cfg["configure_secondary_nics"]
+
+
+class TestIsPlatformViable(test_helpers.CiTestCase):
+ @mock.patch(
+ DS_PATH + ".dmi.read_dmi_data", return_value=oracle.CHASSIS_ASSET_TAG
+ )
+ def test_expected_viable(self, m_read_dmi_data):
+ """System with known chassis tag is viable."""
+ self.assertTrue(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
+
+ @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value=None)
+ def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data):
+ """System without known chassis tag is not viable."""
+ self.assertFalse(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
+
+ @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value="LetsGoCubs")
+ def test_expected_not_viable_other(self, m_read_dmi_data):
+ """System with unnown chassis tag is not viable."""
+ self.assertFalse(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestNetworkConfigFromOpcImds:
+ def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds):
+ oracle_ds._vnics_data = [{}]
+        # We test this by using a non-dict to ensure that no dict
+        # operations are used; a failure would surface as an exception
+ oracle_ds._network_config = object()
+ oracle_ds._add_network_config_from_opc_imds()
+
+ def test_bare_metal_machine_skipped(self, oracle_ds, caplog):
+ # nicIndex in the first entry indicates a bare metal machine
+ oracle_ds._vnics_data = json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)
+ # We test this by using a non-dict to ensure that no dict
+ # operations are used
+ oracle_ds._network_config = object()
+ oracle_ds._add_network_config_from_opc_imds()
+ assert "bare metal machine" in caplog.text
+
+ def test_missing_mac_skipped(self, oracle_ds, caplog):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+
+ oracle_ds._network_config = {
+ "version": 1,
+ "config": [{"primary": "nic"}],
+ }
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
+ oracle_ds._add_network_config_from_opc_imds()
+
+ assert 1 == len(oracle_ds.network_config["config"])
+ assert (
+ "Interface with MAC 00:00:17:02:2b:b1 not found; skipping"
+ in caplog.text
+ )
+
+ def test_missing_mac_skipped_v2(self, oracle_ds, caplog):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+
+ oracle_ds._network_config = {
+ "version": 2,
+ "ethernets": {"primary": {"nic": {}}},
+ }
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
+ oracle_ds._add_network_config_from_opc_imds()
+
+ assert 1 == len(oracle_ds.network_config["ethernets"])
+ assert (
+ "Interface with MAC 00:00:17:02:2b:b1 not found; skipping"
+ in caplog.text
+ )
+
+ def test_secondary_nic(self, oracle_ds):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ oracle_ds._network_config = {
+ "version": 1,
+ "config": [{"primary": "nic"}],
+ }
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ with mock.patch(
+ DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name},
+ ):
+ oracle_ds._add_network_config_from_opc_imds()
+
+ # The input is mutated
+ assert 2 == len(oracle_ds.network_config["config"])
+
+ secondary_nic_cfg = oracle_ds.network_config["config"][1]
+ assert nic_name == secondary_nic_cfg["name"]
+ assert "physical" == secondary_nic_cfg["type"]
+ assert mac_addr == secondary_nic_cfg["mac_address"]
+ assert 9000 == secondary_nic_cfg["mtu"]
+
+ assert 1 == len(secondary_nic_cfg["subnets"])
+ subnet_cfg = secondary_nic_cfg["subnets"][0]
+ # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
+ assert "10.0.0.231" == subnet_cfg["address"]
+
+ def test_secondary_nic_v2(self, oracle_ds):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ oracle_ds._network_config = {
+ "version": 2,
+ "ethernets": {"primary": {"nic": {}}},
+ }
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ with mock.patch(
+ DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name},
+ ):
+ oracle_ds._add_network_config_from_opc_imds()
+
+ # The input is mutated
+ assert 2 == len(oracle_ds.network_config["ethernets"])
+
+ secondary_nic_cfg = oracle_ds.network_config["ethernets"]["ens3"]
+ assert secondary_nic_cfg["dhcp4"] is False
+ assert secondary_nic_cfg["dhcp6"] is False
+ assert mac_addr == secondary_nic_cfg["match"]["macaddress"]
+ assert 9000 == secondary_nic_cfg["mtu"]
+
+ assert 1 == len(secondary_nic_cfg["addresses"])
+ # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
+ assert "10.0.0.231" == secondary_nic_cfg["addresses"][0]
+
+
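+# A netfail master shares its MAC address with its failover devices, so
+# matching by MAC is ambiguous; the tests below verify that
+# _ensure_netfailover_safe() drops the mac_address (v1) / macaddress match
+# (v2) from masters and matches by interface name instead.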
+class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
+ def setUp(self):
+ super(TestNetworkConfigFiltersNetFailover, self).setUp()
+ self.add_patch(
+ DS_PATH + ".get_interfaces_by_mac", "m_get_interfaces_by_mac"
+ )
+ self.add_patch(DS_PATH + ".is_netfail_master", "m_netfail_master")
+
+ def test_ignore_bogus_network_config(self):
+ netcfg = {"something": "here"}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+
+ def test_ignore_network_config_unknown_versions(self):
+ netcfg = {"something": "here", "version": 3}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+
+ def test_checks_v1_type_physical_interfaces(self):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": nic_name,
+ "mac_address": mac_addr,
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+ passed_netcfg = copy.copy(netcfg)
+ self.m_netfail_master.return_value = False
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(
+ [mock.call(nic_name)], self.m_netfail_master.call_args_list
+ )
+
+ def test_checks_v1_skips_non_phys_interfaces(self):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "bond0"
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "bond",
+ "name": nic_name,
+ "mac_address": mac_addr,
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(0, self.m_netfail_master.call_count)
+
+ def test_removes_master_mac_property_v1(self):
+ nic_master, mac_master = "ens3", self.random_string()
+ nic_other, mac_other = "ens7", self.random_string()
+ nic_extra, mac_extra = "enp0s1f2", self.random_string()
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_master: nic_master,
+ mac_other: nic_other,
+ mac_extra: nic_extra,
+ }
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": nic_master,
+ "mac_address": mac_master,
+ },
+ {
+ "type": "physical",
+ "name": nic_other,
+ "mac_address": mac_other,
+ },
+ {
+ "type": "physical",
+ "name": nic_extra,
+ "mac_address": mac_extra,
+ },
+ ],
+ }
+
+        def _is_netfail_master(iface):
+            return iface == "ens3"
+
+ self.m_netfail_master.side_effect = _is_netfail_master
+ expected_cfg = {
+ "version": 1,
+ "config": [
+ {"type": "physical", "name": nic_master},
+ {
+ "type": "physical",
+ "name": nic_other,
+ "mac_address": mac_other,
+ },
+ {
+ "type": "physical",
+ "name": nic_extra,
+ "mac_address": mac_extra,
+ },
+ ],
+ }
+ oracle._ensure_netfailover_safe(netcfg)
+ self.assertEqual(expected_cfg, netcfg)
+
+ def test_checks_v2_type_ethernet_interfaces(self):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {
+ "version": 2,
+ "ethernets": {
+ nic_name: {
+ "dhcp4": True,
+ "critical": True,
+ "set-name": nic_name,
+ "match": {"macaddress": mac_addr},
+ }
+ },
+ }
+ passed_netcfg = copy.copy(netcfg)
+ self.m_netfail_master.return_value = False
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(
+ [mock.call(nic_name)], self.m_netfail_master.call_args_list
+ )
+
+ def test_skips_v2_non_ethernet_interfaces(self):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "wlps0"
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {
+ "version": 2,
+ "wifis": {
+ nic_name: {
+ "dhcp4": True,
+ "critical": True,
+ "set-name": nic_name,
+ "match": {"macaddress": mac_addr},
+ }
+ },
+ }
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(0, self.m_netfail_master.call_count)
+
+ def test_removes_master_mac_property_v2(self):
+ nic_master, mac_master = "ens3", self.random_string()
+ nic_other, mac_other = "ens7", self.random_string()
+ nic_extra, mac_extra = "enp0s1f2", self.random_string()
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_master: nic_master,
+ mac_other: nic_other,
+ mac_extra: nic_extra,
+ }
+ netcfg = {
+ "version": 2,
+ "ethernets": {
+ nic_extra: {
+ "dhcp4": True,
+ "set-name": nic_extra,
+ "match": {"macaddress": mac_extra},
+ },
+ nic_other: {
+ "dhcp4": True,
+ "set-name": nic_other,
+ "match": {"macaddress": mac_other},
+ },
+ nic_master: {
+ "dhcp4": True,
+ "set-name": nic_master,
+ "match": {"macaddress": mac_master},
+ },
+ },
+ }
+
+        def _is_netfail_master(iface):
+            return iface == "ens3"
+
+ self.m_netfail_master.side_effect = _is_netfail_master
+
+ expected_cfg = {
+ "version": 2,
+ "ethernets": {
+ nic_master: {"dhcp4": True, "match": {"name": nic_master}},
+ nic_extra: {
+ "dhcp4": True,
+ "set-name": nic_extra,
+ "match": {"macaddress": mac_extra},
+ },
+ nic_other: {
+ "dhcp4": True,
+ "set-name": nic_other,
+ "match": {"macaddress": mac_other},
+ },
+ },
+ }
+ oracle._ensure_netfailover_safe(netcfg)
+ self.assertEqual(expected_cfg, netcfg)
+
+
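+# The v2 IMDS handlers below assert that the "Authorization: Bearer Oracle"
+# header is sent; _mock_no_v2_urls 404s the v2 endpoint so readers must fall
+# back to the v1 endpoints.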
+def _mock_v2_urls(httpretty):
+ def instance_callback(request, uri, response_headers):
+ assert request.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response_headers, OPC_V2_METADATA]
+
+ def vnics_callback(request, uri, response_headers):
+ assert request.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response_headers, OPC_BM_SECONDARY_VNIC_RESPONSE]
+
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ body=instance_callback,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/vnics/",
+ body=vnics_callback,
+ )
+
+
+def _mock_no_v2_urls(httpretty):
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ status=404,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/instance/",
+ body=OPC_V1_METADATA,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/vnics/",
+ body=OPC_BM_SECONDARY_VNIC_RESPONSE,
+ )
+
+
+class TestReadOpcMetadata:
+ # See https://docs.pytest.org/en/stable/example
+ # /parametrize.html#parametrizing-conditional-raising
+ does_not_raise = ExitStack
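+    # ExitStack() acts as a no-op context manager here, standing in for
+    # pytest.raises(...) in the parametrized cases where no exception is
+    # expected.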
+
+ @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+ @pytest.mark.parametrize(
+ "version,setup_urls,instance_data,fetch_vnics,vnics_data",
+ [
+ (
+ 2,
+ _mock_v2_urls,
+ json.loads(OPC_V2_METADATA),
+ True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE),
+ ),
+ (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None),
+ (
+ 1,
+ _mock_no_v2_urls,
+ json.loads(OPC_V1_METADATA),
+ True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE),
+ ),
+ (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None),
+ ],
+ )
+ def test_metadata_returned(
+ self,
+ version,
+ setup_urls,
+ instance_data,
+ fetch_vnics,
+ vnics_data,
+ httpretty,
+ ):
+ setup_urls(httpretty)
+ metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics)
+
+ assert version == metadata.version
+ assert instance_data == metadata.instance_data
+ assert vnics_data == metadata.vnics_data
+
+ # No need to actually wait between retries in the tests
+ @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+ @pytest.mark.parametrize(
+ "v2_failure_count,v1_failure_count,expected_body,expectation",
+ [
+ (1, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+ (2, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+ (3, 0, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 1, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 3, None, pytest.raises(UrlError)),
+ ],
+ )
+ def test_retries(
+ self,
+ v2_failure_count,
+ v1_failure_count,
+ expected_body,
+ expectation,
+ httpretty,
+ ):
+ v2_responses = [httpretty.Response("", status=404)] * v2_failure_count
+ v2_responses.append(httpretty.Response(OPC_V2_METADATA))
+ v1_responses = [httpretty.Response("", status=404)] * v1_failure_count
+ v1_responses.append(httpretty.Response(OPC_V1_METADATA))
+
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/instance/",
+ responses=v1_responses,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ responses=v2_responses,
+ )
+ with expectation:
+ assert expected_body == oracle.read_opc_metadata().instance_data
+
+
+class TestCommon_GetDataBehaviour:
+ """This test class tests behaviour common to iSCSI and non-iSCSI root.
+
+ It defines a fixture, parameterized_oracle_ds, which is used in all the
+ tests herein to test that the commonly expected behaviour is the same with
+ iSCSI root and without.
+
+    (As non-iSCSI root behaviour is a superset of iSCSI root behaviour, this
+    class implicitly also tests all iSCSI root behaviour, so there is no
+    separate class for that case.)
+ """
+
+ @pytest.fixture(params=[True, False])
+ def parameterized_oracle_ds(self, request, oracle_ds):
+ """oracle_ds parameterized for iSCSI and non-iSCSI root respectively"""
+ is_iscsi_root = request.param
+ with ExitStack() as stack:
+ stack.enter_context(
+ mock.patch(
+ DS_PATH + "._is_iscsi_root", return_value=is_iscsi_root
+ )
+ )
+ if not is_iscsi_root:
+ stack.enter_context(
+ mock.patch(DS_PATH + ".net.find_fallback_nic")
+ )
+ stack.enter_context(
+ mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
+ )
+ yield oracle_ds
+
+ @mock.patch(
+ DS_PATH + "._is_platform_viable", mock.Mock(return_value=False)
+ )
+ def test_false_if_platform_not_viable(
+ self,
+ parameterized_oracle_ds,
+ ):
+ assert not parameterized_oracle_ds._get_data()
+
+ @pytest.mark.parametrize(
+ "keyname,expected_value",
+ (
+ ("availability-zone", "phx-ad-3"),
+ ("launch-index", 0),
+ ("local-hostname", "instance-20200320-1400"),
+ (
+ "instance-id",
+ "ocid1.instance.oc1.phx"
+ ".anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
+ ),
+ ("name", "instance-20200320-1400"),
+ (
+ "public_keys",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
+ ),
+ ),
+ )
+ def test_metadata_keys_set_correctly(
+ self,
+ keyname,
+ expected_value,
+ parameterized_oracle_ds,
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert expected_value == parameterized_oracle_ds.metadata[keyname]
+
+ @pytest.mark.parametrize(
+ "attribute_name,expected_value",
+ [
+ ("_crawled_metadata", json.loads(OPC_V2_METADATA)),
+ (
+ "userdata_raw",
+ base64.b64decode(b"IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"),
+ ),
+ ("system_uuid", "my-test-uuid"),
+ ],
+ )
+ @mock.patch(
+ DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid")
+ )
+ def test_attributes_set_correctly(
+ self,
+ attribute_name,
+ expected_value,
+ parameterized_oracle_ds,
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert expected_value == getattr(
+ parameterized_oracle_ds, attribute_name
+ )
+
+ @pytest.mark.parametrize(
+ "ssh_keys,expected_value",
+ [
+ # No SSH keys in metadata => no keys detected
+ (None, []),
+ # Empty SSH keys in metadata => no keys detected
+ ("", []),
+ # Single SSH key in metadata => single key detected
+ ("ssh-rsa ... test@test", ["ssh-rsa ... test@test"]),
+ # Multiple SSH keys in metadata => multiple keys detected
+ (
+ "ssh-rsa ... test@test\nssh-rsa ... test2@test2",
+ ["ssh-rsa ... test@test", "ssh-rsa ... test2@test2"],
+ ),
+ ],
+ )
+ def test_public_keys_handled_correctly(
+ self, ssh_keys, expected_value, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ if ssh_keys is None:
+ del instance_data["metadata"]["ssh_authorized_keys"]
+ else:
+ instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert (
+ expected_value == parameterized_oracle_ds.get_public_ssh_keys()
+ )
+
+ def test_missing_user_data_handled_gracefully(
+ self, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ del instance_data["metadata"]["user_data"]
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+
+ assert parameterized_oracle_ds.userdata_raw is None
+
+ def test_missing_metadata_handled_gracefully(
+ self, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ del instance_data["metadata"]
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+
+ assert parameterized_oracle_ds.userdata_raw is None
+ assert [] == parameterized_oracle_ds.get_public_ssh_keys()
+
+
+@mock.patch(DS_PATH + "._is_iscsi_root", lambda: False)
+class TestNonIscsiRoot_GetDataBehaviour:
+ @mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
+ @mock.patch(DS_PATH + ".net.find_fallback_nic")
+ def test_read_opc_metadata_called_with_ephemeral_dhcp(
+ self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds
+ ):
+ in_context_manager = False
+
+ def enter_context_manager():
+ nonlocal in_context_manager
+ in_context_manager = True
+
+ def exit_context_manager(*args):
+ nonlocal in_context_manager
+ in_context_manager = False
+
+ m_EphemeralDHCPv4.return_value.__enter__.side_effect = (
+ enter_context_manager
+ )
+ m_EphemeralDHCPv4.return_value.__exit__.side_effect = (
+ exit_context_manager
+ )
+
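+        # assert_in_context_manager only passes while the mocked ephemeral
+        # DHCP context is active, proving read_opc_metadata runs under the
+        # short-lived DHCP lease rather than before or after it.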
+ def assert_in_context_manager(**kwargs):
+ assert in_context_manager
+ return mock.MagicMock()
+
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(side_effect=assert_in_context_manager),
+ ):
+ assert oracle_ds._get_data()
+
+ assert [
+ mock.call(
+ iface=m_find_fallback_nic.return_value,
+ connectivity_url_data={
+ "headers": {"Authorization": "Bearer Oracle"},
+ "url": "http://169.254.169.254/opc/v2/instance/",
+ },
+ )
+ ] == m_EphemeralDHCPv4.call_args_list
+
+
+@mock.patch(DS_PATH + ".get_interfaces_by_mac", lambda: {})
+@mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
+class TestNetworkConfig:
+ def test_network_config_cached(self, m_read_initramfs_config, oracle_ds):
+ """.network_config should be cached"""
+ assert 0 == m_read_initramfs_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == m_read_initramfs_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == m_read_initramfs_config.call_count
+
+ def test_network_cmdline(self, m_read_initramfs_config, oracle_ds):
+ """network_config should prefer initramfs config over fallback"""
+ ncfg = {"version": 1, "config": [{"a": "b"}]}
+ m_read_initramfs_config.return_value = copy.deepcopy(ncfg)
+
+ assert ncfg == oracle_ds.network_config
+ assert 0 == oracle_ds.distro.generate_fallback_config.call_count
+
+ def test_network_fallback(self, m_read_initramfs_config, oracle_ds):
+ """network_config should prefer initramfs config over fallback"""
+ ncfg = {"version": 1, "config": [{"a": "b"}]}
+
+ m_read_initramfs_config.return_value = None
+ oracle_ds.distro.generate_fallback_config.return_value = copy.deepcopy(
+ ncfg
+ )
+
+ assert ncfg == oracle_ds.network_config
+
+ @pytest.mark.parametrize(
+ "configure_secondary_nics,expect_secondary_nics",
+ [(True, True), (False, False), (None, False)],
+ )
+ def test_secondary_nic_addition(
+ self,
+ m_read_initramfs_config,
+ configure_secondary_nics,
+ expect_secondary_nics,
+ oracle_ds,
+ ):
+ """Test that _add_network_config_from_opc_imds is called as expected
+
+ (configure_secondary_nics=None is used to test the default behaviour.)
+ """
+ m_read_initramfs_config.return_value = {"version": 1, "config": []}
+
+ if configure_secondary_nics is not None:
+ oracle_ds.ds_cfg[
+ "configure_secondary_nics"
+ ] = configure_secondary_nics
+
+ def side_effect(self):
+ self._network_config["secondary_added"] = mock.sentinel.needle
+
+ oracle_ds._vnics_data = "DummyData"
+ with mock.patch.object(
+ oracle.DataSourceOracle,
+ "_add_network_config_from_opc_imds",
+ new=side_effect,
+ ):
+ was_secondary_added = "secondary_added" in oracle_ds.network_config
+ assert expect_secondary_nics == was_secondary_added
+
+ def test_secondary_nic_failure_isnt_blocking(
+ self,
+ m_read_initramfs_config,
+ caplog,
+ oracle_ds,
+ ):
+ oracle_ds.ds_cfg["configure_secondary_nics"] = True
+ oracle_ds._vnics_data = "DummyData"
+
+ with mock.patch.object(
+ oracle.DataSourceOracle,
+ "_add_network_config_from_opc_imds",
+ side_effect=Exception(),
+ ):
+ network_config = oracle_ds.network_config
+ assert network_config == m_read_initramfs_config.return_value
+ assert "Failed to parse secondary network configuration" in caplog.text
+
+ def test_ds_network_cfg_order(self, _m):
+ """Ensure that DS net config is preferred over initramfs config
+ but less than system config."""
+ config_sources = oracle.DataSourceOracle.network_config_sources
+ system_idx = config_sources.index(NetworkConfigSource.system_cfg)
+ ds_idx = config_sources.index(NetworkConfigSource.ds)
+ initramfs_idx = config_sources.index(NetworkConfigSource.initramfs)
+ assert system_idx < ds_idx < initramfs_idx
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_ovf.py b/tests/unittests/sources/test_ovf.py
new file mode 100644
index 00000000..c2c87f12
--- /dev/null
+++ b/tests/unittests/sources/test_ovf.py
@@ -0,0 +1,1237 @@
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import os
+from collections import OrderedDict
+from textwrap import dedent
+
+from cloudinit import subp, util
+from cloudinit.helpers import Paths
+from cloudinit.safeyaml import YAMLError
+from cloudinit.sources import DataSourceOVF as dsovf
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+ CustomScriptNotFound,
+)
+from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
+
+MPATH = "cloudinit.sources.DataSourceOVF."
+
+NOT_FOUND = None
+
+OVF_ENV_CONTENT = """<?xml version="1.0" encoding="UTF-8"?>
+<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
+ xsi:schemaLocation="http://schemas.dmtf.org/ovf/environment/1 ../dsp8027.xsd"
+ oe:id="WebTier">
+ <!-- Information about hypervisor platform -->
+ <oe:PlatformSection>
+ <Kind>ESX Server</Kind>
+ <Version>3.0.1</Version>
+ <Vendor>VMware, Inc.</Vendor>
+ <Locale>en_US</Locale>
+ </oe:PlatformSection>
+ <!--- Properties defined for this virtual machine -->
+ <PropertySection>
+{properties}
+ </PropertySection>
+</Environment>
+"""
+
+
+def fill_properties(props, template=OVF_ENV_CONTENT):
+ lines = []
+ prop_tmpl = '<Property oe:key="{key}" oe:value="{val}"/>'
+ for key, val in props.items():
+ lines.append(prop_tmpl.format(key=key, val=val))
+ indent = " "
+ properties = "".join([indent + line + "\n" for line in lines])
+ return template.format(properties=properties)
+
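+# The tests below feed fill_properties() output to
+# dsovf.read_ovf_environment(), which returns an (md, ud, cfg) tuple;
+# user-data may be supplied base64-encoded or as plain text.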
+
+class TestReadOvfEnv(CiTestCase):
+ def test_with_b64_userdata(self):
+ user_data = "#!/bin/sh\necho hello world\n"
+ user_data_b64 = base64.b64encode(user_data.encode()).decode()
+ props = {
+ "user-data": user_data_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual(user_data.encode(), ud)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+
+ def test_with_non_b64_userdata(self):
+ user_data = "my-user-data"
+ props = {"user-data": user_data, "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual(user_data.encode(), ud)
+ self.assertEqual({}, cfg)
+
+ def test_with_no_userdata(self):
+ props = {"password": "passw0rd", "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
+ def test_with_b64_network_config_enable_read_network(self):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env, True)
+ self.assertEqual("inst-001", md["instance-id"])
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertEqual(
+ {
+ "version": 2,
+ "ethernets": {
+ "nics": {
+ "nameservers": {
+ "addresses": ["127.0.0.53"],
+ "search": ["eng.vmware.com", "vmware.com"],
+ },
+ "match": {"name": "eth*"},
+ "gateway4": "10.10.10.253",
+ "dhcp4": False,
+ "addresses": ["10.10.10.1/24"],
+ }
+ },
+ },
+ md["network-config"],
+ )
+ self.assertIsNone(ud)
+
+ def test_with_non_b64_network_config_enable_read_network(self):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ props = {
+ "network-config": network_config,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env, True)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
+ def test_with_b64_network_config_disable_read_network(self):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
+
+class TestMarkerFiles(CiTestCase):
+ def setUp(self):
+ super(TestMarkerFiles, self).setUp()
+ self.tdir = self.tmp_dir()
+
+ def test_false_when_markerid_none(self):
+ """Return False when markerid provided is None."""
+ self.assertFalse(
+ dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)
+ )
+
+ def test_markerid_file_exist(self):
+ """Return False when markerid file path does not exist,
+ True otherwise."""
+ self.assertFalse(dsovf.check_marker_exists("123", self.tdir))
+
+ marker_file = self.tmp_path(".markerfile-123.txt", self.tdir)
+ util.write_file(marker_file, "")
+ self.assertTrue(dsovf.check_marker_exists("123", self.tdir))
+
+ def test_marker_file_setup(self):
+ """Test creation of marker files."""
+ markerfilepath = self.tmp_path(".markerfile-hi.txt", self.tdir)
+ self.assertFalse(os.path.exists(markerfilepath))
+ dsovf.setup_marker_files(markerid="hi", marker_dir=self.tdir)
+ self.assertTrue(os.path.exists(markerfilepath))
+
+
+class TestDatasourceOVF(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestDatasourceOVF, self).setUp()
+ self.datasource = dsovf.DataSourceOVF
+ self.tdir = self.tmp_dir()
+
+ def test_get_data_false_on_none_dmi_data(self):
+ """When dmi for system-product-name is None, get_data returns False."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": None,
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: No system-product-name found", self.logs.getvalue()
+ )
+
+ def test_get_data_vmware_customization_disabled(self):
+ """When vmware customization is disabled via sys_cfg and
+ allow_raw_data is disabled via ds_cfg, log a message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": True,
+ "datasource": {"OVF": {"allow_raw_data": False}},
+ },
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: Customization for VMware platform is disabled.",
+ self.logs.getvalue(),
+ )
+
+ def test_get_data_vmware_customization_sys_cfg_disabled(self):
+ """When vmware customization is disabled via sys_cfg and
+ no meta data is found, log a message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": True,
+ "datasource": {"OVF": {"allow_raw_data": True}},
+ },
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: Customization using VMware config is disabled.",
+ self.logs.getvalue(),
+ )
+
+ def test_get_data_allow_raw_data_disabled(self):
+ """When allow_raw_data is disabled via ds_cfg and
+ meta data is found, log a message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": False,
+ "datasource": {"OVF": {"allow_raw_data": False}},
+ },
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ util.write_file(metadata_file, "This is meta data")
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [self.tdir + "/test-meta", "", ""],
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: Customization using raw data is disabled.",
+ self.logs.getvalue(),
+ )
+
+ def test_get_data_vmware_customization_enabled(self):
+ """When cloud-init workflow for vmware is enabled via sys_cfg log a
+ message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ with mock.patch(MPATH + "get_tools_config", return_value="true"):
+ with self.assertRaises(CustomScriptNotFound) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
+
+ def test_get_data_cust_script_disabled(self):
+ """If custom script is disabled by VMware tools configuration,
+ raise a RuntimeError.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ """
+ )
+ util.write_file(conf_file, conf_content)
+        # Prepare the custom script
+ customscript = self.tmp_path("test-script", self.tdir)
+ util.write_file(customscript, "This is the post cust script")
+
+ with mock.patch(MPATH + "get_tools_config", return_value="invalid"):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(RuntimeError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ self.assertIn(
+ "Custom script is disabled by VM Administrator",
+ str(context.exception),
+ )
+
+ def test_get_data_cust_script_enabled(self):
+ """If custom script is enabled by VMware tools configuration,
+ execute the script.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+        # Mock that the custom script is enabled by returning "true" when
+        # calling get_tools_config
+ with mock.patch(MPATH + "get_tools_config", return_value="true"):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(CustomScriptNotFound) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ # Verify custom script is trying to be executed
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
+
+ def test_get_data_force_run_post_script_is_yes(self):
+ """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if
+ enable-custom-scripts is not defined in VM Tools configuration
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts
+ # default value is TRUE
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ DEFAULT-RUN-POST-CUST-SCRIPT = yes
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Mock get_tools_config(section, key, defaultVal) to return
+ # defaultVal
+ def my_get_tools_config(*args, **kwargs):
+ return args[2]
+
+ with mock.patch(
+ MPATH + "get_tools_config", side_effect=my_get_tools_config
+ ):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(CustomScriptNotFound) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ # Verify custom script still runs although it is
+ # disabled by VMware Tools
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
+
+ def test_get_data_non_vmware_seed_platform_info(self):
+ """Platform info properly reports when on non-vmware platforms."""
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
+ # Write ovf-env.xml seed file
+ seed_dir = self.tmp_path("seed", dir=self.tdir)
+ ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir)
+ util.write_file(ovf_env, OVF_ENV_CONTENT)
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+
+ self.assertEqual("ovf", ds.cloud_name)
+ self.assertEqual("ovf", ds.platform_type)
+ with mock.patch(MPATH + "dmi.read_dmi_data", return_value="!VMware"):
+ with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd:
+ with mock.patch(MPATH + "transport_iso9660") as m_iso9660:
+ m_iso9660.return_value = NOT_FOUND
+ m_guestd.return_value = NOT_FOUND
+ self.assertTrue(ds.get_data())
+ self.assertEqual(
+ "ovf (%s/seed/ovf-env.xml)" % self.tdir, ds.subplatform
+ )
+
+ def test_get_data_vmware_seed_platform_info(self):
+ """Platform info properly reports when on VMware platform."""
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
+ # Write ovf-env.xml seed file
+ seed_dir = self.tmp_path("seed", dir=self.tdir)
+ ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir)
+ util.write_file(ovf_env, OVF_ENV_CONTENT)
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+
+ self.assertEqual("ovf", ds.cloud_name)
+ self.assertEqual("ovf", ds.platform_type)
+ with mock.patch(MPATH + "dmi.read_dmi_data", return_value="VMWare"):
+ with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd:
+ with mock.patch(MPATH + "transport_iso9660") as m_iso9660:
+ m_iso9660.return_value = NOT_FOUND
+ m_guestd.return_value = NOT_FOUND
+ self.assertTrue(ds.get_data())
+ self.assertEqual(
+ "vmware (%s/seed/ovf-env.xml)" % self.tdir,
+ ds.subplatform,
+ )
+
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch("cloudinit.sources.DataSource.persist_instance_data")
+ def test_get_data_vmware_guestinfo_with_network_config(
+ self, m_persist, m_subp
+ ):
+        self._test_get_data_with_network_config(guestinfo=True, iso=False)
+
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch("cloudinit.sources.DataSource.persist_instance_data")
+ def test_get_data_iso9660_with_network_config(self, m_persist, m_subp):
+        self._test_get_data_with_network_config(guestinfo=False, iso=True)
+
+ def _test_get_data_with_network_config(self, guestinfo, iso):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+ with mock.patch(
+ MPATH + "transport_vmware_guestinfo",
+ return_value=env if guestinfo else NOT_FOUND,
+ ):
+ with mock.patch(
+ MPATH + "transport_iso9660",
+ return_value=env if iso else NOT_FOUND,
+ ):
+ self.assertTrue(ds.get_data())
+ self.assertEqual("inst-001", ds.metadata["instance-id"])
+ self.assertEqual(
+ {
+ "version": 2,
+ "ethernets": {
+ "nics": {
+ "nameservers": {
+ "addresses": ["127.0.0.53"],
+ "search": ["vmware.com"],
+ },
+ "match": {"name": "eth*"},
+ "gateway4": "10.10.10.253",
+ "dhcp4": False,
+ "addresses": ["10.10.10.1/24"],
+ }
+ },
+ },
+ ds.network_config,
+ )
+
+ def test_get_data_cloudinit_metadata_json(self):
+ """Test metadata can be loaded to cloud-init metadata and network.
+ The metadata format is json.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ {
+ "instance-id": "cloud-vm",
+ "local-hostname": "my-host.domain.com",
+ "network": {
+ "version": 2,
+ "ethernets": {
+ "eths": {
+ "match": {
+ "name": "ens*"
+ },
+ "dhcp4": true
+ }
+ }
+ }
+ }
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
+ self.assertEqual(2, ds.network_config["version"])
+ self.assertTrue(ds.network_config["ethernets"]["eths"]["dhcp4"])
+
+ def test_get_data_cloudinit_metadata_yaml(self):
+ """Test metadata can be loaded to cloud-init metadata and network.
+ The metadata format is yaml.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
+ self.assertEqual(2, ds.network_config["version"])
+ self.assertTrue(ds.network_config["ethernets"]["nics"]["dhcp4"])
+
+ def test_get_data_cloudinit_metadata_not_valid(self):
+ """Test metadata is not JSON or YAML format."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = "[This is not json or yaml format]a=b"
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(YAMLError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+
+ self.assertIn(
+ "expected '<document start>', but found '<scalar>'",
+ str(context.exception),
+ )
+
+ def test_get_data_cloudinit_metadata_not_found(self):
+ """Test metadata file can't be found."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Don't prepare the meta data file
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(FileNotFoundError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+
+ self.assertIn("is not found", str(context.exception))
+
+ def test_get_data_cloudinit_userdata(self):
+ """Test user data can be loaded to cloud-init user data."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ USERDATA = test-user
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ # Prepare the user data file
+ userdata_file = self.tmp_path("test-user", self.tdir)
+ userdata_content = "This is the user data"
+ util.write_file(userdata_file, userdata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ self.tdir + "/test-user",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual(userdata_content, ds.userdata_raw)
+
+ def test_get_data_cloudinit_userdata_not_found(self):
+ """Test userdata file can't be found."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ USERDATA = test-user
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ # Don't prepare the user data file
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(FileNotFoundError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+
+ self.assertIn("is not found", str(context.exception))
+
+
+class TestTransportIso9660(CiTestCase):
+ def setUp(self):
+ super(TestTransportIso9660, self).setUp()
+ self.add_patch("cloudinit.util.find_devs_with", "m_find_devs_with")
+ self.add_patch("cloudinit.util.mounts", "m_mounts")
+ self.add_patch("cloudinit.util.mount_cb", "m_mount_cb")
+ self.add_patch(
+ "cloudinit.sources.DataSourceOVF.get_ovf_env", "m_get_ovf_env"
+ )
+ self.m_get_ovf_env.return_value = ("myfile", "mycontent")
+
+ def test_find_already_mounted(self):
+ """Check we call get_ovf_env from on matching mounted devices"""
+ mounts = {
+ "/dev/sr9": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
+ }
+ }
+ self.m_mounts.return_value = mounts
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+
+ def test_find_already_mounted_skips_non_iso9660(self):
+ """Check we call get_ovf_env ignoring non iso9660"""
+ mounts = {
+ "/dev/xvdb": {
+ "fstype": "vfat",
+ "mountpoint": "wark/foobar",
+ "opts": "defaults,noatime",
+ },
+ "/dev/xvdc": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
+ },
+ }
+        # We use an OrderedDict here to ensure we check xvdb before xvdc,
+        # as we're not mocking the regex matching. However, if we place an
+        # entry in the results then we can be reasonably sure that we're
+        # skipping an entry which fails to match.
+ self.m_mounts.return_value = OrderedDict(
+ sorted(mounts.items(), key=lambda t: t[0])
+ )
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+
+ def test_find_already_mounted_matches_kname(self):
+ """Check we dont regex match on basename of the device"""
+ mounts = {
+ "/dev/foo/bar/xvdc": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
+ }
+ }
+ self.m_mounts.return_value = mounts
+
+ self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
+
+ def test_mount_cb_called_on_blkdevs_with_iso9660(self):
+ """Check we call mount_cb on blockdevs with iso9660 only"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = ["/dev/sr0"]
+ self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+ self.m_mount_cb.assert_called_with(
+ "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660"
+ )
+
+ def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self):
+ """Check we call mount_cb on blockdevs with iso9660 and match regex"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = [
+ "/dev/abc",
+ "/dev/my-cdrom",
+ "/dev/sr0",
+ ]
+ self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+ self.m_mount_cb.assert_called_with(
+ "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660"
+ )
+
+ def test_mount_cb_not_called_no_matches(self):
+ """Check we don't call mount_cb if nothing matches"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = ["/dev/vg/myovf"]
+
+ self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
+ self.assertEqual(0, self.m_mount_cb.call_count)
+
+ def test_mount_cb_called_require_iso_false(self):
+ """Check we call mount_cb on blockdevs with require_iso=False"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = ["/dev/xvdz"]
+ self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+ self.assertEqual(
+ "mycontent", dsovf.transport_iso9660(require_iso=False)
+ )
+
+ self.m_mount_cb.assert_called_with(
+ "/dev/xvdz", dsovf.get_ovf_env, mtype=None
+ )
+
+ def test_maybe_cdrom_device_none(self):
+ """Test maybe_cdrom_device returns False for none/empty input"""
+ self.assertFalse(dsovf.maybe_cdrom_device(None))
+ self.assertFalse(dsovf.maybe_cdrom_device(""))
+
+ def test_maybe_cdrom_device_non_string_exception(self):
+ """Test maybe_cdrom_device raises ValueError on non-string types"""
+ with self.assertRaises(ValueError):
+ dsovf.maybe_cdrom_device({"a": "eleven"})
+
+ def test_maybe_cdrom_device_false_on_multi_dir_paths(self):
+ """Test maybe_cdrom_device is false on /dev[/.*]/* paths"""
+ self.assertFalse(dsovf.maybe_cdrom_device("/dev/foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0"))
+
+ def test_maybe_cdrom_device_true_on_hd_partitions(self):
+ """Test maybe_cdrom_device is false on /dev/hd[a-z][0-9]+ paths"""
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/hda1"))
+ self.assertTrue(dsovf.maybe_cdrom_device("hdz9"))
+
+ def test_maybe_cdrom_device_true_on_valid_relative_paths(self):
+ """Test maybe_cdrom_device normalizes paths"""
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/wark/../sr9"))
+ self.assertTrue(dsovf.maybe_cdrom_device("///sr0"))
+ self.assertTrue(dsovf.maybe_cdrom_device("/sr0"))
+ self.assertTrue(dsovf.maybe_cdrom_device("//dev//hda"))
+
+ def test_maybe_cdrom_device_true_on_xvd_partitions(self):
+ """Test maybe_cdrom_device returns true on xvd*"""
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda"))
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda1"))
+ self.assertTrue(dsovf.maybe_cdrom_device("xvdza1"))
+
+
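+
+# A rough sketch of the device-name matching the maybe_cdrom_device tests
+# above pin down (hypothetical; dsovf.maybe_cdrom_device is the real check
+# and the exact regex it uses may differ):
+def _example_maybe_cdrom_device(devname):
+    import re
+
+    if not devname:
+        return False  # None or "" is never a cdrom device
+    if not isinstance(devname, str):
+        raise ValueError("Unexpected input for devname: %s" % devname)
+    devname = os.path.normpath(devname)  # resolve '..' and repeated '/'
+    if devname.startswith("/dev/"):
+        devname = devname[len("/dev/"):]  # drop the leading /dev/
+    if devname.startswith("/"):
+        devname = devname.split("/")[-1]  # tolerate //dev//hda, /sr0, ...
+    elif "/" in devname:
+        return False  # relative multi-directory paths never match
+    if not devname:
+        return False
+    # sr*, hd[a-z] partitions and xvd* style kernel names qualify
+    return re.match(r"^(sr[0-9]+|hd[a-z]+[0-9]*|xvd.*)$", devname) is not None
+
+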
+@mock.patch(MPATH + "subp.which")
+@mock.patch(MPATH + "subp.subp")
+class TestTransportVmwareGuestinfo(CiTestCase):
+ """Test the com.vmware.guestInfo transport implemented in
+ transport_vmware_guestinfo."""
+
+ rpctool = "vmware-rpctool"
+ with_logs = True
+ rpctool_path = "/not/important/vmware-rpctool"
+
+ def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which):
+ m_which.return_value = None
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(
+ 0,
+ m_subp.call_count,
+ "subp should not be called if no rpctool in path.",
+ )
+
+ def test_notfound_on_exit_code_1(self, m_subp, m_which):
+ """If vmware-rpctool exits 1, then must return not found."""
+ m_which.return_value = self.rpctool_path
+ m_subp.side_effect = subp.ProcessExecutionError(
+ stdout="", stderr="No value found", exit_code=1, cmd=["unused"]
+ )
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+ self.assertNotIn(
+ "WARNING",
+ self.logs.getvalue(),
+ "exit code of 1 by rpctool should not cause warning.",
+ )
+
+ def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which):
+ """If vmware-rpctool exited 0 with no stdout is normal not-found.
+
+ This isn't actually a case I've seen. normally on "not found",
+ rpctool would exit 1 with 'No value found' on stderr. But cover
+ the case where it exited 0 and just wrote nothing to stdout.
+ """
+ m_which.return_value = self.rpctool_path
+ m_subp.return_value = ("", "")
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+
+ def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which):
+ """If vmware-rpctool exits non zero or 1, warnings should be logged."""
+ m_which.return_value = self.rpctool_path
+ m_subp.side_effect = subp.ProcessExecutionError(
+ stdout=None, stderr="No value found", exit_code=2, cmd=["unused"]
+ )
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+ self.assertIn(
+ "WARNING",
+ self.logs.getvalue(),
+ "exit code of 2 by rpctool should log WARNING.",
+ )
+
+ def test_found_when_guestinfo_present(self, m_subp, m_which):
+ """When there is a ovf info, transport should return it."""
+ m_which.return_value = self.rpctool_path
+ content = fill_properties({})
+ m_subp.return_value = (content, "")
+ self.assertEqual(content, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+
+
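+# A condensed sketch of the exit-code contract the tests above encode
+# (hypothetical; dsovf.transport_vmware_guestinfo is the real transport and
+# the exact rpctool command shown here is an assumption):
+def _example_transport_vmware_guestinfo():
+    import logging
+
+    if not subp.which("vmware-rpctool"):
+        return NOT_FOUND  # no rpctool in PATH: subp is never called
+    try:
+        stdout, _stderr = subp.subp(
+            ["vmware-rpctool", "info-get guestinfo.ovfEnv"]
+        )
+        return stdout if stdout else NOT_FOUND  # empty stdout: not found
+    except subp.ProcessExecutionError as e:
+        if e.exit_code != 1:  # exit 1 is a quiet "No value found"
+            logging.getLogger(__name__).warning(
+                "vmware-rpctool exited %s", e.exit_code
+            )
+        return NOT_FOUND
+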
+#
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_rbx.py b/tests/unittests/sources/test_rbx.py
new file mode 100644
index 00000000..475bf498
--- /dev/null
+++ b/tests/unittests/sources/test_rbx.py
@@ -0,0 +1,241 @@
+import json
+
+from cloudinit import distros, helpers, subp
+from cloudinit.sources import DataSourceRbxCloud as ds
+from tests.unittests.helpers import CiTestCase, mock, populate_dir
+
+DS_PATH = "cloudinit.sources.DataSourceRbxCloud"
+
+CRYPTO_PASS = (
+ "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f"
+ "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5"
+ "tToyGP41.s1"
+)
+
+CLOUD_METADATA = {
+ "vm": {
+ "memory": 4,
+ "cpu": 2,
+ "name": "vm-image-builder",
+ "_id": "5beab44f680cffd11f0e60fc",
+ },
+ "additionalMetadata": {
+ "username": "guru",
+ "sshKeys": ["ssh-rsa ..."],
+ "password": {"sha512": CRYPTO_PASS},
+ },
+ "disk": [
+ {
+ "size": 10,
+ "type": "ssd",
+ "name": "vm-image-builder-os",
+ "_id": "5beab450680cffd11f0e60fe",
+ },
+ {
+ "size": 2,
+ "type": "ssd",
+ "name": "ubuntu-1804-bionic",
+ "_id": "5bef002c680cffd11f107590",
+ },
+ ],
+ "netadp": [
+ {
+ "ip": [{"address": "62.181.8.174"}],
+ "network": {
+ "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
+ "routing": [],
+ "gateway": "62.181.8.1",
+ "netmask": "255.255.248.0",
+ "name": "public",
+ "type": "public",
+ "_id": "5784e97be2627505227b578c",
+ },
+ "speed": 1000,
+ "type": "hv",
+ "macaddress": "00:15:5D:FF:0F:03",
+ "_id": "5beab450680cffd11f0e6102",
+ },
+ {
+ "ip": [{"address": "10.209.78.11"}],
+ "network": {
+ "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]},
+ "routing": [],
+ "gateway": "10.209.78.1",
+ "netmask": "255.255.255.0",
+ "name": "network-determined-bardeen",
+ "type": "private",
+ "_id": "5beaec64680cffd11f0e7c31",
+ },
+ "speed": 1000,
+ "type": "hv",
+ "macaddress": "00:15:5D:FF:0F:24",
+ "_id": "5bec18c6680cffd11f0f0d8b",
+ },
+ ],
+ "dvddrive": [{"iso": {}}],
+}
+
+
+class TestRbxDataSource(CiTestCase):
+ parsed_user = None
+ allowed_subp = ["bash"]
+
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
+ def setUp(self):
+ super(TestRbxDataSource, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+
+ # defaults for few tests
+ self.ds = ds.DataSourceRbxCloud
+ self.seed_dir = self.paths.seed_dir
+ self.sys_cfg = {"datasource": {"RbxCloud": {"dsmode": "local"}}}
+
+ def test_seed_read_user_data_callback_empty_file(self):
+ populate_user_metadata(self.seed_dir, "")
+ populate_cloud_metadata(self.seed_dir, {})
+ results = ds.read_user_data_callback(self.seed_dir)
+
+ self.assertIsNone(results)
+
+ def test_seed_read_user_data_callback_valid_disk(self):
+ populate_user_metadata(self.seed_dir, "")
+ populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
+ results = ds.read_user_data_callback(self.seed_dir)
+
+        self.assertIsNotNone(results)
+        self.assertIn("userdata", results)
+        self.assertIn("metadata", results)
+        self.assertIn("cfg", results)
+
+ def test_seed_read_user_data_callback_userdata(self):
+ userdata = "#!/bin/sh\nexit 1"
+ populate_user_metadata(self.seed_dir, userdata)
+ populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
+
+ results = ds.read_user_data_callback(self.seed_dir)
+
+        self.assertIsNotNone(results)
+        self.assertIn("userdata", results)
+ self.assertEqual(results["userdata"], userdata)
+
+ def test_generate_network_config(self):
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [
+ {
+ "control": "auto",
+ "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
+ "netmask": "255.255.248.0",
+ "address": "62.181.8.174",
+ "type": "static",
+ "gateway": "62.181.8.1",
+ }
+ ],
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:15:5d:ff:0f:03",
+ },
+ {
+ "subnets": [
+ {
+ "control": "auto",
+ "dns_nameservers": ["9.9.9.9", "8.8.8.8"],
+ "netmask": "255.255.255.0",
+ "address": "10.209.78.11",
+ "type": "static",
+ "gateway": "10.209.78.1",
+ }
+ ],
+ "type": "physical",
+ "name": "eth1",
+ "mac_address": "00:15:5d:ff:0f:24",
+ },
+ ],
+ }
+        self.assertEqual(
+            expected, ds.generate_network_config(CLOUD_METADATA["netadp"])
+        )
+
+ @mock.patch(DS_PATH + ".subp.subp")
+ def test_gratuitous_arp_run_standard_arping(self, m_subp):
+ """Test handle run arping & parameters."""
+ items = [
+ {"destination": "172.17.0.2", "source": "172.16.6.104"},
+            {"destination": "172.17.0.2", "source": "172.16.6.104"},
+ ]
+ ds.gratuitous_arp(items, self._fetch_distro("ubuntu"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ ],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch(DS_PATH + ".subp.subp")
+ def test_handle_rhel_like_arping(self, m_subp):
+ """Test handle on RHEL-like distros."""
+ items = [
+ {
+ "source": "172.16.6.104",
+ "destination": "172.17.0.2",
+ }
+ ]
+ ds.gratuitous_arp(items, self._fetch_distro("fedora"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-s", "172.16.6.104", "172.17.0.2"]
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
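+    # Note: the two arping tests above pin down a distro-dependent flag
+    # spelling. Thomas Habets' arping (Debian/Ubuntu) takes the source
+    # address as "-S", while iputils arping (RHEL-like distros) spells it
+    # "-s"; the datasource presumably selects the flag from the distro,
+    # roughly (is_rhel_like being a hypothetical stand-in for the real
+    # check):
+    #
+    #     flag = "-s" if is_rhel_like(distro) else "-S"
+    #     subp.subp(["arping", "-c", "2", flag, source, destination])
+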
+ @mock.patch(
+ DS_PATH + ".subp.subp", side_effect=subp.ProcessExecutionError()
+ )
+ def test_continue_on_arping_error(self, m_subp):
+ """Continue when command error"""
+ items = [
+ {"destination": "172.17.0.2", "source": "172.16.6.104"},
+            {"destination": "172.17.0.2", "source": "172.16.6.104"},
+ ]
+ ds.gratuitous_arp(items, self._fetch_distro("ubuntu"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ ],
+ m_subp.call_args_list,
+ )
+
+
+def populate_cloud_metadata(path, data):
+ populate_dir(path, {"cloud.json": json.dumps(data)})
+
+
+def populate_user_metadata(path, data):
+ populate_dir(path, {"user.data": data})
diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py
new file mode 100644
index 00000000..d7e8b969
--- /dev/null
+++ b/tests/unittests/sources/test_scaleway.py
@@ -0,0 +1,526 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+import httpretty
+import requests
+
+from cloudinit import helpers, settings, sources
+from cloudinit.sources import DataSourceScaleway
+from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock
+
+
+class DataResponses(object):
+ """
+ Possible responses of the API endpoint
+ 169.254.42.42/user_data/cloud-init and
+ 169.254.42.42/vendor_data/cloud-init.
+ """
+
+ FAKE_USER_DATA = '#!/bin/bash\necho "user-data"'
+
+ @staticmethod
+ def rate_limited(method, uri, headers):
+ return 429, headers, ""
+
+ @staticmethod
+ def api_error(method, uri, headers):
+ return 500, headers, ""
+
+ @classmethod
+ def get_ok(cls, method, uri, headers):
+ return 200, headers, cls.FAKE_USER_DATA
+
+ @staticmethod
+ def empty(method, uri, headers):
+ """
+ No user data for this server.
+ """
+ return 404, headers, ""
+
+
+class MetadataResponses(object):
+ """
+ Possible responses of the metadata API.
+ """
+
+ FAKE_METADATA = {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "hostname": "scaleway.host",
+ "tags": [
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ ],
+ "ssh_public_keys": [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ],
+ }
+
+ @classmethod
+ def get_ok(cls, method, uri, headers):
+ return 200, headers, json.dumps(cls.FAKE_METADATA)
+
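+# httpretty invokes the hooks in the two response classes above as body
+# callbacks: each is called with three positional arguments and must return
+# a (status_code, response_headers, body) tuple.
+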
+
+class TestOnScaleway(CiTestCase):
+ def setUp(self):
+ super(TestOnScaleway, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline):
+ mock, faked = fake_dmi
+ mock.return_value = "Scaleway" if faked else "Whatever"
+
+ mock, faked = fake_file_exists
+ mock.return_value = faked
+
+ mock, faked = fake_cmdline
+ mock.return_value = (
+ "initrd=initrd showopts scaleway nousb"
+ if faked
+ else "BOOT_IMAGE=/vmlinuz-3.11.0-26-generic"
+ )
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_not_on_scaleway(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, False),
+ )
+ self.assertFalse(DataSourceScaleway.on_scaleway())
+
+ # When not on Scaleway, get_data() returns False.
+ datasource = DataSourceScaleway.DataSourceScaleway(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ self.assertFalse(datasource.get_data())
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_dmi(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ """
+ dmidecode returns "Scaleway".
+ """
+ # dmidecode returns "Scaleway"
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, True),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, False),
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_var_run_scaleway(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ """
+ /var/run/scaleway exists.
+ """
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, True),
+ fake_cmdline=(m_get_cmdline, False),
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_cmdline(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ """
+ "scaleway" in /proc/cmdline.
+ """
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, True),
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+
+def get_source_address_adapter(*args, **kwargs):
+ """
+    The Scaleway user/vendor data API requires requests to originate from a
+    privileged port.
+
+ If the unittests are run as non-root, the user doesn't have the permission
+ to bind on ports below 1024.
+
+    This function removes the bind to a privileged address, since the HTTP
+    call is mocked by httpretty anyway.
+ """
+ kwargs.pop("source_address")
+ return requests.adapters.HTTPAdapter(*args, **kwargs)
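+
+
+# A plausible use, mirroring what the datasource presumably does with a
+# requests session (the port number below is only illustrative):
+#
+#     session = requests.Session()
+#     adapter = SourceAddressAdapter(source_address=("0.0.0.0", 35355))
+#     session.mount("http://", adapter)
+#
+# get_source_address_adapter() stands in for SourceAddressAdapter in these
+# tests and simply drops source_address so they can run unprivileged.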
+
+
+class TestDataSourceScaleway(HttprettyTestCase):
+ def setUp(self):
+ tmp = self.tmp_dir()
+ self.datasource = DataSourceScaleway.DataSourceScaleway(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp})
+ )
+ super(TestDataSourceScaleway, self).setUp()
+
+ self.metadata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "metadata_url"
+ ]
+ self.userdata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "userdata_url"
+ ]
+ self.vendordata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "vendordata_url"
+ ]
+
+ self.add_patch(
+ "cloudinit.sources.DataSourceScaleway.on_scaleway",
+ "_m_on_scaleway",
+ return_value=True,
+ )
+ self.add_patch(
+ "cloudinit.sources.DataSourceScaleway.net.find_fallback_nic",
+ "_m_find_fallback_nic",
+ return_value="scalewaynic0",
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
+ def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4):
+ """
+ get_data() returns metadata, user data and vendor data.
+ """
+ m_get_cmdline.return_value = "scaleway"
+
+ # Make user data API return a valid response
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=DataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.get_ok
+ )
+ self.datasource.get_data()
+
+ self.assertEqual(
+ self.datasource.get_instance_id(),
+ MetadataResponses.FAKE_METADATA["id"],
+ )
+ self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted(
+                [
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+                ]
+            ),
+ )
+ self.assertEqual(
+ self.datasource.get_hostname(),
+ MetadataResponses.FAKE_METADATA["hostname"],
+ )
+ self.assertEqual(
+ self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA
+ )
+ self.assertEqual(
+ self.datasource.get_vendordata_raw(), DataResponses.FAKE_USER_DATA
+ )
+ self.assertIsNone(self.datasource.availability_zone)
+ self.assertIsNone(self.datasource.region)
+ self.assertEqual(sleep.call_count, 0)
+
+ def test_ssh_keys_empty(self):
+ """
+        get_public_ssh_keys() should return an empty list if no ssh keys
+        are available
+ """
+ self.datasource.metadata["tags"] = []
+ self.datasource.metadata["ssh_public_keys"] = []
+ self.assertEqual(self.datasource.get_public_ssh_keys(), [])
+
+ def test_ssh_keys_only_tags(self):
+ """
+ get_public_ssh_keys() should return list of keys available in tags
+ """
+ self.datasource.metadata["tags"] = [
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ ]
+ self.datasource.metadata["ssh_public_keys"] = []
+ self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted(
+                [
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+                ]
+            ),
+ )
+
+ def test_ssh_keys_only_conf(self):
+ """
+ get_public_ssh_keys() should return list of keys available in
+ ssh_public_keys field
+ """
+ self.datasource.metadata["tags"] = []
+ self.datasource.metadata["ssh_public_keys"] = [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ]
+ self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted(
+                [
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+                ]
+            ),
+ )
+
+ def test_ssh_keys_both(self):
+ """
+ get_public_ssh_keys() should return a merge of keys available
+ in ssh_public_keys and tags
+ """
+ self.datasource.metadata["tags"] = [
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ ]
+
+ self.datasource.metadata["ssh_public_keys"] = [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ]
+ self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted(
+                [
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+                ]
+            ),
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
+ def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4):
+ """
+ get_data() returns metadata, but no user data nor vendor data.
+ """
+ m_get_cmdline.return_value = "scaleway"
+
+ # Make user and vendor data APIs return HTTP/404, which means there is
+ # no user / vendor data for the server.
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=DataResponses.empty
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.empty
+ )
+ self.datasource.get_data()
+ self.assertIsNone(self.datasource.get_userdata_raw())
+ self.assertIsNone(self.datasource.get_vendordata_raw())
+ self.assertEqual(sleep.call_count, 0)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
+ def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4):
+ """
+ get_data() is rate limited two times by the metadata API when fetching
+ user data.
+ """
+ m_get_cmdline.return_value = "scaleway"
+
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.empty
+ )
+
+ httpretty.register_uri(
+ httpretty.GET,
+ self.userdata_url,
+ responses=[
+ httpretty.Response(body=DataResponses.rate_limited),
+ httpretty.Response(body=DataResponses.rate_limited),
+ httpretty.Response(body=DataResponses.get_ok),
+ ],
+ )
+ self.datasource.get_data()
+ self.assertEqual(
+ self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA
+ )
+ self.assertEqual(sleep.call_count, 2)
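+        # The datasource presumably sleeps and retries on HTTP 429, which
+        # is why two rate-limited responses yield exactly two sleeps here.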
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_ok(self, m_get_cmdline, fallback_nic):
+ """
+ network_config will only generate IPv4 config if no ipv6 data is
+ available in the metadata
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
+
+ netcfg = self.datasource.network_config
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+ self.assertEqual(netcfg, resp)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic):
+ """
+ network_config will only generate IPv4/v6 configs if ipv6 data is
+ available in the metadata
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = {
+ "address": "2000:abc:4444:9876::42:999",
+ "gateway": "2000:abc:4444:9876::42:000",
+ "netmask": "127",
+ }
+
+ netcfg = self.datasource.network_config
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [
+ {"type": "dhcp4"},
+ {
+ "type": "static",
+ "address": "2000:abc:4444:9876::42:999",
+ "gateway": "2000:abc:4444:9876::42:000",
+ "netmask": "127",
+ },
+ ],
+ }
+ ],
+ }
+ self.assertEqual(netcfg, resp)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_existing(self, m_get_cmdline, fallback_nic):
+ """
+ network_config() should return the same data if a network config
+ already exists
+ """
+ m_get_cmdline.return_value = "scaleway"
+ self.datasource._network_config = "0xdeadbeef"
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, "0xdeadbeef")
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_unset(self, m_get_cmdline, fallback_nic):
+ """
+ _network_config will be set to sources.UNSET after the first boot.
+        Make sure it behaves correctly.
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
+ self.datasource._network_config = sources.UNSET
+
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, resp)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.LOG.warning")
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_cached_none(
+ self, m_get_cmdline, fallback_nic, logwarning
+ ):
+ """
+ network_config() should return config data if cached data is None
+ rather than sources.UNSET
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
+ self.datasource._network_config = None
+
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, resp)
+ logwarning.assert_called_with(
+ "Found None as cached _network_config. Resetting to %s",
+ sources.UNSET,
+ )
diff --git a/tests/unittests/sources/test_smartos.py b/tests/unittests/sources/test_smartos.py
new file mode 100644
index 00000000..55239c4e
--- /dev/null
+++ b/tests/unittests/sources/test_smartos.py
@@ -0,0 +1,1421 @@
+# Copyright (C) 2013 Canonical Ltd.
+# Copyright 2019 Joyent, Inc.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""This is a testcase for the SmartOS datasource.
+
+It replicates a serial console and acts like the SmartOS console does in
+order to validate return responses.
+
+"""
+
+import json
+import multiprocessing
+import os
+import os.path
+import re
+import signal
+import stat
+import unittest
+import uuid
+from binascii import crc32
+
+from cloudinit import helpers as c_helpers
+from cloudinit import serial
+from cloudinit.event import EventScope, EventType
+from cloudinit.sources import DataSourceSmartOS
+from cloudinit.sources.DataSourceSmartOS import SERIAL_DEVICE, SMARTOS_ENV_KVM
+from cloudinit.sources.DataSourceSmartOS import (
+ convert_smartos_network_data as convert_net,
+)
+from cloudinit.sources.DataSourceSmartOS import (
+ get_smartos_environ,
+ identify_file,
+)
+from cloudinit.subp import ProcessExecutionError, subp, which
+from cloudinit.util import b64e, write_file
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipIf,
+)
+
+try:
+ import serial as _pyserial
+
+ assert _pyserial # avoid pyflakes error F401: import unused
+ HAS_PYSERIAL = True
+except ImportError:
+ HAS_PYSERIAL = False
+
+DSMOS = "cloudinit.sources.DataSourceSmartOS"
+SDC_NICS = json.loads(
+ """
+[
+ {
+ "nic_tag": "external",
+ "primary": true,
+ "mtu": 1500,
+ "model": "virtio",
+ "gateway": "8.12.42.1",
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.102",
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "gateways": [
+ "8.12.42.1"
+ ],
+ "vlan_id": 324,
+ "mac": "90:b8:d0:f5:e4:f5",
+ "interface": "net0",
+ "ips": [
+ "8.12.42.102/24"
+ ]
+ },
+ {
+ "nic_tag": "sdc_overlay/16187209",
+ "gateway": "192.168.128.1",
+ "model": "virtio",
+ "mac": "90:b8:d0:a5:ff:cd",
+ "netmask": "255.255.252.0",
+ "ip": "192.168.128.93",
+ "network_uuid": "4cad71da-09bc-452b-986d-03562a03a0a9",
+ "gateways": [
+ "192.168.128.1"
+ ],
+ "vlan_id": 2,
+ "mtu": 8500,
+ "interface": "net1",
+ "ips": [
+ "192.168.128.93/22"
+ ]
+ }
+]
+"""
+)
+
+
+SDC_NICS_ALT = json.loads(
+ """
+[
+ {
+ "interface": "net0",
+ "mac": "90:b8:d0:ae:64:51",
+ "vlan_id": 324,
+ "nic_tag": "external",
+ "gateway": "8.12.42.1",
+ "gateways": [
+ "8.12.42.1"
+ ],
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.51",
+ "ips": [
+ "8.12.42.51/24"
+ ],
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model": "virtio",
+ "mtu": 1500,
+ "primary": true
+ },
+ {
+ "interface": "net1",
+ "mac": "90:b8:d0:bd:4f:9c",
+ "vlan_id": 600,
+ "nic_tag": "internal",
+ "netmask": "255.255.255.0",
+ "ip": "10.210.1.217",
+ "ips": [
+ "10.210.1.217/24"
+ ],
+ "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model": "virtio",
+ "mtu": 1500
+ }
+]
+"""
+)
+
+SDC_NICS_DHCP = json.loads(
+ """
+[
+ {
+ "interface": "net0",
+ "mac": "90:b8:d0:ae:64:51",
+ "vlan_id": 324,
+ "nic_tag": "external",
+ "gateway": "8.12.42.1",
+ "gateways": [
+ "8.12.42.1"
+ ],
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.51",
+ "ips": [
+ "8.12.42.51/24"
+ ],
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model": "virtio",
+ "mtu": 1500,
+ "primary": true
+ },
+ {
+ "interface": "net1",
+ "mac": "90:b8:d0:bd:4f:9c",
+ "vlan_id": 600,
+ "nic_tag": "internal",
+ "netmask": "255.255.255.0",
+ "ip": "10.210.1.217",
+ "ips": [
+ "dhcp"
+ ],
+ "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model": "virtio",
+ "mtu": 1500
+ }
+]
+"""
+)
+
+SDC_NICS_MIP = json.loads(
+ """
+[
+ {
+ "interface": "net0",
+ "mac": "90:b8:d0:ae:64:51",
+ "vlan_id": 324,
+ "nic_tag": "external",
+ "gateway": "8.12.42.1",
+ "gateways": [
+ "8.12.42.1"
+ ],
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.51",
+ "ips": [
+ "8.12.42.51/24",
+ "8.12.42.52/24"
+ ],
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model": "virtio",
+ "mtu": 1500,
+ "primary": true
+ },
+ {
+ "interface": "net1",
+ "mac": "90:b8:d0:bd:4f:9c",
+ "vlan_id": 600,
+ "nic_tag": "internal",
+ "netmask": "255.255.255.0",
+ "ip": "10.210.1.217",
+ "ips": [
+ "10.210.1.217/24",
+ "10.210.1.151/24"
+ ],
+ "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model": "virtio",
+ "mtu": 1500
+ }
+]
+"""
+)
+
+SDC_NICS_MIP_IPV6 = json.loads(
+ """
+[
+ {
+ "interface": "net0",
+ "mac": "90:b8:d0:ae:64:51",
+ "vlan_id": 324,
+ "nic_tag": "external",
+ "gateway": "8.12.42.1",
+ "gateways": [
+ "8.12.42.1"
+ ],
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.51",
+ "ips": [
+ "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64",
+ "8.12.42.51/24"
+ ],
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model": "virtio",
+ "mtu": 1500,
+ "primary": true
+ },
+ {
+ "interface": "net1",
+ "mac": "90:b8:d0:bd:4f:9c",
+ "vlan_id": 600,
+ "nic_tag": "internal",
+ "netmask": "255.255.255.0",
+ "ip": "10.210.1.217",
+ "ips": [
+ "10.210.1.217/24"
+ ],
+ "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model": "virtio",
+ "mtu": 1500
+ }
+]
+"""
+)
+
+SDC_NICS_IPV4_IPV6 = json.loads(
+ """
+[
+ {
+ "interface": "net0",
+ "mac": "90:b8:d0:ae:64:51",
+ "vlan_id": 324,
+ "nic_tag": "external",
+ "gateway": "8.12.42.1",
+ "gateways": ["8.12.42.1", "2001::1", "2001::2"],
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.51",
+ "ips": ["2001::10/64", "8.12.42.51/24", "2001::11/64",
+ "8.12.42.52/32"],
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model": "virtio",
+ "mtu": 1500,
+ "primary": true
+ },
+ {
+ "interface": "net1",
+ "mac": "90:b8:d0:bd:4f:9c",
+ "vlan_id": 600,
+ "nic_tag": "internal",
+ "netmask": "255.255.255.0",
+ "ip": "10.210.1.217",
+ "ips": ["10.210.1.217/24"],
+ "gateways": ["10.210.1.210"],
+ "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model": "virtio",
+ "mtu": 1500
+ }
+]
+"""
+)
+
+SDC_NICS_SINGLE_GATEWAY = json.loads(
+ """
+[
+ {
+ "interface":"net0",
+ "mac":"90:b8:d0:d8:82:b4",
+ "vlan_id":324,
+ "nic_tag":"external",
+ "gateway":"8.12.42.1",
+ "gateways":["8.12.42.1"],
+ "netmask":"255.255.255.0",
+ "ip":"8.12.42.26",
+ "ips":["8.12.42.26/24"],
+ "network_uuid":"992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model":"virtio",
+ "mtu":1500,
+ "primary":true
+ },
+ {
+ "interface":"net1",
+ "mac":"90:b8:d0:0a:51:31",
+ "vlan_id":600,
+ "nic_tag":"internal",
+ "netmask":"255.255.255.0",
+ "ip":"10.210.1.27",
+ "ips":["10.210.1.27/24"],
+ "network_uuid":"98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model":"virtio",
+ "mtu":1500
+ }
+]
+"""
+)
+
+
+MOCK_RETURNS = {
+ "hostname": "test-host",
+ "root_authorized_keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname",
+ "disable_iptables_flag": None,
+ "enable_motd_sys_info": None,
+ "test-var1": "some data",
+ "cloud-init:user-data": "\n".join(["#!/bin/sh", "/bin/true", ""]),
+ "sdc:datacenter_name": "somewhere2",
+ "sdc:operator-script": "\n".join(["bin/true", ""]),
+ "sdc:uuid": str(uuid.uuid4()),
+ "sdc:vendor-data": "\n".join(["VENDOR_DATA", ""]),
+ "user-data": "\n".join(["something", ""]),
+ "user-script": "\n".join(["/bin/true", ""]),
+ "sdc:nics": json.dumps(SDC_NICS),
+}
+
+DMI_DATA_RETURN = "smartdc"
+
+# Useful for calculating the length of a frame body. A SUCCESS response will
+# be followed by more characters (the payload), or be one character shorter
+# when SUCCESS carries no payload. See Section 4.3 of
+# https://eng.joyent.com/mdata/protocol.html.
+SUCCESS_LEN = len("0123abcd SUCCESS ")
+NOTFOUND_LEN = len("0123abcd NOTFOUND")
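+
+# For example, a SUCCESS frame carrying the one-character payload "0" has a
+# body of SUCCESS_LEN + 1 characters, while a bare SUCCESS frame (no payload,
+# hence no trailing space) is SUCCESS_LEN - 1.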
+
+
+class PsuedoJoyentClient(object):
+ def __init__(self, data=None):
+ if data is None:
+ data = MOCK_RETURNS.copy()
+ self.data = data
+ self._is_open = False
+
+ def get(self, key, default=None, strip=False):
+ if key in self.data:
+ r = self.data[key]
+ if strip:
+ r = r.strip()
+ else:
+ r = default
+ return r
+
+ def get_json(self, key, default=None):
+ result = self.get(key, default=default)
+ if result is None:
+ return default
+ return json.loads(result)
+
+ def exists(self):
+ return True
+
+ def open_transport(self):
+ assert not self._is_open
+ self._is_open = True
+
+ def close_transport(self):
+ assert self._is_open
+ self._is_open = False
+
+
+class TestSmartOSDataSource(FilesystemMockingTestCase):
+ jmc_cfact = None
+ get_smartos_environ = None
+
+ def setUp(self):
+ super(TestSmartOSDataSource, self).setUp()
+
+ self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ")
+ self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact")
+ self.legacy_user_d = self.tmp_path("legacy_user_tmp")
+ os.mkdir(self.legacy_user_d)
+ self.add_patch(
+ DSMOS + ".LEGACY_USER_D",
+ "m_legacy_user_d",
+ autospec=False,
+ new=self.legacy_user_d,
+ )
+ self.add_patch(
+ DSMOS + ".identify_file",
+ "m_identify_file",
+ return_value="text/plain",
+ )
+
+ def _get_ds(
+ self,
+ mockdata=None,
+ mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
+ sys_cfg=None,
+ ds_cfg=None,
+ ):
+        self.jmc_cfact.return_value = PseudoJoyentClient(mockdata)
+ self.get_smartos_environ.return_value = mode
+
+ tmpd = self.tmp_dir()
+ dirs = {
+ "cloud_dir": self.tmp_path("cloud_dir", tmpd),
+ "run_dir": self.tmp_path("run_dir"),
+ }
+ for d in dirs.values():
+ os.mkdir(d)
+ paths = c_helpers.Paths(dirs)
+
+ if sys_cfg is None:
+ sys_cfg = {}
+
+ if ds_cfg is not None:
+ sys_cfg["datasource"] = sys_cfg.get("datasource", {})
+ sys_cfg["datasource"]["SmartOS"] = ds_cfg
+
+ return DataSourceSmartOS.DataSourceSmartOS(
+ sys_cfg, distro=None, paths=paths
+ )
+
+ def test_no_base64(self):
+ ds_cfg = {"no_base64_decode": ["test_var1"], "all_base": True}
+ dsrc = self._get_ds(ds_cfg=ds_cfg)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ def test_uuid(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ MOCK_RETURNS["sdc:uuid"], dsrc.metadata["instance-id"]
+ )
+
+ def test_platform_info(self):
+ """All platform-related attributes are properly set."""
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ self.assertEqual("joyent", dsrc.cloud_name)
+ self.assertEqual("joyent", dsrc.platform_type)
+ self.assertEqual("serial (/dev/ttyS1)", dsrc.subplatform)
+
+ def test_root_keys(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ MOCK_RETURNS["root_authorized_keys"], dsrc.metadata["public-keys"]
+ )
+
+ def test_hostname_b64(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ MOCK_RETURNS["hostname"], dsrc.metadata["local-hostname"]
+ )
+
+ def test_hostname(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ MOCK_RETURNS["hostname"], dsrc.metadata["local-hostname"]
+ )
+
+    def test_hostname_preferred_over_sdc_hostname(self):
+ my_returns = MOCK_RETURNS.copy()
+ my_returns["sdc:hostname"] = "sdc-" + my_returns["hostname"]
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ my_returns["hostname"], dsrc.metadata["local-hostname"]
+ )
+
+ def test_sdc_hostname_if_no_hostname(self):
+ my_returns = MOCK_RETURNS.copy()
+ my_returns["sdc:hostname"] = "sdc-" + my_returns["hostname"]
+ del my_returns["hostname"]
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ my_returns["sdc:hostname"], dsrc.metadata["local-hostname"]
+ )
+
+ def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self):
+ my_returns = MOCK_RETURNS.copy()
+ del my_returns["hostname"]
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ my_returns["sdc:uuid"], dsrc.metadata["local-hostname"]
+ )
+
+ def test_userdata(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ MOCK_RETURNS["user-data"], dsrc.metadata["legacy-user-data"]
+ )
+ self.assertEqual(
+ MOCK_RETURNS["cloud-init:user-data"], dsrc.userdata_raw
+ )
+
+ def test_sdc_nics(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ json.loads(MOCK_RETURNS["sdc:nics"]), dsrc.metadata["network-data"]
+ )
+
+ def test_sdc_scripts(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ MOCK_RETURNS["user-script"], dsrc.metadata["user-script"]
+ )
+
+ legacy_script_f = "%s/user-script" % self.legacy_user_d
+ print("legacy_script_f=%s" % legacy_script_f)
+ self.assertTrue(os.path.exists(legacy_script_f))
+ self.assertTrue(os.path.islink(legacy_script_f))
+ user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+ self.assertEqual(user_script_perm, "700")
+
+ def test_scripts_shebanged(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ MOCK_RETURNS["user-script"], dsrc.metadata["user-script"]
+ )
+
+ legacy_script_f = "%s/user-script" % self.legacy_user_d
+ self.assertTrue(os.path.exists(legacy_script_f))
+ self.assertTrue(os.path.islink(legacy_script_f))
+ shebang = None
+ with open(legacy_script_f, "r") as f:
+ shebang = f.readlines()[0].strip()
+ self.assertEqual(shebang, "#!/bin/bash")
+ user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+ self.assertEqual(user_script_perm, "700")
+
+ def test_scripts_shebang_not_added(self):
+ """
+ Test that the SmartOS requirement that plain text scripts
+ are executable. This test makes sure that plain texts scripts
+ with out file magic have it added appropriately by cloud-init.
+ """
+
+ my_returns = MOCK_RETURNS.copy()
+ my_returns["user-script"] = "\n".join(
+ ["#!/usr/bin/perl", 'print("hi")', ""]
+ )
+
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ my_returns["user-script"], dsrc.metadata["user-script"]
+ )
+
+ legacy_script_f = "%s/user-script" % self.legacy_user_d
+ self.assertTrue(os.path.exists(legacy_script_f))
+ self.assertTrue(os.path.islink(legacy_script_f))
+ shebang = None
+ with open(legacy_script_f, "r") as f:
+ shebang = f.readlines()[0].strip()
+ self.assertEqual(shebang, "#!/usr/bin/perl")
+
+ def test_userdata_removed(self):
+ """
+        User-data in the SmartOS world is supposed to be written to a file
+        on each and every boot. This test makes sure that, in the event
+        the user-data key is removed from the metadata, the existing
+        user-data file is backed up and no /var/db/user-data is left
+        behind.
+ """
+
+ user_data_f = "%s/mdata-user-data" % self.legacy_user_d
+ with open(user_data_f, "w") as f:
+ f.write("PREVIOUS")
+
+ my_returns = MOCK_RETURNS.copy()
+ del my_returns["user-data"]
+
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertFalse(dsrc.metadata.get("legacy-user-data"))
+
+ found_new = False
+ for root, _dirs, files in os.walk(self.legacy_user_d):
+ for name in files:
+ name_f = os.path.join(root, name)
+ permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:]
+ if re.match(r".*\/mdata-user-data$", name_f):
+ found_new = True
+ print(name_f)
+ self.assertEqual(permissions, "400")
+
+ self.assertFalse(found_new)
+
+ def test_vendor_data_not_default(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ MOCK_RETURNS["sdc:vendor-data"], dsrc.metadata["vendor-data"]
+ )
+
+ def test_default_vendor_data(self):
+ my_returns = MOCK_RETURNS.copy()
+ def_op_script = my_returns["sdc:vendor-data"]
+ del my_returns["sdc:vendor-data"]
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertNotEqual(def_op_script, dsrc.metadata["vendor-data"])
+
+ # we expect default vendor-data is a boothook
+ self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook"))
+
+ def test_disable_iptables_flag(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ MOCK_RETURNS["disable_iptables_flag"],
+ dsrc.metadata["iptables_disable"],
+ )
+
+ def test_motd_sys_info(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ MOCK_RETURNS["enable_motd_sys_info"],
+ dsrc.metadata["motd_sys_info"],
+ )
+
+ def test_default_ephemeral(self):
+ # Test to make sure that the builtin config has the ephemeral
+ # configuration.
+ dsrc = self._get_ds()
+ cfg = dsrc.get_config_obj()
+
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ assert "disk_setup" in cfg
+ assert "fs_setup" in cfg
+ self.assertIsInstance(cfg["disk_setup"], dict)
+ self.assertIsInstance(cfg["fs_setup"], list)
+
+ def test_override_disk_aliases(self):
+        # Test to make sure that the built-in DS config is overridden
+ builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
+
+ mydscfg = {"disk_aliases": {"FOO": "/dev/bar"}}
+
+ # expect that these values are in builtin, or this is pointless
+ for k in mydscfg:
+ self.assertIn(k, builtin)
+
+ dsrc = self._get_ds(ds_cfg=mydscfg)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ self.assertEqual(
+ mydscfg["disk_aliases"]["FOO"], dsrc.ds_cfg["disk_aliases"]["FOO"]
+ )
+
+ self.assertEqual(
+ dsrc.device_name_to_device("FOO"), mydscfg["disk_aliases"]["FOO"]
+ )
+
+ def test_reconfig_network_on_boot(self):
+ # Test to ensure that network is configured from metadata on each boot
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ self.assertSetEqual(
+ {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ },
+ dsrc.default_update_events[EventScope.NETWORK],
+ )
+
+
+class TestIdentifyFile(CiTestCase):
+ """Test the 'identify_file' utility."""
+
+ @skipIf(not which("file"), "command 'file' not available.")
+ def test_file_happy_path(self):
+ """Test file is available and functional on plain text."""
+ fname = self.tmp_path("myfile")
+ write_file(fname, "plain text content here\n")
+ with self.allow_subp(["file"]):
+ self.assertEqual("text/plain", identify_file(fname))
+
+ @mock.patch(DSMOS + ".subp.subp")
+ def test_returns_none_on_error(self, m_subp):
+ """On 'file' execution error, None should be returned."""
+ m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99)
+ fname = self.tmp_path("myfile")
+ write_file(fname, "plain text content here\n")
+ self.assertEqual(None, identify_file(fname))
+ self.assertEqual(
+ [mock.call(["file", "--brief", "--mime-type", fname])],
+ m_subp.call_args_list,
+ )
+
+
+class ShortReader(object):
+ """Implements a 'read' interface for bytes provided.
+ much like io.BytesIO but the 'endbyte' acts as if EOF.
+ When it is reached a short will be returned."""
+
+ def __init__(self, initial_bytes, endbyte=b"\0"):
+ self.data = initial_bytes
+ self.index = 0
+ self.len = len(self.data)
+ self.endbyte = endbyte
+
+ @property
+ def emptied(self):
+ return self.index >= self.len
+
+ def read(self, size=-1):
+ """Read size bytes but not past a null."""
+ if size == 0 or self.index >= self.len:
+ return b""
+
+ rsize = size
+ if size < 0 or size + self.index > self.len:
+ rsize = self.len - self.index
+
+        # find()'s third argument is an absolute end index, not a length.
+        next_null = self.data.find(
+            self.endbyte, self.index, self.index + rsize
+        )
+ if next_null >= 0:
+ rsize = next_null - self.index + 1
+ i = self.index
+ self.index += rsize
+ ret = self.data[i : i + rsize]
+ if len(ret) and ret[-1:] == self.endbyte:
+ ret = ret[:-1]
+ return ret
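+
+    # An illustrative sketch of how ShortReader drives the tests below
+    # (example values only): with data b"V2_OK\n\0junk" and the default
+    # endbyte, read(6) returns b"V2_OK\n"; the next read() stops at the
+    # endbyte and returns b"" -- the short read that the serial client
+    # treats as a timeout.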
+
+
+class TestJoyentMetadataClient(FilesystemMockingTestCase):
+
+ invalid = b"invalid command\n"
+ failure = b"FAILURE\n"
+ v2_ok = b"V2_OK\n"
+
+ def setUp(self):
+ super(TestJoyentMetadataClient, self).setUp()
+
+ self.serial = mock.MagicMock(spec=serial.Serial)
+ self.request_id = 0xABCDEF12
+ self.metadata_value = "value"
+ self.response_parts = {
+ "command": "SUCCESS",
+ "crc": "b5a9ff00",
+ "length": SUCCESS_LEN + len(b64e(self.metadata_value)),
+ "payload": b64e(self.metadata_value),
+ "request_id": "{0:08x}".format(self.request_id),
+ }
+
+ def make_response():
+ payloadstr = ""
+ if "payload" in self.response_parts:
+ payloadstr = " {0}".format(self.response_parts["payload"])
+ return (
+ "V2 {length} {crc} {request_id} "
+ "{command}{payloadstr}\n".format(
+ payloadstr=payloadstr, **self.response_parts
+ ).encode("ascii")
+ )
+
+ self.metasource_data = None
+
+ def read_response(length):
+ if not self.metasource_data:
+ self.metasource_data = make_response()
+ self.metasource_data_len = len(self.metasource_data)
+ resp = self.metasource_data[:length]
+ self.metasource_data = self.metasource_data[length:]
+ return resp
+
+ self.serial.read.side_effect = read_response
+ self.patched_funcs.enter_context(
+ mock.patch(
+ "cloudinit.sources.DataSourceSmartOS.random.randint",
+ mock.Mock(return_value=self.request_id),
+ )
+ )
+
+ def _get_client(self):
+ return DataSourceSmartOS.JoyentMetadataClient(
+ fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM
+ )
+
+ def _get_serial_client(self):
+ self.serial.timeout = 1
+ return DataSourceSmartOS.JoyentMetadataSerialClient(
+ None, fp=self.serial
+ )
+
+    def assertEndsWith(self, haystack, suffix):
+        self.assertTrue(
+            haystack.endswith(suffix),
+            "{0} does not end with '{1}'".format(repr(haystack), suffix),
+        )
+
+ def assertStartsWith(self, haystack, prefix):
+ self.assertTrue(
+ haystack.startswith(prefix),
+ "{0} does not start with '{1}'".format(repr(haystack), prefix),
+ )
+
+ def assertNoMoreSideEffects(self, obj):
+ self.assertRaises(StopIteration, obj)
+
+ def test_get_metadata_writes_a_single_line(self):
+ client = self._get_client()
+ client.get("some_key")
+ self.assertEqual(1, self.serial.write.call_count)
+ written_line = self.serial.write.call_args[0][0]
+        self.assertEndsWith(written_line.decode("ascii"), "\n")
+ self.assertEqual(1, written_line.count(b"\n"))
+
+ def _get_written_line(self, key="some_key"):
+ client = self._get_client()
+ client.get(key)
+ return self.serial.write.call_args[0][0]
+
+ def test_get_metadata_writes_bytes(self):
+ self.assertIsInstance(self._get_written_line(), bytes)
+
+ def test_get_metadata_line_starts_with_v2(self):
+        line = self._get_written_line()
+        self.assertStartsWith(line.decode("ascii"), "V2")
+
+ def test_get_metadata_uses_get_command(self):
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ self.assertEqual("GET", parts[4])
+
+ def test_get_metadata_base64_encodes_argument(self):
+ key = "my_key"
+ parts = self._get_written_line(key).decode("ascii").strip().split(" ")
+ self.assertEqual(b64e(key), parts[5])
+
+ def test_get_metadata_calculates_length_correctly(self):
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ expected_length = len(" ".join(parts[3:]))
+ self.assertEqual(expected_length, int(parts[1]))
+
+ def test_get_metadata_uses_appropriate_request_id(self):
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ request_id = parts[3]
+ self.assertEqual(8, len(request_id))
+ self.assertEqual(request_id, request_id.lower())
+
+ def test_get_metadata_uses_random_number_for_request_id(self):
+ line = self._get_written_line()
+ request_id = line.decode("ascii").strip().split(" ")[3]
+ self.assertEqual("{0:08x}".format(self.request_id), request_id)
+
+ def test_get_metadata_checksums_correctly(self):
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ expected_checksum = "{0:08x}".format(
+ crc32(" ".join(parts[3:]).encode("utf-8")) & 0xFFFFFFFF
+ )
+ checksum = parts[2]
+ self.assertEqual(expected_checksum, checksum)
+
+ def test_get_metadata_reads_a_line(self):
+ client = self._get_client()
+ client.get("some_key")
+ self.assertEqual(self.metasource_data_len, self.serial.read.call_count)
+
+ def test_get_metadata_returns_valid_value(self):
+ client = self._get_client()
+ value = client.get("some_key")
+ self.assertEqual(self.metadata_value, value)
+
+ def test_get_metadata_throws_exception_for_incorrect_length(self):
+ self.response_parts["length"] = 0
+ client = self._get_client()
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
+
+ def test_get_metadata_throws_exception_for_incorrect_crc(self):
+ self.response_parts["crc"] = "deadbeef"
+ client = self._get_client()
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
+
+ def test_get_metadata_throws_exception_for_request_id_mismatch(self):
+ self.response_parts["request_id"] = "deadbeef"
+ client = self._get_client()
+ client._checksum = lambda _: self.response_parts["crc"]
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
+
+ def test_get_metadata_returns_None_if_value_not_found(self):
+ self.response_parts["payload"] = ""
+ self.response_parts["command"] = "NOTFOUND"
+ self.response_parts["length"] = NOTFOUND_LEN
+ client = self._get_client()
+ client._checksum = lambda _: self.response_parts["crc"]
+ self.assertIsNone(client.get("some_key"))
+
+ def test_negotiate(self):
+ client = self._get_client()
+ reader = ShortReader(self.v2_ok)
+ client.fp.read.side_effect = reader.read
+ client._negotiate()
+ self.assertTrue(reader.emptied)
+
+ def test_negotiate_short_response(self):
+ client = self._get_client()
+ # chopped '\n' from v2_ok.
+ reader = ShortReader(self.v2_ok[:-1] + b"\0")
+ client.fp.read.side_effect = reader.read
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataTimeoutException, client._negotiate
+ )
+ self.assertTrue(reader.emptied)
+
+ def test_negotiate_bad_response(self):
+ client = self._get_client()
+ reader = ShortReader(b"garbage\n" + self.v2_ok)
+ client.fp.read.side_effect = reader.read
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException, client._negotiate
+ )
+ self.assertEqual(self.v2_ok, client.fp.read())
+
+ def test_serial_open_transport(self):
+ client = self._get_serial_client()
+ reader = ShortReader(b"garbage\0" + self.invalid + self.v2_ok)
+ client.fp.read.side_effect = reader.read
+ client.open_transport()
+ self.assertTrue(reader.emptied)
+
+ def test_flush_failure(self):
+ client = self._get_serial_client()
+ reader = ShortReader(
+ b"garbage" + b"\0" + self.failure + self.invalid + self.v2_ok
+ )
+ client.fp.read.side_effect = reader.read
+ client.open_transport()
+ self.assertTrue(reader.emptied)
+
+ def test_flush_many_timeouts(self):
+ client = self._get_serial_client()
+ reader = ShortReader(b"\0" * 100 + self.invalid + self.v2_ok)
+ client.fp.read.side_effect = reader.read
+ client.open_transport()
+ self.assertTrue(reader.emptied)
+
+ def test_list_metadata_returns_list(self):
+ parts = ["foo", "bar"]
+ value = b64e("\n".join(parts))
+ self.response_parts["payload"] = value
+ self.response_parts["crc"] = "40873553"
+ self.response_parts["length"] = SUCCESS_LEN + len(value)
+ client = self._get_client()
+ self.assertEqual(client.list(), parts)
+
+ def test_list_metadata_returns_empty_list_if_no_customer_metadata(self):
+ del self.response_parts["payload"]
+ self.response_parts["length"] = SUCCESS_LEN - 1
+ self.response_parts["crc"] = "14e563ba"
+ client = self._get_client()
+ self.assertEqual(client.list(), [])
+
+
+class TestNetworkConversion(CiTestCase):
+ def test_convert_simple(self):
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.102/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:f5:e4:f5",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "192.168.128.93/22"}
+ ],
+ "mtu": 8500,
+ "mac_address": "90:b8:d0:a5:ff:cd",
+ },
+ ],
+ }
+ found = convert_net(SDC_NICS)
+ self.assertEqual(expected, found)
+
+ def test_convert_simple_alt(self):
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"}
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
+ found = convert_net(SDC_NICS_ALT)
+ self.assertEqual(expected, found)
+
+ def test_convert_simple_dhcp(self):
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [{"type": "dhcp4"}],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
+ found = convert_net(SDC_NICS_DHCP)
+ self.assertEqual(expected, found)
+
+ def test_convert_simple_multi_ip(self):
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ },
+ {"type": "static", "address": "8.12.42.52/24"},
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"},
+ {"type": "static", "address": "10.210.1.151/24"},
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
+ found = convert_net(SDC_NICS_MIP)
+ self.assertEqual(expected, found)
+
+ def test_convert_with_dns(self):
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [{"type": "dhcp4"}],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ {
+ "type": "nameserver",
+ "address": ["8.8.8.8", "8.8.8.1"],
+ "search": ["local"],
+ },
+ ],
+ }
+ found = convert_net(
+ network_data=SDC_NICS_DHCP,
+ dns_servers=["8.8.8.8", "8.8.8.1"],
+ dns_domain="local",
+ )
+ self.assertEqual(expected, found)
+
+ def test_convert_simple_multi_ipv6(self):
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "address": (
+ "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64"
+ ),
+ },
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ },
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"}
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
+ found = convert_net(SDC_NICS_MIP_IPV6)
+ self.assertEqual(expected, found)
+
+ def test_convert_simple_both_ipv4_ipv6(self):
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:ae:64:51",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "2001::10/64",
+ "gateway": "2001::1",
+ "type": "static",
+ },
+ {
+ "address": "8.12.42.51/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ },
+ {"address": "2001::11/64", "type": "static"},
+ {"address": "8.12.42.52/32", "type": "static"},
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"address": "10.210.1.217/24", "type": "static"}
+ ],
+ },
+ ],
+ }
+ found = convert_net(SDC_NICS_IPV4_IPV6)
+ self.assertEqual(expected, found)
+
+ def test_gateways_not_on_all_nics(self):
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:d8:82:b4",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "8.12.42.26/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ }
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:0a:51:31",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"address": "10.210.1.27/24", "type": "static"}
+ ],
+ },
+ ],
+ }
+ found = convert_net(SDC_NICS_SINGLE_GATEWAY)
+ self.assertEqual(expected, found)
+
+ def test_routes_on_all_nics(self):
+ routes = [
+ {"linklocal": False, "dst": "3.0.0.0/8", "gateway": "8.12.42.3"},
+ {"linklocal": False, "dst": "4.0.0.0/8", "gateway": "10.210.1.4"},
+ ]
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:d8:82:b4",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "8.12.42.26/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ "routes": [
+ {
+ "network": "3.0.0.0/8",
+ "gateway": "8.12.42.3",
+ },
+ {
+ "network": "4.0.0.0/8",
+ "gateway": "10.210.1.4",
+ },
+ ],
+ }
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:0a:51:31",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "10.210.1.27/24",
+ "type": "static",
+ "routes": [
+ {
+ "network": "3.0.0.0/8",
+ "gateway": "8.12.42.3",
+ },
+ {
+ "network": "4.0.0.0/8",
+ "gateway": "10.210.1.4",
+ },
+ ],
+ }
+ ],
+ },
+ ],
+ }
+ found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes)
+ self.maxDiff = None
+ self.assertEqual(expected, found)
+
+
+@unittest.skipUnless(
+ get_smartos_environ() == SMARTOS_ENV_KVM,
+ "Only supported on KVM and bhyve guests under SmartOS",
+)
+@unittest.skipUnless(
+ os.access(SERIAL_DEVICE, os.W_OK),
+ "Requires write access to " + SERIAL_DEVICE,
+)
+@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
+class TestSerialConcurrency(CiTestCase):
+ """
+ This class tests locking on an actual serial port, and as such can only
+ be run in a kvm or bhyve guest running on a SmartOS host. A test run on
+ a metadata socket will not be valid because a metadata socket ensures
+ there is only one session over a connection. In contrast, in the
+ absence of proper locking multiple processes opening the same serial
+ port can corrupt each others' exchanges with the metadata server.
+
+ This takes on the order of 2 to 3 minutes to run.
+ """
+
+ allowed_subp = ["mdata-get"]
+
+ def setUp(self):
+ self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop)
+ self.mdata_proc.start()
+ super(TestSerialConcurrency, self).setUp()
+
+ def tearDown(self):
+ # os.kill() rather than mdata_proc.terminate() to avoid console spam.
+ os.kill(self.mdata_proc.pid, signal.SIGKILL)
+ self.mdata_proc.join()
+ super(TestSerialConcurrency, self).tearDown()
+
+ def start_mdata_loop(self):
+ """
+ The mdata-get command is repeatedly run in a separate process so
+ that it may try to race with metadata operations performed in the
+ main test process. Use of mdata-get is better than two processes
+ using the protocol implementation in DataSourceSmartOS because we
+    are testing to be sure that cloud-init and mdata-get respect each
+    other's locks.
+ """
+ rcs = list(range(0, 256))
+ while True:
+ subp(["mdata-get", "sdc:routes"], rcs=rcs)
+
+ def test_all_keys(self):
+ self.assertIsNotNone(self.mdata_proc.pid)
+ ds = DataSourceSmartOS
+ keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()]
+ keys.extend(ds.SMARTOS_ATTRIB_JSON.values())
+
+ client = ds.jmc_client_factory(smartos_type=SMARTOS_ENV_KVM)
+ self.assertIsNotNone(client)
+
+        # The behavior we are testing for was observed with mdata-get
+        # running 10 times at roughly the same time as cloud-init fetched
+        # each key once. cloud-init would regularly see failures before
+        # making it through all keys once.
+ for _ in range(0, 3):
+ for key in keys:
+                # We don't care about the return value, just that it
+                # doesn't throw any exceptions.
+ client.get(key)
+
+ self.assertIsNone(self.mdata_proc.exitcode)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py
new file mode 100644
index 00000000..e1125b65
--- /dev/null
+++ b/tests/unittests/sources/test_upcloud.py
@@ -0,0 +1,331 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import helpers, settings, sources
+from cloudinit.sources.DataSourceUpCloud import (
+ DataSourceUpCloud,
+ DataSourceUpCloudLocal,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+UC_METADATA = json.loads(
+ """
+{
+ "cloud_name": "upcloud",
+ "instance_id": "00322b68-0096-4042-9406-faad61922128",
+ "hostname": "test.example.com",
+ "platform": "servers",
+ "subplatform": "metadata (http://169.254.169.254)",
+ "public_keys": [
+ "ssh-rsa AAAAB.... test1@example.com",
+ "ssh-rsa AAAAB.... test2@example.com"
+ ],
+ "region": "fi-hel2",
+ "network": {
+ "interfaces": [
+ {
+ "index": 1,
+ "ip_addresses": [
+ {
+ "address": "94.237.105.53",
+ "dhcp": true,
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "94.237.104.1",
+ "network": "94.237.104.0/22"
+ },
+ {
+ "address": "94.237.105.50",
+ "dhcp": false,
+ "dns": null,
+ "family": "IPv4",
+ "floating": true,
+ "gateway": "",
+ "network": "94.237.105.50/32"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:36:e7",
+ "network_id": "031457f4-0f8c-483c-96f2-eccede02909c",
+ "type": "public"
+ },
+ {
+ "index": 2,
+ "ip_addresses": [
+ {
+ "address": "10.6.3.27",
+ "dhcp": true,
+ "dns": null,
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "10.6.0.1",
+ "network": "10.6.0.0/22"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:84:cc",
+ "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1",
+ "type": "utility"
+ },
+ {
+ "index": 3,
+ "ip_addresses": [
+ {
+ "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7",
+ "dhcp": true,
+ "dns": [
+ "2a04:3540:53::1",
+ "2a04:3544:53::1"
+ ],
+ "family": "IPv6",
+ "floating": false,
+ "gateway": "2a04:3545:1000:720::1",
+ "network": "2a04:3545:1000:720::/64"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:63:e7",
+ "network_id": "03000000-0000-4000-8046-000000000000",
+ "type": "public"
+ },
+ {
+ "index": 4,
+ "ip_addresses": [
+ {
+ "address": "172.30.1.10",
+ "dhcp": true,
+ "dns": null,
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "172.30.1.1",
+ "network": "172.30.1.0/24"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:8a:e1",
+ "network_id": "035a0a4a-7704-4de5-820d-189fc8132714",
+ "type": "private"
+ }
+ ],
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ]
+ },
+ "storage": {
+ "disks": [
+ {
+ "id": "014efb65-223b-4d44-8f0a-c29535b88dcf",
+ "serial": "014efb65223b4d448f0a",
+ "size": 10240,
+ "type": "disk",
+ "tier": "maxiops"
+ }
+ ]
+ },
+ "tags": [],
+ "user_data": "",
+ "vendor_data": ""
+}
+"""
+)
+
+UC_METADATA[
+ "user_data"
+] = b"""#cloud-config
+runcmd:
+- [touch, /root/cloud-init-worked ]
+"""
+
+MD_URL = "http://169.254.169.254/metadata/v1.json"
+
+
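+# _mock_dmi stands in for ds._get_sysinfo (helpers.upcloud.read_sysinfo);
+# the returned tuple -- roughly (uuid_found, server_uuid) -- mirrors the
+# mocked return in test_returns_false_not_on_upcloud below, and the UUID
+# matches UC_METADATA["instance_id"].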
+def _mock_dmi():
+ return True, "00322b68-0096-4042-9406-faad61922128"
+
+
+class TestUpCloudMetadata(CiTestCase):
+ """
+ Test reading the meta-data
+ """
+
+ def setUp(self):
+ super(TestUpCloudMetadata, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceUpCloud(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ if get_sysinfo:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_sysinfo")
+ def test_returns_false_not_on_upcloud(self, m_read_sysinfo):
+ m_read_sysinfo.return_value = (False, None)
+ ds = self.get_ds(get_sysinfo=None)
+ self.assertEqual(False, ds.get_data())
+ self.assertTrue(m_read_sysinfo.called)
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
+ def test_metadata(self, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ ds = self.get_ds()
+ ds.perform_dhcp_setup = False
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(UC_METADATA.get("user_data"), ds.get_userdata_raw())
+ self.assertEqual(
+ UC_METADATA.get("vendor_data"), ds.get_vendordata_raw()
+ )
+ self.assertEqual(UC_METADATA.get("region"), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name)
+
+ self.assertEqual(
+ UC_METADATA.get("public_keys"), ds.get_public_ssh_keys()
+ )
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+class TestUpCloudNetworkSetup(CiTestCase):
+ """
+    Test reading the meta-data in a networked context
+ """
+
+ def setUp(self):
+ super(TestUpCloudNetworkSetup, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceUpCloudLocal(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ if get_sysinfo:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ def test_network_configured_metadata(
+ self, m_net, m_dhcp, m_fallback_nic, mock_readmd
+ ):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ m_fallback_nic.return_value = "eth1"
+ m_dhcp.return_value = [
+ {
+ "interface": "eth1",
+ "fixed-address": "10.6.3.27",
+ "routers": "10.6.0.1",
+ "subnet-mask": "22",
+ "broadcast-address": "10.6.3.255",
+ }
+ ]
+
+ ds = self.get_ds()
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(m_dhcp.called)
+ m_dhcp.assert_called_with("eth1", None)
+
+ m_net.assert_called_once_with(
+ broadcast="10.6.3.255",
+ interface="eth1",
+ ip="10.6.3.27",
+ prefix_or_mask="22",
+ router="10.6.0.1",
+ static_routes=None,
+ )
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(UC_METADATA.get("region"), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name)
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_network_configuration(self, m_get_by_mac, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ raw_ifaces = UC_METADATA.get("network").get("interfaces")
+ self.assertEqual(4, len(raw_ifaces))
+
+ m_get_by_mac.return_value = {
+ raw_ifaces[0].get("mac"): "eth0",
+ raw_ifaces[1].get("mac"): "eth1",
+ raw_ifaces[2].get("mac"): "eth2",
+ raw_ifaces[3].get("mac"): "eth3",
+ }
+
+ ds = self.get_ds()
+ ds.perform_dhcp_setup = False
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ netcfg = ds.network_config
+
+ self.assertEqual(1, netcfg.get("version"))
+
+ config = netcfg.get("config")
+ self.assertIsInstance(config, list)
+ self.assertEqual(5, len(config))
+ self.assertEqual("physical", config[3].get("type"))
+
+ self.assertEqual(
+ raw_ifaces[2].get("mac"), config[2].get("mac_address")
+ )
+ self.assertEqual(1, len(config[2].get("subnets")))
+ self.assertEqual(
+ "ipv6_dhcpv6-stateless", config[2].get("subnets")[0].get("type")
+ )
+
+ self.assertEqual(2, len(config[0].get("subnets")))
+ self.assertEqual("static", config[0].get("subnets")[1].get("type"))
+
+ dns = config[4]
+ self.assertEqual("nameserver", dns.get("type"))
+ self.assertEqual(2, len(dns.get("address")))
+ self.assertEqual(
+ UC_METADATA.get("network").get("dns")[1], dns.get("address")[1]
+ )
+
+
+class TestUpCloudDatasourceLoading(CiTestCase):
+ def test_get_datasource_list_returns_in_local(self):
+ deps = (sources.DEP_FILESYSTEM,)
+ ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
+ self.assertEqual(ds_list, [DataSourceUpCloudLocal])
+
+ def test_get_datasource_list_returns_in_normal(self):
+ deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)
+ ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
+ self.assertEqual(ds_list, [DataSourceUpCloud])
+
+ def test_list_sources_finds_ds(self):
+ found = sources.list_sources(
+ ["UpCloud"],
+ (sources.DEP_FILESYSTEM, sources.DEP_NETWORK),
+ ["cloudinit.sources"],
+ )
+ self.assertEqual([DataSourceUpCloud], found)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py
new file mode 100644
index 00000000..dd331349
--- /dev/null
+++ b/tests/unittests/sources/test_vmware.py
@@ -0,0 +1,389 @@
+# Copyright (c) 2021 VMware, Inc. All Rights Reserved.
+#
+# Authors: Andrew Kutz <akutz@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import gzip
+import os
+
+import pytest
+
+from cloudinit import dmi, helpers, safeyaml, settings
+from cloudinit.sources import DataSourceVMware
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ populate_dir,
+)
+
+PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name"
+PRODUCT_NAME = "VMware7,1"
+PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB"
+REROOT_FILES = {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME,
+}
+
+VMW_MULTIPLE_KEYS = [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com",
+]
+VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com"
+
+VMW_METADATA_YAML = """instance-id: cloud-vm
+local-hostname: cloud-vm
+network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+"""
+
+VMW_USERDATA_YAML = """## template: jinja
+#cloud-config
+users:
+- default
+"""
+
+VMW_VENDORDATA_YAML = """## template: jinja
+#cloud-config
+runcmd:
+- echo "Hello, world."
+"""
+
+
+@pytest.fixture(autouse=True)
+def common_patches():
+ with mock.patch("cloudinit.util.platform.platform", return_value="Linux"):
+ with mock.patch.multiple(
+ "cloudinit.dmi",
+ is_container=mock.Mock(return_value=False),
+ is_FreeBSD=mock.Mock(return_value=False),
+ ):
+ yield
+
+
+class TestDataSourceVMware(CiTestCase):
+ """
+ Test common functionality that is not transport specific.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMware, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_no_data_access_method(self):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = None
+ ret = ds.get_data()
+ self.assertFalse(ret)
+
+ def test_get_host_info(self):
+ host_info = DataSourceVMware.get_host_info()
+ self.assertTrue(host_info)
+ self.assertTrue(host_info["hostname"])
+ self.assertTrue(host_info["local-hostname"])
+ self.assertTrue(host_info["local_hostname"])
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4])
+
+
+class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase):
+ """
+ Test the envvar transport.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareEnvVars, self).setUp()
+ self.tmp = self.tmp_dir()
+ os.environ[DataSourceVMware.VMX_GUESTINFO] = "1"
+ self.create_system_files()
+
+ def tearDown(self):
+ del os.environ[DataSourceVMware.VMX_GUESTINFO]
+ return super(TestDataSourceVMwareEnvVars, self).tearDown()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = None
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(m_fn_call_count, m_fn.call_count)
+ self.assertEqual(
+ ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR
+ )
+ return ds
+
+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6):
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count)
+ assert_metadata(self, ds, metadata)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_subplatform(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+ self.assertEqual(
+ ds.subplatform,
+ "%s (%s)"
+ % (
+ DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR,
+ DataSourceVMware.get_guestinfo_envvar_key_name("metadata"),
+ ),
+ )
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_only(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_userdata_only(self, m_fn):
+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_vendordata_only(self, m_fn):
+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_base64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_b64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_gzip_base64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gzip+base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_gz_b64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gz+b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_metadata_single_ssh_key(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_SINGLE_KEY
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_metadata_multiple_ssh_keys(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_MULTIPLE_KEYS
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+
+class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase):
+ """
+ Test the guestinfo transport on a VMware platform.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareGuestInfo, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.create_system_files()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = "vmware-rpctool"
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(m_fn_call_count, m_fn.call_count)
+ self.assertEqual(
+ ds.data_access_method,
+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO,
+ )
+ return ds
+
+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6):
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count)
+ assert_metadata(self, ds, metadata)
+
+ def test_ds_valid_on_vmware_platform(self):
+ system_type = dmi.read_dmi_data("system-product-name")
+ self.assertEqual(system_type, PRODUCT_NAME)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_subplatform(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+ self.assertEqual(
+ ds.subplatform,
+ "%s (%s)"
+ % (
+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO,
+ DataSourceVMware.get_guestinfo_key_name("metadata"),
+ ),
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_userdata_only(self, m_fn):
+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_vendordata_only(self, m_fn):
+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_metadata_single_ssh_key(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_SINGLE_KEY
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_metadata_multiple_ssh_keys(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_MULTIPLE_KEYS
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_base64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_b64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_gzip_base64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gzip+base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_gz_b64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gz+b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+
+class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase):
+ """
+ Test the guestinfo transport on a non-VMware platform.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.create_system_files()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_ds_invalid_on_non_vmware_platform(self, m_fn):
+ system_type = dmi.read_dmi_data("system-product-name")
+ self.assertEqual(system_type, None)
+
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = "vmware-rpctool"
+ ret = ds.get_data()
+ self.assertFalse(ret)
+
+
+def assert_metadata(test_obj, ds, metadata):
+ test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id())
+ test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname())
+
+ expected_public_keys = metadata.get("public_keys")
+ if not isinstance(expected_public_keys, list):
+ expected_public_keys = [expected_public_keys]
+
+ test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys())
+ test_obj.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+def get_ds(temp_dir):
+ ds = DataSourceVMware.DataSourceVMware(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir})
+ )
+ ds.vmware_rpctool = "vmware-rpctool"
+ return ds
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py
new file mode 100644
index 00000000..18b2c084
--- /dev/null
+++ b/tests/unittests/sources/test_vultr.py
@@ -0,0 +1,339 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Vultr Metadata API:
+# https://www.vultr.com/metadata/
+
+import json
+
+from cloudinit import helpers, settings
+from cloudinit.net.dhcp import NoDHCPLeaseError
+from cloudinit.sources import DataSourceVultr
+from cloudinit.sources.helpers import vultr
+from tests.unittests.helpers import CiTestCase, mock
+
+# Vultr metadata test data
+VULTR_V1_1 = {
+ "bgp": {
+ "ipv4": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ "ipv6": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ },
+ "hostname": "CLOUDINIT_1",
+ "instanceid": "42506325",
+ "interfaces": [
+ {
+ "ipv4": {
+ "additional": [],
+ "address": "108.61.89.242",
+ "gateway": "108.61.89.1",
+ "netmask": "255.255.255.0",
+ },
+ "ipv6": {
+ "additional": [],
+ "address": "2001:19f0:5:56c2:5400:03ff:fe15:c465",
+ "network": "2001:19f0:5:56c2::",
+ "prefix": "64",
+ },
+ "mac": "56:00:03:15:c4:65",
+ "network-type": "public",
+ }
+ ],
+ "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"],
+ "region": {"regioncode": "EWR"},
+ "user-defined": [],
+ "startup-script": "echo No configured startup script",
+ "raid1-script": "",
+ "user-data": [],
+ "vendor-data": [
+ {
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/"],
+ },
+ "system_info": {"default_user": {"name": "root"}},
+ }
+ ],
+}
+
+VULTR_V1_2 = {
+ "bgp": {
+ "ipv4": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ "ipv6": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ },
+ "hostname": "CLOUDINIT_2",
+ "instance-v2-id": "29bea708-2e6e-480a-90ad-0e6b5d5ad62f",
+ "instanceid": "42872224",
+ "interfaces": [
+ {
+ "ipv4": {
+ "additional": [],
+ "address": "45.76.7.171",
+ "gateway": "45.76.6.1",
+ "netmask": "255.255.254.0",
+ },
+ "ipv6": {
+ "additional": [
+ {"network": "2002:19f0:5:28a7::", "prefix": "64"}
+ ],
+ "address": "2001:19f0:5:28a7:5400:03ff:fe1b:4eca",
+ "network": "2001:19f0:5:28a7::",
+ "prefix": "64",
+ },
+ "mac": "56:00:03:1b:4e:ca",
+ "network-type": "public",
+ },
+ {
+ "ipv4": {
+ "additional": [],
+ "address": "10.1.112.3",
+ "gateway": "",
+ "netmask": "255.255.240.0",
+ },
+ "ipv6": {"additional": [], "network": "", "prefix": ""},
+ "mac": "5a:00:03:1b:4e:ca",
+ "network-type": "private",
+ "network-v2-id": "fbbe2b5b-b986-4396-87f5-7246660ccb64",
+ "networkid": "net5e7155329d730",
+ },
+ ],
+ "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"],
+ "region": {"regioncode": "EWR"},
+ "user-defined": [],
+ "startup-script": "echo No configured startup script",
+ "user-data": [],
+ "vendor-data": [
+ {
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"],
+ },
+ "system_info": {"default_user": {"name": "root"}},
+ }
+ ],
+}
+
+SSH_KEYS_1 = ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"]
+
+INTERFACES = [
+ ["lo", "56:00:03:15:c4:00", "drv", "devid0"],
+ ["dummy0", "56:00:03:15:c4:01", "drv", "devid1"],
+ ["eth1", "56:00:03:15:c4:02", "drv", "devid2"],
+ ["eth0", "56:00:03:15:c4:04", "drv", "devid4"],
+ ["eth2", "56:00:03:15:c4:03", "drv", "devid3"],
+]
+
+# Expected generated objects
+
+# Expected config
+EXPECTED_VULTR_CONFIG = {
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"],
+ },
+ "system_info": {"default_user": {"name": "root"}},
+}
+
+# Expected network config object from generator
+EXPECTED_VULTR_NETWORK_1 = {
+ "version": 1,
+ "config": [
+ {"type": "nameserver", "address": ["108.61.10.10"]},
+ {
+ "name": "eth0",
+ "type": "physical",
+ "mac_address": "56:00:03:15:c4:65",
+ "accept-ra": 1,
+ "subnets": [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ ],
+ },
+ ],
+}
+
+EXPECTED_VULTR_NETWORK_2 = {
+ "version": 1,
+ "config": [
+ {"type": "nameserver", "address": ["108.61.10.10"]},
+ {
+ "name": "eth0",
+ "type": "physical",
+ "mac_address": "56:00:03:1b:4e:ca",
+ "accept-ra": 1,
+ "subnets": [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ {
+ "type": "static6",
+ "control": "auto",
+ "address": "2002:19f0:5:28a7::/64",
+ },
+ ],
+ },
+ {
+ "name": "eth1",
+ "type": "physical",
+ "mac_address": "5a:00:03:1b:4e:ca",
+ "subnets": [
+ {
+ "type": "static",
+ "control": "auto",
+ "address": "10.1.112.3",
+ "netmask": "255.255.240.0",
+ }
+ ],
+ },
+ ],
+}
+
+
+INTERFACE_MAP = {
+ "56:00:03:15:c4:65": "eth0",
+ "56:00:03:1b:4e:ca": "eth0",
+ "5a:00:03:1b:4e:ca": "eth1",
+}
+
+
+EPHEMERAL_USED = ""
+
+
+class TestDataSourceVultr(CiTestCase):
+ def setUp(self):
+ super(TestDataSourceVultr, self).setUp()
+
+ # Stored as a dict to make it easier to maintain
+ raw1 = json.dumps(VULTR_V1_1["vendor-data"][0])
+ raw2 = json.dumps(VULTR_V1_2["vendor-data"][0])
+
+ # Make expected format
+ VULTR_V1_1["vendor-data"] = [raw1]
+ VULTR_V1_2["vendor-data"] = [raw2]
+
+ self.tmp = self.tmp_dir()
+
+ # Test the datasource itself
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
+ @mock.patch("cloudinit.sources.helpers.vultr.get_metadata")
+ def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap):
+ mock_getmeta.return_value = VULTR_V1_2
+ mock_isvultr.return_value = True
+ mock_netmap.return_value = INTERFACE_MAP
+
+ source = DataSourceVultr.DataSourceVultr(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+
+        # Test that data is successfully fetched
+ self.assertEqual(True, source._get_data())
+
+ # Test instance id
+ self.assertEqual("42872224", source.metadata["instanceid"])
+
+ # Test hostname
+ self.assertEqual("CLOUDINIT_2", source.metadata["local-hostname"])
+
+ # Test ssh keys
+ self.assertEqual(SSH_KEYS_1, source.metadata["public-keys"])
+
+ # Test vendor data generation
+ orig_val = self.maxDiff
+ self.maxDiff = None
+
+ vendordata = source.vendordata_raw
+
+ # Test vendor config
+ self.assertEqual(
+ EXPECTED_VULTR_CONFIG,
+ json.loads(vendordata[0].replace("#cloud-config", "")),
+ )
+
+ self.maxDiff = orig_val
+
+ # Test network config generation
+ self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config)
+
+ # Test network config generation
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_network_config(self, mock_netmap):
+ mock_netmap.return_value = INTERFACE_MAP
+ interf = VULTR_V1_1["interfaces"]
+
+ self.assertEqual(
+ EXPECTED_VULTR_NETWORK_1, vultr.generate_network_config(interf)
+ )
+
+ # Test Private Networking config generation
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_private_network_config(self, mock_netmap):
+ mock_netmap.return_value = INTERFACE_MAP
+ interf = VULTR_V1_2["interfaces"]
+
+ self.assertEqual(
+ EXPECTED_VULTR_NETWORK_2, vultr.generate_network_config(interf)
+ )
+
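+    # ephemeral_init stands in for EphemeralDHCPv4.__init__ in the test
+    # below: it records which interface the datasource tried in
+    # EPHEMERAL_USED and raises NoDHCPLeaseError for everything except
+    # eth0, so the interface-seeking logic must walk the interface list
+    # until it reaches eth0.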
+ def ephemeral_init(self, iface="", connectivity_url_data=None):
+        global EPHEMERAL_USED
+        EPHEMERAL_USED = iface
+ if iface == "eth0":
+ return
+ raise NoDHCPLeaseError("Generic for testing")
+
+ # Test interface seeking to ensure we are able to find the correct one
+ @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__init__", ephemeral_init)
+ @mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
+ @mock.patch("cloudinit.sources.helpers.vultr.read_metadata")
+ @mock.patch("cloudinit.net.get_interfaces")
+ def test_interface_seek(
+ self, mock_get_interfaces, mock_read_metadata, mock_isvultr
+ ):
+ mock_read_metadata.side_effect = NoDHCPLeaseError(
+ "Generic for testing"
+ )
+ mock_isvultr.return_value = True
+ mock_get_interfaces.return_value = INTERFACES
+
+ source = DataSourceVultr.DataSourceVultr(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+
+ try:
+ source._get_data()
+ except Exception:
+ pass
+
+        self.assertEqual(EPHEMERAL_USED, INTERFACES[3][0])
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/__init__.py b/tests/unittests/sources/vmware/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unittests/sources/vmware/__init__.py
diff --git a/tests/unittests/sources/vmware/test_custom_script.py b/tests/unittests/sources/vmware/test_custom_script.py
new file mode 100644
index 00000000..9b3e079f
--- /dev/null
+++ b/tests/unittests/sources/vmware/test_custom_script.py
@@ -0,0 +1,114 @@
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2017-2019 VMware INC.
+#
+# Author: Maitreyee Saikia <msaikia@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import stat
+
+from cloudinit import util
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+ CustomScriptConstant,
+ CustomScriptNotFound,
+ PostCustomScript,
+ PreCustomScript,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+
+class TestVmwareCustomScript(CiTestCase):
+ def setUp(self):
+ self.tmpDir = self.tmp_dir()
+ # Mock the tmpDir as the root dir in VM.
+ self.execDir = os.path.join(self.tmpDir, ".customization")
+ self.execScript = os.path.join(self.execDir, ".customize.sh")
+
+ def test_prepare_custom_script(self):
+ """
+        This test is designed to verify the behavior based on the presence
+        of the custom script, mainly the scenario where a custom script is
+        expected but was not properly copied; a "CustomScriptNotFound"
+        exception is raised in such cases.
+ """
+ # Custom script does not exist.
+ preCust = PreCustomScript("random-vmw-test", self.tmpDir)
+ self.assertEqual("random-vmw-test", preCust.scriptname)
+ self.assertEqual(self.tmpDir, preCust.directory)
+ self.assertEqual(
+ self.tmp_path("random-vmw-test", self.tmpDir), preCust.scriptpath
+ )
+ with self.assertRaises(CustomScriptNotFound):
+ preCust.prepare_script()
+
+ # Custom script exists.
+ custScript = self.tmp_path("test-cust", self.tmpDir)
+ util.write_file(custScript, "test-CR-strip\r\r")
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir
+ ):
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript
+ ):
+ postCust = PostCustomScript(
+ "test-cust", self.tmpDir, self.tmpDir
+ )
+ self.assertEqual("test-cust", postCust.scriptname)
+ self.assertEqual(self.tmpDir, postCust.directory)
+ self.assertEqual(custScript, postCust.scriptpath)
+ postCust.prepare_script()
+
+ # Custom script is copied with exec privilege
+ self.assertTrue(os.path.exists(self.execScript))
+ st = os.stat(self.execScript)
+ self.assertTrue(st.st_mode & stat.S_IEXEC)
+ with open(self.execScript, "r") as f:
+ content = f.read()
+ self.assertEqual(content, "test-CR-strip")
+            # Check that all carriage returns are stripped from the script.
+            self.assertNotIn("\r", content)
+
+ def test_execute_post_cust(self):
+ """
+        This test is designed to verify the behavior after executing the
+        post-customization script.
+ """
+ # Prepare the customize package
+ postCustRun = self.tmp_path("post-customize-guest.sh", self.tmpDir)
+ util.write_file(postCustRun, "This is the script to run post cust")
+ userScript = self.tmp_path("test-cust", self.tmpDir)
+ util.write_file(userScript, "This is the post cust script")
+
+ # Mock the cc_scripts_per_instance dir and marker file.
+ # Create another tmp dir for cc_scripts_per_instance.
+ ccScriptDir = self.tmp_dir()
+ ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh")
+ markerFile = os.path.join(self.tmpDir, ".markerFile")
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir
+ ):
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript
+ ):
+ with mock.patch.object(
+ CustomScriptConstant,
+ "POST_CUSTOM_PENDING_MARKER",
+ markerFile,
+ ):
+ postCust = PostCustomScript(
+ "test-cust", self.tmpDir, ccScriptDir
+ )
+ postCust.execute()
+ # Check cc_scripts_per_instance and marker file
+ # are created.
+ self.assertTrue(os.path.exists(ccScript))
+ with open(ccScript, "r") as f:
+ content = f.read()
+ self.assertEqual(
+ content, "This is the script to run post cust"
+ )
+ self.assertTrue(os.path.exists(markerFile))
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_guestcust_util.py b/tests/unittests/sources/vmware/test_guestcust_util.py
new file mode 100644
index 00000000..fc63bcae
--- /dev/null
+++ b/tests/unittests/sources/vmware/test_guestcust_util.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2019 Canonical Ltd.
+# Copyright (C) 2019 VMware INC.
+#
+# Author: Xiaofeng Wang <xiaofengw@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import subp
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
+from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
+ get_tools_config,
+ set_gc_status,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+
+class TestGuestCustUtil(CiTestCase):
+ def test_get_tools_config_not_installed(self):
+ """
+ This test is designed to verify the behavior if vmware-toolbox-cmd
+ is not installed.
+ """
+ with mock.patch.object(subp, "which", return_value=None):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), "defaultVal"
+ )
+
+ def test_get_tools_config_internal_exception(self):
+ """
+        This test is designed to verify the behavior when an internal
+        exception is raised.
+ """
+ with mock.patch.object(subp, "which", return_value="/dummy/path"):
+ with mock.patch.object(
+ subp,
+ "subp",
+ return_value=("key=value", b""),
+ side_effect=subp.ProcessExecutionError(
+ "subp failed", exit_code=99
+ ),
+ ):
+ # verify return value is 'defaultVal', not 'value'.
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"),
+ "defaultVal",
+ )
+
+ def test_get_tools_config_normal(self):
+ """
+        This test is designed to verify that the value can be parsed from
+        a "key = value" line in the given [section].
+ """
+ with mock.patch.object(subp, "which", return_value="/dummy/path"):
+ # value is not blank
+ with mock.patch.object(
+ subp, "subp", return_value=("key = value ", b"")
+ ):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), "value"
+ )
+ # value is blank
+ with mock.patch.object(subp, "subp", return_value=("key = ", b"")):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), ""
+ )
+ # value contains =
+ with mock.patch.object(
+ subp, "subp", return_value=("key=Bar=Wark", b"")
+ ):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"),
+ "Bar=Wark",
+ )
+
+ # value contains specific characters
+ with mock.patch.object(
+ subp, "subp", return_value=("[a] b.c_d=e-f", b"")
+ ):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), "e-f"
+ )
+
+ def test_set_gc_status(self):
+ """
+ This test is designed to verify the behavior of set_gc_status
+ """
+ # config is None, return None
+ self.assertEqual(set_gc_status(None, "Successful"), None)
+
+ # post gc status is NO, return None
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertEqual(set_gc_status(conf, "Successful"), None)
+
+ # post gc status is YES, subp is called to execute command
+ cf._insertKey("MISC|POST-GC-STATUS", "YES")
+ conf = Config(cf)
+ with mock.patch.object(
+ subp, "subp", return_value=("ok", b"")
+ ) as mockobj:
+ self.assertEqual(set_gc_status(conf, "Successful"), ("ok", b""))
+ mockobj.assert_called_once_with(
+ ["vmware-rpctool", "info-set guestinfo.gc.status Successful"],
+ rcs=[0],
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py
new file mode 100644
index 00000000..38d45d0e
--- /dev/null
+++ b/tests/unittests/sources/vmware/test_vmware_config_file.py
@@ -0,0 +1,635 @@
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2016 VMware INC.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+# Pengpeng Sun <pengpengs@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import sys
+import tempfile
+import textwrap
+
+from cloudinit.sources.DataSourceOVF import (
+ get_network_config_from_conf,
+ read_vmware_imc,
+)
+from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_file import (
+ ConfigFile as WrappedConfigFile,
+)
+from cloudinit.sources.helpers.vmware.imc.config_nic import (
+ NicConfigurator,
+ gen_subnet,
+)
+from tests.unittests.helpers import CiTestCase, cloud_init_project_dir
+
+logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
+logger = logging.getLogger(__name__)
+
+
+def ConfigFile(path: str):
+ return WrappedConfigFile(cloud_init_project_dir(path))
+
+
+class TestVmwareConfigFile(CiTestCase):
+ def test_utility_methods(self):
+ """Tests basic utility methods of ConfigFile class"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ cf.clear()
+
+ self.assertEqual(0, len(cf), "clear size")
+
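+        # _insertKey strips surrounding whitespace from both the key and
+        # the value, so " PASSWORD|-PASS " is stored as "PASSWORD|-PASS".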
+ cf._insertKey(" PASSWORD|-PASS ", " foo ")
+ cf._insertKey("BAR", " ")
+
+ self.assertEqual(2, len(cf), "insert size")
+ self.assertEqual("foo", cf["PASSWORD|-PASS"], "password")
+ self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
+ self.assertFalse(
+ cf.should_keep_current_value("PASSWORD|-PASS"), "keepPassword"
+ )
+ self.assertFalse(
+ cf.should_remove_current_value("PASSWORD|-PASS"), "removePassword"
+ )
+ self.assertFalse("FOO" in cf, "hasFoo")
+ self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo")
+ self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo")
+ self.assertTrue("BAR" in cf, "hasBar")
+ self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar")
+ self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar")
+
+ def test_datasource_instance_id(self):
+ """Tests instance id for the DatasourceOVF"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ instance_id_prefix = "iid-vmware-"
+
+ conf = Config(cf)
+
+ (md1, _, _) = read_vmware_imc(conf)
+ self.assertIn(instance_id_prefix, md1["instance-id"])
+ self.assertEqual(md1["instance-id"], "iid-vmware-imc")
+
+ (md2, _, _) = read_vmware_imc(conf)
+ self.assertIn(instance_id_prefix, md2["instance-id"])
+ self.assertEqual(md2["instance-id"], "iid-vmware-imc")
+
+ self.assertEqual(md2["instance-id"], md1["instance-id"])
+
+ def test_configfile_static_2nics(self):
+ """Tests Config class for a configuration with two static NICs."""
+ cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
+
+ conf = Config(cf)
+
+ self.assertEqual("myhost1", conf.host_name, "hostName")
+ self.assertEqual("Africa/Abidjan", conf.timezone, "tz")
+ self.assertTrue(conf.utc, "utc")
+
+ self.assertEqual(
+ ["10.20.145.1", "10.20.145.2"], conf.name_servers, "dns"
+ )
+ self.assertEqual(
+ ["eng.vmware.com", "proxy.vmware.com"],
+ conf.dns_suffixes,
+ "suffixes",
+ )
+
+ nics = conf.nics
+ ipv40 = nics[0].staticIpv4
+
+ self.assertEqual(2, len(nics), "nics")
+ self.assertEqual("NIC1", nics[0].name, "nic0")
+ self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0")
+ self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0")
+ self.assertEqual("10.20.87.154", ipv40[0].ip, "ipv4Addr0")
+ self.assertEqual("255.255.252.0", ipv40[0].netmask, "ipv4Mask0")
+ self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0")
+ self.assertEqual("10.20.87.253", ipv40[0].gateways[0], "ipv4Gw0_0")
+ self.assertEqual("10.20.87.105", ipv40[0].gateways[1], "ipv4Gw0_1")
+
+ self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0")
+ self.assertEqual(
+ "fc00:10:20:87::154", nics[0].staticIpv6[0].ip, "ipv6Addr0"
+ )
+
+ self.assertEqual("NIC2", nics[1].name, "nic1")
+        self.assertFalse(nics[1].staticIpv6, "ipv61 dhcp")
+
+ def test_config_file_dhcp_2nics(self):
+ """Tests Config class for a configuration with two DHCP NICs."""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ conf = Config(cf)
+ nics = conf.nics
+ self.assertEqual(2, len(nics), "nics")
+ self.assertEqual("NIC1", nics[0].name, "nic0")
+ self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0")
+ self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0")
+
+ def test_config_password(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ cf._insertKey("PASSWORD|-PASS", "test-password")
+ cf._insertKey("PASSWORD|RESET", "no")
+
+ conf = Config(cf)
+ self.assertEqual("test-password", conf.admin_password, "password")
+ self.assertFalse(conf.reset_password, "do not reset password")
+
+ def test_config_reset_passwd(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ cf._insertKey("PASSWORD|-PASS", "test-password")
+ cf._insertKey("PASSWORD|RESET", "random")
+
+ conf = Config(cf)
+        with self.assertRaises(ValueError):
+            # Accessing the property raises for an invalid RESET value,
+            # so no assertion after this line would ever run.
+            conf.reset_password
+
+ cf.clear()
+ cf._insertKey("PASSWORD|RESET", "yes")
+ self.assertEqual(1, len(cf), "insert size")
+
+ conf = Config(cf)
+ self.assertTrue(conf.reset_password, "reset password")
+
+ def test_get_config_nameservers(self):
+ """Tests DNS and nameserver settings in a configuration."""
+ cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
+
+ config = Config(cf)
+
+ network_config = get_network_config_from_conf(config, False)
+
+ self.assertEqual(1, network_config.get("version"))
+
+ config_types = network_config.get("config")
+ name_servers = None
+ dns_suffixes = None
+
+        for cfg_entry in config_types:
+            if cfg_entry.get("type") == "nameserver":
+                name_servers = cfg_entry.get("address")
+                dns_suffixes = cfg_entry.get("search")
+                break
+
+ self.assertEqual(["10.20.145.1", "10.20.145.2"], name_servers, "dns")
+ self.assertEqual(
+ ["eng.vmware.com", "proxy.vmware.com"], dns_suffixes, "suffixes"
+ )
+
+ def test_gen_subnet(self):
+ """Tests if gen_subnet properly calculates network subnet from
+ IPv4 address and netmask"""
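+        # gen_subnet effectively ANDs the address with the netmask, e.g.
+        # 10.20.87.253 & 255.255.252.0 -> 10.20.84.0 (87 & 252 == 84).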
+ ip_subnet_list = [
+ ["10.20.87.253", "255.255.252.0", "10.20.84.0"],
+ ["10.20.92.105", "255.255.252.0", "10.20.92.0"],
+ ["192.168.0.10", "255.255.0.0", "192.168.0.0"],
+ ]
+ for entry in ip_subnet_list:
+ self.assertEqual(
+ entry[2],
+ gen_subnet(entry[0], entry[1]),
+ "Subnet for a specified ip and netmask",
+ )
+
+ def test_get_config_dns_suffixes(self):
+ """Tests if get_network_config_from_conf properly
+ generates nameservers and dns settings from a
+ specified configuration"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ config = Config(cf)
+
+ network_config = get_network_config_from_conf(config, False)
+
+ self.assertEqual(1, network_config.get("version"))
+
+ config_types = network_config.get("config")
+ name_servers = None
+ dns_suffixes = None
+
+        for cfg_entry in config_types:
+            if cfg_entry.get("type") == "nameserver":
+                name_servers = cfg_entry.get("address")
+                dns_suffixes = cfg_entry.get("search")
+                break
+
+ self.assertEqual([], name_servers, "dns")
+ self.assertEqual(["eng.vmware.com"], dns_suffixes, "suffixes")
+
+ def test_get_nics_list_dhcp(self):
+ """Tests if NicConfigurator properly calculates network subnets
+ for a configuration with a list of DHCP NICs"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ config = Config(cf)
+
+ nicConfigurator = NicConfigurator(config.nics, False)
+ nics_cfg_list = nicConfigurator.generate()
+
+ self.assertEqual(2, len(nics_cfg_list), "number of config elements")
+
+ nic1 = {"name": "NIC1"}
+ nic2 = {"name": "NIC2"}
+ for cfg in nics_cfg_list:
+ if cfg.get("name") == nic1.get("name"):
+ nic1.update(cfg)
+ elif cfg.get("name") == nic2.get("name"):
+ nic2.update(cfg)
+
+ self.assertEqual("physical", nic1.get("type"), "type of NIC1")
+ self.assertEqual("NIC1", nic1.get("name"), "name of NIC1")
+ self.assertEqual(
+ "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1"
+ )
+ subnets = nic1.get("subnets")
+ self.assertEqual(1, len(subnets), "number of subnets for NIC1")
+ subnet = subnets[0]
+ self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC1")
+ self.assertEqual("auto", subnet.get("control"), "NIC1 Control type")
+
+ self.assertEqual("physical", nic2.get("type"), "type of NIC2")
+ self.assertEqual("NIC2", nic2.get("name"), "name of NIC2")
+ self.assertEqual(
+ "00:50:56:a6:5a:de", nic2.get("mac_address"), "mac address of NIC2"
+ )
+ subnets = nic2.get("subnets")
+ self.assertEqual(1, len(subnets), "number of subnets for NIC2")
+ subnet = subnets[0]
+ self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC2")
+ self.assertEqual("auto", subnet.get("control"), "NIC2 Control type")
+
+ def test_get_nics_list_static(self):
+ """Tests if NicConfigurator properly calculates network subnets
+ for a configuration with 2 static NICs"""
+ cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
+
+ config = Config(cf)
+
+ nicConfigurator = NicConfigurator(config.nics, False)
+ nics_cfg_list = nicConfigurator.generate()
+
+ self.assertEqual(2, len(nics_cfg_list), "number of elements")
+
+ nic1 = {"name": "NIC1"}
+ nic2 = {"name": "NIC2"}
+ route_list = []
+ for cfg in nics_cfg_list:
+ cfg_type = cfg.get("type")
+ if cfg_type == "physical":
+ if cfg.get("name") == nic1.get("name"):
+ nic1.update(cfg)
+ elif cfg.get("name") == nic2.get("name"):
+ nic2.update(cfg)
+
+ self.assertEqual("physical", nic1.get("type"), "type of NIC1")
+ self.assertEqual("NIC1", nic1.get("name"), "name of NIC1")
+ self.assertEqual(
+ "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1"
+ )
+
+ subnets = nic1.get("subnets")
+ self.assertEqual(2, len(subnets), "Number of subnets")
+
+ static_subnet = []
+ static6_subnet = []
+
+ for subnet in subnets:
+ subnet_type = subnet.get("type")
+ if subnet_type == "static":
+ static_subnet.append(subnet)
+ elif subnet_type == "static6":
+ static6_subnet.append(subnet)
+ else:
+ self.assertEqual(True, False, "Unknown type")
+ if "route" in subnet:
+ for route in subnet.get("routes"):
+ route_list.append(route)
+
+ self.assertEqual(1, len(static_subnet), "Number of static subnet")
+ self.assertEqual(1, len(static6_subnet), "Number of static6 subnet")
+
+ subnet = static_subnet[0]
+ self.assertEqual(
+ "10.20.87.154",
+ subnet.get("address"),
+ "IPv4 address of static subnet",
+ )
+ self.assertEqual(
+ "255.255.252.0", subnet.get("netmask"), "NetMask of static subnet"
+ )
+ self.assertEqual(
+ "auto", subnet.get("control"), "control for static subnet"
+ )
+
+ subnet = static6_subnet[0]
+ self.assertEqual(
+ "fc00:10:20:87::154",
+ subnet.get("address"),
+ "IPv6 address of static subnet",
+ )
+ self.assertEqual(
+ "64", subnet.get("netmask"), "NetMask of static6 subnet"
+ )
+
+ route_set = set(["10.20.87.253", "10.20.87.105", "192.168.0.10"])
+ for route in route_list:
+ self.assertEqual(10000, route.get("metric"), "metric of route")
+ gateway = route.get("gateway")
+ if gateway in route_set:
+ route_set.discard(gateway)
+ else:
+ self.assertEqual(True, False, "invalid gateway %s" % (gateway))
+
+ self.assertEqual("physical", nic2.get("type"), "type of NIC2")
+ self.assertEqual("NIC2", nic2.get("name"), "name of NIC2")
+ self.assertEqual(
+ "00:50:56:a6:ef:7d", nic2.get("mac_address"), "mac address of NIC2"
+ )
+
+ subnets = nic2.get("subnets")
+ self.assertEqual(1, len(subnets), "Number of subnets for NIC2")
+
+ subnet = subnets[0]
+ self.assertEqual("static", subnet.get("type"), "Subnet type")
+ self.assertEqual(
+ "192.168.6.102", subnet.get("address"), "Subnet address"
+ )
+ self.assertEqual(
+ "255.255.0.0", subnet.get("netmask"), "Subnet netmask"
+ )
+
+ def test_custom_script(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertIsNone(conf.custom_script_name)
+ cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script")
+ conf = Config(cf)
+ self.assertEqual("test-script", conf.custom_script_name)
+
+ def test_post_gc_status(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertFalse(conf.post_gc_status)
+ cf._insertKey("MISC|POST-GC-STATUS", "YES")
+ conf = Config(cf)
+ self.assertTrue(conf.post_gc_status)
+
+ def test_no_default_run_post_script(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertFalse(conf.default_run_post_script)
+ cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO")
+ conf = Config(cf)
+ self.assertFalse(conf.default_run_post_script)
+
+ def test_yes_default_run_post_script(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes")
+ conf = Config(cf)
+ self.assertTrue(conf.default_run_post_script)
+
+
+class TestVmwareNetConfig(CiTestCase):
+ """Test conversion of vmware config to cloud-init config."""
+
+ maxDiff = None
+
+ def _get_NicConfigurator(self, text):
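+        # Write the raw IMC config text to a temporary file so ConfigFile
+        # can parse it, then build a NicConfigurator from the parsed NICs.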
+ fp = None
+ try:
+ with tempfile.NamedTemporaryFile(
+ mode="w", dir=self.tmp_dir(), delete=False
+ ) as fp:
+ fp.write(text)
+ fp.close()
+ cfg = Config(ConfigFile(fp.name))
+ return NicConfigurator(cfg.nics, use_system_devices=False)
+ finally:
+ if fp:
+ os.unlink(fp.name)
+
+ def test_non_primary_nic_without_gateway(self):
+ """A non primary nic set is not required to have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+ def test_non_primary_nic_with_gateway(self):
+ """A non primary nic set can have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ GATEWAY = 10.20.87.253
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ "routes": [
+ {
+ "type": "route",
+ "destination": "10.20.84.0/22",
+ "gateway": "10.20.87.253",
+ "metric": 10000,
+ }
+ ],
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+    def test_cust_non_primary_nic_with_gateway(self):
+        """A customer non-primary NIC can have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = static-debug-vm
+ DOMAINNAME = cluster.local
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:ac:d1:8a
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 100.115.223.75
+ NETMASK = 255.255.255.0
+ GATEWAY = 100.115.223.254
+
+
+ [DNS]
+ DNSFROMDHCP=no
+
+ NAMESERVER|1 = 8.8.8.8
+
+ [DATETIME]
+ UTC = yes
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:ac:d1:8a",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "100.115.223.75",
+ "netmask": "255.255.255.0",
+ "routes": [
+ {
+ "type": "route",
+ "destination": "100.115.223.0/24",
+ "gateway": "100.115.223.254",
+ "metric": 10000,
+ }
+ ],
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+ def test_a_primary_nic_with_gateway(self):
+ """A primary nic set can have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ PRIMARY = true
+ GATEWAY = 10.20.87.253
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ "gateway": "10.20.87.253",
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+ def test_meta_data(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertIsNone(conf.meta_data_name)
+ cf._insertKey("CLOUDINIT|METADATA", "test-metadata")
+ conf = Config(cf)
+ self.assertEqual("test-metadata", conf.meta_data_name)
+
+ def test_user_data(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertIsNone(conf.user_data_name)
+ cf._insertKey("CLOUDINIT|USERDATA", "test-userdata")
+ conf = Config(cf)
+ self.assertEqual("test-userdata", conf.user_data_name)
+
+
+# vi: ts=4 expandtab