From 039c40f9b3d88ee8158604bb18ca4bf2fb5d5e51 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Fri, 3 Dec 2021 13:11:46 -0700
Subject: Reorganize unit test locations under tests/unittests (#1126)

This standardizes unit test file locations under tests/unittests/ such
that any source file located at cloudinit/path/to/file.py has a
corresponding unit test file at tests/unittests/path/to/test_file.py.

Noteworthy Comments:
====================

Four duplicate test file pairs existed:
test_{gpg,util,cc_mounts,cc_resolv_conf}.py
Each pair has been merged into a single file. This is a break in git
history for these files.

The test suite appears to depend on test order: changing the order
causes some tests to fail. This should be rectified, but for now some
tests in tests/unittests/config/test_set_passwords.py have been
modified.

A helper class whose name started with "Test" caused pytest to collect
it as a test case and warn that the class has an __init__()
constructor. Silenced by renaming the class.

# helpers.py is imported in many test files, import paths change
cloudinit/tests/helpers.py -> tests/unittests/helpers.py

# Move directories:
cloudinit/distros/tests -> tests/unittests/distros
cloudinit/cmd/devel/tests -> tests/unittests/cmd/devel
cloudinit/cmd/tests -> tests/unittests/cmd
cloudinit/sources/helpers/tests -> tests/unittests/sources/helpers
cloudinit/sources/tests -> tests/unittests/sources
cloudinit/net/tests -> tests/unittests/net
cloudinit/config/tests -> tests/unittests/config
cloudinit/analyze/tests -> tests/unittests/analyze

# Standardize tests already in tests/unittests/
test_datasource -> sources
test_distros -> distros
test_vmware -> sources/vmware
test_handler -> config # this contains cloudconfig module tests
test_runs -> runs
---
 tests/unittests/sources/__init__.py                |    0
 tests/unittests/sources/helpers/test_netlink.py    |  480 +++
 tests/unittests/sources/helpers/test_openstack.py  |   49 +
 tests/unittests/sources/test_aliyun.py             |  248 ++
 tests/unittests/sources/test_altcloud.py           |  450 +++
 tests/unittests/sources/test_azure.py              | 3394 ++++++++++++++++++++
 tests/unittests/sources/test_azure_helper.py       | 1441 +++++++++
 tests/unittests/sources/test_cloudsigma.py         |  137 +
 tests/unittests/sources/test_cloudstack.py         |  186 ++
 tests/unittests/sources/test_common.py             |  121 +
 tests/unittests/sources/test_configdrive.py        |  844 +++++
 tests/unittests/sources/test_digitalocean.py       |  372 +++
 tests/unittests/sources/test_ec2.py                |  978 ++++++
 tests/unittests/sources/test_exoscale.py           |  211 ++
 tests/unittests/sources/test_gce.py                |  388 +++
 tests/unittests/sources/test_hetzner.py            |  142 +
 tests/unittests/sources/test_ibmcloud.py           |  343 ++
 tests/unittests/sources/test_init.py               |  771 +++++
 tests/unittests/sources/test_lxd.py                |  376 +++
 tests/unittests/sources/test_maas.py               |  200 ++
 tests/unittests/sources/test_nocloud.py            |  393 +++
 tests/unittests/sources/test_opennebula.py         |  977 ++++++
 tests/unittests/sources/test_openstack.py          |  724 +++++
 tests/unittests/sources/test_oracle.py             |  797 +++++
 tests/unittests/sources/test_ovf.py                | 1046 ++++++
 tests/unittests/sources/test_rbx.py                |  238 ++
 tests/unittests/sources/test_scaleway.py           |  473 +++
 tests/unittests/sources/test_smartos.py            | 1163 +++++++
 tests/unittests/sources/test_upcloud.py            |  314 ++
 tests/unittests/sources/test_vmware.py             |  391 +++
 tests/unittests/sources/test_vultr.py              |  337 ++
 tests/unittests/sources/vmware/__init__.py         |    0
 .../unittests/sources/vmware/test_custom_script.py |  109 +
 .../sources/vmware/test_guestcust_util.py          |   98 +
 .../sources/vmware/test_vmware_config_file.py      |  545 ++++
 35 files changed, 18736 insertions(+)
 create mode 100644 tests/unittests/sources/__init__.py
 create mode 100644 tests/unittests/sources/helpers/test_netlink.py
 create mode 100644 tests/unittests/sources/helpers/test_openstack.py
 create mode 100644 tests/unittests/sources/test_aliyun.py
 create mode 100644 tests/unittests/sources/test_altcloud.py
 create mode 100644 tests/unittests/sources/test_azure.py
 create mode 100644 tests/unittests/sources/test_azure_helper.py
 create mode 100644 tests/unittests/sources/test_cloudsigma.py
 create mode 100644 tests/unittests/sources/test_cloudstack.py
 create mode 100644 tests/unittests/sources/test_common.py
 create mode 100644 tests/unittests/sources/test_configdrive.py
 create mode 100644 tests/unittests/sources/test_digitalocean.py
 create mode 100644 tests/unittests/sources/test_ec2.py
 create mode 100644 tests/unittests/sources/test_exoscale.py
 create mode 100644 tests/unittests/sources/test_gce.py
 create mode 100644 tests/unittests/sources/test_hetzner.py
 create mode 100644 tests/unittests/sources/test_ibmcloud.py
 create mode 100644 tests/unittests/sources/test_init.py
 create mode 100644 tests/unittests/sources/test_lxd.py
 create mode 100644 tests/unittests/sources/test_maas.py
 create mode 100644 tests/unittests/sources/test_nocloud.py
 create mode 100644 tests/unittests/sources/test_opennebula.py
 create mode 100644 tests/unittests/sources/test_openstack.py
 create mode 100644 tests/unittests/sources/test_oracle.py
 create mode 100644 tests/unittests/sources/test_ovf.py
 create mode 100644 tests/unittests/sources/test_rbx.py
 create mode 100644 tests/unittests/sources/test_scaleway.py
 create mode 100644 tests/unittests/sources/test_smartos.py
 create mode 100644 tests/unittests/sources/test_upcloud.py
 create mode 100644 tests/unittests/sources/test_vmware.py
 create mode 100644 tests/unittests/sources/test_vultr.py
 create mode 100644 tests/unittests/sources/vmware/__init__.py
 create mode 100644 tests/unittests/sources/vmware/test_custom_script.py
 create mode 100644 tests/unittests/sources/vmware/test_guestcust_util.py
 create mode 100644 tests/unittests/sources/vmware/test_vmware_config_file.py

diff --git a/tests/unittests/sources/__init__.py b/tests/unittests/sources/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unittests/sources/helpers/test_netlink.py b/tests/unittests/sources/helpers/test_netlink.py
new file mode 100644
index 00000000..478ce375
--- /dev/null
+++ b/tests/unittests/sources/helpers/test_netlink.py
@@ -0,0 +1,480 @@
+# Author: Tamilmani Manoharan
+#
+# This file is part of cloud-init. See LICENSE file for license information.
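+#
+# The tests below hand-craft netlink messages with struct.pack_into().
+# A sketch of the assumed layout: "=LHHLL" packs the 16-byte nlmsghdr
+# (nlmsg_len, nlmsg_type, nlmsg_flags, nlmsg_seq, nlmsg_pid), and
+# "HH4sHHc" at RTATTR_START_OFFSET packs an IFLA_IFNAME rtattr
+# (len=8, type=3, 4-byte name) followed by an IFLA_OPERSTATE rtattr
+# (len=5, type=16, 1-byte state).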
+
+from tests.unittests.helpers import CiTestCase, mock
+import socket
+import struct
+import codecs
+from cloudinit.sources.helpers.netlink import (
+    NetlinkCreateSocketError, create_bound_netlink_socket, read_netlink_socket,
+    read_rta_oper_state, unpack_rta_attr, wait_for_media_disconnect_connect,
+    wait_for_nic_attach_event, wait_for_nic_detach_event,
+    OPER_DOWN, OPER_UP, OPER_DORMANT, OPER_LOWERLAYERDOWN, OPER_NOTPRESENT,
+    OPER_TESTING, OPER_UNKNOWN, RTATTR_START_OFFSET, RTM_NEWLINK, RTM_DELLINK,
+    RTM_SETLINK, RTM_GETLINK, MAX_SIZE)
+
+
+def int_to_bytes(i):
+    '''convert integer to binary: eg: 1 to \x01'''
+    hex_value = '{0:x}'.format(i)
+    hex_value = '0' * (len(hex_value) % 2) + hex_value
+    return codecs.decode(hex_value, 'hex_codec')
+
+
+class TestCreateBoundNetlinkSocket(CiTestCase):
+
+    @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
+    def test_socket_error_on_create(self, m_socket):
+        '''NetlinkCreateSocketError is raised when socket creation errors.'''
+        m_socket.side_effect = socket.error("Fake socket failure")
+        with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr:
+            create_bound_netlink_socket()
+        self.assertEqual(
+            'Exception during netlink socket create: Fake socket failure',
+            str(ctx_mgr.exception))
+
+
+class TestReadNetlinkSocket(CiTestCase):
+
+    @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
+    @mock.patch('cloudinit.sources.helpers.netlink.select.select')
+    def test_read_netlink_socket(self, m_select, m_socket):
+        '''read_netlink_socket able to receive data'''
+        data = 'netlinktest'
+        m_select.return_value = [m_socket], None, None
+        m_socket.recv.return_value = data
+        recv_data = read_netlink_socket(m_socket, 2)
+        m_select.assert_called_with([m_socket], [], [], 2)
+        m_socket.recv.assert_called_with(MAX_SIZE)
+        self.assertIsNotNone(recv_data)
+        self.assertEqual(recv_data, data)
+
+    @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
+    @mock.patch('cloudinit.sources.helpers.netlink.select.select')
+    def test_netlink_read_timeout(self, m_select, m_socket):
+        '''read_netlink_socket should timeout if nothing to read'''
+        m_select.return_value = [], None, None
+        data = read_netlink_socket(m_socket, 1)
+        m_select.assert_called_with([m_socket], [], [], 1)
+        self.assertEqual(m_socket.recv.call_count, 0)
+        self.assertIsNone(data)
+
+    def test_read_invalid_socket(self):
+        '''read_netlink_socket raises assert error if socket is invalid'''
+        socket = None
+        with self.assertRaises(AssertionError) as context:
+            read_netlink_socket(socket, 1)
+        self.assertTrue('netlink socket is none' in str(context.exception))
+
+
+class TestParseNetlinkMessage(CiTestCase):
+
+    def test_read_rta_oper_state(self):
+        '''read_rta_oper_state could parse netlink message and extract data'''
+        ifname = "eth0"
+        bytes = ifname.encode("utf-8")
+        buf = bytearray(48)
+        struct.pack_into("HH4sHHc", buf, RTATTR_START_OFFSET, 8, 3, bytes, 5,
+                         16, int_to_bytes(OPER_DOWN))
+        interface_state = read_rta_oper_state(buf)
+        self.assertEqual(interface_state.ifname, ifname)
+        self.assertEqual(interface_state.operstate, OPER_DOWN)
+
+    def test_read_none_data(self):
+        '''read_rta_oper_state raises assert error if data is none'''
+        data = None
+        with self.assertRaises(AssertionError) as context:
+            read_rta_oper_state(data)
+        self.assertEqual('data is none', str(context.exception))
+
+    def test_read_invalid_rta_operstate_none(self):
+        '''read_rta_oper_state returns none if operstate is none'''
+        ifname
= "eth0" + buf = bytearray(40) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4s", buf, RTATTR_START_OFFSET, 8, 3, bytes) + interface_state = read_rta_oper_state(buf) + self.assertIsNone(interface_state) + + def test_read_invalid_rta_ifname_none(self): + '''read_rta_oper_state returns none if ifname is none''' + buf = bytearray(40) + struct.pack_into("HHc", buf, RTATTR_START_OFFSET, 5, 16, + int_to_bytes(OPER_DOWN)) + interface_state = read_rta_oper_state(buf) + self.assertIsNone(interface_state) + + def test_read_invalid_data_len(self): + '''raise assert error if data size is smaller than required size''' + buf = bytearray(32) + with self.assertRaises(AssertionError) as context: + read_rta_oper_state(buf) + self.assertTrue('length of data is smaller than RTATTR_START_OFFSET' in + str(context.exception)) + + def test_unpack_rta_attr_none_data(self): + '''unpack_rta_attr raises assert error if data is none''' + data = None + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, RTATTR_START_OFFSET) + self.assertTrue('data is none' in str(context.exception)) + + def test_unpack_rta_attr_invalid_offset(self): + '''unpack_rta_attr raises assert error if offset is invalid''' + data = bytearray(48) + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, "offset") + self.assertTrue('offset is not integer' in str(context.exception)) + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, 31) + self.assertTrue('rta offset is less than expected length' in + str(context.exception)) + + +@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') +@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') +class TestNicAttachDetach(CiTestCase): + with_logs = True + + def _media_switch_data(self, ifname, msg_type, operstate): + '''construct netlink data with specified fields''' + if ifname and operstate is not None: + data = bytearray(48) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(operstate)) + elif ifname: + data = bytearray(40) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) + elif operstate: + data = bytearray(40) + struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, + int_to_bytes(operstate)) + struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) + return data + + def test_nic_attached_oper_down(self, m_read_netlink_socket, m_socket): + '''Test for a new nic attached''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + m_read_netlink_socket.side_effect = [data_op_down] + ifread = wait_for_nic_attach_event(m_socket, []) + self.assertEqual(m_read_netlink_socket.call_count, 1) + self.assertEqual(ifname, ifread) + + def test_nic_attached_oper_up(self, m_read_netlink_socket, m_socket): + '''Test for a new nic attached''' + ifname = "eth0" + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_op_up] + ifread = wait_for_nic_attach_event(m_socket, []) + self.assertEqual(m_read_netlink_socket.call_count, 1) + self.assertEqual(ifname, ifread) + + def test_nic_attach_ignore_existing(self, m_read_netlink_socket, m_socket): + '''Test that we read only the interfaces we are interested in.''' + data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN) + data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN) + m_read_netlink_socket.side_effect = [data_eth0, 
data_eth1] + ifread = wait_for_nic_attach_event(m_socket, ["eth0"]) + self.assertEqual(m_read_netlink_socket.call_count, 2) + self.assertEqual("eth1", ifread) + + def test_nic_attach_read_first(self, m_read_netlink_socket, m_socket): + '''Test that we read only the interfaces we are interested in.''' + data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN) + data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN) + m_read_netlink_socket.side_effect = [data_eth0, data_eth1] + ifread = wait_for_nic_attach_event(m_socket, ["eth1"]) + self.assertEqual(m_read_netlink_socket.call_count, 1) + self.assertEqual("eth0", ifread) + + def test_nic_detached(self, m_read_netlink_socket, m_socket): + '''Test for an existing nic detached''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_DELLINK, OPER_DOWN) + m_read_netlink_socket.side_effect = [data_op_down] + ifread = wait_for_nic_detach_event(m_socket) + self.assertEqual(m_read_netlink_socket.call_count, 1) + self.assertEqual(ifname, ifread) + + +@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') +@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') +class TestWaitForMediaDisconnectConnect(CiTestCase): + with_logs = True + + def _media_switch_data(self, ifname, msg_type, operstate): + '''construct netlink data with specified fields''' + if ifname and operstate is not None: + data = bytearray(48) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(operstate)) + elif ifname: + data = bytearray(40) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) + elif operstate: + data = bytearray(40) + struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, + int_to_bytes(operstate)) + struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) + return data + + def test_media_down_up_scenario(self, m_read_netlink_socket, + m_socket): + '''Test for media down up sequence for required interface name''' + ifname = "eth0" + # construct data for Oper State down + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + # construct data for Oper State up + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_op_down, data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 2) + + def test_wait_for_media_switch_diff_interface(self, m_read_netlink_socket, + m_socket): + '''wait_for_media_disconnect_connect ignores unexpected interfaces. + + The first two messages are for other interfaces and last two are for + expected interface. 
So the function exit only after receiving last + 2 messages and therefore the call count for m_read_netlink_socket + has to be 4 + ''' + other_ifname = "eth1" + expected_ifname = "eth0" + data_op_down_eth1 = self._media_switch_data( + other_ifname, RTM_NEWLINK, OPER_DOWN + ) + data_op_up_eth1 = self._media_switch_data( + other_ifname, RTM_NEWLINK, OPER_UP + ) + data_op_down_eth0 = self._media_switch_data( + expected_ifname, RTM_NEWLINK, OPER_DOWN + ) + data_op_up_eth0 = self._media_switch_data( + expected_ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [ + data_op_down_eth1, + data_op_up_eth1, + data_op_down_eth0, + data_op_up_eth0 + ] + wait_for_media_disconnect_connect(m_socket, expected_ifname) + self.assertIn('Ignored netlink event on interface %s' % other_ifname, + self.logs.getvalue()) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect ignores GETLINK events. + + The first two messages are for oper down and up for RTM_GETLINK type + which netlink module will ignore. The last 2 messages are RTM_NEWLINK + with oper state down and up messages. Therefore the call count for + m_read_netlink_socket has to be 4 ignoring first 2 messages + of RTM_GETLINK + ''' + ifname = "eth0" + data_getlink_down = self._media_switch_data( + ifname, RTM_GETLINK, OPER_DOWN + ) + data_getlink_up = self._media_switch_data( + ifname, RTM_GETLINK, OPER_UP + ) + data_newlink_down = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_DOWN + ) + data_newlink_up = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_UP + ) + m_read_netlink_socket.side_effect = [ + data_getlink_down, + data_getlink_up, + data_newlink_down, + data_newlink_up + ] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect ignores SETLINK events. + + The first two messages are for oper down and up for RTM_GETLINK type + which it will ignore. 3rd and 4th messages are RTM_NEWLINK with down + and up messages. This function should exit after 4th messages since it + sees down->up scenario. 
So the call count for m_read_netlink_socket + has to be 4 ignoring first 2 messages of RTM_GETLINK and + last 2 messages of RTM_NEWLINK + ''' + ifname = "eth0" + data_setlink_down = self._media_switch_data( + ifname, RTM_SETLINK, OPER_DOWN + ) + data_setlink_up = self._media_switch_data( + ifname, RTM_SETLINK, OPER_UP + ) + data_newlink_down = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_DOWN + ) + data_newlink_up = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_UP + ) + m_read_netlink_socket.side_effect = [ + data_setlink_down, + data_setlink_up, + data_newlink_down, + data_newlink_up, + data_newlink_down, + data_newlink_up + ] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_netlink_invalid_switch_scenario(self, m_read_netlink_socket, + m_socket): + '''returns only if it receives UP event after a DOWN event''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + data_op_dormant = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_DORMANT + ) + data_op_notpresent = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_NOTPRESENT + ) + data_op_lowerdown = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN + ) + data_op_testing = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_TESTING + ) + data_op_unknown = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_UNKNOWN + ) + m_read_netlink_socket.side_effect = [ + data_op_up, data_op_up, + data_op_dormant, data_op_up, + data_op_notpresent, data_op_up, + data_op_lowerdown, data_op_up, + data_op_testing, data_op_up, + data_op_unknown, data_op_up, + data_op_down, data_op_up + ] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 14) + + def test_netlink_valid_inbetween_transitions(self, m_read_netlink_socket, + m_socket): + '''wait_for_media_disconnect_connect handles in between transitions''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + data_op_dormant = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_DORMANT) + data_op_unknown = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_UNKNOWN) + m_read_netlink_socket.side_effect = [ + data_op_down, data_op_dormant, + data_op_unknown, data_op_up + ] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect should handle invalid operstates. + + The function should not fail and return even if it receives invalid + operstates. It always should wait for down up sequence. 
+ ''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7) + m_read_netlink_socket.side_effect = [ + data_op_invalid, data_op_up, + data_op_down, data_op_invalid, + data_op_up + ] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 5) + + def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect handle none netlink socket.''' + socket = None + ifname = "eth0" + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(socket, ifname) + self.assertTrue('netlink socket is none' in str(context.exception)) + + def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect handle none interface name''' + ifname = None + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertTrue('interface name is none' in str(context.exception)) + ifname = "" + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertTrue('interface name cannot be empty' in + str(context.exception)) + + def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket): + ''' wait_for_media_disconnect_connect handles invalid rta data''' + ifname = "eth0" + data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN) + data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None) + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [ + data_invalid1, data_invalid2, data_op_down, data_op_up + ] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket): + '''Read multiple messages in single receive call''' + ifname = "eth0" + bytes = ifname.encode("utf-8") + data = bytearray(96) + struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into( + "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(OPER_DOWN) + ) + struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into( + "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8, + 3, bytes, 5, 16, int_to_bytes(OPER_UP) + ) + m_read_netlink_socket.return_value = data + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 1) + + def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket): + '''Read partial messages in receive call''' + ifname = "eth0" + bytes = ifname.encode("utf-8") + data1 = bytearray(112) + data2 = bytearray(32) + struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into( + "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(OPER_DOWN) + ) + struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into( + "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN) + ) + struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into( + "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP) + ) + m_read_netlink_socket.side_effect = [data1, data2] + wait_for_media_disconnect_connect(m_socket, ifname) + 
self.assertEqual(m_read_netlink_socket.call_count, 2) diff --git a/tests/unittests/sources/helpers/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py new file mode 100644 index 00000000..74743e7c --- /dev/null +++ b/tests/unittests/sources/helpers/test_openstack.py @@ -0,0 +1,49 @@ +# This file is part of cloud-init. See LICENSE file for license information. +# ./cloudinit/sources/helpers/tests/test_openstack.py +from unittest import mock + +from cloudinit.sources.helpers import openstack +from tests.unittests import helpers as test_helpers + + +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) +class TestConvertNetJson(test_helpers.CiTestCase): + + def test_phy_types(self): + """Verify the different known physical types are handled.""" + # network_data.json example from + # https://docs.openstack.org/nova/latest/user/metadata.html + mac0 = "fa:16:3e:9c:bf:3d" + net_json = { + "links": [ + {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a", + "mtu": None, "type": "bridge", + "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"} + ], + "networks": [ + {"id": "network0", "link": "tapcd9f6d46-4a", + "network_id": "99e88329-f20d-4741-9593-25bf07847b16", + "type": "ipv4_dhcp"} + ], + "services": [{"address": "8.8.8.8", "type": "dns"}] + } + macs = {mac0: 'eth0'} + + expected = { + 'version': 1, + 'config': [ + {'mac_address': 'fa:16:3e:9c:bf:3d', + 'mtu': None, 'name': 'eth0', + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical'}, + {'address': '8.8.8.8', 'type': 'nameserver'}]} + + for t in openstack.KNOWN_PHYSICAL_TYPES: + net_json["links"][0]["type"] = t + self.assertEqual( + expected, + openstack.convert_net_json(network_json=net_json, + known_macs=macs)) diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py new file mode 100644 index 00000000..00209913 --- /dev/null +++ b/tests/unittests/sources/test_aliyun.py @@ -0,0 +1,248 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
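+#
+# These tests run DataSourceAliYun against an httpretty-mocked metadata
+# service.  register_mock_metaserver() (below) walks a dict and registers
+# one GET handler per key: nested dicts become crawlable index listings
+# while str/list values become leaf documents, mirroring the EC2-style
+# metadata tree the datasource crawls.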
+ +import functools +import httpretty +import os +from unittest import mock + +from cloudinit import helpers +from cloudinit.sources import DataSourceAliYun as ay +from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config +from tests.unittests import helpers as test_helpers + +DEFAULT_METADATA = { + 'instance-id': 'aliyun-test-vm-00', + 'eipv4': '10.0.0.1', + 'hostname': 'test-hostname', + 'image-id': 'm-test', + 'launch-index': '0', + 'mac': '00:16:3e:00:00:00', + 'network-type': 'vpc', + 'private-ipv4': '192.168.0.1', + 'serial-number': 'test-string', + 'vpc-cidr-block': '192.168.0.0/16', + 'vpc-id': 'test-vpc', + 'vswitch-id': 'test-vpc', + 'vswitch-cidr-block': '192.168.0.0/16', + 'zone-id': 'test-zone-1', + 'ntp-conf': {'ntp_servers': [ + 'ntp1.aliyun.com', + 'ntp2.aliyun.com', + 'ntp3.aliyun.com']}, + 'source-address': ['http://mirrors.aliyun.com', + 'http://mirrors.aliyuncs.com'], + 'public-keys': {'key-pair-1': {'openssh-key': 'ssh-rsa AAAAB3...'}, + 'key-pair-2': {'openssh-key': 'ssh-rsa AAAAB3...'}} +} + +DEFAULT_USERDATA = """\ +#cloud-config + +hostname: localhost""" + + +def register_mock_metaserver(base_url, data): + def register_helper(register, base_url, body): + if isinstance(body, str): + register(base_url, body) + elif isinstance(body, list): + register(base_url.rstrip('/'), '\n'.join(body) + '\n') + elif isinstance(body, dict): + if not body: + register(base_url.rstrip('/') + '/', 'not found', + status_code=404) + vals = [] + for k, v in body.items(): + if isinstance(v, (str, list)): + suffix = k.rstrip('/') + else: + suffix = k.rstrip('/') + '/' + vals.append(suffix) + url = base_url.rstrip('/') + '/' + suffix + register_helper(register, url, v) + register(base_url, '\n'.join(vals) + '\n') + + register = functools.partial(httpretty.register_uri, httpretty.GET) + register_helper(register, base_url, data) + + +class TestAliYunDatasource(test_helpers.HttprettyTestCase): + def setUp(self): + super(TestAliYunDatasource, self).setUp() + cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}} + distro = {} + paths = helpers.Paths({'run_dir': self.tmp_dir()}) + self.ds = ay.DataSourceAliYun(cfg, distro, paths) + self.metadata_address = self.ds.metadata_urls[0] + + @property + def default_metadata(self): + return DEFAULT_METADATA + + @property + def default_userdata(self): + return DEFAULT_USERDATA + + @property + def metadata_url(self): + return os.path.join( + self.metadata_address, + self.ds.min_metadata_version, 'meta-data') + '/' + + @property + def userdata_url(self): + return os.path.join( + self.metadata_address, + self.ds.min_metadata_version, 'user-data') + + # EC2 provides an instance-identity document which must return 404 here + # for this test to pass. 
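+    # (register_mock_metaserver() maps an empty dict to a 404 'not found'
+    # response, so returning {} here produces that 404.)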
+ @property + def default_identity(self): + return {} + + @property + def identity_url(self): + return os.path.join(self.metadata_address, + self.ds.min_metadata_version, + 'dynamic', 'instance-identity') + + def regist_default_server(self): + register_mock_metaserver(self.metadata_url, self.default_metadata) + register_mock_metaserver(self.userdata_url, self.default_userdata) + register_mock_metaserver(self.identity_url, self.default_identity) + + def _test_get_data(self): + self.assertEqual(self.ds.metadata, self.default_metadata) + self.assertEqual(self.ds.userdata_raw, + self.default_userdata.encode('utf8')) + + def _test_get_sshkey(self): + pub_keys = [v['openssh-key'] for (_, v) in + self.default_metadata['public-keys'].items()] + self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys) + + def _test_get_iid(self): + self.assertEqual(self.default_metadata['instance-id'], + self.ds.get_instance_id()) + + def _test_host_name(self): + self.assertEqual(self.default_metadata['hostname'], + self.ds.get_hostname()) + + @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") + def test_with_mock_server(self, m_is_aliyun): + m_is_aliyun.return_value = True + self.regist_default_server() + ret = self.ds.get_data() + self.assertEqual(True, ret) + self.assertEqual(1, m_is_aliyun.call_count) + self._test_get_data() + self._test_get_sshkey() + self._test_get_iid() + self._test_host_name() + self.assertEqual('aliyun', self.ds.cloud_name) + self.assertEqual('ec2', self.ds.platform) + self.assertEqual( + 'metadata (http://100.100.100.200)', self.ds.subplatform) + + @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") + def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): + """If is_aliyun returns false, then get_data should return False.""" + m_is_aliyun.return_value = False + self.regist_default_server() + ret = self.ds.get_data() + self.assertEqual(1, m_is_aliyun.call_count) + self.assertEqual(False, ret) + + def test_parse_public_keys(self): + public_keys = {} + self.assertEqual(ay.parse_public_keys(public_keys), []) + + public_keys = {'key-pair-0': 'ssh-key-0'} + self.assertEqual(ay.parse_public_keys(public_keys), + [public_keys['key-pair-0']]) + + public_keys = {'key-pair-0': 'ssh-key-0', 'key-pair-1': 'ssh-key-1'} + self.assertEqual(set(ay.parse_public_keys(public_keys)), + set([public_keys['key-pair-0'], + public_keys['key-pair-1']])) + + public_keys = {'key-pair-0': ['ssh-key-0', 'ssh-key-1']} + self.assertEqual(ay.parse_public_keys(public_keys), + public_keys['key-pair-0']) + + public_keys = {'key-pair-0': {'openssh-key': []}} + self.assertEqual(ay.parse_public_keys(public_keys), []) + + public_keys = {'key-pair-0': {'openssh-key': 'ssh-key-0'}} + self.assertEqual(ay.parse_public_keys(public_keys), + [public_keys['key-pair-0']['openssh-key']]) + + public_keys = {'key-pair-0': {'openssh-key': ['ssh-key-0', + 'ssh-key-1']}} + self.assertEqual(ay.parse_public_keys(public_keys), + public_keys['key-pair-0']['openssh-key']) + + def test_route_metric_calculated_without_device_number(self): + """Test that route-metric code works without `device-number` + + `device-number` is part of EC2 metadata, but not supported on aliyun. + Attempting to access it will raise a KeyError. 
+ + LP: #1917875 + """ + netcfg = convert_ec2_metadata_network_config( + {"interfaces": {"macs": { + "06:17:04:d7:26:09": { + "interface-id": "eni-e44ef49e", + }, + "06:17:04:d7:26:08": { + "interface-id": "eni-e44ef49f", + } + }}}, + macs_to_nics={ + '06:17:04:d7:26:09': 'eth0', + '06:17:04:d7:26:08': 'eth1', + } + ) + + met0 = netcfg['ethernets']['eth0']['dhcp4-overrides']['route-metric'] + met1 = netcfg['ethernets']['eth1']['dhcp4-overrides']['route-metric'] + + # route-metric numbers should be 100 apart + assert 100 == abs(met0 - met1) + + +class TestIsAliYun(test_helpers.CiTestCase): + ALIYUN_PRODUCT = 'Alibaba Cloud ECS' + read_dmi_data_expected = [mock.call('system-product-name')] + + @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") + def test_true_on_aliyun_product(self, m_read_dmi_data): + """Should return true if the dmi product data has expected value.""" + m_read_dmi_data.return_value = self.ALIYUN_PRODUCT + ret = ay._is_aliyun() + self.assertEqual(self.read_dmi_data_expected, + m_read_dmi_data.call_args_list) + self.assertEqual(True, ret) + + @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") + def test_false_on_empty_string(self, m_read_dmi_data): + """Should return false on empty value returned.""" + m_read_dmi_data.return_value = "" + ret = ay._is_aliyun() + self.assertEqual(self.read_dmi_data_expected, + m_read_dmi_data.call_args_list) + self.assertEqual(False, ret) + + @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") + def test_false_on_unknown_string(self, m_read_dmi_data): + """Should return false on an unrelated string.""" + m_read_dmi_data.return_value = "cubs win" + ret = ay._is_aliyun() + self.assertEqual(self.read_dmi_data_expected, + m_read_dmi_data.call_args_list) + self.assertEqual(False, ret) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_altcloud.py b/tests/unittests/sources/test_altcloud.py new file mode 100644 index 00000000..7384c104 --- /dev/null +++ b/tests/unittests/sources/test_altcloud.py @@ -0,0 +1,450 @@ +# Copyright (C) 2009-2010 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joe VLcek +# +# This file is part of cloud-init. See LICENSE file for license information. + +''' +This test file exercises the code in sources DataSourceAltCloud.py +''' + +import os +import shutil +import tempfile + +from cloudinit import dmi +from cloudinit import helpers +from cloudinit import subp +from cloudinit import util + +from tests.unittests.helpers import CiTestCase, mock + +import cloudinit.sources.DataSourceAltCloud as dsac + +OS_UNAME_ORIG = getattr(os, 'uname') + + +def _write_user_data_files(mount_dir, value): + ''' + Populate the deltacloud_user_data_file the user_data_file + which would be populated with user data. 
+    '''
+    deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
+    user_data_file = mount_dir + '/user-data.txt'
+
+    udfile = open(deltacloud_user_data_file, 'w')
+    udfile.write(value)
+    udfile.close()
+    os.chmod(deltacloud_user_data_file, 0o664)
+
+    udfile = open(user_data_file, 'w')
+    udfile.write(value)
+    udfile.close()
+    os.chmod(user_data_file, 0o664)
+
+
+def _remove_user_data_files(mount_dir,
+                            dc_file=True,
+                            non_dc_file=True):
+    '''
+    Remove the test files: deltacloud_user_data_file and
+    user_data_file
+    '''
+    deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
+    user_data_file = mount_dir + '/user-data.txt'
+
+    # Ignore any failures removing files that are already gone.
+    if dc_file:
+        try:
+            os.remove(deltacloud_user_data_file)
+        except OSError:
+            pass
+
+    if non_dc_file:
+        try:
+            os.remove(user_data_file)
+        except OSError:
+            pass
+
+
+def _dmi_data(expected):
+    '''
+    Spoof the data received over DMI
+    '''
+    def _data(key):
+        return expected
+
+    return _data
+
+
+class TestGetCloudType(CiTestCase):
+    '''Test to exercise method: DataSourceAltCloud.get_cloud_type()'''
+
+    with_logs = True
+
+    def setUp(self):
+        '''Set up.'''
+        super(TestGetCloudType, self).setUp()
+        self.tmp = self.tmp_dir()
+        self.paths = helpers.Paths({'cloud_dir': self.tmp})
+        self.dmi_data = dmi.read_dmi_data
+        # We have a different code path for arm to deal with LP1243287
+        # We have to switch arch to x86_64 to avoid test failure
+        force_arch('x86_64')
+
+    def tearDown(self):
+        # Reset
+        dmi.read_dmi_data = self.dmi_data
+        force_arch()
+
+    def test_cloud_info_file_ioerror(self):
+        """Return UNKNOWN when /etc/sysconfig/cloud-info exists but errors."""
+        self.assertEqual('/etc/sysconfig/cloud-info', dsac.CLOUD_INFO_FILE)
+        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+        # Attempting to read the directory generates IOError
+        with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.tmp):
+            self.assertEqual('UNKNOWN', dsrc.get_cloud_type())
+        self.assertIn(
+            "[Errno 21] Is a directory: '%s'" % self.tmp,
+            self.logs.getvalue())
+
+    def test_cloud_info_file(self):
+        """Return uppercase stripped content from /etc/sysconfig/cloud-info."""
+        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+        cloud_info = self.tmp_path('cloud-info', dir=self.tmp)
+        util.write_file(cloud_info, ' OverRiDdeN CloudType ')
+        with mock.patch.object(dsac, 'CLOUD_INFO_FILE', cloud_info):
+            self.assertEqual('OVERRIDDEN CLOUDTYPE', dsrc.get_cloud_type())
+
+    def test_rhev(self):
+        '''
+        Test method get_cloud_type() for RHEVm systems.
+        Forcing read_dmi_data return to match a RHEVm system: RHEV Hypervisor
+        '''
+        dmi.read_dmi_data = _dmi_data('RHEV')
+        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+        self.assertEqual('RHEV', dsrc.get_cloud_type())
+
+    def test_vsphere(self):
+        '''
+        Test method get_cloud_type() for vSphere systems.
+        Forcing read_dmi_data return to match a vSphere system:
+        VMware Virtual Platform
+        '''
+        dmi.read_dmi_data = _dmi_data('VMware Virtual Platform')
+        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+        self.assertEqual('VSPHERE', dsrc.get_cloud_type())
+
+    def test_unknown(self):
+        '''
+        Test method get_cloud_type() for unknown systems.
+        Forcing read_dmi_data return to match an unrecognized return.
+ ''' + dmi.read_dmi_data = _dmi_data('Unrecognized Platform') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) + + +class TestGetDataCloudInfoFile(CiTestCase): + ''' + Test to exercise method: DataSourceAltCloud.get_data() + With a contrived CLOUD_INFO_FILE + ''' + def setUp(self): + '''Set up.''' + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + self.cloud_info_file = self.tmp_path('cloud-info', dir=self.tmp) + + def test_rhev(self): + '''Success Test module get_data() forcing RHEV.''' + + util.write_file(self.cloud_info_file, 'RHEV') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_rhevm = lambda: True + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(True, dsrc.get_data()) + self.assertEqual('altcloud', dsrc.cloud_name) + self.assertEqual('altcloud', dsrc.platform_type) + self.assertEqual('rhev (/dev/fd0)', dsrc.subplatform) + + def test_vsphere(self): + '''Success Test module get_data() forcing VSPHERE.''' + + util.write_file(self.cloud_info_file, 'VSPHERE') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_vsphere = lambda: True + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(True, dsrc.get_data()) + self.assertEqual('altcloud', dsrc.cloud_name) + self.assertEqual('altcloud', dsrc.platform_type) + self.assertEqual('vsphere (unknown)', dsrc.subplatform) + + def test_fail_rhev(self): + '''Failure Test module get_data() forcing RHEV.''' + + util.write_file(self.cloud_info_file, 'RHEV') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_rhevm = lambda: False + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(False, dsrc.get_data()) + + def test_fail_vsphere(self): + '''Failure Test module get_data() forcing VSPHERE.''' + + util.write_file(self.cloud_info_file, 'VSPHERE') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_vsphere = lambda: False + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(False, dsrc.get_data()) + + def test_unrecognized(self): + '''Failure Test module get_data() forcing unrecognized.''' + + util.write_file(self.cloud_info_file, 'unrecognized') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(False, dsrc.get_data()) + + +class TestGetDataNoCloudInfoFile(CiTestCase): + ''' + Test to exercise method: DataSourceAltCloud.get_data() + Without a CLOUD_INFO_FILE + ''' + def setUp(self): + '''Set up.''' + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + self.dmi_data = dmi.read_dmi_data + dsac.CLOUD_INFO_FILE = \ + 'no such file' + # We have a different code path for arm to deal with LP1243287 + # We have to switch arch to x86_64 to avoid test failure + force_arch('x86_64') + + def tearDown(self): + # Reset + dsac.CLOUD_INFO_FILE = \ + '/etc/sysconfig/cloud-info' + dmi.read_dmi_data = self.dmi_data + # Return back to original arch + force_arch() + + def test_rhev_no_cloud_file(self): + '''Test No cloud info file module get_data() forcing RHEV.''' + + dmi.read_dmi_data = _dmi_data('RHEV Hypervisor') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_rhevm = lambda: True + self.assertEqual(True, dsrc.get_data()) + + def 
test_vsphere_no_cloud_file(self): + '''Test No cloud info file module get_data() forcing VSPHERE.''' + + dmi.read_dmi_data = _dmi_data('VMware Virtual Platform') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_vsphere = lambda: True + self.assertEqual(True, dsrc.get_data()) + + def test_failure_no_cloud_file(self): + '''Test No cloud info file module get_data() forcing unrecognized.''' + + dmi.read_dmi_data = _dmi_data('Unrecognized Platform') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.get_data()) + + +class TestUserDataRhevm(CiTestCase): + ''' + Test to exercise method: DataSourceAltCloud.user_data_rhevm() + ''' + def setUp(self): + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + self.mount_dir = self.tmp_dir() + _write_user_data_files(self.mount_dir, 'test user data') + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.modprobe_floppy', + 'm_modprobe_floppy', return_value=None) + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle', + 'm_udevadm_settle', return_value=('', '')) + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.util.mount_cb', + 'm_mount_cb') + + def test_mount_cb_fails(self): + '''Test user_data_rhevm() where mount_cb fails.''' + + self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_rhevm()) + + def test_modprobe_fails(self): + '''Test user_data_rhevm() where modprobe fails.''' + + self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError( + "Failed modprobe") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_rhevm()) + + def test_no_modprobe_cmd(self): + '''Test user_data_rhevm() with no modprobe command.''' + + self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError( + "No such file or dir") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_rhevm()) + + def test_udevadm_fails(self): + '''Test user_data_rhevm() where udevadm fails.''' + + self.m_udevadm_settle.side_effect = subp.ProcessExecutionError( + "Failed settle.") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_rhevm()) + + def test_no_udevadm_cmd(self): + '''Test user_data_rhevm() with no udevadm command.''' + + self.m_udevadm_settle.side_effect = OSError("No such file or dir") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_rhevm()) + + +class TestUserDataVsphere(CiTestCase): + ''' + Test to exercise method: DataSourceAltCloud.user_data_vsphere() + ''' + def setUp(self): + '''Set up.''' + self.tmp = self.tmp_dir() + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.mount_dir = tempfile.mkdtemp() + + _write_user_data_files(self.mount_dir, 'test user data') + + def tearDown(self): + # Reset + + _remove_user_data_files(self.mount_dir) + + # Attempt to remove the temp dir ignoring errors + try: + shutil.rmtree(self.mount_dir) + except OSError: + pass + + dsac.CLOUD_INFO_FILE = \ + '/etc/sysconfig/cloud-info' + + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") + def test_user_data_vsphere_no_cdrom(self, m_mount_cb, m_find_devs_with): + '''Test user_data_vsphere() where mount_cb fails.''' + + m_mount_cb.return_value = [] + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + 
self.assertEqual(False, dsrc.user_data_vsphere()) + self.assertEqual(0, m_mount_cb.call_count) + + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") + def test_user_data_vsphere_mcb_fail(self, m_mount_cb, m_find_devs_with): + '''Test user_data_vsphere() where mount_cb fails.''' + + m_find_devs_with.return_value = ["/dev/mock/cdrom"] + m_mount_cb.side_effect = util.MountFailedError("Unable To mount") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_vsphere()) + self.assertEqual(1, m_find_devs_with.call_count) + self.assertEqual(1, m_mount_cb.call_count) + + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") + def test_user_data_vsphere_success(self, m_mount_cb, m_find_devs_with): + """Test user_data_vsphere() where successful.""" + m_find_devs_with.return_value = ["/dev/mock/cdrom"] + m_mount_cb.return_value = 'raw userdata from cdrom' + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + cloud_info = self.tmp_path('cloud-info', dir=self.tmp) + util.write_file(cloud_info, 'VSPHERE') + self.assertEqual(True, dsrc.user_data_vsphere()) + m_find_devs_with.assert_called_once_with('LABEL=CDROM') + m_mount_cb.assert_called_once_with( + '/dev/mock/cdrom', dsac.read_user_data_callback) + with mock.patch.object(dsrc, 'get_cloud_type', return_value='VSPHERE'): + self.assertEqual('vsphere (/dev/mock/cdrom)', dsrc.subplatform) + + +class TestReadUserDataCallback(CiTestCase): + ''' + Test to exercise method: DataSourceAltCloud.read_user_data_callback() + ''' + def setUp(self): + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + self.mount_dir = tempfile.mkdtemp() + + _write_user_data_files(self.mount_dir, 'test user data') + + def tearDown(self): + # Reset + + _remove_user_data_files(self.mount_dir) + + # Attempt to remove the temp dir ignoring errors + try: + shutil.rmtree(self.mount_dir) + except OSError: + pass + + def test_callback_both(self): + '''Test read_user_data_callback() with both files.''' + + self.assertEqual('test user data', + dsac.read_user_data_callback(self.mount_dir)) + + def test_callback_dc(self): + '''Test read_user_data_callback() with only DC file.''' + + _remove_user_data_files(self.mount_dir, + dc_file=False, + non_dc_file=True) + + self.assertEqual('test user data', + dsac.read_user_data_callback(self.mount_dir)) + + def test_callback_non_dc(self): + '''Test read_user_data_callback() with only non-DC file.''' + + _remove_user_data_files(self.mount_dir, + dc_file=True, + non_dc_file=False) + + self.assertEqual('test user data', + dsac.read_user_data_callback(self.mount_dir)) + + def test_callback_none(self): + '''Test read_user_data_callback() no files are found.''' + + _remove_user_data_files(self.mount_dir) + self.assertIsNone(dsac.read_user_data_callback(self.mount_dir)) + + +def force_arch(arch=None): + + def _os_uname(): + return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', arch) + + if arch: + setattr(os, 'uname', _os_uname) + elif arch is None: + setattr(os, 'uname', OS_UNAME_ORIG) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py new file mode 100644 index 00000000..b221a0d7 --- /dev/null +++ b/tests/unittests/sources/test_azure.py @@ -0,0 +1,3394 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
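+#
+# construct_valid_ovf_env() (below) builds a minimal OVF environment
+# document resembling the ovf-env.xml that the Azure fabric presents on
+# the provisioning media; tests hand it to the datasource instead of
+# reading a real device.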
+
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import url_helper
+from cloudinit.sources import (
+    UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
+from cloudinit.util import (b64e, decode_binary, load_file, write_file,
+                            MountFailedError, json_dumps, load_json)
+from cloudinit.version import version_string as vs
+from tests.unittests.helpers import (
+    HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
+    ExitStack, resourceLocation)
+from cloudinit.sources.helpers import netlink
+
+import copy
+import crypt
+import httpretty
+import json
+import os
+import requests
+import stat
+import xml.etree.ElementTree as ET
+import yaml
+
+
+def construct_valid_ovf_env(data=None, pubkeys=None,
+                            userdata=None, platform_settings=None):
+    if data is None:
+        data = {'HostName': 'FOOHOST'}
+    if pubkeys is None:
+        pubkeys = {}
+
+    content = """<?xml version="1.0" encoding="utf-8"?>
+<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:wa="http://schemas.microsoft.com/windowsazure">
+    <wa:ProvisioningSection>
+    <wa:Version>1.0</wa:Version>
+    <LinuxProvisioningConfigurationSet>
+        <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
+    """
+    for key, dval in data.items():
+        if isinstance(dval, dict):
+            val = dict(dval).get('text')
+            attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v
+                                    in dict(dval).items() if k != 'text'])
+        else:
+            val = dval
+            attrs = ""
+        content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
+
+    if userdata:
+        content += "<UserData>%s</UserData>\n" % (b64e(userdata))
+
+    if pubkeys:
+        content += "<SSH><PublicKeys>\n"
+        for fp, path, value in pubkeys:
+            content += " <PublicKey>"
+            if fp and path:
+                content += ("<Fingerprint>%s</Fingerprint>"
+                            "<Path>%s</Path>" % (fp, path))
+            if value:
+                content += "<Value>%s</Value>" % value
+            content += "</PublicKey>\n"
+        content += "</PublicKeys></SSH>"
+    content += """
+    </LinuxProvisioningConfigurationSet>
+    </wa:ProvisioningSection>
+    <wa:PlatformSettingsSection>
+    <wa:Version>1.0</wa:Version>
+    <PlatformSettings>
+        <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
+        <ProvisionGuestAgent>false</ProvisionGuestAgent>
+        """
+    if platform_settings:
+        for k, v in platform_settings.items():
+            content += "<%s>%s</%s>\n" % (k, v, k)
+        if "PreprovisionedVMType" not in platform_settings:
+            content += """<PreprovisionedVm>False</PreprovisionedVm>"""
+    content += """</PlatformSettings></wa:PlatformSettingsSection>
+</Environment>"""
+
+    return content
+
+
+NETWORK_METADATA = {
+    "compute": {
+        "location": "eastus2",
+        "name": "my-hostname",
+        "offer": "UbuntuServer",
+        "osType": "Linux",
+        "placementGroupId": "",
+        "platformFaultDomain": "0",
+        "platformUpdateDomain": "0",
+        "publisher": "Canonical",
+        "resourceGroupName": "srugroup1",
+        "sku": "19.04-DAILY",
+        "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777",
+        "tags": "",
+        "version": "19.04.201906190",
+        "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642",
+        "vmScaleSetName": "",
+        "vmSize": "Standard_DS1_v2",
+        "zone": "",
+        "publicKeys": [
+            {
+                "keyData": "ssh-rsa key1",
+                "path": "path1"
+            }
+        ]
+    },
+    "network": {
+        "interface": [
+            {
+                "macAddress": "000D3A047598",
+                "ipv6": {
+                    "ipAddress": []
+                },
+                "ipv4": {
+                    "subnet": [
+                        {
+                            "prefix": "24",
+                            "address": "10.0.0.0"
+                        }
+                    ],
+                    "ipAddress": [
+                        {
+                            "privateIpAddress": "10.0.0.4",
+                            "publicIpAddress": "104.46.124.81"
+                        }
+                    ]
+                }
+            }
+        ]
+    }
+}
+
+SECONDARY_INTERFACE = {
+    "macAddress": "220D3A047598",
+    "ipv6": {
+        "ipAddress": []
+    },
+    "ipv4": {
+        "subnet": [
+            {
+                "prefix": "24",
+                "address": "10.0.1.0"
+            }
+        ],
+        "ipAddress": [
+            {
+                "privateIpAddress": "10.0.1.5",
+            }
+        ]
+    }
+}
+
+SECONDARY_INTERFACE_NO_IP = {
+    "macAddress": "220D3A047598",
+    "ipv6": {
+        "ipAddress": []
+    },
+    "ipv4": {
+        "subnet": [
+            {
+                "prefix": "24",
+                "address": "10.0.1.0"
+            }
+        ],
+        "ipAddress": []
+    }
+}
+
+IMDS_NETWORK_METADATA = {
+    "interface": [
+        {
+            "macAddress": "000D3A047598",
+            "ipv6": {
+                "ipAddress": []
+            },
+            "ipv4": {
+                "subnet": [
+                    {
+                        "prefix": "24",
+                        "address": "10.0.0.0"
+                    }
+                ],
+                "ipAddress": [
+                    {
+                        "privateIpAddress": "10.0.0.4",
+                        "publicIpAddress": "104.46.124.81"
+                    }
+                ]
+            }
+        }
+    ]
+}
+
+MOCKPATH = 'cloudinit.sources.DataSourceAzure.'
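+# MOCKPATH is prefixed to names patched inside the module under test,
+# e.g. mock.patch(MOCKPATH + 'readurl') patches
+# cloudinit.sources.DataSourceAzure.readurl.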
+EXAMPLE_UUID = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8' + + +class TestParseNetworkConfig(CiTestCase): + + maxDiff = None + fallback_config = { + 'version': 1, + 'config': [{ + 'type': 'physical', 'name': 'eth0', + 'mac_address': '00:11:22:33:44:55', + 'params': {'driver': 'hv_netsvc'}, + 'subnets': [{'type': 'dhcp'}], + }] + } + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_single_ipv4_nic_configuration(self, m_driver): + """parse_network_config emits dhcp on single nic with ipv4""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_increases_route_metric_for_non_primary_nics(self, m_driver): + """parse_network_config increases route-metric for each nic""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver): + """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp6': False, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + nic1['ipv6'] = { + "subnet": [{"address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + 
third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + third_intf['ipv6'] = { + "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + imds_data['network']['interface'].append(third_intf) + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver): + """parse_network_config emits primary ipv4 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + nic1['ipv6'] = { + "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver): + """parse_network_config emits primary ipv6 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + # Secondary ipv6 addresses currently ignored/unconfigured + nic1['ipv6'] = { + "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value='hv_netvsc') + def test_match_driver_for_netvsc(self, m_driver): + """parse_network_config emits driver when using netvsc.""" + expected = {'ethernets': { + 'eth0': { + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': { + 'macaddress': '00:0d:3a:04:75:98', + 'driver': 'hv_netvsc', + }, + 'set-name': 'eth0' + }}, 'version': 2} + self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + @mock.patch('cloudinit.net.generate_fallback_config') + def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata( + self, m_fallback_config, m_driver): + """parse_network_config generates fallback network config when the + IMDS instance metadata is corrupted/invalid, such as when + network metadata is not present. 
+ """ + imds_metadata_missing_network_metadata = copy.deepcopy( + NETWORK_METADATA) + del imds_metadata_missing_network_metadata['network'] + m_fallback_config.return_value = self.fallback_config + self.assertEqual( + self.fallback_config, + dsaz.parse_network_config( + imds_metadata_missing_network_metadata)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + @mock.patch('cloudinit.net.generate_fallback_config') + def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata( + self, m_fallback_config, m_driver): + """parse_network_config generates fallback network config when the + IMDS instance metadata is corrupted/invalid, such as when + network interface metadata is not present. + """ + imds_metadata_missing_interface_metadata = copy.deepcopy( + NETWORK_METADATA) + del imds_metadata_missing_interface_metadata['network']['interface'] + m_fallback_config.return_value = self.fallback_config + self.assertEqual( + self.fallback_config, + dsaz.parse_network_config( + imds_metadata_missing_interface_metadata)) + + +class TestGetMetadataFromIMDS(HttprettyTestCase): + + with_logs = True + + def setUp(self): + super(TestGetMetadataFromIMDS, self).setUp() + self.network_md_url = "{}/instance?api-version=2019-06-01".format( + dsaz.IMDS_URL + ) + + @mock.patch(MOCKPATH + 'readurl') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True) + @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + def test_get_metadata_does_not_dhcp_if_network_is_up( + self, m_net_is_up, m_dhcp, m_readurl): + """Do not perform DHCP setup when nic is already up.""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(NETWORK_METADATA).encode('utf-8')) + self.assertEqual( + NETWORK_METADATA, + dsaz.get_metadata_from_imds('eth9', retries=3)) + + m_net_is_up.assert_called_with('eth9') + m_dhcp.assert_not_called() + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + @mock.patch(MOCKPATH + 'readurl', autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'net.is_up') + def test_get_metadata_uses_instance_url( + self, m_net_is_up, m_dhcp, m_readurl): + """Make sure readurl is called with the correct url when accessing + metadata""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + + dsaz.get_metadata_from_imds( + 'eth0', retries=3, md_type=dsaz.metadata_type.all) + m_readurl.assert_called_with( + "http://169.254.169.254/metadata/instance?api-version=" + "2019-06-01", exception_cb=mock.ANY, + headers=mock.ANY, retries=mock.ANY, + timeout=mock.ANY, infinite=False) + + @mock.patch(MOCKPATH + 'readurl', autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'net.is_up') + def test_get_network_metadata_uses_network_url( + self, m_net_is_up, m_dhcp, m_readurl): + """Make sure readurl is called with the correct url when accessing + network metadata""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + + dsaz.get_metadata_from_imds( + 'eth0', retries=3, md_type=dsaz.metadata_type.network) + m_readurl.assert_called_with( + "http://169.254.169.254/metadata/instance/network?api-version=" + "2019-06-01", exception_cb=mock.ANY, + headers=mock.ANY, retries=mock.ANY, + timeout=mock.ANY, infinite=False) + + @mock.patch(MOCKPATH + 'readurl', 
autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'net.is_up') + def test_get_default_metadata_uses_instance_url( + self, m_net_is_up, m_dhcp, m_readurl): + """Make sure readurl is called with the correct url when accessing + metadata""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + + dsaz.get_metadata_from_imds( + 'eth0', retries=3) + m_readurl.assert_called_with( + "http://169.254.169.254/metadata/instance?api-version=" + "2019-06-01", exception_cb=mock.ANY, + headers=mock.ANY, retries=mock.ANY, + timeout=mock.ANY, infinite=False) + + @mock.patch(MOCKPATH + 'readurl', autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'net.is_up') + def test_get_metadata_uses_extended_url( + self, m_net_is_up, m_dhcp, m_readurl): + """Make sure readurl is called with the correct url when accessing + metadata""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + + dsaz.get_metadata_from_imds( + 'eth0', retries=3, md_type=dsaz.metadata_type.all, + api_version="2021-08-01") + m_readurl.assert_called_with( + "http://169.254.169.254/metadata/instance?api-version=" + "2021-08-01&extended=true", exception_cb=mock.ANY, + headers=mock.ANY, retries=mock.ANY, + timeout=mock.ANY, infinite=False) + + @mock.patch(MOCKPATH + 'readurl', autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True) + @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + def test_get_metadata_performs_dhcp_when_network_is_down( + self, m_net_is_up, m_dhcp, m_readurl): + """Perform DHCP setup when nic is not up.""" + m_net_is_up.return_value = False + m_readurl.return_value = url_helper.StringResponse( + json.dumps(NETWORK_METADATA).encode('utf-8')) + + self.assertEqual( + NETWORK_METADATA, + dsaz.get_metadata_from_imds('eth9', retries=2)) + + m_net_is_up.assert_called_with('eth9') + m_dhcp.assert_called_with(mock.ANY, 'eth9') + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + m_readurl.assert_called_with( + self.network_md_url, exception_cb=mock.ANY, + headers={'Metadata': 'true'}, retries=2, + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False) + + @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + def test_get_metadata_from_imds_empty_when_no_imds_present( + self, m_net_is_up, m_sleep): + """Return empty dict when IMDS network metadata is absent.""" + httpretty.register_uri( + httpretty.GET, + dsaz.IMDS_URL + '/instance?api-version=2017-12-01', + body={}, status=404) + + m_net_is_up.return_value = True # skips dhcp + + self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2)) + + m_net_is_up.assert_called_with('eth9') + self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list) + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + @mock.patch('requests.Session.request') + @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + def test_get_metadata_from_imds_retries_on_timeout( + self, m_net_is_up, m_sleep, m_request): + """Retry IMDS network metadata on timeout errors.""" + + self.attempt = 0 + m_request.side_effect = requests.Timeout('Fake Connection Timeout') + + def retry_callback(request, uri, headers): + 
self.attempt += 1 + raise requests.Timeout('Fake connection timeout') + + httpretty.register_uri( + httpretty.GET, + dsaz.IMDS_URL + 'instance?api-version=2017-12-01', + body=retry_callback) + + m_net_is_up.return_value = True # skips dhcp + + self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3)) + + m_net_is_up.assert_called_with('eth9') + self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list) + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + +class TestAzureDataSource(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestAzureDataSource, self).setUp() + self.tmp = self.tmp_dir() + + # patch cloud_dir, so our 'seed_dir' is guaranteed empty + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') + + self.patches = ExitStack() + self.addCleanup(self.patches.close) + + self.patches.enter_context(mock.patch.object( + dsaz, '_get_random_seed', return_value='wild')) + self.m_get_metadata_from_imds = self.patches.enter_context( + mock.patch.object( + dsaz, 'get_metadata_from_imds', + mock.MagicMock(return_value=NETWORK_METADATA))) + self.m_fallback_nic = self.patches.enter_context( + mock.patch('cloudinit.sources.net.find_fallback_nic', + return_value='eth9')) + self.m_remove_ubuntu_network_scripts = self.patches.enter_context( + mock.patch.object( + dsaz, 'maybe_remove_ubuntu_network_config_scripts', + mock.MagicMock())) + super(TestAzureDataSource, self).setUp() + + def apply_patches(self, patches): + for module, name, new in patches: + self.patches.enter_context(mock.patch.object(module, name, new)) + + def _get_mockds(self): + sysctl_out = "dev.storvsc.3.%pnpinfo: "\ + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ + "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" + sysctl_out += "dev.storvsc.2.%pnpinfo: "\ + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ + "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n" + sysctl_out += "dev.storvsc.1.%pnpinfo: "\ + "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "\ + "deviceid=00000000-0001-8899-0000-000000000000\n" + camctl_devbus = """ +scbus0 on ata0 bus 0 +scbus1 on ata1 bus 0 +scbus2 on blkvsc0 bus 0 +scbus3 on blkvsc1 bus 0 +scbus4 on storvsc2 bus 0 +scbus5 on storvsc3 bus 0 +scbus-1 on xpt0 bus 0 + """ + camctl_dev = """ + at scbus1 target 0 lun 0 (cd0,pass0) + at scbus2 target 0 lun 0 (da0,pass1) + at scbus3 target 1 lun 0 (da1,pass2) + """ + self.apply_patches([ + (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock( + return_value=sysctl_out)), + (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock( + return_value=camctl_devbus)), + (dsaz, 'get_camcontrol_dev', mock.MagicMock( + return_value=camctl_dev)) + ]) + return dsaz + + def _get_ds(self, data, distro='ubuntu', + apply_network=None, instance_id=None): + + def _wait_for_files(flist, _maxwait=None, _naplen=None): + data['waited'] = flist + return [] + + def _load_possible_azure_ds(seed_dir, cache_dir): + yield seed_dir + yield dsaz.DEFAULT_PROVISIONING_ISO_DEV + yield from data.get('dsdevs', []) + if cache_dir: + yield cache_dir + + seed_dir = os.path.join(self.paths.seed_dir, "azure") + if data.get('ovfcontent') is not None: + populate_dir(seed_dir, + {'ovf-env.xml': data['ovfcontent']}) + + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + + self.m_is_platform_viable = mock.MagicMock(autospec=True) + self.m_get_metadata_from_fabric = mock.MagicMock( + return_value={'public-keys': []}) + 
self.m_report_failure_to_fabric = mock.MagicMock(autospec=True) + self.m_ephemeral_dhcpv4 = mock.MagicMock() + self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock() + self.m_list_possible_azure_ds = mock.MagicMock( + side_effect=_load_possible_azure_ds) + + if instance_id: + self.instance_id = instance_id + else: + self.instance_id = EXAMPLE_UUID + + def _dmi_mocks(key): + if key == 'system-uuid': + return self.instance_id + elif key == 'chassis-asset-tag': + return '7783-7084-3265-9085-8269-3286-77' + + self.apply_patches([ + (dsaz, 'list_possible_azure_ds', + self.m_list_possible_azure_ds), + (dsaz, 'perform_hostname_bounce', mock.MagicMock()), + (dsaz, 'get_hostname', mock.MagicMock()), + (dsaz, 'set_hostname', mock.MagicMock()), + (dsaz, '_is_platform_viable', + self.m_is_platform_viable), + (dsaz, 'get_metadata_from_fabric', + self.m_get_metadata_from_fabric), + (dsaz, 'report_failure_to_fabric', + self.m_report_failure_to_fabric), + (dsaz, 'EphemeralDHCPv4', self.m_ephemeral_dhcpv4), + (dsaz, 'EphemeralDHCPv4WithReporting', + self.m_ephemeral_dhcpv4_with_reporting), + (dsaz, 'get_boot_telemetry', mock.MagicMock()), + (dsaz, 'get_system_info', mock.MagicMock()), + (dsaz.subp, 'which', lambda x: True), + (dsaz.dmi, 'read_dmi_data', mock.MagicMock( + side_effect=_dmi_mocks)), + (dsaz.util, 'wait_for_files', mock.MagicMock( + side_effect=_wait_for_files)), + ]) + + if isinstance(distro, str): + distro_cls = distros.fetch(distro) + distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths) + dsrc = dsaz.DataSourceAzure( + data.get('sys_cfg', {}), distro=distro, paths=self.paths) + if apply_network is not None: + dsrc.ds_cfg['apply_network_config'] = apply_network + + return dsrc + + def _get_and_setup(self, dsrc): + ret = dsrc.get_data() + if ret: + dsrc.setup(True) + return ret + + def xml_equals(self, oxml, nxml): + """Compare two sets of XML to make sure they are equal""" + + def create_tag_index(xml): + et = ET.fromstring(xml) + ret = {} + for x in et.iter(): + ret[x.tag] = x + return ret + + def tags_exists(x, y): + for tag in x.keys(): + assert tag in y + for tag in y.keys(): + assert tag in x + + def tags_equal(x, y): + for x_val in x.values(): + y_val = y.get(x_val.tag) + assert x_val.text == y_val.text + + old_cnt = create_tag_index(oxml) + new_cnt = create_tag_index(nxml) + tags_exists(old_cnt, new_cnt) + tags_equal(old_cnt, new_cnt) + + def xml_notequals(self, oxml, nxml): + try: + self.xml_equals(oxml, nxml) + except AssertionError: + return + raise AssertionError("XML is the same") + + def test_get_resource_disk(self): + ds = self._get_mockds() + dev = ds.get_resource_disk_on_freebsd(1) + self.assertEqual("da1", dev) + + def test_not_is_platform_viable_seed_should_return_no_datasource(self): + """Check seed_dir using _is_platform_viable and return False.""" + # Return a non-matching asset tag value + data = {} + dsrc = self._get_ds(data) + self.m_is_platform_viable.return_value = False + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ + mock.patch.object(dsrc, '_report_failure') as m_report_failure: + ret = dsrc.get_data() + self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) + self.assertFalse(ret) + # Assert that for non viable platforms, + # there is no communication with the Azure datasource. 
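+            # (Zero calls on both patched methods shows that get_data()
+            # returned before attempting any crawl or failure report.)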
+            self.assertEqual(
+                0,
+                m_crawl_metadata.call_count)
+            self.assertEqual(
+                0,
+                m_report_failure.call_count)
+
+    def test_platform_viable_but_no_devs_should_return_no_datasource(self):
+        """On a viable Azure platform (indicated by a matching asset tag),
+        the absence of any devs (candidate sources for crawling the Azure
+        datasource) is not expected. Report failure to Azure, as this is a
+        fatal error.
+        """
+        data = {}
+        dsrc = self._get_ds(data)
+        with mock.patch.object(dsrc, '_report_failure') as m_report_failure:
+            self.m_is_platform_viable.return_value = True
+            ret = dsrc.get_data()
+            self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+            self.assertFalse(ret)
+            self.assertEqual(
+                1,
+                m_report_failure.call_count)
+
+    def test_crawl_metadata_exception_returns_no_datasource(self):
+        data = {}
+        dsrc = self._get_ds(data)
+        self.m_is_platform_viable.return_value = True
+        with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+            m_crawl_metadata.side_effect = Exception
+            ret = dsrc.get_data()
+            self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+            self.assertEqual(
+                1,
+                m_crawl_metadata.call_count)
+            self.assertFalse(ret)
+
+    def test_crawl_metadata_exception_should_report_failure_with_msg(self):
+        data = {}
+        dsrc = self._get_ds(data)
+        self.m_is_platform_viable.return_value = True
+        with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
+                mock.patch.object(dsrc, '_report_failure') as m_report_failure:
+            m_crawl_metadata.side_effect = Exception
+            dsrc.get_data()
+            self.assertEqual(
+                1,
+                m_crawl_metadata.call_count)
+            m_report_failure.assert_called_once_with(
+                description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+
+    def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self):
+        data = {}
+        dsrc = self._get_ds(data)
+        self.m_is_platform_viable.return_value = True
+        with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+            m_crawl_metadata.side_effect = Exception
+            dsrc.get_data()
+            self.assertEqual(
+                1,
+                m_crawl_metadata.call_count)
+            self.assertIn(
+                "Could not crawl Azure metadata",
+                self.logs.getvalue())
+
+    def test_basic_seed_dir(self):
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': {}}
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(dsrc.userdata_raw, "")
+        self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName'])
+        self.assertTrue(os.path.isfile(
+            os.path.join(self.waagent_d, 'ovf-env.xml')))
+        self.assertEqual('azure', dsrc.cloud_name)
+        self.assertEqual('azure', dsrc.platform_type)
+        self.assertEqual(
+            'seed-dir (%s/seed/azure)' % self.tmp, dsrc.subplatform)
+
+    def test_basic_dev_file(self):
+        """When a device path is used, present that in subplatform."""
+        data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']}
+        dsrc = self._get_ds(data)
+        # DSAzure will attempt to mount /dev/sr0 first, which should
+        # fail with mount error since the list of devices doesn't have
+        # /dev/sr0
+        with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb:
+            m_mount_cb.side_effect = [
+                MountFailedError("fail"),
+                ({'local-hostname': 'me'}, 'ud', {'cfg': ''}, {})
+            ]
+            self.assertTrue(dsrc.get_data())
+        self.assertEqual(dsrc.userdata_raw, 'ud')
+        self.assertEqual(dsrc.metadata['local-hostname'], 'me')
+        self.assertEqual('azure', dsrc.cloud_name)
+        self.assertEqual('azure', dsrc.platform_type)
+        self.assertEqual('config-disk (/dev/cd0)', dsrc.subplatform)
+
+    def test_get_data_non_ubuntu_will_not_remove_network_scripts(self):
+        """get_data on non-Ubuntu will not remove ubuntu net scripts."""
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': {}}
+
+        dsrc = self._get_ds(data, distro='debian')
+        dsrc.get_data()
+        self.m_remove_ubuntu_network_scripts.assert_not_called()
+
+    def test_get_data_on_ubuntu_will_remove_network_scripts(self):
+        """get_data will remove ubuntu net scripts on Ubuntu distro."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+
+        dsrc = self._get_ds(data, distro='ubuntu')
+        dsrc.get_data()
+        self.m_remove_ubuntu_network_scripts.assert_called_once_with()
+
+    def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self):
+        """When apply_network_config false, do not remove scripts on Ubuntu."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+
+        dsrc = self._get_ds(data, distro='ubuntu')
+        dsrc.get_data()
+        self.m_remove_ubuntu_network_scripts.assert_not_called()
+
+    def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
+        """Return all structured metadata and cache no class attributes."""
+        yaml_cfg = ""
+        odata = {'HostName': "myhost", 'UserName': "myuser",
+                 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'},
+                 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': {}}
+        dsrc = self._get_ds(data)
+        expected_cfg = {
+            'PreprovisionedVMType': None,
+            'PreprovisionedVm': False,
+            'datasource': {'Azure': {}},
+            'system_info': {'default_user': {'name': 'myuser'}}}
+        expected_metadata = {
+            'azure_data': {
+                'configurationsettype': 'LinuxProvisioningConfiguration'},
+            'imds': NETWORK_METADATA,
+            'instance-id': EXAMPLE_UUID,
+            'local-hostname': 'myhost',
+            'random_seed': 'wild'}
+
+        crawled_metadata = dsrc.crawl_metadata()
+
+        self.assertCountEqual(
+            crawled_metadata.keys(),
+            ['cfg', 'files', 'metadata', 'userdata_raw'])
+        self.assertEqual(crawled_metadata['cfg'], expected_cfg)
+        self.assertEqual(
+            list(crawled_metadata['files'].keys()), ['ovf-env.xml'])
+        self.assertIn(
+            b'myhost',
+            crawled_metadata['files']['ovf-env.xml'])
+        self.assertEqual(crawled_metadata['metadata'], expected_metadata)
+        self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR')
+        self.assertEqual(dsrc.userdata_raw, None)
+        self.assertEqual(dsrc.metadata, {})
+        self.assertEqual(dsrc._metadata_imds, UNSET)
+        self.assertFalse(os.path.isfile(
+            os.path.join(self.waagent_d, 'ovf-env.xml')))
+
+    def test_crawl_metadata_raises_invalid_metadata_on_error(self):
+        """crawl_metadata raises an exception on invalid ovf-env.xml."""
+        data = {'ovfcontent': "BOGUS", 'sys_cfg': {}}
+        dsrc = self._get_ds(data)
+        error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:'
+                     ' syntax error: line 1, column 0')
+        with self.assertRaises(InvalidMetaDataException) as cm:
+            dsrc.crawl_metadata()
+        self.assertEqual(str(cm.exception), error_msg)
+
+    def test_crawl_metadata_call_imds_once_no_reprovision(self):
+        """When not reprovisioning, IMDS metadata is fetched only once"""
+        ovfenv = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVm": "False"}
+        )
+
+        data = {
+            'ovfcontent': ovfenv,
+            'sys_cfg': {}
+        }
+        dsrc = self._get_ds(data)
+        dsrc.crawl_metadata()
+        self.assertEqual(1, self.m_get_metadata_from_imds.call_count)
+
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+    @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+    def test_crawl_metadata_call_imds_twice_with_reprovision(
+            self, poll_imds_func, m_report_ready, m_write, m_dhcp
+    ):
+        """If reprovisioning, IMDS metadata will be fetched twice"""
+        ovfenv = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVm": "True"}
+        )
+
+        data = {
+            'ovfcontent': ovfenv,
+            'sys_cfg': {}
+        }
+        dsrc = self._get_ds(data)
+        poll_imds_func.return_value = ovfenv
+        dsrc.crawl_metadata()
+        self.assertEqual(2, self.m_get_metadata_from_imds.call_count)
+
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+    @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+    def test_crawl_metadata_on_reprovision_reports_ready(
+            self, poll_imds_func, m_report_ready, m_write, m_dhcp
+    ):
+        """If reprovisioning, report ready at the end"""
+        ovfenv = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVm": "True"}
+        )
+
+        data = {
+            'ovfcontent': ovfenv,
+            'sys_cfg': {}
+        }
+        dsrc = self._get_ds(data)
+        poll_imds_func.return_value = ovfenv
+        dsrc.crawl_metadata()
+        self.assertEqual(1, m_report_ready.call_count)
+
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+    @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
+        '_wait_for_all_nics_ready')
+    def test_crawl_metadata_waits_for_nic_on_savable_vms(
+            self, detect_nics, poll_imds_func, report_ready_func, m_write, m_dhcp
+    ):
+        """On savable preprovisioned VMs, wait for nics before reporting ready"""
+        ovfenv = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVMType": "Savable",
+                               "PreprovisionedVm": "True"}
+        )
+
+        data = {
+            'ovfcontent': ovfenv,
+            'sys_cfg': {}
+        }
+        dsrc = self._get_ds(data)
+        poll_imds_func.return_value = ovfenv
+        dsrc.crawl_metadata()
+        self.assertEqual(1, report_ready_func.call_count)
+        self.assertEqual(1, detect_nics.call_count)
+
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+    @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
+ '_wait_for_all_nics_ready') + @mock.patch('os.path.isfile') + def test_detect_nics_when_marker_present( + self, is_file, detect_nics, poll_imds_func, report_ready_func, m_write, + m_dhcp): + """If reprovisioning, wait for nic attach if marker present""" + + def is_file_ret(key): + return key == dsaz.REPROVISION_NIC_ATTACH_MARKER_FILE + + is_file.side_effect = is_file_ret + ovfenv = construct_valid_ovf_env() + + data = { + 'ovfcontent': ovfenv, + 'sys_cfg': {} + } + + dsrc = self._get_ds(data) + poll_imds_func.return_value = ovfenv + dsrc.crawl_metadata() + self.assertEqual(1, report_ready_func.call_count) + self.assertEqual(1, detect_nics.call_count) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + @mock.patch('cloudinit.sources.helpers.netlink.' + 'wait_for_media_disconnect_connect') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') + @mock.patch('cloudinit.sources.DataSourceAzure.readurl') + def test_crawl_metadata_on_reprovision_reports_ready_using_lease( + self, m_readurl, m_report_ready, + m_media_switch, m_write + ): + """If reprovisioning, report ready using the obtained lease""" + ovfenv = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "True"} + ) + + data = { + 'ovfcontent': ovfenv, + 'sys_cfg': {} + } + dsrc = self._get_ds(data) + + with mock.patch.object(dsrc.distro.networking, 'is_up') \ + as m_dsrc_distro_networking_is_up: + + # For this mock, net should not be up, + # so that cached ephemeral won't be used. + # This is so that a NEW ephemeral dhcp lease will be discovered + # and used instead. + m_dsrc_distro_networking_is_up.return_value = False + + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + self.m_ephemeral_dhcpv4_with_reporting.return_value \ + .__enter__.return_value = lease + m_media_switch.return_value = None + + reprovision_ovfenv = construct_valid_ovf_env() + m_readurl.return_value = url_helper.StringResponse( + reprovision_ovfenv.encode('utf-8')) + + dsrc.crawl_metadata() + self.assertEqual(2, m_report_ready.call_count) + m_report_ready.assert_called_with(lease=lease) + + def test_waagent_d_has_0700_perms(self): + # we expect /var/lib/waagent to be created 0700 + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue(os.path.isdir(self.waagent_d)) + self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_network_config_set_from_imds(self, m_driver): + """Datasource.network_config returns IMDS network data.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}}, + 'version': 2} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_network_config_set_from_imds_route_metric_for_secondary_nic( + self, m_driver): + """Datasource.network_config adds route-metric to secondary nics.""" + sys_cfg = {'datasource': {'Azure': 
{'apply_network_config': True}}}
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+        expected_network_config = {
+            'ethernets': {
+                'eth0': {'set-name': 'eth0',
+                         'match': {'macaddress': '00:0d:3a:04:75:98'},
+                         'dhcp6': False,
+                         'dhcp4': True,
+                         'dhcp4-overrides': {'route-metric': 100}},
+                'eth1': {'set-name': 'eth1',
+                         'match': {'macaddress': '22:0d:3a:04:75:98'},
+                         'dhcp6': False,
+                         'dhcp4': True,
+                         'dhcp4-overrides': {'route-metric': 200}},
+                'eth2': {'set-name': 'eth2',
+                         'match': {'macaddress': '33:0d:3a:04:75:98'},
+                         'dhcp6': False,
+                         'dhcp4': True,
+                         'dhcp4-overrides': {'route-metric': 300}}},
+            'version': 2}
+        imds_data = copy.deepcopy(NETWORK_METADATA)
+        imds_data['network']['interface'].append(SECONDARY_INTERFACE)
+        third_intf = copy.deepcopy(SECONDARY_INTERFACE)
+        third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33')
+        third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0'
+        third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6'
+        imds_data['network']['interface'].append(third_intf)
+
+        self.m_get_metadata_from_imds.return_value = imds_data
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertEqual(expected_network_config, dsrc.network_config)
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+                return_value=None)
+    def test_network_config_set_from_imds_for_secondary_nic_no_ip(
+            self, m_driver):
+        """If an IP address is empty then there should be no config for it."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+        expected_network_config = {
+            'ethernets': {
+                'eth0': {'set-name': 'eth0',
+                         'match': {'macaddress': '00:0d:3a:04:75:98'},
+                         'dhcp6': False,
+                         'dhcp4': True,
+                         'dhcp4-overrides': {'route-metric': 100}}},
+            'version': 2}
+        imds_data = copy.deepcopy(NETWORK_METADATA)
+        imds_data['network']['interface'].append(SECONDARY_INTERFACE_NO_IP)
+        self.m_get_metadata_from_imds.return_value = imds_data
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertEqual(expected_network_config, dsrc.network_config)
+
+    def test_availability_zone_set_from_imds(self):
+        """Datasource.availability_zone returns IMDS platformFaultDomain."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertEqual('0', dsrc.availability_zone)
+
+    def test_region_set_from_imds(self):
+        """Datasource.region returns IMDS region location."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertEqual('eastus2', dsrc.region)
+
+    def test_sys_cfg_set_never_destroy_ntfs(self):
+        sys_cfg = {'datasource': {'Azure': {
+            'never_destroy_ntfs': 'user-supplied-value'}}}
+        data = {'ovfcontent': construct_valid_ovf_env(data={}),
+                'sys_cfg': sys_cfg}
+
+        dsrc = self._get_ds(data)
+        ret = self._get_and_setup(dsrc)
+        self.assertTrue(ret)
+        self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
+                         'user-supplied-value')
+
+    def test_username_used(self):
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(dsrc.cfg['system_info']['default_user']['name'],
+                         "myuser")
+
+    def test_password_given(self):
+        odata = {'HostName': "myhost", 'UserName': "myuser",
+                 'UserPassword': "mypass"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertIn('default_user', dsrc.cfg['system_info'])
+        defuser = dsrc.cfg['system_info']['default_user']
+
+        # default user should get the updated username and not be locked.
+        self.assertEqual(defuser['name'], odata['UserName'])
+        self.assertFalse(defuser['lock_passwd'])
+        # passwd is a crypt-formatted string: $id$salt$encrypted
+        # encrypting the plaintext with everything up to the final '$'
+        # (the salt) should equal what follows that '$'
+        pos = defuser['passwd'].rfind("$") + 1
+        self.assertEqual(defuser['passwd'],
+                         crypt.crypt(odata['UserPassword'],
+                                     defuser['passwd'][0:pos]))
+
+        # the same hashed value should also be present in cfg['password']
+        self.assertEqual(defuser['passwd'], dsrc.cfg['password'])
+
+    def test_user_not_locked_if_password_redacted(self):
+        odata = {'HostName': "myhost", 'UserName': "myuser",
+                 'UserPassword': dsaz.DEF_PASSWD_REDACTION}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertIn('default_user', dsrc.cfg['system_info'])
+        defuser = dsrc.cfg['system_info']['default_user']
+
+        # default user should get the updated username and not be locked.
+        self.assertEqual(defuser['name'], odata['UserName'])
+        self.assertIn('lock_passwd', defuser)
+        self.assertFalse(defuser['lock_passwd'])
+
+    def test_userdata_plain(self):
+        mydata = "FOOBAR"
+        odata = {'UserData': {'text': mydata, 'encoding': 'plain'}}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(decode_binary(dsrc.userdata_raw), mydata)
+
+    def test_userdata_found(self):
+        mydata = "FOOBAR"
+        odata = {'UserData': {'text': b64e(mydata), 'encoding': 'base64'}}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))
+
+    def test_default_ephemeral_configs_ephemeral_exists(self):
+        # make sure the ephemeral configs are correct if disk present
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': {}}
+
+        orig_exists = dsaz.os.path.exists
+
+        def changed_exists(path):
+            return True if path == dsaz.RESOURCE_DISK_PATH else orig_exists(
+                path)
+
+        with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
+            dsrc = self._get_ds(data)
+            ret = dsrc.get_data()
+            self.assertTrue(ret)
+            cfg = dsrc.get_config_obj()
+
+            self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
+                             dsaz.RESOURCE_DISK_PATH)
+            assert 'disk_setup' in cfg
+            assert 'fs_setup' in cfg
+            self.assertIsInstance(cfg['disk_setup'], dict)
+            self.assertIsInstance(cfg['fs_setup'], list)
+
+    def test_default_ephemeral_configs_ephemeral_does_not_exist(self):
+        # make sure the ephemeral configs are correct if disk not present
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': {}}
+
+        orig_exists = dsaz.os.path.exists
+
+        def changed_exists(path):
+            return False if path == dsaz.RESOURCE_DISK_PATH else orig_exists(
+                path)
+
+        with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
+            dsrc = self._get_ds(data)
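+            # RESOURCE_DISK_PATH is mocked as absent above, so no
+            # disk_setup/fs_setup entries should be generated below.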
+ ret = dsrc.get_data() + self.assertTrue(ret) + cfg = dsrc.get_config_obj() + + assert 'disk_setup' not in cfg + assert 'fs_setup' not in cfg + + def test_provide_disk_aliases(self): + # Make sure that user can affect disk aliases + dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}} + odata = {'HostName': "myhost", 'UserName': "myuser", + 'dscfg': {'text': b64e(yaml.dump(dscfg)), + 'encoding': 'base64'}} + usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'}, + 'ephemeral0': False}} + userdata = '#cloud-config' + yaml.dump(usercfg) + "\n" + + ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata) + data = {'ovfcontent': ovfcontent, 'sys_cfg': {}} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + cfg = dsrc.get_config_obj() + self.assertTrue(cfg) + + def test_userdata_arrives(self): + userdata = "This is my user-data" + xml = construct_valid_ovf_env(data={}, userdata=userdata) + data = {'ovfcontent': xml} + dsrc = self._get_ds(data) + dsrc.get_data() + + self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw) + + def test_password_redacted_in_ovf(self): + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserPassword': "mypass"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + dsrc = self._get_ds(data) + ret = dsrc.get_data() + + self.assertTrue(ret) + ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') + + # The XML should not be same since the user password is redacted + on_disk_ovf = load_file(ovf_env_path) + self.xml_notequals(data['ovfcontent'], on_disk_ovf) + + # Make sure that the redacted password on disk is not used by CI + self.assertNotEqual(dsrc.cfg.get('password'), + dsaz.DEF_PASSWD_REDACTION) + + # Make sure that the password was really encrypted + et = ET.fromstring(on_disk_ovf) + for elem in et.iter(): + if 'UserPassword' in elem.tag: + self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text) + + def test_ovf_env_arrives_in_waagent_dir(self): + xml = construct_valid_ovf_env(data={}, userdata="FOODATA") + dsrc = self._get_ds({'ovfcontent': xml}) + dsrc.get_data() + + # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir) + # we expect that the ovf-env.xml file is copied there. 
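+        # xml_equals (defined above) compares element tags and text rather
+        # than raw strings, so incidental formatting differences are tolerated.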
+ ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') + self.assertTrue(os.path.exists(ovf_env_path)) + self.xml_equals(xml, load_file(ovf_env_path)) + + def test_ovf_can_include_unicode(self): + xml = construct_valid_ovf_env(data={}) + xml = '\ufeff{0}'.format(xml) + dsrc = self._get_ds({'ovfcontent': xml}) + dsrc.get_data() + + def test_dsaz_report_ready_returns_true_when_report_succeeds( + self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + self.assertTrue(dsrc._report_ready(lease=mock.MagicMock())) + + def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc( + self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + self.m_get_metadata_from_fabric.side_effect = Exception + self.assertFalse(dsrc._report_ready(lease=mock.MagicMock())) + + def test_dsaz_report_failure_returns_true_when_report_succeeds(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + self.assertTrue(dsrc._report_failure()) + self.assertEqual( + 1, + self.m_report_failure_to_fabric.call_count) + + def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc( + self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ + mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ + as m_ephemeral_dhcp_ctx, \ + mock.patch.object(dsrc.distro.networking, 'is_up') \ + as m_dsrc_distro_networking_is_up: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + # setup mocks to allow using cached ephemeral dhcp lease + m_dsrc_distro_networking_is_up.return_value = True + test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' + test_lease = {'unknown-245': test_lease_dhcp_option_245} + m_ephemeral_dhcp_ctx.lease = test_lease + + # We expect 3 calls to report_failure_to_fabric, + # because we try 3 different methods of calling report failure. + # The different methods are attempted in the following order: + # 1. Using cached ephemeral dhcp context to report failure to Azure + # 2. Using new ephemeral dhcp to report failure to Azure + # 3. 
Using fallback lease to report failure to Azure + self.m_report_failure_to_fabric.side_effect = Exception + self.assertFalse(dsrc._report_failure()) + self.assertEqual( + 3, + self.m_report_failure_to_fabric.call_count) + + def test_dsaz_report_failure_description_msg(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + test_msg = 'Test report failure description message' + self.assertTrue(dsrc._report_failure(description=test_msg)) + self.m_report_failure_to_fabric.assert_called_once_with( + dhcp_opts=mock.ANY, description=test_msg) + + def test_dsaz_report_failure_no_description_msg(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + m_crawl_metadata.side_effect = Exception + + self.assertTrue(dsrc._report_failure()) # no description msg + self.m_report_failure_to_fabric.assert_called_once_with( + dhcp_opts=mock.ANY, description=None) + + def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ + mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ + as m_ephemeral_dhcp_ctx, \ + mock.patch.object(dsrc.distro.networking, 'is_up') \ + as m_dsrc_distro_networking_is_up: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + # setup mocks to allow using cached ephemeral dhcp lease + m_dsrc_distro_networking_is_up.return_value = True + test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' + test_lease = {'unknown-245': test_lease_dhcp_option_245} + m_ephemeral_dhcp_ctx.lease = test_lease + + self.assertTrue(dsrc._report_failure()) + + # ensure called with cached ephemeral dhcp lease option 245 + self.m_report_failure_to_fabric.assert_called_once_with( + description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245) + + # ensure cached ephemeral is cleaned + self.assertEqual( + 1, + m_ephemeral_dhcp_ctx.clean_network.call_count) + + def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ + mock.patch.object(dsrc.distro.networking, 'is_up') \ + as m_dsrc_distro_networking_is_up: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + # net is not up and cannot use cached ephemeral dhcp + m_dsrc_distro_networking_is_up.return_value = False + # setup ephemeral dhcp lease discovery mock + test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' + test_lease = {'unknown-245': test_lease_dhcp_option_245} + self.m_ephemeral_dhcpv4_with_reporting.return_value \ + .__enter__.return_value = test_lease + + self.assertTrue(dsrc._report_failure()) + + # ensure called with the newly discovered + # ephemeral dhcp lease option 245 + self.m_report_failure_to_fabric.assert_called_once_with( + description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245) + + def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease( + self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ + mock.patch.object(dsrc.distro.networking, 'is_up') \ + as 
m_dsrc_distro_networking_is_up: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + # net is not up and cannot use cached ephemeral dhcp + m_dsrc_distro_networking_is_up.return_value = False + # ephemeral dhcp discovery failure, + # so cannot use a new ephemeral dhcp + self.m_ephemeral_dhcpv4_with_reporting.return_value \ + .__enter__.side_effect = Exception + + self.assertTrue(dsrc._report_failure()) + + # ensure called with fallback lease + self.m_report_failure_to_fabric.assert_called_once_with( + description=mock.ANY, + fallback_lease_file=dsrc.dhclient_lease_file) + + def test_exception_fetching_fabric_data_doesnt_propagate(self): + """Errors communicating with fabric should warn, but return True.""" + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + self.m_get_metadata_from_fabric.side_effect = Exception + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + + def test_fabric_data_included_in_metadata(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + self.m_get_metadata_from_fabric.return_value = {'test': 'value'} + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual('value', dsrc.metadata['test']) + + def test_instance_id_case_insensitive(self): + """Return the previous iid when current is a case-insensitive match.""" + lower_iid = EXAMPLE_UUID.lower() + upper_iid = EXAMPLE_UUID.upper() + # lowercase current UUID + ds = self._get_ds( + {'ovfcontent': construct_valid_ovf_env()}, instance_id=lower_iid + ) + # UPPERCASE previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + upper_iid) + ds.get_data() + self.assertEqual(upper_iid, ds.metadata['instance-id']) + + # UPPERCASE current UUID + ds = self._get_ds( + {'ovfcontent': construct_valid_ovf_env()}, instance_id=upper_iid + ) + # lowercase previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + lower_iid) + ds.get_data() + self.assertEqual(lower_iid, ds.metadata['instance-id']) + + def test_instance_id_endianness(self): + """Return the previous iid when dmi uuid is the byteswapped iid.""" + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + # byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + # not byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual(self.instance_id, ds.metadata['instance-id']) + + def test_instance_id_from_dmidecode_used(self): + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ds.get_data() + self.assertEqual(self.instance_id, ds.metadata['instance-id']) + + def test_instance_id_from_dmidecode_used_for_builtin(self): + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ds.get_data() + self.assertEqual(self.instance_id, ds.metadata['instance-id']) + + @mock.patch(MOCKPATH + 'util.is_FreeBSD') + @mock.patch(MOCKPATH + '_check_freebsd_cdrom') + def test_list_possible_azure_ds(self, m_check_fbsd_cdrom, + m_is_FreeBSD): + """On FreeBSD, possible devs should show /dev/cd0.""" + m_is_FreeBSD.return_value = True + m_check_fbsd_cdrom.return_value = True + possible_ds = [] + for src in dsaz.list_possible_azure_ds( + "seed_dir", "cache_dir"): + possible_ds.append(src) + self.assertEqual(possible_ds, 
["seed_dir", + dsaz.DEFAULT_PROVISIONING_ISO_DEV, + "/dev/cd0", + "cache_dir"]) + self.assertEqual( + [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + @mock.patch('cloudinit.net.generate_fallback_config') + def test_imds_network_config(self, mock_fallback, m_driver): + """Network config is generated from IMDS network data when present.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + + expected_cfg = { + 'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, + 'version': 2} + + self.assertEqual(expected_cfg, dsrc.network_config) + mock_fallback.assert_not_called() + + @mock.patch('cloudinit.net.get_interface_mac') + @mock.patch('cloudinit.net.get_devicelist') + @mock.patch('cloudinit.net.device_driver') + @mock.patch('cloudinit.net.generate_fallback_config') + def test_imds_network_ignored_when_apply_network_config_false( + self, mock_fallback, mock_dd, mock_devlist, mock_get_mac): + """When apply_network_config is False, use fallback instead of IMDS.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + fallback_config = { + 'version': 1, + 'config': [{ + 'type': 'physical', 'name': 'eth0', + 'mac_address': '00:11:22:33:44:55', + 'params': {'driver': 'hv_netsvc'}, + 'subnets': [{'type': 'dhcp'}], + }] + } + mock_fallback.return_value = fallback_config + + mock_devlist.return_value = ['eth0'] + mock_dd.return_value = ['hv_netsvc'] + mock_get_mac.return_value = '00:11:22:33:44:55' + + dsrc = self._get_ds(data) + self.assertTrue(dsrc.get_data()) + self.assertEqual(dsrc.network_config, fallback_config) + + @mock.patch('cloudinit.net.get_interface_mac') + @mock.patch('cloudinit.net.get_devicelist') + @mock.patch('cloudinit.net.device_driver') + @mock.patch('cloudinit.net.generate_fallback_config', autospec=True) + def test_fallback_network_config(self, mock_fallback, mock_dd, + mock_devlist, mock_get_mac): + """On absent IMDS network data, generate network fallback config.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + fallback_config = { + 'version': 1, + 'config': [{ + 'type': 'physical', 'name': 'eth0', + 'mac_address': '00:11:22:33:44:55', + 'params': {'driver': 'hv_netsvc'}, + 'subnets': [{'type': 'dhcp'}], + }] + } + mock_fallback.return_value = fallback_config + + mock_devlist.return_value = ['eth0'] + mock_dd.return_value = ['hv_netsvc'] + mock_get_mac.return_value = '00:11:22:33:44:55' + + dsrc = self._get_ds(data) + # Represent empty response from network imds + self.m_get_metadata_from_imds.return_value = {} + ret = dsrc.get_data() + self.assertTrue(ret) + + netconfig = dsrc.network_config + self.assertEqual(netconfig, fallback_config) + mock_fallback.assert_called_with( + blacklist_drivers=['mlx4_core', 'mlx5_core'], + config_driver=True) + + @mock.patch(MOCKPATH + 'net.get_interfaces', autospec=True) + def test_blacklist_through_distro( + self, m_net_get_interfaces): + """Verify 
Azure DS updates blacklist drivers in the distro's + networking object.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + distro_cls = distros.fetch('ubuntu') + distro = distro_cls('ubuntu', {}, self.paths) + dsrc = self._get_ds(data, distro=distro) + dsrc.get_data() + self.assertEqual(distro.networking.blacklist_drivers, + dsaz.BLACKLIST_DRIVERS) + + distro.networking.get_interfaces_by_mac() + m_net_get_interfaces.assert_called_with( + blacklist_drivers=dsaz.BLACKLIST_DRIVERS) + + @mock.patch(MOCKPATH + 'subp.subp', autospec=True) + def test_get_hostname_with_no_args(self, m_subp): + dsaz.get_hostname() + m_subp.assert_called_once_with(("hostname",), capture=True) + + @mock.patch(MOCKPATH + 'subp.subp', autospec=True) + def test_get_hostname_with_string_arg(self, m_subp): + dsaz.get_hostname(hostname_command="hostname") + m_subp.assert_called_once_with(("hostname",), capture=True) + + @mock.patch(MOCKPATH + 'subp.subp', autospec=True) + def test_get_hostname_with_iterable_arg(self, m_subp): + dsaz.get_hostname(hostname_command=("hostname",)) + m_subp.assert_called_once_with(("hostname",), capture=True) + + @mock.patch( + 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') + def test_get_public_ssh_keys_with_imds(self, m_parse_certificates): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + dsrc.get_data() + dsrc.setup(True) + ssh_keys = dsrc.get_public_ssh_keys() + self.assertEqual(ssh_keys, ["ssh-rsa key1"]) + self.assertEqual(m_parse_certificates.call_count, 0) + + def test_key_without_crlf_valid(self): + test_key = 'ssh-rsa somerandomkeystuff some comment' + assert True is dsaz._key_is_openssh_formatted(test_key) + + def test_key_with_crlf_invalid(self): + test_key = 'ssh-rsa someran\r\ndomkeystuff some comment' + assert False is dsaz._key_is_openssh_formatted(test_key) + + def test_key_endswith_crlf_valid(self): + test_key = 'ssh-rsa somerandomkeystuff some comment\r\n' + assert True is dsaz._key_is_openssh_formatted(test_key) + + @mock.patch( + 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_get_public_ssh_keys_with_no_openssh_format( + self, + m_get_metadata_from_imds, + m_parse_certificates): + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format' + m_get_metadata_from_imds.return_value = imds_data + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + dsrc.get_data() + dsrc.setup(True) + ssh_keys = dsrc.get_public_ssh_keys() + self.assertEqual(ssh_keys, []) + self.assertEqual(m_parse_certificates.call_count, 0) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_get_public_ssh_keys_without_imds( + self, + m_get_metadata_from_imds): + m_get_metadata_from_imds.return_value = dict() + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + 
dsaz.get_metadata_from_fabric.return_value = {'public-keys': ['key2']} + dsrc.get_data() + dsrc.setup(True) + ssh_keys = dsrc.get_public_ssh_keys() + self.assertEqual(ssh_keys, ['key2']) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_imds_api_version_wanted_nonexistent( + self, + m_get_metadata_from_imds): + def get_metadata_from_imds_side_eff(*args, **kwargs): + if kwargs['api_version'] == dsaz.IMDS_VER_WANT: + raise url_helper.UrlError("No IMDS version", code=400) + return NETWORK_METADATA + m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertIsNotNone(dsrc.metadata) + self.assertTrue(dsrc.failed_desired_api_version) + + @mock.patch( + MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA) + def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertIsNotNone(dsrc.metadata) + self.assertFalse(dsrc.failed_desired_api_version) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_hostname_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) + imds_data_with_os_profile["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true" + ) + m_get_metadata_from_imds.return_value = imds_data_with_os_profile + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(dsrc.metadata["local-hostname"], "hostname1") + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_username_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) + imds_data_with_os_profile["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true" + ) + m_get_metadata_from_imds.return_value = imds_data_with_os_profile + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual( + dsrc.cfg["system_info"]["default_user"]["name"], + "username1" + ) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_disable_password_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) + imds_data_with_os_profile["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true" + ) + m_get_metadata_from_imds.return_value = 
imds_data_with_os_profile + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertTrue(dsrc.metadata["disable_password"]) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_userdata_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + userdata = "userdataImds" + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true", + ) + imds_data["compute"]["userData"] = b64e(userdata) + m_get_metadata_from_imds.return_value = imds_data + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8')) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_userdata_from_imds_with_customdata_from_OVF( + self, m_get_metadata_from_imds): + userdataOVF = "userdataOVF" + odata = { + 'HostName': "myhost", 'UserName': "myuser", + 'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'} + } + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + + userdataImds = "userdataImds" + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true", + ) + imds_data["compute"]["userData"] = b64e(userdataImds) + m_get_metadata_from_imds.return_value = imds_data + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8')) + + +class TestAzureBounce(CiTestCase): + + with_logs = True + + def mock_out_azure_moving_parts(self): + + def _load_possible_azure_ds(seed_dir, cache_dir): + yield seed_dir + yield dsaz.DEFAULT_PROVISIONING_ISO_DEV + if cache_dir: + yield cache_dir + + self.patches.enter_context( + mock.patch.object(dsaz.util, 'wait_for_files')) + self.patches.enter_context( + mock.patch.object( + dsaz, 'list_possible_azure_ds', + mock.MagicMock(side_effect=_load_possible_azure_ds))) + self.patches.enter_context( + mock.patch.object(dsaz, 'get_metadata_from_fabric', + mock.MagicMock(return_value={}))) + self.patches.enter_context( + mock.patch.object(dsaz, 'get_metadata_from_imds', + mock.MagicMock(return_value={}))) + self.patches.enter_context( + mock.patch.object(dsaz.subp, 'which', lambda x: True)) + self.patches.enter_context(mock.patch.object( + dsaz, '_get_random_seed', return_value='wild')) + + def _dmi_mocks(key): + if key == 'system-uuid': + return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' + elif key == 'chassis-asset-tag': + return '7783-7084-3265-9085-8269-3286-77' + raise RuntimeError('should not get here') + + self.patches.enter_context( + mock.patch.object(dsaz.dmi, 'read_dmi_data', + mock.MagicMock(side_effect=_dmi_mocks))) + + def setUp(self): + super(TestAzureBounce, self).setUp() + self.tmp = self.tmp_dir() + self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + self.patches = ExitStack() + self.mock_out_azure_moving_parts() + self.get_hostname = self.patches.enter_context( + mock.patch.object(dsaz, 'get_hostname')) + 
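+        # Individual tests steer these two mocks to simulate the currently
+        # reported hostname and to observe any hostname the DS tries to set.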
self.set_hostname = self.patches.enter_context( + mock.patch.object(dsaz, 'set_hostname')) + self.subp = self.patches.enter_context( + mock.patch(MOCKPATH + 'subp.subp')) + self.find_fallback_nic = self.patches.enter_context( + mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9')) + + def tearDown(self): + self.patches.close() + super(TestAzureBounce, self).tearDown() + + def _get_ds(self, ovfcontent=None): + if ovfcontent is not None: + populate_dir(os.path.join(self.paths.seed_dir, "azure"), + {'ovf-env.xml': ovfcontent}) + dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) + return dsrc + + def _get_and_setup(self, dsrc): + ret = dsrc.get_data() + if ret: + dsrc.setup(True) + return ret + + def get_ovf_env_with_dscfg(self, hostname, cfg): + odata = { + 'HostName': hostname, + 'dscfg': { + 'text': b64e(yaml.dump(cfg)), + 'encoding': 'base64' + } + } + return construct_valid_ovf_env(data=odata) + + def test_disabled_bounce_does_not_change_hostname(self): + cfg = {'hostname_bounce': {'policy': 'off'}} + ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) + ds.get_data() + self.assertEqual(0, self.set_hostname.call_count) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_disabled_bounce_does_not_perform_bounce( + self, perform_hostname_bounce): + cfg = {'hostname_bounce': {'policy': 'off'}} + ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) + ds.get_data() + self.assertEqual(0, perform_hostname_bounce.call_count) + + def test_same_hostname_does_not_change_hostname(self): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'yes'}} + ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ds.get_data() + self.assertEqual(0, self.set_hostname.call_count) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_unchanged_hostname_does_not_perform_bounce( + self, perform_hostname_bounce): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'yes'}} + ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ds.get_data() + self.assertEqual(0, perform_hostname_bounce.call_count) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_force_performs_bounce_regardless(self, perform_hostname_bounce): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'force'}} + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(1, perform_hostname_bounce.call_count) + + def test_bounce_skipped_on_ifupdown_absent(self): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'force'}} + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + patch_path = MOCKPATH + 'subp.which' + with mock.patch(patch_path) as m_which: + m_which.return_value = None + ret = self._get_and_setup(dsrc) + self.assertEqual([mock.call('ifup')], m_which.call_args_list) + self.assertTrue(ret) + self.assertIn( + "Skipping network bounce: ifupdown utils aren't present.", + self.logs.getvalue()) + + def test_different_hostnames_sets_hostname(self): + expected_hostname = 'azure-expected-host-name' + self.get_hostname.return_value = 'default-host-name' + dsrc = self._get_ds( + self.get_ovf_env_with_dscfg(expected_hostname, {})) + ret = self._get_and_setup(dsrc) + 
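+        # The hostname from the OVF environment should be set first.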
self.assertTrue(ret) + self.assertEqual(expected_hostname, + self.set_hostname.call_args_list[0][0][0]) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_different_hostnames_performs_bounce( + self, perform_hostname_bounce): + expected_hostname = 'azure-expected-host-name' + self.get_hostname.return_value = 'default-host-name' + dsrc = self._get_ds( + self.get_ovf_env_with_dscfg(expected_hostname, {})) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(1, perform_hostname_bounce.call_count) + + def test_different_hostnames_sets_hostname_back(self): + initial_host_name = 'default-host-name' + self.get_hostname.return_value = initial_host_name + dsrc = self._get_ds( + self.get_ovf_env_with_dscfg('some-host-name', {})) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(initial_host_name, + self.set_hostname.call_args_list[-1][0][0]) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_failure_in_bounce_still_resets_host_name( + self, perform_hostname_bounce): + perform_hostname_bounce.side_effect = Exception + initial_host_name = 'default-host-name' + self.get_hostname.return_value = initial_host_name + dsrc = self._get_ds( + self.get_ovf_env_with_dscfg('some-host-name', {})) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(initial_host_name, + self.set_hostname.call_args_list[-1][0][0]) + + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_environment_correct_for_bounce_command( + self, mock_get_boot_telemetry): + interface = 'int0' + hostname = 'my-new-host' + old_hostname = 'my-old-host' + self.get_hostname.return_value = old_hostname + cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} + data = self.get_ovf_env_with_dscfg(hostname, cfg) + dsrc = self._get_ds(data) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(1, self.subp.call_count) + bounce_env = self.subp.call_args[1]['env'] + self.assertEqual(interface, bounce_env['interface']) + self.assertEqual(hostname, bounce_env['hostname']) + self.assertEqual(old_hostname, bounce_env['old_hostname']) + + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_default_bounce_command_ifup_used_by_default( + self, mock_get_boot_telemetry): + cfg = {'hostname_bounce': {'policy': 'force'}} + data = self.get_ovf_env_with_dscfg('some-hostname', cfg) + dsrc = self._get_ds(data) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(1, self.subp.call_count) + bounce_args = self.subp.call_args[1]['args'] + self.assertEqual( + dsaz.BOUNCE_COMMAND_IFUP, bounce_args) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_set_hostname_option_can_disable_bounce( + self, perform_hostname_bounce): + cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} + data = self.get_ovf_env_with_dscfg('some-hostname', cfg) + self._get_ds(data).get_data() + + self.assertEqual(0, perform_hostname_bounce.call_count) + + def test_set_hostname_option_can_disable_hostname_set(self): + cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} + data = self.get_ovf_env_with_dscfg('some-hostname', cfg) + self._get_ds(data).get_data() + + self.assertEqual(0, self.set_hostname.call_count) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_set_hostname_failed_disable_bounce( + self, perform_hostname_bounce): + cfg = {'set_hostname': True, 'hostname_bounce': {'policy': 'force'}} + self.get_hostname.return_value = "old-hostname" + 
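+        # Simulate set_hostname failing; the bounce must then be skipped.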
self.set_hostname.side_effect = Exception
+        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+        self._get_ds(data).get_data()
+
+        self.assertEqual(0, perform_hostname_bounce.call_count)
+
+
+class TestLoadAzureDsDir(CiTestCase):
+    """Tests for load_azure_ds_dir."""
+
+    def setUp(self):
+        self.source_dir = self.tmp_dir()
+        super(TestLoadAzureDsDir, self).setUp()
+
+    def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self):
+        """load_azure_ds_dir raises an error when ovf-env.xml doesn't exist."""
+        with self.assertRaises(dsaz.NonAzureDataSource) as context_manager:
+            dsaz.load_azure_ds_dir(self.source_dir)
+        self.assertEqual(
+            'No ovf-env file found',
+            str(context_manager.exception))
+
+    def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self):
+        """load_azure_ds_dir calls read_azure_ovf to parse the xml."""
+        ovf_path = os.path.join(self.source_dir, 'ovf-env.xml')
+        with open(ovf_path, 'wb') as stream:
+            stream.write(b'invalid xml')
+        with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager:
+            dsaz.load_azure_ds_dir(self.source_dir)
+        self.assertEqual(
+            'Invalid ovf-env.xml: syntax error: line 1, column 0',
+            str(context_manager.exception))
+
+
+class TestReadAzureOvf(CiTestCase):
+
+    def test_invalid_xml_raises_non_azure_ds(self):
+        invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
+        self.assertRaises(dsaz.BrokenAzureDataSource,
+                          dsaz.read_azure_ovf, invalid_xml)
+
+    def test_load_with_pubkeys(self):
+        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
+        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
+        content = construct_valid_ovf_env(pubkeys=pubkeys)
+        (_md, _ud, cfg) = dsaz.read_azure_ovf(content)
+        for mypk in mypklist:
+            self.assertIn(mypk, cfg['_pubkeys'])
+
+
+class TestCanDevBeReformatted(CiTestCase):
+    warning_file = 'dataloss_warning_readme.txt'
+
+    def _domock(self, mockpath, sattr=None):
+        patcher = mock.patch(mockpath)
+        setattr(self, sattr, patcher.start())
+        self.addCleanup(patcher.stop)
+
+    def patchup(self, devs):
+        bypath = {}
+        for path, data in devs.items():
+            bypath[path] = data
+            if 'realpath' in data:
+                bypath[data['realpath']] = data
+            for ppath, pdata in data.get('partitions', {}).items():
+                bypath[ppath] = pdata
+                if 'realpath' in data:
+                    bypath[pdata['realpath']] = pdata
+
+        def realpath(d):
+            return bypath[d].get('realpath', d)
+
+        def partitions_on_device(devpath):
+            parts = bypath.get(devpath, {}).get('partitions', {})
+            ret = []
+            for path, data in parts.items():
+                ret.append((data.get('num'), realpath(path)))
+            # return sorted by partition number
+            return sorted(ret, key=lambda d: d[0])
+
+        def mount_cb(device, callback, mtype, update_env_for_mount):
+            self.assertEqual('ntfs', mtype)
+            self.assertEqual('C', update_env_for_mount.get('LANG'))
+            p = self.tmp_dir()
+            for f in bypath.get(device).get('files', []):
+                write_file(os.path.join(p, f), content=f)
+            return callback(p)
+
+        def has_ntfs_fs(device):
+            return bypath.get(device, {}).get('fs') == 'ntfs'
+
+        p = MOCKPATH
+        self._domock(p + "_partitions_on_device", 'm_partitions_on_device')
+        self._domock(p + "_has_ntfs_filesystem", 'm_has_ntfs_filesystem')
+        self._domock(p + "util.mount_cb", 'm_mount_cb')
+        self._domock(p + "os.path.realpath", 'm_realpath')
+        self._domock(p + "os.path.exists", 'm_exists')
+        self._domock(p + "util.SeLinuxGuard", 'm_selguard')
+
+        self.m_exists.side_effect = lambda p: p in bypath
+        self.m_realpath.side_effect = realpath
+        self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs
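+        # Wire the fake device tree defined above into the patched helpers.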
+        self.m_mount_cb.side_effect = mount_cb
+        self.m_partitions_on_device.side_effect = partitions_on_device
+        self.m_selguard.__enter__ = mock.Mock(return_value=False)
+        self.m_selguard.__exit__ = mock.Mock()
+
+    def test_three_partitions_is_false(self):
+        """A disk with 3 partitions can not be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1},
+                    '/dev/sda2': {'num': 2},
+                    '/dev/sda3': {'num': 3},
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertFalse(value)
+        self.assertIn("3 or more", msg.lower())
+
+    def test_no_partitions_is_false(self):
+        """A disk with no partitions can not be formatted."""
+        self.patchup({'/dev/sda': {}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertFalse(value)
+        self.assertIn("not partitioned", msg.lower())
+
+    def test_two_partitions_not_ntfs_false(self):
+        """2 partitions and 2nd not ntfs can not be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1},
+                    '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []},
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertFalse(value)
+        self.assertIn("not ntfs", msg.lower())
+
+    def test_two_partitions_ntfs_populated_false(self):
+        """2 partitions and populated ntfs fs on 2nd can not be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1},
+                    '/dev/sda2': {'num': 2, 'fs': 'ntfs',
+                                  'files': ['secret.txt']},
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertFalse(value)
+        self.assertIn("files on it", msg.lower())
+
+    def test_two_partitions_ntfs_empty_is_true(self):
+        """2 partitions and empty ntfs fs on 2nd can be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1},
+                    '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []},
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertTrue(value)
+        self.assertIn("safe for", msg.lower())
+
+    def test_one_partition_not_ntfs_false(self):
+        """1 partition with fs other than ntfs can not be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1, 'fs': 'zfs'},
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertFalse(value)
+        self.assertIn("not ntfs", msg.lower())
+
+    def test_one_partition_ntfs_populated_false(self):
+        """1 mountable ntfs partition with many files can not be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1, 'fs': 'ntfs',
+                                  'files': ['file1.txt', 'file2.exe']},
+                }}})
+        with mock.patch.object(dsaz.LOG, 'warning') as warning:
+            value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                     preserve_ntfs=False)
+            wmsg = warning.call_args[0][0]
+            self.assertIn("looks like you're using NTFS on the ephemeral disk",
+                          wmsg)
+            self.assertFalse(value)
+            self.assertIn("files on it", msg.lower())
+
+    def test_one_partition_ntfs_empty_is_true(self):
+        """1 mountable ntfs partition and no files can be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertTrue(value)
+        self.assertIn("safe for", msg.lower())
+
+    def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self):
+        """1 mountable ntfs partition and only warn file can be
formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['dataloss_warning_readme.txt']} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) + self.assertTrue(value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_through_realpath_is_true(self): + """A symlink to a device with 1 ntfs partition can be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath, + preserve_ntfs=False) + self.assertTrue(value) + self.assertIn("safe for", msg.lower()) + + def test_three_partition_through_realpath_is_false(self): + """A symlink to a device with 3 partitions can not be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'}, + epath + '-part2': {'num': 2, 'fs': 'ext3', + 'realpath': '/dev/sdb2'}, + epath + '-part3': {'num': 3, 'fs': 'ext', + 'realpath': '/dev/sdb3'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath, + preserve_ntfs=False) + self.assertFalse(value) + self.assertIn("3 or more", msg.lower()) + + def test_ntfs_mount_errors_true(self): + """can_dev_be_reformatted does not fail if NTFS is unknown fstype.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} + }}}) + + error_msgs = [ + "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL + "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES + ] + + for err_msg in error_msgs: + self.m_mount_cb.side_effect = MountFailedError( + "Failed mounting %s to %s due to: \nUnexpected.\n%s" % + ('/dev/sda', '/fake-tmp/dir', err_msg)) + + value, msg = dsaz.can_dev_be_reformatted('/dev/sda', + preserve_ntfs=False) + self.assertTrue(value) + self.assertIn('cannot mount NTFS, assuming', msg) + + def test_never_destroy_ntfs_config_false(self): + """Normally formattable situation with never_destroy_ntfs set.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['dataloss_warning_readme.txt']} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=True) + self.assertFalse(value) + self.assertIn("config says to never destroy NTFS " + "(datasource.Azure.never_destroy_ntfs)", msg) + + +class TestClearCachedData(CiTestCase): + + def test_clear_cached_attrs_clears_imds(self): + """All class attributes are reset to defaults, including imds data.""" + tmp = self.tmp_dir() + paths = helpers.Paths( + {'cloud_dir': tmp, 'run_dir': tmp}) + dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths) + clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds] + dsrc.metadata = 'md' + dsrc.userdata = 'ud' + dsrc._metadata_imds = 'imds' + dsrc._dirty_cache = True + dsrc.clear_cached_attrs() + self.assertEqual( + [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], + clean_values) + + +class TestAzureNetExists(CiTestCase): + + def test_azure_net_must_exist_for_legacy_objpkl(self): + """DataSourceAzureNet must exist for old obj.pkl files + that reference it.""" + self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) + + +class TestPreprovisioningReadAzureOvfFlag(CiTestCase): + + def 
test_read_azure_ovf_with_true_flag(self):
+        """The read_azure_ovf method should set the PreprovisionedVM
+        cfg flag if the proper setting is present."""
+        content = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVm": "True"})
+        ret = dsaz.read_azure_ovf(content)
+        cfg = ret[2]
+        self.assertTrue(cfg['PreprovisionedVm'])
+
+    def test_read_azure_ovf_with_false_flag(self):
+        """The read_azure_ovf method should set the PreprovisionedVM
+        cfg flag to false if the proper setting is false."""
+        content = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVm": "False"})
+        ret = dsaz.read_azure_ovf(content)
+        cfg = ret[2]
+        self.assertFalse(cfg['PreprovisionedVm'])
+
+    def test_read_azure_ovf_without_flag(self):
+        """The read_azure_ovf method should not set the
+        PreprovisionedVM cfg flag."""
+        content = construct_valid_ovf_env()
+        ret = dsaz.read_azure_ovf(content)
+        cfg = ret[2]
+        self.assertFalse(cfg['PreprovisionedVm'])
+        self.assertEqual(None, cfg["PreprovisionedVMType"])
+
+    def test_read_azure_ovf_with_running_type(self):
+        """The read_azure_ovf method should set PreprovisionedVMType
+        cfg flag to Running."""
+        content = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVMType": "Running",
+                               "PreprovisionedVm": "True"})
+        ret = dsaz.read_azure_ovf(content)
+        cfg = ret[2]
+        self.assertTrue(cfg['PreprovisionedVm'])
+        self.assertEqual("Running", cfg['PreprovisionedVMType'])
+
+    def test_read_azure_ovf_with_savable_type(self):
+        """The read_azure_ovf method should set PreprovisionedVMType
+        cfg flag to Savable."""
+        content = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVMType": "Savable",
+                               "PreprovisionedVm": "True"})
+        ret = dsaz.read_azure_ovf(content)
+        cfg = ret[2]
+        self.assertTrue(cfg['PreprovisionedVm'])
+        self.assertEqual("Savable", cfg['PreprovisionedVMType'])
+
+
+@mock.patch('os.path.isfile')
+class TestPreprovisioningShouldReprovision(CiTestCase):
+
+    def setUp(self):
+        super(TestPreprovisioningShouldReprovision, self).setUp()
+        tmp = self.tmp_dir()
+        self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
+        self.paths = helpers.Paths({'cloud_dir': tmp})
+        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+
+    @mock.patch(MOCKPATH + 'util.write_file')
+    def test__should_reprovision_with_true_cfg(self, isfile, write_f):
+        """The _should_reprovision method should return true with config
+        flag present."""
+        isfile.return_value = False
+        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+        self.assertTrue(dsa._should_reprovision(
+            (None, None, {'PreprovisionedVm': True}, None)))
+
+    def test__should_reprovision_with_file_existing(self, isfile):
+        """The _should_reprovision method should return True if the sentinel
+        exists."""
+        isfile.return_value = True
+        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+        self.assertTrue(dsa._should_reprovision(
+            (None, None, {'preprovisionedvm': False}, None)))
+
+    def test__should_reprovision_returns_false(self, isfile):
+        """The _should_reprovision method should return False
+        if config and sentinel are not present."""
+        isfile.return_value = False
+        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+        self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
+
+    @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
+    def test__should_reprovision_uses_imds_md(self, write_file, isfile):
+        """The _should_reprovision method should be able to
+        retrieve the preprovisioning VM type from imds metadata"""
+        isfile.return_value = False
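+        # No sentinel file and no OVF flag: only the IMDS ppsType value
+        # passed below should be able to trigger reprovisioning.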
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) + self.assertTrue(dsa._should_reprovision( + (None, None, {}, None), + {'extended': {'compute': {'ppsType': 'Running'}}})) + self.assertFalse(dsa._should_reprovision( + (None, None, {}, None), + {})) + self.assertFalse(dsa._should_reprovision( + (None, None, {}, None), + {'extended': {'compute': {"hasCustomData": False}}})) + + @mock.patch(MOCKPATH + 'DataSourceAzure._poll_imds') + def test_reprovision_calls__poll_imds(self, _poll_imds, isfile): + """_reprovision will poll IMDS.""" + isfile.return_value = False + hostname = "myhost" + username = "myuser" + odata = {'HostName': hostname, 'UserName': username} + _poll_imds.return_value = construct_valid_ovf_env(data=odata) + dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) + dsa._reprovision() + _poll_imds.assert_called_with() + + +class TestPreprovisioningHotAttachNics(CiTestCase): + + def setUp(self): + super(TestPreprovisioningHotAttachNics, self).setUp() + self.tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp) + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + + @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_detach_event', + autospec=True) + @mock.patch(MOCKPATH + 'util.write_file', autospec=True) + def test_nic_detach_writes_marker(self, m_writefile, m_detach): + """When we detect that a nic gets detached, we write a marker for it""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + nl_sock = mock.MagicMock() + dsa._wait_for_nic_detach(nl_sock) + m_detach.assert_called_with(nl_sock) + self.assertEqual(1, m_detach.call_count) + m_writefile.assert_called_with( + dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY) + + @mock.patch(MOCKPATH + 'util.write_file', autospec=True) + @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') + @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') + @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') + def test_detect_nic_attach_reports_ready_and_waits_for_detach( + self, m_detach, m_report_ready, m_dhcp, m_fallback_if, + m_writefile): + """Report ready first and then wait for nic detach""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + dsa._wait_for_all_nics_ready() + m_fallback_if.return_value = "Dummy interface" + self.assertEqual(1, m_report_ready.call_count) + self.assertEqual(1, m_detach.call_count) + self.assertEqual(1, m_writefile.call_count) + self.assertEqual(1, m_dhcp.call_count) + m_writefile.assert_called_with(dsaz.REPORTED_READY_MARKER_FILE, + mock.ANY) + + @mock.patch('os.path.isfile') + @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') + @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') + @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') + def test_detect_nic_attach_skips_report_ready_when_marker_present( + self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile): + """Skip reporting ready if we already have a marker file.""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + + def isfile(key): + return key == dsaz.REPORTED_READY_MARKER_FILE + + m_isfile.side_effect = isfile + dsa._wait_for_all_nics_ready() + m_fallback_if.return_value = "Dummy interface" + self.assertEqual(0, m_report_ready.call_count) + 
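+        # Neither report-ready nor DHCP should have run; only the detach wait.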
self.assertEqual(0, m_dhcp.call_count) + self.assertEqual(1, m_detach.call_count) + + @mock.patch('os.path.isfile') + @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') + @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') + @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') + def test_detect_nic_attach_skips_nic_detach_when_marker_present( + self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile): + """Skip wait for nic detach if it already happened.""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + + m_isfile.return_value = True + dsa._wait_for_all_nics_ready() + m_fallback_if.return_value = "Dummy interface" + self.assertEqual(0, m_report_ready.call_count) + self.assertEqual(0, m_dhcp.call_count) + self.assertEqual(0, m_detach.call_count) + + @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up', autospec=True) + @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event') + @mock.patch('cloudinit.sources.net.find_fallback_nic') + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') + @mock.patch('os.path.isfile') + def test_wait_for_nic_attach_if_no_fallback_interface( + self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if, + m_attach, m_link_up): + """Wait for nic attach if we do not have a fallback interface""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + + m_isfile.return_value = True + m_attach.return_value = "eth0" + dhcp_ctx = mock.MagicMock(lease=lease) + dhcp_ctx.obtain_lease.return_value = lease + m_dhcpv4.return_value = dhcp_ctx + m_imds.return_value = IMDS_NETWORK_METADATA + m_fallback_if.return_value = None + + dsa._wait_for_all_nics_ready() + + self.assertEqual(0, m_detach.call_count) + self.assertEqual(1, m_attach.call_count) + self.assertEqual(1, m_dhcpv4.call_count) + self.assertEqual(1, m_imds.call_count) + self.assertEqual(1, m_link_up.call_count) + m_link_up.assert_called_with(mock.ANY, "eth0") + + @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up') + @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event') + @mock.patch('cloudinit.sources.net.find_fallback_nic') + @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') + @mock.patch('os.path.isfile') + def test_wait_for_nic_attach_multinic_attach( + self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if, + m_attach, m_link_up): + """Wait for nic attach if we do not have a fallback interface""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + m_attach_call_count = 0 + + def nic_attach_ret(nl_sock, nics_found): + nonlocal m_attach_call_count + m_attach_call_count = m_attach_call_count + 1 + if m_attach_call_count == 1: + return "eth0" + elif m_attach_call_count == 2: + return "eth1" + raise RuntimeError("Must have found primary nic by now.") + + # Simulate two NICs by adding the same one twice. 
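+        # Only the primary NIC (eth0) answers IMDS; eth1 will time out below.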
+ md = { + "interface": [ + IMDS_NETWORK_METADATA['interface'][0], + IMDS_NETWORK_METADATA['interface'][0] + ] + } + + def network_metadata_ret(ifname, retries, type, exc_cb, infinite): + if ifname == "eth0": + return md + raise requests.Timeout('Fake connection timeout') + + m_isfile.return_value = True + m_attach.side_effect = nic_attach_ret + dhcp_ctx = mock.MagicMock(lease=lease) + dhcp_ctx.obtain_lease.return_value = lease + m_dhcpv4.return_value = dhcp_ctx + m_imds.side_effect = network_metadata_ret + m_fallback_if.return_value = None + + dsa._wait_for_all_nics_ready() + + self.assertEqual(0, m_detach.call_count) + self.assertEqual(2, m_attach.call_count) + # DHCP and network metadata calls will only happen on the primary NIC. + self.assertEqual(1, m_dhcpv4.call_count) + self.assertEqual(1, m_imds.call_count) + self.assertEqual(2, m_link_up.call_count) + + @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + def test_check_if_nic_is_primary_retries_on_failures( + self, m_dhcpv4, m_imds): + """Retry polling for network metadata on all failures except timeout + and network unreachable errors""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + + eth0Retries = [] + eth1Retries = [] + # Simulate two NICs by adding the same one twice. + md = { + "interface": [ + IMDS_NETWORK_METADATA['interface'][0], + IMDS_NETWORK_METADATA['interface'][0] + ] + } + + def network_metadata_ret(ifname, retries, type, exc_cb, infinite): + nonlocal eth0Retries, eth1Retries + + # Simulate readurl functionality with retries and + # exception callbacks so that the callback logic can be + # validated. + if ifname == "eth0": + cause = requests.HTTPError() + for _ in range(0, 15): + error = url_helper.UrlError(cause=cause, code=410) + eth0Retries.append(exc_cb("No goal state.", error)) + else: + for _ in range(0, 10): + # We are expected to retry for a certain period for both + # timeout errors and network unreachable errors. + if _ < 5: + cause = requests.Timeout('Fake connection timeout') + else: + cause = requests.ConnectionError('Network Unreachable') + error = url_helper.UrlError(cause=cause) + eth1Retries.append(exc_cb("Connection timeout", error)) + # Should stop retrying after 10 retries + eth1Retries.append(exc_cb("Connection timeout", error)) + raise cause + return md + + m_imds.side_effect = network_metadata_ret + + dhcp_ctx = mock.MagicMock(lease=lease) + dhcp_ctx.obtain_lease.return_value = lease + m_dhcpv4.return_value = dhcp_ctx + + is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0") + self.assertEqual(True, is_primary) + self.assertEqual(2, expected_nic_count) + + # All Eth0 errors are non-timeout errors. So we should have been + # retrying indefinitely until success. + for i in eth0Retries: + self.assertTrue(i) + + is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1") + self.assertEqual(False, is_primary) + + # All Eth1 errors are timeout errors. Retry happens for a max of 10 and + # then we should have moved on assuming it is not the primary nic. 
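+        # The first 10 exception callbacks request a retry; the 11th gives up.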
+        for i in range(0, 10):
+            self.assertTrue(eth1Retries[i])
+        self.assertFalse(eth1Retries[10])
+
+    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+    def test_wait_for_link_up_returns_if_already_up(
+            self, m_is_link_up):
+        """Waiting for link to be up should return immediately if the link is
+        already up."""
+
+        distro_cls = distros.fetch('ubuntu')
+        distro = distro_cls('ubuntu', {}, self.paths)
+        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+        m_is_link_up.return_value = True
+
+        dsa.wait_for_link_up("eth0")
+        self.assertEqual(1, m_is_link_up.call_count)
+
+    @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
+    @mock.patch(MOCKPATH + 'util.write_file')
+    @mock.patch('cloudinit.net.read_sys_net')
+    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+    def test_wait_for_link_up_checks_link_after_sleep(
+            self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up):
+        """Waiting for link to be up should sleep and check the link state
+        again when the first attempt to bring the link up fails."""
+
+        distro_cls = distros.fetch('ubuntu')
+        distro = distro_cls('ubuntu', {}, self.paths)
+        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+        m_try_set_link_up.return_value = False
+
+        callcount = 0
+
+        def is_up_mock(key):
+            nonlocal callcount
+            if callcount == 0:
+                callcount += 1
+                return False
+            return True
+
+        m_is_up.side_effect = is_up_mock
+
+        dsa.wait_for_link_up("eth0")
+        self.assertEqual(2, m_try_set_link_up.call_count)
+        self.assertEqual(2, m_is_up.call_count)
+
+    @mock.patch(MOCKPATH + 'util.write_file')
+    @mock.patch('cloudinit.net.read_sys_net')
+    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+    def test_wait_for_link_up_writes_to_device_file(
+            self, m_is_link_up, m_read_sys_net, m_writefile):
+        """Waiting for link to be up should fall back to writing to the
+        device file when the link does not come up on its own."""
+
+        distro_cls = distros.fetch('ubuntu')
+        distro = distro_cls('ubuntu', {}, self.paths)
+        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+        callcount = 0
+
+        def linkup(key):
+            nonlocal callcount
+            if callcount == 0:
+                callcount += 1
+                return False
+            return True
+
+        m_is_link_up.side_effect = linkup
+
+        dsa.wait_for_link_up("eth0")
+        self.assertEqual(2, m_is_link_up.call_count)
+        self.assertEqual(1, m_read_sys_net.call_count)
+        self.assertEqual(2, m_writefile.call_count)
+
+    @mock.patch('cloudinit.sources.helpers.netlink.'
+                'create_bound_netlink_socket')
+    def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
+        """Waiting for all nics should raise exception if netlink socket
+        creation fails."""
+
+        m_socket.side_effect = netlink.NetlinkCreateSocketError
+        distro_cls = distros.fetch('ubuntu')
+        distro = distro_cls('ubuntu', {}, self.paths)
+        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+        self.assertRaises(netlink.NetlinkCreateSocketError,
+                          dsa._wait_for_all_nics_ready)
+
+
+@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+@mock.patch('cloudinit.sources.helpers.netlink.'
+ 'wait_for_media_disconnect_connect') +@mock.patch('requests.Session.request') +@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') +class TestPreprovisioningPollIMDS(CiTestCase): + + def setUp(self): + super(TestPreprovisioningPollIMDS, self).setUp() + self.tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp) + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + + @mock.patch('time.sleep', mock.MagicMock()) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, m_report_ready, + m_request, m_media_switch, m_dhcp, + m_net): + """The poll_imds will retry DHCP on IMDS timeout.""" + report_file = self.tmp_path('report_marker', self.tmp) + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + m_dhcp.return_value = [lease] + m_media_switch.return_value = None + dhcp_ctx = mock.MagicMock(lease=lease) + dhcp_ctx.obtain_lease.return_value = lease + m_dhcpv4.return_value = dhcp_ctx + + self.tries = 0 + + def fake_timeout_once(**kwargs): + self.tries += 1 + if self.tries == 1: + raise requests.Timeout('Fake connection timeout') + elif self.tries in (2, 3): + response = requests.Response() + response.status_code = 404 if self.tries == 2 else 410 + raise requests.exceptions.HTTPError( + "fake {}".format(response.status_code), response=response + ) + # Third try should succeed and stop retries or redhcp + return mock.MagicMock(status_code=200, text="good", content="good") + + m_request.side_effect = fake_timeout_once + + dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) + with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file): + dsa._poll_imds() + self.assertEqual(m_report_ready.call_count, 1) + m_report_ready.assert_called_with(lease=lease) + self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls') + self.assertEqual(4, self.tries, 'Expected 4 total reads from IMDS') + + @mock.patch('os.path.isfile') + def test_poll_imds_skips_dhcp_if_ctx_present( + self, m_isfile, report_ready_func, fake_resp, m_media_switch, + m_dhcp, m_net): + """The poll_imds function should reuse the dhcp ctx if it is already + present. This happens when we wait for nic to be hot-attached before + polling for reprovisiondata. Note that if this ctx is set when + _poll_imds is called, then it is not expected to be waiting for + media_disconnect_connect either.""" + report_file = self.tmp_path('report_marker', self.tmp) + m_isfile.return_value = True + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + dsa._ephemeral_dhcp_ctx = "Dummy dhcp ctx" + with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file): + dsa._poll_imds() + self.assertEqual(0, m_dhcp.call_count) + self.assertEqual(0, m_media_switch.call_count) + + @mock.patch('os.path.isfile') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + def test_poll_imds_does_dhcp_on_retries_if_ctx_present( + self, m_ephemeral_dhcpv4, m_isfile, report_ready_func, m_request, + m_media_switch, m_dhcp, m_net): + """The poll_imds function should reuse the dhcp ctx if it is already + present. This happens when we wait for nic to be hot-attached before + polling for reprovisiondata. 
Note that if this ctx is set when
+        _poll_imds is called, then it is not expected to be waiting for
+        media_disconnect_connect either."""
+
+        tries = 0
+
+        def fake_timeout_once(**kwargs):
+            nonlocal tries
+            tries += 1
+            if tries == 1:
+                raise requests.Timeout('Fake connection timeout')
+            return mock.MagicMock(status_code=200, text="good", content="good")
+
+        m_request.side_effect = fake_timeout_once
+        report_file = self.tmp_path('report_marker', self.tmp)
+        m_isfile.return_value = True
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file),\
+                mock.patch.object(dsa, '_ephemeral_dhcp_ctx') as m_dhcp_ctx:
+            m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
+            dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
+            dsa._poll_imds()
+        self.assertEqual(1, m_dhcp_ctx.clean_network.call_count)
+        self.assertEqual(1, m_ephemeral_dhcpv4.call_count)
+        self.assertEqual(0, m_media_switch.call_count)
+        self.assertEqual(2, m_request.call_count)
+
+    def test_does_not_poll_imds_report_ready_when_marker_file_exists(
+            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+        """poll_imds should not call report ready when the reported ready
+        marker file exists"""
+        report_file = self.tmp_path('report_marker', self.tmp)
+        write_file(report_file, content='dont run report_ready :)')
+        m_dhcp.return_value = [{
+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+            'unknown-245': '624c3620'}]
+        m_media_switch.return_value = None
+        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+            dsa._poll_imds()
+        self.assertEqual(m_report_ready.call_count, 0)
+
+    def test_poll_imds_report_ready_success_writes_marker_file(
+            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+        """poll_imds should write the report_ready marker file if
+        reporting ready succeeds"""
+        report_file = self.tmp_path('report_marker', self.tmp)
+        m_dhcp.return_value = [{
+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+            'unknown-245': '624c3620'}]
+        m_media_switch.return_value = None
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        self.assertFalse(os.path.exists(report_file))
+        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+            dsa._poll_imds()
+        self.assertEqual(m_report_ready.call_count, 1)
+        self.assertTrue(os.path.exists(report_file))
+
+    def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
+            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+        """poll_imds should raise InvalidMetaDataException and not write the
+        report_ready marker file when reporting ready fails"""
+        report_file = self.tmp_path('report_marker', self.tmp)
+        m_dhcp.return_value = [{
+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+            'unknown-245': '624c3620'}]
+        m_media_switch.return_value = None
+        m_report_ready.return_value = False
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        self.assertFalse(os.path.exists(report_file))
+        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+            self.assertRaises(
+                InvalidMetaDataException,
+                dsa._poll_imds)
+        self.assertEqual(m_report_ready.call_count, 1)
+        self.assertFalse(os.path.exists(report_file))
+
+
+@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready', mock.MagicMock())
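+# The MagicMock() patches here are plain no-ops for every test in the class;
+# the remaining patches below are passed to each test as mock arguments.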
+@mock.patch(MOCKPATH + 'subp.subp', mock.MagicMock()) +@mock.patch(MOCKPATH + 'util.write_file', mock.MagicMock()) +@mock.patch('cloudinit.sources.helpers.netlink.' + 'wait_for_media_disconnect_connect') +@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network', autospec=True) +@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') +@mock.patch('requests.Session.request') +class TestAzureDataSourcePreprovisioning(CiTestCase): + + def setUp(self): + super(TestAzureDataSourcePreprovisioning, self).setUp() + tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) + self.paths = helpers.Paths({'cloud_dir': tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + + def test_poll_imds_returns_ovf_env(self, m_request, + m_dhcp, m_net, + m_media_switch): + """The _poll_imds method should return the ovf_env.xml.""" + m_media_switch.return_value = None + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}] + url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01' + host = "169.254.169.254" + full_url = url.format(host) + m_request.return_value = mock.MagicMock(status_code=200, text="ovf", + content="ovf") + dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) + self.assertTrue(len(dsa._poll_imds()) > 0) + self.assertEqual(m_request.call_args_list, + [mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs() + }, method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + url=full_url)]) + self.assertEqual(m_dhcp.call_count, 2) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) + self.assertEqual(m_net.call_count, 2) + + def test__reprovision_calls__poll_imds(self, m_request, + m_dhcp, m_net, + m_media_switch): + """The _reprovision method should call poll IMDS.""" + m_media_switch.return_value = None + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'}] + url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01' + host = "169.254.169.254" + full_url = url.format(host) + hostname = "myhost" + username = "myuser" + odata = {'HostName': hostname, 'UserName': username} + content = construct_valid_ovf_env(data=odata) + m_request.return_value = mock.MagicMock(status_code=200, text=content, + content=content) + dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) + md, _ud, cfg, _d = dsa._reprovision() + self.assertEqual(md['local-hostname'], hostname) + self.assertEqual(cfg['system_info']['default_user']['name'], username) + self.assertIn( + mock.call( + allow_redirects=True, + headers={ + 'Metadata': 'true', + 'User-Agent': 'Cloud-Init/%s' % vs() + }, + method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + url=full_url + ), + m_request.call_args_list) + self.assertEqual(m_dhcp.call_count, 2) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) + self.assertEqual(m_net.call_count, 2) + + +class TestRemoveUbuntuNetworkConfigScripts(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestRemoveUbuntuNetworkConfigScripts, self).setUp() + self.tmp = self.tmp_dir() + + def test_remove_network_scripts_removes_both_files_and_directories(self): 
+ """Any files or directories in paths are removed when present.""" + file1 = self.tmp_path('file1', dir=self.tmp) + subdir = self.tmp_path('sub1', dir=self.tmp) + subfile = self.tmp_path('leaf1', dir=subdir) + write_file(file1, 'file1content') + write_file(subfile, 'leafcontent') + dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1]) + + for path in (file1, subdir, subfile): + self.assertFalse(os.path.exists(path), + 'Found unremoved: %s' % path) + + expected_logs = [ + 'INFO: Removing Ubuntu extended network scripts because cloud-init' + ' updates Azure network configuration on the following events:' + " ['boot', 'boot-legacy']", + 'Recursively deleting %s' % subdir, + 'Attempting to remove %s' % file1] + for log in expected_logs: + self.assertIn(log, self.logs.getvalue()) + + def test_remove_network_scripts_only_attempts_removal_if_path_exists(self): + """Any files or directories absent are skipped without error.""" + dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[ + self.tmp_path('nodirhere/', dir=self.tmp), + self.tmp_path('notfilehere', dir=self.tmp)]) + self.assertNotIn('/not/a', self.logs.getvalue()) # No delete logs + + @mock.patch(MOCKPATH + 'os.path.exists') + def test_remove_network_scripts_default_removes_stock_scripts(self, + m_exists): + """Azure's stock ubuntu image scripts and artifacts are removed.""" + # Report path absent on all to avoid delete operation + m_exists.return_value = False + dsaz.maybe_remove_ubuntu_network_config_scripts() + calls = m_exists.call_args_list + for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS: + self.assertIn(mock.call(path), calls) + + +class TestWBIsPlatformViable(CiTestCase): + """White box tests for _is_platform_viable.""" + with_logs = True + + @mock.patch(MOCKPATH + 'dmi.read_dmi_data') + def test_true_on_non_azure_chassis(self, m_read_dmi_data): + """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG.""" + m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + self.assertTrue(dsaz._is_platform_viable('doesnotmatter')) + + @mock.patch(MOCKPATH + 'os.path.exists') + @mock.patch(MOCKPATH + 'dmi.read_dmi_data') + def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist): + """Return True if ovf-env.xml exists in known seed dirs.""" + # Non-matching Azure chassis-asset-tag + m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' + + m_exist.return_value = True + self.assertTrue(dsaz._is_platform_viable('/some/seed/dir')) + m_exist.called_once_with('/other/seed/dir') + + def test_false_on_no_matching_azure_criteria(self): + """Report non-azure on unmatched asset tag, ovf-env absent and no dev. + + Return False when the asset tag doesn't match Azure's static + AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs + and no devices have a label starting with prefix 'rd_rdfe_'. 
+ """ + self.assertFalse(wrap_and_call( + MOCKPATH, + {'os.path.exists': False, + # Non-matching Azure chassis-asset-tag + 'dmi.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X', + 'subp.which': None}, + dsaz._is_platform_viable, 'doesnotmatter')) + self.assertIn( + "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format( + dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'), + self.logs.getvalue()) + + +class TestRandomSeed(CiTestCase): + """Test proper handling of random_seed""" + + def test_non_ascii_seed_is_serializable(self): + """Pass if a random string from the Azure infrastructure which + contains at least one non-Unicode character can be converted to/from + JSON without alteration and without throwing an exception. + """ + path = resourceLocation("azure/non_unicode_random_string") + result = dsaz._get_random_seed(path) + + obj = {'seed': result} + try: + serialized = json_dumps(obj) + deserialized = load_json(serialized) + except UnicodeDecodeError: + self.fail("Non-serializable random seed returned") + + self.assertEqual(deserialized['seed'], result) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py new file mode 100644 index 00000000..24c582c2 --- /dev/null +++ b/tests/unittests/sources/test_azure_helper.py @@ -0,0 +1,1441 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import copy +import os +import re +import unittest +from textwrap import dedent +from xml.etree import ElementTree +from xml.sax.saxutils import escape, unescape + +from cloudinit.sources.helpers import azure as azure_helper +from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir + +from cloudinit.util import load_file +from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim + +GOAL_STATE_TEMPLATE = """\ + + + 2012-11-30 + {incarnation} + + Started + 300000 + + 16001 + + FALSE + + + {container_id} + + + {instance_id} + Started + + + http://100.86.192.70:80/...hostingEnvironmentConfig... + + http://100.86.192.70:80/..SharedConfig.. + + http://100.86.192.70:80/...extensionsConfig... + + http://100.86.192.70:80/...fullConfig... + {certificates_url} + 68ce47.0.68ce47.0.utl-trusty--292258.1.xml + + + + + +""" + +HEALTH_REPORT_XML_TEMPLATE = '''\ + + + {incarnation} + + {container_id} + + + {instance_id} + + {health_status} + {health_detail_subsection} + + + + + +''' + +HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent('''\ +
+    <Details>
+      <SubStatus>{health_substatus}</SubStatus>
+      <Description>{health_description}</Description>
+    </Details>
+ ''') + +HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512 + + +class SentinelException(Exception): + pass + + +class TestFindEndpoint(CiTestCase): + + def setUp(self): + super(TestFindEndpoint, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.load_file = patches.enter_context( + mock.patch.object(azure_helper.util, 'load_file')) + + self.dhcp_options = patches.enter_context( + mock.patch.object(wa_shim, '_load_dhclient_json')) + + self.networkd_leases = patches.enter_context( + mock.patch.object(wa_shim, '_networkd_get_value_from_leases')) + self.networkd_leases.return_value = None + + def test_missing_file(self): + """wa_shim find_endpoint uses default endpoint if leasefile not found + """ + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") + + def test_missing_special_azure_line(self): + """wa_shim find_endpoint uses default endpoint if leasefile is found + but does not contain DHCP Option 245 (whose value is the endpoint) + """ + self.load_file.return_value = '' + self.dhcp_options.return_value = {'eth0': {'key': 'value'}} + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") + + @staticmethod + def _build_lease_content(encoded_address): + endpoint = azure_helper._get_dhcp_endpoint_option_name() + return '\n'.join([ + 'lease {', + ' interface "eth0";', + ' option {0} {1};'.format(endpoint, encoded_address), + '}']) + + def test_from_dhcp_client(self): + self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}} + self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None)) + + def test_latest_lease_used(self): + encoded_addresses = ['5:4:3:2', '4:3:2:1'] + file_content = '\n'.join([self._build_lease_content(encoded_address) + for encoded_address in encoded_addresses]) + self.load_file.return_value = file_content + self.assertEqual(encoded_addresses[-1].replace(':', '.'), + wa_shim.find_endpoint("foobar")) + + +class TestExtractIpAddressFromLeaseValue(CiTestCase): + + def test_hex_string(self): + ip_address, encoded_address = '98.76.54.32', '62:4c:36:20' + self.assertEqual( + ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) + + def test_hex_string_with_single_character_part(self): + ip_address, encoded_address = '4.3.2.1', '4:3:2:1' + self.assertEqual( + ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) + + def test_packed_string(self): + ip_address, encoded_address = '98.76.54.32', 'bL6 ' + self.assertEqual( + ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) + + def test_packed_string_with_escaped_quote(self): + ip_address, encoded_address = '100.72.34.108', 'dH\\"l' + self.assertEqual( + ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) + + def test_packed_string_containing_a_colon(self): + ip_address, encoded_address = '100.72.58.108', 'dH:l' + self.assertEqual( + ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) + + +class TestGoalStateParsing(CiTestCase): + + default_parameters = { + 'incarnation': 1, + 'container_id': 'MyContainerId', + 'instance_id': 'MyInstanceId', + 'certificates_url': 'MyCertificatesUrl', + } + + def _get_formatted_goal_state_xml_string(self, **kwargs): + parameters = self.default_parameters.copy() + parameters.update(kwargs) + xml = GOAL_STATE_TEMPLATE.format(**parameters) + if parameters['certificates_url'] is None: + new_xml_lines = [] + for line in xml.splitlines(): + if 'Certificates' in line: + continue + new_xml_lines.append(line) + xml = '\n'.join(new_xml_lines) + return xml + + def _get_goal_state(self, m_azure_endpoint_client=None, **kwargs): + 
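+        # GoalState fetches certificates over HTTP; tests that do not care
+        # about that exchange just get a throwaway MagicMock client.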
if m_azure_endpoint_client is None:
+            m_azure_endpoint_client = mock.MagicMock()
+        xml = self._get_formatted_goal_state_xml_string(**kwargs)
+        return azure_helper.GoalState(xml, m_azure_endpoint_client)
+
+    def test_incarnation_parsed_correctly(self):
+        incarnation = '123'
+        goal_state = self._get_goal_state(incarnation=incarnation)
+        self.assertEqual(incarnation, goal_state.incarnation)
+
+    def test_container_id_parsed_correctly(self):
+        container_id = 'TestContainerId'
+        goal_state = self._get_goal_state(container_id=container_id)
+        self.assertEqual(container_id, goal_state.container_id)
+
+    def test_instance_id_parsed_correctly(self):
+        instance_id = 'TestInstanceId'
+        goal_state = self._get_goal_state(instance_id=instance_id)
+        self.assertEqual(instance_id, goal_state.instance_id)
+
+    def test_instance_id_byte_swap(self):
+        """Return true when previous_iid is byteswapped current_iid"""
+        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+        current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8"
+        self.assertTrue(
+            azure_helper.is_byte_swapped(previous_iid, current_iid))
+
+    def test_instance_id_no_byte_swap_same_instance_id(self):
+        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+        current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+        self.assertFalse(
+            azure_helper.is_byte_swapped(previous_iid, current_iid))
+
+    def test_instance_id_no_byte_swap_diff_instance_id(self):
+        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+        current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+        self.assertFalse(
+            azure_helper.is_byte_swapped(previous_iid, current_iid))
+
+    def test_certificates_xml_parsed_and_fetched_correctly(self):
+        m_azure_endpoint_client = mock.MagicMock()
+        certificates_url = 'TestCertificatesUrl'
+        goal_state = self._get_goal_state(
+            m_azure_endpoint_client=m_azure_endpoint_client,
+            certificates_url=certificates_url)
+        certificates_xml = goal_state.certificates_xml
+        self.assertEqual(1, m_azure_endpoint_client.get.call_count)
+        self.assertEqual(
+            certificates_url,
+            m_azure_endpoint_client.get.call_args[0][0])
+        self.assertTrue(
+            m_azure_endpoint_client.get.call_args[1].get(
+                'secure', False))
+        self.assertEqual(
+            m_azure_endpoint_client.get.return_value.contents,
+            certificates_xml)
+
+    def test_missing_certificates_skips_http_get(self):
+        m_azure_endpoint_client = mock.MagicMock()
+        goal_state = self._get_goal_state(
+            m_azure_endpoint_client=m_azure_endpoint_client,
+            certificates_url=None)
+        certificates_xml = goal_state.certificates_xml
+        self.assertEqual(0, m_azure_endpoint_client.get.call_count)
+        self.assertIsNone(certificates_xml)
+
+    def test_invalid_goal_state_xml_raises_parse_error(self):
+        xml = 'random non-xml data'
+        with self.assertRaises(ElementTree.ParseError):
+            azure_helper.GoalState(xml, mock.MagicMock())
+
+    def test_missing_container_id_in_goal_state_xml_raises_exc(self):
+        xml = self._get_formatted_goal_state_xml_string()
+        xml = re.sub('<ContainerId>.*</ContainerId>', '', xml)
+        with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
+            azure_helper.GoalState(xml, mock.MagicMock())
+
+    def test_missing_instance_id_in_goal_state_xml_raises_exc(self):
+        xml = self._get_formatted_goal_state_xml_string()
+        xml = re.sub('<InstanceId>.*</InstanceId>', '', xml)
+        with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
+            azure_helper.GoalState(xml, mock.MagicMock())
+
+    def test_missing_incarnation_in_goal_state_xml_raises_exc(self):
+        xml = self._get_formatted_goal_state_xml_string()
+        xml = re.sub('<Incarnation>.*</Incarnation>', '', xml)
+        with
self.assertRaises(azure_helper.InvalidGoalStateXMLException): + azure_helper.GoalState(xml, mock.MagicMock()) + + +class TestAzureEndpointHttpClient(CiTestCase): + + regular_headers = { + 'x-ms-agent-name': 'WALinuxAgent', + 'x-ms-version': '2012-11-30', + } + + def setUp(self): + super(TestAzureEndpointHttpClient, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + self.m_http_with_retries = patches.enter_context( + mock.patch.object(azure_helper, 'http_with_retries')) + + def test_non_secure_get(self): + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + url = 'MyTestUrl' + response = client.get(url, secure=False) + self.assertEqual(1, self.m_http_with_retries.call_count) + self.assertEqual(self.m_http_with_retries.return_value, response) + self.assertEqual( + mock.call(url, headers=self.regular_headers), + self.m_http_with_retries.call_args) + + def test_non_secure_get_raises_exception(self): + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + url = 'MyTestUrl' + self.m_http_with_retries.side_effect = SentinelException + self.assertRaises(SentinelException, client.get, url, secure=False) + self.assertEqual(1, self.m_http_with_retries.call_count) + + def test_secure_get(self): + url = 'MyTestUrl' + m_certificate = mock.MagicMock() + expected_headers = self.regular_headers.copy() + expected_headers.update({ + "x-ms-cipher-name": "DES_EDE3_CBC", + "x-ms-guest-agent-public-x509-cert": m_certificate, + }) + client = azure_helper.AzureEndpointHttpClient(m_certificate) + response = client.get(url, secure=True) + self.assertEqual(1, self.m_http_with_retries.call_count) + self.assertEqual(self.m_http_with_retries.return_value, response) + self.assertEqual( + mock.call(url, headers=expected_headers), + self.m_http_with_retries.call_args) + + def test_secure_get_raises_exception(self): + url = 'MyTestUrl' + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + self.m_http_with_retries.side_effect = SentinelException + self.assertRaises(SentinelException, client.get, url, secure=True) + self.assertEqual(1, self.m_http_with_retries.call_count) + + def test_post(self): + m_data = mock.MagicMock() + url = 'MyTestUrl' + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + response = client.post(url, data=m_data) + self.assertEqual(1, self.m_http_with_retries.call_count) + self.assertEqual(self.m_http_with_retries.return_value, response) + self.assertEqual( + mock.call(url, data=m_data, headers=self.regular_headers), + self.m_http_with_retries.call_args) + + def test_post_raises_exception(self): + m_data = mock.MagicMock() + url = 'MyTestUrl' + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + self.m_http_with_retries.side_effect = SentinelException + self.assertRaises(SentinelException, client.post, url, data=m_data) + self.assertEqual(1, self.m_http_with_retries.call_count) + + def test_post_with_extra_headers(self): + url = 'MyTestUrl' + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + extra_headers = {'test': 'header'} + client.post(url, extra_headers=extra_headers) + expected_headers = self.regular_headers.copy() + expected_headers.update(extra_headers) + self.assertEqual(1, self.m_http_with_retries.call_count) + self.assertEqual( + mock.call(url, data=mock.ANY, headers=expected_headers), + self.m_http_with_retries.call_args) + + def test_post_with_sleep_with_extra_headers_raises_exception(self): + m_data = mock.MagicMock() + url = 'MyTestUrl' + extra_headers = {'test': 'header'} + client = 
azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + self.m_http_with_retries.side_effect = SentinelException + self.assertRaises( + SentinelException, client.post, + url, data=m_data, extra_headers=extra_headers) + self.assertEqual(1, self.m_http_with_retries.call_count) + + +class TestAzureHelperHttpWithRetries(CiTestCase): + + with_logs = True + + max_readurl_attempts = 240 + default_readurl_timeout = 5 + sleep_duration_between_retries = 5 + periodic_logging_attempts = 12 + + def setUp(self): + super(TestAzureHelperHttpWithRetries, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.m_readurl = patches.enter_context( + mock.patch.object( + azure_helper.url_helper, 'readurl', mock.MagicMock())) + self.m_sleep = patches.enter_context( + mock.patch.object(azure_helper.time, 'sleep', autospec=True)) + + def test_http_with_retries(self): + self.m_readurl.return_value = 'TestResp' + self.assertEqual( + azure_helper.http_with_retries('testurl'), + self.m_readurl.return_value) + self.assertEqual(self.m_readurl.call_count, 1) + + def test_http_with_retries_propagates_readurl_exc_and_logs_exc( + self): + self.m_readurl.side_effect = SentinelException + + self.assertRaises( + SentinelException, azure_helper.http_with_retries, 'testurl') + self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts) + + self.assertIsNotNone( + re.search( + r'Failed HTTP request with Azure endpoint \S* during ' + r'attempt \d+ with exception: \S*', + self.logs.getvalue())) + self.assertIsNone( + re.search( + r'Successful HTTP request with Azure endpoint \S* after ' + r'\d+ attempts', + self.logs.getvalue())) + + def test_http_with_retries_delayed_success_due_to_temporary_readurl_exc( + self): + self.m_readurl.side_effect = \ + [SentinelException] * self.periodic_logging_attempts + \ + ['TestResp'] + self.m_readurl.return_value = 'TestResp' + + response = azure_helper.http_with_retries('testurl') + self.assertEqual( + response, + self.m_readurl.return_value) + self.assertEqual( + self.m_readurl.call_count, + self.periodic_logging_attempts + 1) + + # Ensure that cloud-init did sleep between each failed request + self.assertEqual( + self.m_sleep.call_count, + self.periodic_logging_attempts) + self.m_sleep.assert_called_with(self.sleep_duration_between_retries) + + def test_http_with_retries_long_delay_logs_periodic_failure_msg(self): + self.m_readurl.side_effect = \ + [SentinelException] * self.periodic_logging_attempts + \ + ['TestResp'] + self.m_readurl.return_value = 'TestResp' + + azure_helper.http_with_retries('testurl') + + self.assertEqual( + self.m_readurl.call_count, + self.periodic_logging_attempts + 1) + self.assertIsNotNone( + re.search( + r'Failed HTTP request with Azure endpoint \S* during ' + r'attempt \d+ with exception: \S*', + self.logs.getvalue())) + self.assertIsNotNone( + re.search( + r'Successful HTTP request with Azure endpoint \S* after ' + r'\d+ attempts', + self.logs.getvalue())) + + def test_http_with_retries_short_delay_does_not_log_periodic_failure_msg( + self): + self.m_readurl.side_effect = \ + [SentinelException] * \ + (self.periodic_logging_attempts - 1) + \ + ['TestResp'] + self.m_readurl.return_value = 'TestResp' + + azure_helper.http_with_retries('testurl') + self.assertEqual( + self.m_readurl.call_count, + self.periodic_logging_attempts) + + self.assertIsNone( + re.search( + r'Failed HTTP request with Azure endpoint \S* during ' + r'attempt \d+ with exception: \S*', + self.logs.getvalue())) + self.assertIsNotNone( + re.search( + r'Successful 
HTTP request with Azure endpoint \S* after ' + r'\d+ attempts', + self.logs.getvalue())) + + def test_http_with_retries_calls_url_helper_readurl_with_args_kwargs(self): + testurl = mock.MagicMock() + kwargs = { + 'headers': mock.MagicMock(), + 'data': mock.MagicMock(), + # timeout kwarg should not be modified or deleted if present + 'timeout': mock.MagicMock() + } + azure_helper.http_with_retries(testurl, **kwargs) + self.m_readurl.assert_called_once_with(testurl, **kwargs) + + def test_http_with_retries_adds_timeout_kwarg_if_not_present(self): + testurl = mock.MagicMock() + kwargs = { + 'headers': mock.MagicMock(), + 'data': mock.MagicMock() + } + expected_kwargs = copy.deepcopy(kwargs) + expected_kwargs['timeout'] = self.default_readurl_timeout + + azure_helper.http_with_retries(testurl, **kwargs) + self.m_readurl.assert_called_once_with(testurl, **expected_kwargs) + + def test_http_with_retries_deletes_retries_kwargs_passed_in( + self): + """http_with_retries already implements retry logic, + so url_helper.readurl should not have retries. + http_with_retries should delete kwargs that + cause url_helper.readurl to retry. + """ + testurl = mock.MagicMock() + kwargs = { + 'headers': mock.MagicMock(), + 'data': mock.MagicMock(), + 'timeout': mock.MagicMock(), + 'retries': mock.MagicMock(), + 'infinite': mock.MagicMock() + } + expected_kwargs = copy.deepcopy(kwargs) + expected_kwargs.pop('retries', None) + expected_kwargs.pop('infinite', None) + + azure_helper.http_with_retries(testurl, **kwargs) + self.m_readurl.assert_called_once_with(testurl, **expected_kwargs) + self.assertIn( + 'retries kwarg passed in for communication with Azure endpoint.', + self.logs.getvalue()) + self.assertIn( + 'infinite kwarg passed in for communication with Azure endpoint.', + self.logs.getvalue()) + + +class TestOpenSSLManager(CiTestCase): + + def setUp(self): + super(TestOpenSSLManager, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.subp = patches.enter_context( + mock.patch.object(azure_helper.subp, 'subp')) + try: + self.open = patches.enter_context( + mock.patch('__builtin__.open')) + except ImportError: + self.open = patches.enter_context( + mock.patch('builtins.open')) + + @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) + @mock.patch.object(azure_helper.temp_utils, 'mkdtemp') + def test_openssl_manager_creates_a_tmpdir(self, mkdtemp): + manager = azure_helper.OpenSSLManager() + self.assertEqual(mkdtemp.return_value, manager.tmpdir) + + def test_generate_certificate_uses_tmpdir(self): + subp_directory = {} + + def capture_directory(*args, **kwargs): + subp_directory['path'] = os.getcwd() + + self.subp.side_effect = capture_directory + manager = azure_helper.OpenSSLManager() + self.assertEqual(manager.tmpdir, subp_directory['path']) + manager.clean_up() + + @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) + @mock.patch.object(azure_helper.temp_utils, 'mkdtemp', mock.MagicMock()) + @mock.patch.object(azure_helper.util, 'del_dir') + def test_clean_up(self, del_dir): + manager = azure_helper.OpenSSLManager() + manager.clean_up() + self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) + + +class TestOpenSSLManagerActions(CiTestCase): + + def setUp(self): + super(TestOpenSSLManagerActions, self).setUp() + + self.allowed_subp = True + + def _data_file(self, name): + path = 'tests/data/azure' + return os.path.join(path, name) + + @unittest.skip("todo move to cloud_test") + def test_pubkey_extract(self): + cert = 
load_file(self._data_file('pubkey_extract_cert')) + good_key = load_file(self._data_file('pubkey_extract_ssh_key')) + sslmgr = azure_helper.OpenSSLManager() + key = sslmgr._get_ssh_key_from_cert(cert) + self.assertEqual(good_key, key) + + good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + fingerprint = sslmgr._get_fingerprint_from_cert(cert) + self.assertEqual(good_fingerprint, fingerprint) + + @unittest.skip("todo move to cloud_test") + @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') + def test_parse_certificates(self, mock_decrypt_certs): + """Azure control plane puts private keys as well as certificates + into the Certificates XML object. Make sure only the public keys + from certs are extracted and that fingerprints are converted to + the form specified in the ovf-env.xml file. + """ + cert_contents = load_file(self._data_file('parse_certificates_pem')) + fingerprints = load_file(self._data_file( + 'parse_certificates_fingerprints') + ).splitlines() + mock_decrypt_certs.return_value = cert_contents + sslmgr = azure_helper.OpenSSLManager() + keys_by_fp = sslmgr.parse_certificates('') + for fp in keys_by_fp.keys(): + self.assertIn(fp, fingerprints) + for fp in fingerprints: + self.assertIn(fp, keys_by_fp) + + +class TestGoalStateHealthReporter(CiTestCase): + + maxDiff = None + + default_parameters = { + 'incarnation': 1634, + 'container_id': 'MyContainerId', + 'instance_id': 'MyInstanceId' + } + + test_azure_endpoint = 'TestEndpoint' + test_health_report_url = 'http://{0}/machine?comp=health'.format( + test_azure_endpoint) + test_default_headers = {'Content-Type': 'text/xml; charset=utf-8'} + + provisioning_success_status = 'Ready' + provisioning_not_ready_status = 'NotReady' + provisioning_failure_substatus = 'ProvisioningFailed' + provisioning_failure_err_description = ( + 'Test error message containing provisioning failure details') + + def setUp(self): + super(TestGoalStateHealthReporter, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + patches.enter_context( + mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) + self.read_file_or_url = patches.enter_context( + mock.patch.object(azure_helper.url_helper, 'read_file_or_url')) + + self.post = patches.enter_context( + mock.patch.object(azure_helper.AzureEndpointHttpClient, + 'post')) + + self.GoalState = patches.enter_context( + mock.patch.object(azure_helper, 'GoalState')) + self.GoalState.return_value.container_id = \ + self.default_parameters['container_id'] + self.GoalState.return_value.instance_id = \ + self.default_parameters['instance_id'] + self.GoalState.return_value.incarnation = \ + self.default_parameters['incarnation'] + + def _text_from_xpath_in_xroot(self, xroot, xpath): + element = xroot.find(xpath) + if element is not None: + return element.text + return None + + def _get_formatted_health_report_xml_string(self, **kwargs): + return HEALTH_REPORT_XML_TEMPLATE.format(**kwargs) + + def _get_formatted_health_detail_subsection_xml_string(self, **kwargs): + return HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(**kwargs) + + def _get_report_ready_health_document(self): + return self._get_formatted_health_report_xml_string( + incarnation=escape(str(self.default_parameters['incarnation'])), + container_id=escape(self.default_parameters['container_id']), + instance_id=escape(self.default_parameters['instance_id']), + health_status=escape(self.provisioning_success_status), + health_detail_subsection='') + + def _get_report_failure_health_document(self): + 
health_detail_subsection = \ + self._get_formatted_health_detail_subsection_xml_string( + health_substatus=escape(self.provisioning_failure_substatus), + health_description=escape( + self.provisioning_failure_err_description)) + + return self._get_formatted_health_report_xml_string( + incarnation=escape(str(self.default_parameters['incarnation'])), + container_id=escape(self.default_parameters['container_id']), + instance_id=escape(self.default_parameters['instance_id']), + health_status=escape(self.provisioning_not_ready_status), + health_detail_subsection=health_detail_subsection) + + def test_send_ready_signal_sends_post_request(self): + with mock.patch.object( + azure_helper.GoalStateHealthReporter, + 'build_report') as m_build_report: + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + client, self.test_azure_endpoint) + reporter.send_ready_signal() + + self.assertEqual(1, self.post.call_count) + self.assertEqual( + mock.call( + self.test_health_report_url, + data=m_build_report.return_value, + extra_headers=self.test_default_headers), + self.post.call_args) + + def test_send_failure_signal_sends_post_request(self): + with mock.patch.object( + azure_helper.GoalStateHealthReporter, + 'build_report') as m_build_report: + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + client, self.test_azure_endpoint) + reporter.send_failure_signal( + description=self.provisioning_failure_err_description) + + self.assertEqual(1, self.post.call_count) + self.assertEqual( + mock.call( + self.test_health_report_url, + data=m_build_report.return_value, + extra_headers=self.test_default_headers), + self.post.call_args) + + def test_build_report_for_ready_signal_health_document(self): + health_document = self._get_report_ready_health_document() + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + azure_helper.AzureEndpointHttpClient(mock.MagicMock()), + self.test_azure_endpoint) + generated_health_document = reporter.build_report( + incarnation=self.default_parameters['incarnation'], + container_id=self.default_parameters['container_id'], + instance_id=self.default_parameters['instance_id'], + status=self.provisioning_success_status) + + self.assertEqual(health_document, generated_health_document) + + generated_xroot = ElementTree.fromstring(generated_health_document) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, './GoalStateIncarnation'), + str(self.default_parameters['incarnation'])) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, './Container/ContainerId'), + str(self.default_parameters['container_id'])) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/InstanceId'), + str(self.default_parameters['instance_id'])) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/State'), + escape(self.provisioning_success_status)) + self.assertIsNone( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details')) + self.assertIsNone( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details/SubStatus')) + self.assertIsNone( + 
self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details/Description') + ) + + def test_build_report_for_failure_signal_health_document(self): + health_document = self._get_report_failure_health_document() + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + azure_helper.AzureEndpointHttpClient(mock.MagicMock()), + self.test_azure_endpoint) + generated_health_document = reporter.build_report( + incarnation=self.default_parameters['incarnation'], + container_id=self.default_parameters['container_id'], + instance_id=self.default_parameters['instance_id'], + status=self.provisioning_not_ready_status, + substatus=self.provisioning_failure_substatus, + description=self.provisioning_failure_err_description) + + self.assertEqual(health_document, generated_health_document) + + generated_xroot = ElementTree.fromstring(generated_health_document) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, './GoalStateIncarnation'), + str(self.default_parameters['incarnation'])) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, './Container/ContainerId'), + self.default_parameters['container_id']) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/InstanceId'), + self.default_parameters['instance_id']) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/State'), + escape(self.provisioning_not_ready_status)) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details/' + 'SubStatus'), + escape(self.provisioning_failure_substatus)) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details/' + 'Description'), + escape(self.provisioning_failure_err_description)) + + def test_send_ready_signal_calls_build_report(self): + with mock.patch.object( + azure_helper.GoalStateHealthReporter, 'build_report' + ) as m_build_report: + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + azure_helper.AzureEndpointHttpClient(mock.MagicMock()), + self.test_azure_endpoint) + reporter.send_ready_signal() + + self.assertEqual(1, m_build_report.call_count) + self.assertEqual( + mock.call( + incarnation=self.default_parameters['incarnation'], + container_id=self.default_parameters['container_id'], + instance_id=self.default_parameters['instance_id'], + status=self.provisioning_success_status), + m_build_report.call_args) + + def test_send_failure_signal_calls_build_report(self): + with mock.patch.object( + azure_helper.GoalStateHealthReporter, 'build_report' + ) as m_build_report: + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + azure_helper.AzureEndpointHttpClient(mock.MagicMock()), + self.test_azure_endpoint) + reporter.send_failure_signal( + description=self.provisioning_failure_err_description) + + self.assertEqual(1, m_build_report.call_count) + self.assertEqual( + mock.call( + incarnation=self.default_parameters['incarnation'], + container_id=self.default_parameters['container_id'], + instance_id=self.default_parameters['instance_id'], + status=self.provisioning_not_ready_status, + substatus=self.provisioning_failure_substatus, + description=self.provisioning_failure_err_description), + 
m_build_report.call_args)
+
+    def test_build_report_escapes_chars(self):
+        incarnation = 'jd8\'9*&^<\'A><'
+        container_id = '&&<\'\'"\'jd8\'9*&^<\'A><'
+        instance_id = 'Opo>>>jas\'&d;[p&fp\"a<&aa\'sd!@&!)((*<&>'
+        health_status = '&<897\'\'*&*&dd"d&fp\"a<&aa>'
+        health_substatus = '&as\"d<d<\'^@!5&6<7'
+        health_description = '&&&>!#$\"&&><>&\"sd<67<]>>'
+
+        health_detail_subsection = \
+            self._get_formatted_health_detail_subsection_xml_string(
+                health_substatus=escape(health_substatus),
+                health_description=escape(health_description))
+        health_document = self._get_formatted_health_report_xml_string(
+            incarnation=escape(incarnation),
+            container_id=escape(container_id),
+            instance_id=escape(instance_id),
+            health_status=escape(health_status),
+            health_detail_subsection=health_detail_subsection)
+
+        reporter = azure_helper.GoalStateHealthReporter(
+            azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+            azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+            self.test_azure_endpoint)
+        generated_health_document = reporter.build_report(
+            incarnation=incarnation,
+            container_id=container_id,
+            instance_id=instance_id,
+            status=health_status,
+            substatus=health_substatus,
+            description=health_description)
+
+        self.assertEqual(health_document, generated_health_document)
+
+    def test_build_report_conforms_to_length_limits(self):
+        reporter = azure_helper.GoalStateHealthReporter(
+            azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+            azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+            self.test_azure_endpoint)
+        long_err_msg = 'a9&ea8>>>e as1< d\"q2*&(^%\'a=5<' * 100
+        generated_health_document = reporter.build_report(
+            incarnation=self.default_parameters['incarnation'],
+            container_id=self.default_parameters['container_id'],
+            instance_id=self.default_parameters['instance_id'],
+            status=self.provisioning_not_ready_status,
+            substatus=self.provisioning_failure_substatus,
+            description=long_err_msg)
+
+        generated_xroot = ElementTree.fromstring(generated_health_document)
+        generated_health_report_description = self._text_from_xpath_in_xroot(
+            generated_xroot,
+            './Container/RoleInstanceList/Role/Health/Details/Description')
+        self.assertEqual(
+            len(unescape(generated_health_report_description)),
+            HEALTH_REPORT_DESCRIPTION_TRIM_LEN)
+
+    def test_trim_description_then_escape_conforms_to_len_limits_worst_case(
+            self):
+        """When unescaped characters are XML-escaped, the length increases.
+        Char      Escape String
+        <         &lt;
+        >         &gt;
+        "         &quot;
+        '         &apos;
+        &         &amp;
+
+        We (step 1) trim the health report XML's description field,
+        and then (step 2) XML-escape the health report XML's description
+        field.
+
+        The health report XML's description field limit within cloud-init
+        is HEALTH_REPORT_DESCRIPTION_TRIM_LEN.
+
+        The Azure platform's limit on the health report XML's description
+        field is 4096 chars.
+
+        For the worst-case chars there is a 6x blowup in length when the
+        chars are XML-escaped: ' and " each escape to the six-character
+        entities &apos; and &quot;.
+
+        Ensure that (1) trimming and then (2) XML-escaping does not blow
+        past the Azure platform's limit for health report XML's
+        description field (4096 chars)
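+
+        For example, the worst-case input used below, '\'\"' * 10000,
+        is first trimmed to HEALTH_REPORT_DESCRIPTION_TRIM_LEN (512)
+        chars; escaping each trimmed char to a six-char entity then
+        yields at most 512 * 6 = 3072 chars, safely under 4096.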
+ """ + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + azure_helper.AzureEndpointHttpClient(mock.MagicMock()), + self.test_azure_endpoint) + long_err_msg = '\'\"' * 10000 + generated_health_document = reporter.build_report( + incarnation=self.default_parameters['incarnation'], + container_id=self.default_parameters['container_id'], + instance_id=self.default_parameters['instance_id'], + status=self.provisioning_not_ready_status, + substatus=self.provisioning_failure_substatus, + description=long_err_msg) + + generated_xroot = ElementTree.fromstring(generated_health_document) + generated_health_report_description = self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details/Description') + # The escaped description string should be less than + # the Azure platform limit for the escaped description string. + self.assertLessEqual(len(generated_health_report_description), 4096) + + +class TestWALinuxAgentShim(CiTestCase): + + def setUp(self): + super(TestWALinuxAgentShim, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.AzureEndpointHttpClient = patches.enter_context( + mock.patch.object(azure_helper, 'AzureEndpointHttpClient')) + self.find_endpoint = patches.enter_context( + mock.patch.object(wa_shim, 'find_endpoint')) + self.GoalState = patches.enter_context( + mock.patch.object(azure_helper, 'GoalState')) + self.OpenSSLManager = patches.enter_context( + mock.patch.object(azure_helper, 'OpenSSLManager', autospec=True)) + patches.enter_context( + mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) + + self.test_incarnation = 'TestIncarnation' + self.test_container_id = 'TestContainerId' + self.test_instance_id = 'TestInstanceId' + self.GoalState.return_value.incarnation = self.test_incarnation + self.GoalState.return_value.container_id = self.test_container_id + self.GoalState.return_value.instance_id = self.test_instance_id + + def test_eject_iso_is_called(self): + shim = wa_shim() + with mock.patch.object( + shim, 'eject_iso', autospec=True + ) as m_eject_iso: + shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0") + m_eject_iso.assert_called_once_with("/dev/sr0") + + def test_http_client_does_not_use_certificate_for_report_ready(self): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + self.assertEqual( + [mock.call(None)], + self.AzureEndpointHttpClient.call_args_list) + + def test_http_client_does_not_use_certificate_for_report_failure(self): + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + self.assertEqual( + [mock.call(None)], + self.AzureEndpointHttpClient.call_args_list) + + def test_correct_url_used_for_goalstate_during_report_ready(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + m_get = self.AzureEndpointHttpClient.return_value.get + self.assertEqual( + [mock.call('http://test_endpoint/machine/?comp=goalstate')], + m_get.call_args_list) + self.assertEqual( + [mock.call( + m_get.return_value.contents, + self.AzureEndpointHttpClient.return_value, + False + )], + self.GoalState.call_args_list) + + def test_correct_url_used_for_goalstate_during_report_failure(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + m_get = self.AzureEndpointHttpClient.return_value.get + self.assertEqual( + 
[mock.call('http://test_endpoint/machine/?comp=goalstate')], + m_get.call_args_list) + self.assertEqual( + [mock.call( + m_get.return_value.contents, + self.AzureEndpointHttpClient.return_value, + False + )], + self.GoalState.call_args_list) + + def test_certificates_used_to_determine_public_keys(self): + # if register_with_azure_and_fetch_data() isn't passed some info about + # the user's public keys, there's no point in even trying to parse the + # certificates + shim = wa_shim() + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, + {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] + certs = {'fp1': 'expected-key', + 'fp2': 'should-not-be-found', + 'fp3': 'expected-no-value-key', + } + sslmgr = self.OpenSSLManager.return_value + sslmgr.parse_certificates.return_value = certs + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) + self.assertEqual( + [mock.call(self.GoalState.return_value.certificates_xml)], + sslmgr.parse_certificates.call_args_list) + self.assertIn('expected-key', data['public-keys']) + self.assertIn('expected-no-value-key', data['public-keys']) + self.assertNotIn('should-not-be-found', data['public-keys']) + + def test_absent_certificates_produces_empty_public_keys(self): + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] + self.GoalState.return_value.certificates_xml = None + shim = wa_shim() + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) + self.assertEqual([], data['public-keys']) + + def test_correct_url_used_for_report_ready(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + expected_url = 'http://test_endpoint/machine?comp=health' + self.assertEqual( + [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], + self.AzureEndpointHttpClient.return_value.post + .call_args_list) + + def test_correct_url_used_for_report_failure(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + expected_url = 'http://test_endpoint/machine?comp=health' + self.assertEqual( + [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], + self.AzureEndpointHttpClient.return_value.post + .call_args_list) + + def test_goal_state_values_used_for_report_ready(self): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + posted_document = ( + self.AzureEndpointHttpClient.return_value.post + .call_args[1]['data'] + ) + self.assertIn(self.test_incarnation, posted_document) + self.assertIn(self.test_container_id, posted_document) + self.assertIn(self.test_instance_id, posted_document) + + def test_goal_state_values_used_for_report_failure(self): + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + posted_document = ( + self.AzureEndpointHttpClient.return_value.post + .call_args[1]['data'] + ) + self.assertIn(self.test_incarnation, posted_document) + self.assertIn(self.test_container_id, posted_document) + self.assertIn(self.test_instance_id, posted_document) + + def test_xml_elems_in_report_ready_post(self): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + health_document = HEALTH_REPORT_XML_TEMPLATE.format( + incarnation=escape(self.test_incarnation), + container_id=escape(self.test_container_id), + instance_id=escape(self.test_instance_id), + health_status=escape('Ready'), + health_detail_subsection='') + posted_document = ( + self.AzureEndpointHttpClient.return_value.post + .call_args[1]['data']) + 
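+        # A rough sketch of the posted document's shape, inferred from the
+        # XPath assertions earlier in this file (the exact template text is
+        # defined by HEALTH_REPORT_XML_TEMPLATE above this excerpt):
+        #   <Health>
+        #     <GoalStateIncarnation>...</GoalStateIncarnation>
+        #     <Container>
+        #       <ContainerId>...</ContainerId>
+        #       <RoleInstanceList><Role>
+        #         <InstanceId>...</InstanceId>
+        #         <Health><State>Ready</State></Health>
+        #       </Role></RoleInstanceList>
+        #     </Container>
+        #   </Health>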
self.assertEqual(health_document, posted_document) + + def test_xml_elems_in_report_failure_post(self): + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + health_document = HEALTH_REPORT_XML_TEMPLATE.format( + incarnation=escape(self.test_incarnation), + container_id=escape(self.test_container_id), + instance_id=escape(self.test_instance_id), + health_status=escape('NotReady'), + health_detail_subsection=HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE + .format( + health_substatus=escape('ProvisioningFailed'), + health_description=escape('TestDesc'))) + posted_document = ( + self.AzureEndpointHttpClient.return_value.post + .call_args[1]['data']) + self.assertEqual(health_document, posted_document) + + @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True) + def test_register_with_azure_and_fetch_data_calls_send_ready_signal( + self, m_goal_state_health_reporter): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + self.assertEqual( + 1, + m_goal_state_health_reporter.return_value.send_ready_signal + .call_count) + + @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True) + def test_register_with_azure_and_report_failure_calls_send_failure_signal( + self, m_goal_state_health_reporter): + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + m_goal_state_health_reporter.return_value.send_failure_signal \ + .assert_called_once_with(description='TestDesc') + + def test_register_with_azure_and_report_failure_does_not_need_certificates( + self): + shim = wa_shim() + with mock.patch.object( + shim, '_fetch_goal_state_from_azure', autospec=True + ) as m_fetch_goal_state_from_azure: + shim.register_with_azure_and_report_failure(description='TestDesc') + m_fetch_goal_state_from_azure.assert_called_once_with( + need_certificate=False) + + def test_clean_up_can_be_called_at_any_time(self): + shim = wa_shim() + shim.clean_up() + + def test_openssl_manager_not_instantiated_by_shim_report_status(self): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + shim.register_with_azure_and_report_failure(description='TestDesc') + shim.clean_up() + self.OpenSSLManager.assert_not_called() + + def test_clean_up_after_report_ready(self): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + shim.clean_up() + self.OpenSSLManager.return_value.clean_up.assert_not_called() + + def test_clean_up_after_report_failure(self): + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + shim.clean_up() + self.OpenSSLManager.return_value.clean_up.assert_not_called() + + def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self): + self.AzureEndpointHttpClient.return_value.get \ + .side_effect = SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_fetch_data) + + def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self): + self.AzureEndpointHttpClient.return_value.get \ + .side_effect = SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_report_failure, + description='TestDesc') + + def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self): + self.GoalState.side_effect = SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_fetch_data) + + def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc( + self): + self.GoalState.side_effect = 
SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_report_failure, + description='TestDesc') + + def test_failure_to_send_report_ready_health_doc_bubbles_up(self): + self.AzureEndpointHttpClient.return_value.post \ + .side_effect = SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_fetch_data) + + def test_failure_to_send_report_failure_health_doc_bubbles_up(self): + self.AzureEndpointHttpClient.return_value.post \ + .side_effect = SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_report_failure, + description='TestDesc') + + +class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase): + + def setUp(self): + super(TestGetMetadataGoalStateXMLAndReportReadyToFabric, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.m_shim = patches.enter_context( + mock.patch.object(azure_helper, 'WALinuxAgentShim')) + + def test_data_from_shim_returned(self): + ret = azure_helper.get_metadata_from_fabric() + self.assertEqual( + self.m_shim.return_value.register_with_azure_and_fetch_data + .return_value, + ret) + + def test_success_calls_clean_up(self): + azure_helper.get_metadata_from_fabric() + self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) + + def test_failure_in_registration_propagates_exc_and_calls_clean_up( + self): + self.m_shim.return_value.register_with_azure_and_fetch_data \ + .side_effect = SentinelException + self.assertRaises(SentinelException, + azure_helper.get_metadata_from_fabric) + self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) + + def test_calls_shim_register_with_azure_and_fetch_data(self): + m_pubkey_info = mock.MagicMock() + azure_helper.get_metadata_from_fabric( + pubkey_info=m_pubkey_info, iso_dev="/dev/sr0") + self.assertEqual( + 1, + self.m_shim.return_value + .register_with_azure_and_fetch_data.call_count) + self.assertEqual( + mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info), + self.m_shim.return_value + .register_with_azure_and_fetch_data.call_args) + + def test_instantiates_shim_with_kwargs(self): + m_fallback_lease_file = mock.MagicMock() + m_dhcp_options = mock.MagicMock() + azure_helper.get_metadata_from_fabric( + fallback_lease_file=m_fallback_lease_file, + dhcp_opts=m_dhcp_options) + self.assertEqual(1, self.m_shim.call_count) + self.assertEqual( + mock.call( + fallback_lease_file=m_fallback_lease_file, + dhcp_options=m_dhcp_options), + self.m_shim.call_args) + + +class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase): + + def setUp(self): + super( + TestGetMetadataGoalStateXMLAndReportFailureToFabric, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.m_shim = patches.enter_context( + mock.patch.object(azure_helper, 'WALinuxAgentShim')) + + def test_success_calls_clean_up(self): + azure_helper.report_failure_to_fabric() + self.assertEqual( + 1, + self.m_shim.return_value.clean_up.call_count) + + def test_failure_in_shim_report_failure_propagates_exc_and_calls_clean_up( + self): + self.m_shim.return_value.register_with_azure_and_report_failure \ + .side_effect = SentinelException + self.assertRaises(SentinelException, + azure_helper.report_failure_to_fabric) + self.assertEqual( + 1, + self.m_shim.return_value.clean_up.call_count) + + def test_report_failure_to_fabric_with_desc_calls_shim_report_failure( + self): + azure_helper.report_failure_to_fabric(description='TestDesc') + 
self.m_shim.return_value.register_with_azure_and_report_failure \ + .assert_called_once_with(description='TestDesc') + + def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure( + self): + azure_helper.report_failure_to_fabric() + # default err message description should be shown to the user + # if no description is passed in + self.m_shim.return_value.register_with_azure_and_report_failure \ + .assert_called_once_with( + description=azure_helper + .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) + + def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure( + self): + azure_helper.report_failure_to_fabric(description='') + # default err message description should be shown to the user + # if an empty description is passed in + self.m_shim.return_value.register_with_azure_and_report_failure \ + .assert_called_once_with( + description=azure_helper + .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) + + def test_instantiates_shim_with_kwargs(self): + m_fallback_lease_file = mock.MagicMock() + m_dhcp_options = mock.MagicMock() + azure_helper.report_failure_to_fabric( + fallback_lease_file=m_fallback_lease_file, + dhcp_opts=m_dhcp_options) + self.m_shim.assert_called_once_with( + fallback_lease_file=m_fallback_lease_file, + dhcp_options=m_dhcp_options) + + +class TestExtractIpAddressFromNetworkd(CiTestCase): + + azure_lease = dedent("""\ + # This is private data. Do not parse. + ADDRESS=10.132.0.5 + NETMASK=255.255.255.255 + ROUTER=10.132.0.1 + SERVER_ADDRESS=169.254.169.254 + NEXT_SERVER=10.132.0.1 + MTU=1460 + T1=43200 + T2=75600 + LIFETIME=86400 + DNS=169.254.169.254 + NTP=169.254.169.254 + DOMAINNAME=c.ubuntu-foundations.internal + DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal + HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal + ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1 + CLIENTID=ff405663a200020000ab11332859494d7a8b4c + OPTION_245=624c3620 + """) + + def setUp(self): + super(TestExtractIpAddressFromNetworkd, self).setUp() + self.lease_d = self.tmp_dir() + + def test_no_valid_leases_is_none(self): + """No valid leases should return None.""" + self.assertIsNone( + wa_shim._networkd_get_value_from_leases(self.lease_d)) + + def test_option_245_is_found_in_single(self): + """A single valid lease with 245 option should return it.""" + populate_dir(self.lease_d, {'9': self.azure_lease}) + self.assertEqual( + '624c3620', wa_shim._networkd_get_value_from_leases(self.lease_d)) + + def test_option_245_not_found_returns_None(self): + """A valid lease, but no option 245 should return None.""" + populate_dir( + self.lease_d, + {'9': self.azure_lease.replace("OPTION_245", "OPTION_999")}) + self.assertIsNone( + wa_shim._networkd_get_value_from_leases(self.lease_d)) + + def test_multiple_returns_first(self): + """Somewhat arbitrarily return the first address when multiple. + + Most important at the moment is that this is consistent behavior + rather than changing randomly as in order of a dictionary.""" + myval = "624c3601" + populate_dir( + self.lease_d, + {'9': self.azure_lease, + '2': self.azure_lease.replace("624c3620", myval)}) + self.assertEqual( + myval, wa_shim._networkd_get_value_from_leases(self.lease_d)) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py new file mode 100644 index 00000000..2eae16ee --- /dev/null +++ b/tests/unittests/sources/test_cloudsigma.py @@ -0,0 +1,137 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
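+
+# These tests swap out the datasource's Cepko client (cloud-init's reader
+# for CloudSigma's serial-port context API, hence the 'cepko (/dev/ttyS1)'
+# subplatform asserted below) for CepkoMock, which simply hands back the
+# SERVER_CONTEXT dict. A minimal sketch of the pattern, using only the
+# fixtures defined below:
+#
+#     ds.cepko = CepkoMock(SERVER_CONTEXT)  # no serial port needed
+#     ds.get_data()                         # parses the mocked context
+#     ds.get_instance_id()                  # -> SERVER_CONTEXT['uuid']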
+ +import copy + +from cloudinit.cs_utils import Cepko +from cloudinit import distros +from cloudinit import helpers +from cloudinit import sources +from cloudinit.sources import DataSourceCloudSigma + +from tests.unittests import helpers as test_helpers + +SERVER_CONTEXT = { + "cpu": 1000, + "cpus_instead_of_cores": False, + "global_context": {"some_global_key": "some_global_val"}, + "mem": 1073741824, + "meta": { + "ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe", + "cloudinit-user-data": "#cloud-config\n\n...", + }, + "name": "test_server", + "requirements": [], + "smp": 1, + "tags": ["much server", "very performance"], + "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890", + "vnc_password": "9e84d6cb49e46379", + "vendor_data": { + "location": "zrh", + "cloudinit": "#cloud-config\n\n...", + } +} + +DS_PATH = 'cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma' + + +class CepkoMock(Cepko): + def __init__(self, mocked_context): + self.result = mocked_context + + def all(self): + return self + + +class DataSourceCloudSigmaTest(test_helpers.CiTestCase): + def setUp(self): + super(DataSourceCloudSigmaTest, self).setUp() + self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) + self.add_patch(DS_PATH + '.is_running_in_cloudsigma', + "m_is_container", return_value=True) + + distro_cls = distros.fetch("ubuntu") + distro = distro_cls("ubuntu", cfg={}, paths=self.paths) + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( + sys_cfg={}, distro=distro, paths=self.paths) + self.datasource.cepko = CepkoMock(SERVER_CONTEXT) + + def test_get_hostname(self): + self.datasource.get_data() + self.assertEqual("test_server", self.datasource.get_hostname()) + self.datasource.metadata['name'] = '' + self.assertEqual("65b2fb23", self.datasource.get_hostname()) + utf8_hostname = b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82'.decode('utf-8') + self.datasource.metadata['name'] = utf8_hostname + self.assertEqual("65b2fb23", self.datasource.get_hostname()) + + def test_get_public_ssh_keys(self): + self.datasource.get_data() + self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']], + self.datasource.get_public_ssh_keys()) + + def test_get_instance_id(self): + self.datasource.get_data() + self.assertEqual(SERVER_CONTEXT['uuid'], + self.datasource.get_instance_id()) + + def test_platform(self): + """All platform-related attributes are set.""" + self.datasource.get_data() + self.assertEqual(self.datasource.cloud_name, 'cloudsigma') + self.assertEqual(self.datasource.platform_type, 'cloudsigma') + self.assertEqual(self.datasource.subplatform, 'cepko (/dev/ttyS1)') + + def test_metadata(self): + self.datasource.get_data() + self.assertEqual(self.datasource.metadata, SERVER_CONTEXT) + + def test_user_data(self): + self.datasource.get_data() + self.assertEqual(self.datasource.userdata_raw, + SERVER_CONTEXT['meta']['cloudinit-user-data']) + + def test_encoded_user_data(self): + encoded_context = copy.deepcopy(SERVER_CONTEXT) + encoded_context['meta']['base64_fields'] = 'cloudinit-user-data' + encoded_context['meta']['cloudinit-user-data'] = 'aGkgd29ybGQK' + self.datasource.cepko = CepkoMock(encoded_context) + self.datasource.get_data() + + self.assertEqual(self.datasource.userdata_raw, b'hi world\n') + + def test_vendor_data(self): + self.datasource.get_data() + self.assertEqual(self.datasource.vendordata_raw, + SERVER_CONTEXT['vendor_data']['cloudinit']) + + def test_lack_of_vendor_data(self): + stripped_context = copy.deepcopy(SERVER_CONTEXT) + del stripped_context["vendor_data"] + 
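+        # With the vendor_data key removed from the server context, the
+        # datasource is expected to leave vendordata_raw unset (None)
+        # rather than fail on the missing key.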
self.datasource.cepko = CepkoMock(stripped_context) + self.datasource.get_data() + + self.assertIsNone(self.datasource.vendordata_raw) + + def test_lack_of_cloudinit_key_in_vendor_data(self): + stripped_context = copy.deepcopy(SERVER_CONTEXT) + del stripped_context["vendor_data"]["cloudinit"] + self.datasource.cepko = CepkoMock(stripped_context) + self.datasource.get_data() + + self.assertIsNone(self.datasource.vendordata_raw) + + +class DsLoads(test_helpers.TestCase): + def test_get_datasource_list_returns_in_local(self): + deps = (sources.DEP_FILESYSTEM,) + ds_list = DataSourceCloudSigma.get_datasource_list(deps) + self.assertEqual(ds_list, + [DataSourceCloudSigma.DataSourceCloudSigma]) + + def test_list_sources_finds_ds(self): + found = sources.list_sources( + ['CloudSigma'], (sources.DEP_FILESYSTEM,), ['cloudinit.sources']) + self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], + found) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_cloudstack.py b/tests/unittests/sources/test_cloudstack.py new file mode 100644 index 00000000..2b1a1b70 --- /dev/null +++ b/tests/unittests/sources/test_cloudstack.py @@ -0,0 +1,186 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import helpers +from cloudinit import util +from cloudinit.sources.DataSourceCloudStack import ( + DataSourceCloudStack, get_latest_lease) + +from tests.unittests.helpers import CiTestCase, ExitStack, mock + +import os +import time + +MOD_PATH = 'cloudinit.sources.DataSourceCloudStack' +DS_PATH = MOD_PATH + '.DataSourceCloudStack' + + +class TestCloudStackPasswordFetching(CiTestCase): + + def setUp(self): + super(TestCloudStackPasswordFetching, self).setUp() + self.patches = ExitStack() + self.addCleanup(self.patches.close) + mod_name = MOD_PATH + self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) + self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) + default_gw = "192.201.20.0" + get_latest_lease = mock.MagicMock(return_value=None) + self.patches.enter_context(mock.patch( + mod_name + '.get_latest_lease', get_latest_lease)) + + get_default_gw = mock.MagicMock(return_value=default_gw) + self.patches.enter_context(mock.patch( + mod_name + '.get_default_gateway', get_default_gw)) + + get_networkd_server_address = mock.MagicMock(return_value=None) + self.patches.enter_context(mock.patch( + mod_name + '.dhcp.networkd_get_option_from_leases', + get_networkd_server_address)) + self.tmp = self.tmp_dir() + + def _set_password_server_response(self, response_string): + subp = mock.MagicMock(return_value=(response_string, '')) + self.patches.enter_context( + mock.patch('cloudinit.sources.DataSourceCloudStack.subp.subp', + subp)) + return subp + + def test_empty_password_doesnt_create_config(self): + self._set_password_server_response('') + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + ds.get_data() + self.assertEqual({}, ds.get_config_obj()) + + def test_saved_password_doesnt_create_config(self): + self._set_password_server_response('saved_password') + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + ds.get_data() + self.assertEqual({}, ds.get_config_obj()) + + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_password_sets_password(self, m_wait): + m_wait.return_value = True + password = 'SekritSquirrel' + self._set_password_server_response(password) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + ds.get_data() + 
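+        # The mocked password-server response should flow through to the
+        # config object that cloud-init's set-passwords handling consumes;
+        # the accompanying 'send_my_password'/'saved_password' DomU_Request
+        # exchange is checked by assertRequestTypesSent below.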
self.assertEqual(password, ds.get_config_obj()['password']) + + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_bad_request_doesnt_stop_ds_from_working(self, m_wait): + m_wait.return_value = True + self._set_password_server_response('bad_request') + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + self.assertTrue(ds.get_data()) + + def assertRequestTypesSent(self, subp, expected_request_types): + request_types = [] + for call in subp.call_args_list: + args = call[0][0] + for arg in args: + if arg.startswith('DomU_Request'): + request_types.append(arg.split()[1]) + self.assertEqual(expected_request_types, request_types) + + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_valid_response_means_password_marked_as_saved(self, m_wait): + m_wait.return_value = True + password = 'SekritSquirrel' + subp = self._set_password_server_response(password) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + ds.get_data() + self.assertRequestTypesSent(subp, + ['send_my_password', 'saved_password']) + + def _check_password_not_saved_for(self, response_string): + subp = self._set_password_server_response(response_string) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait: + m_wait.return_value = True + ds.get_data() + self.assertRequestTypesSent(subp, ['send_my_password']) + + def test_password_not_saved_if_empty(self): + self._check_password_not_saved_for('') + + def test_password_not_saved_if_already_saved(self): + self._check_password_not_saved_for('saved_password') + + def test_password_not_saved_if_bad_request(self): + self._check_password_not_saved_for('bad_request') + + +class TestGetLatestLease(CiTestCase): + + def _populate_dir_list(self, bdir, files): + """populate_dir_list([(name, data), (name, data)]) + + writes files to bdir, and updates timestamps to ensure + that their mtime increases with each file.""" + + start = int(time.time()) + for num, fname in enumerate(reversed(files)): + fpath = os.path.sep.join((bdir, fname)) + util.write_file(fpath, fname.encode()) + os.utime(fpath, (start - num, start - num)) + + def _pop_and_test(self, files, expected): + lease_d = self.tmp_dir() + self._populate_dir_list(lease_d, files) + self.assertEqual(self.tmp_path(expected, lease_d), + get_latest_lease(lease_d)) + + def test_skips_dhcpv6_files(self): + """files started with dhclient6 should be skipped.""" + expected = "dhclient.lease" + self._pop_and_test([expected, "dhclient6.lease"], expected) + + def test_selects_dhclient_dot_files(self): + """files named dhclient.lease or dhclient.leases should be used. + + Ubuntu names files dhclient.eth0.leases dhclient6.leases and + sometimes dhclient.leases.""" + self._pop_and_test(["dhclient.lease"], "dhclient.lease") + self._pop_and_test(["dhclient.leases"], "dhclient.leases") + + def test_selects_dhclient_dash_files(self): + """files named dhclient-lease or dhclient-leases should be used. + + Redhat/Centos names files with dhclient--eth0.lease (centos 7) or + dhclient-eth0.leases (centos 6). 
+ """ + self._pop_and_test(["dhclient-eth0.lease"], "dhclient-eth0.lease") + self._pop_and_test(["dhclient--eth0.lease"], "dhclient--eth0.lease") + + def test_ignores_by_extension(self): + """only .lease or .leases file should be considered.""" + + self._pop_and_test(["dhclient.lease", "dhclient.lease.bk", + "dhclient.lease-old", "dhclient.leaselease"], + "dhclient.lease") + + def test_selects_newest_matching(self): + """If multiple files match, the newest written should be used.""" + lease_d = self.tmp_dir() + valid_1 = "dhclient.leases" + valid_2 = "dhclient.lease" + valid_1_path = self.tmp_path(valid_1, lease_d) + valid_2_path = self.tmp_path(valid_2, lease_d) + + self._populate_dir_list(lease_d, [valid_1, valid_2]) + self.assertEqual(valid_2_path, get_latest_lease(lease_d)) + + # now update mtime on valid_2 to be older than valid_1 and re-check. + mtime = int(os.path.getmtime(valid_1_path)) - 1 + os.utime(valid_2_path, (mtime, mtime)) + + self.assertEqual(valid_1_path, get_latest_lease(lease_d)) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_common.py b/tests/unittests/sources/test_common.py new file mode 100644 index 00000000..bb8fa530 --- /dev/null +++ b/tests/unittests/sources/test_common.py @@ -0,0 +1,121 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import settings +from cloudinit import sources +from cloudinit import type_utils +from cloudinit.sources import ( + DataSource, + DataSourceAliYun as AliYun, + DataSourceAltCloud as AltCloud, + DataSourceAzure as Azure, + DataSourceBigstep as Bigstep, + DataSourceCloudSigma as CloudSigma, + DataSourceCloudStack as CloudStack, + DataSourceConfigDrive as ConfigDrive, + DataSourceDigitalOcean as DigitalOcean, + DataSourceEc2 as Ec2, + DataSourceExoscale as Exoscale, + DataSourceGCE as GCE, + DataSourceHetzner as Hetzner, + DataSourceIBMCloud as IBMCloud, + DataSourceLXD as LXD, + DataSourceMAAS as MAAS, + DataSourceNoCloud as NoCloud, + DataSourceOpenNebula as OpenNebula, + DataSourceOpenStack as OpenStack, + DataSourceOracle as Oracle, + DataSourceOVF as OVF, + DataSourceRbxCloud as RbxCloud, + DataSourceScaleway as Scaleway, + DataSourceSmartOS as SmartOS, + DataSourceUpCloud as UpCloud, + DataSourceVultr as Vultr, + DataSourceVMware as VMware, +) +from cloudinit.sources import DataSourceNone as DSNone + +from tests.unittests import helpers as test_helpers + +DEFAULT_LOCAL = [ + Azure.DataSourceAzure, + CloudSigma.DataSourceCloudSigma, + ConfigDrive.DataSourceConfigDrive, + DigitalOcean.DataSourceDigitalOcean, + GCE.DataSourceGCELocal, + Hetzner.DataSourceHetzner, + IBMCloud.DataSourceIBMCloud, + LXD.DataSourceLXD, + NoCloud.DataSourceNoCloud, + OpenNebula.DataSourceOpenNebula, + Oracle.DataSourceOracle, + OVF.DataSourceOVF, + SmartOS.DataSourceSmartOS, + Vultr.DataSourceVultr, + Ec2.DataSourceEc2Local, + OpenStack.DataSourceOpenStackLocal, + RbxCloud.DataSourceRbxCloud, + Scaleway.DataSourceScaleway, + UpCloud.DataSourceUpCloudLocal, + VMware.DataSourceVMware, +] + +DEFAULT_NETWORK = [ + AliYun.DataSourceAliYun, + AltCloud.DataSourceAltCloud, + Bigstep.DataSourceBigstep, + CloudStack.DataSourceCloudStack, + DSNone.DataSourceNone, + Ec2.DataSourceEc2, + Exoscale.DataSourceExoscale, + GCE.DataSourceGCE, + MAAS.DataSourceMAAS, + NoCloud.DataSourceNoCloudNet, + OpenStack.DataSourceOpenStack, + OVF.DataSourceOVFNet, + UpCloud.DataSourceUpCloud, + VMware.DataSourceVMware, +] + + +class ExpectedDataSources(test_helpers.TestCase): + builtin_list = 
settings.CFG_BUILTIN['datasource_list'] + deps_local = [sources.DEP_FILESYSTEM] + deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] + pkg_list = [type_utils.obj_name(sources)] + + def test_expected_default_local_sources_found(self): + found = sources.list_sources( + self.builtin_list, self.deps_local, self.pkg_list) + self.assertEqual(set(DEFAULT_LOCAL), set(found)) + + def test_expected_default_network_sources_found(self): + found = sources.list_sources( + self.builtin_list, self.deps_network, self.pkg_list) + self.assertEqual(set(DEFAULT_NETWORK), set(found)) + + def test_expected_nondefault_network_sources_found(self): + found = sources.list_sources( + ['AliYun'], self.deps_network, self.pkg_list) + self.assertEqual(set([AliYun.DataSourceAliYun]), set(found)) + + +class TestDataSourceInvariants(test_helpers.TestCase): + def test_data_sources_have_valid_network_config_sources(self): + for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: + for cfg_src in ds.network_config_sources: + fail_msg = ('{} has an invalid network_config_sources entry:' + ' {}'.format(str(ds), cfg_src)) + self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src), + fail_msg) + + def test_expected_dsname_defined(self): + for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: + fail_msg = ( + '{} has an invalid / missing dsname property: {}'.format( + str(ds), str(ds.dsname) + ) + ) + self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg) + self.assertIsNotNone(ds.dsname) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_configdrive.py b/tests/unittests/sources/test_configdrive.py new file mode 100644 index 00000000..775d0622 --- /dev/null +++ b/tests/unittests/sources/test_configdrive.py @@ -0,0 +1,844 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
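+#
+# A quick orientation for this module: DataSourceConfigDrive reads the
+# config-drive-v2 directory layout (see the CFG_DRIVE_FILES_V2 fixture
+# below), remaps ec2-style device names such as 'ami' or 'ephemeral0' to
+# local devices, and converts OpenStack network_data.json into cloud-init
+# network config.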
+ +from copy import copy, deepcopy +import json +import os + +from cloudinit import helpers +from cloudinit.net import eni +from cloudinit.net import network_state +from cloudinit import settings +from cloudinit.sources import DataSourceConfigDrive as ds +from cloudinit.sources.helpers import openstack +from cloudinit import util + +from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir + + +PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' +EC2_META = { + 'ami-id': 'ami-00000001', + 'ami-launch-index': 0, + 'ami-manifest-path': 'FIXME', + 'block-device-mapping': { + 'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': '/dev/sda1', + 'swap': 'sda3'}, + 'hostname': 'sm-foo-test.novalocal', + 'instance-action': 'none', + 'instance-id': 'i-00000001', + 'instance-type': 'm1.tiny', + 'local-hostname': 'sm-foo-test.novalocal', + 'local-ipv4': None, + 'placement': {'availability-zone': 'nova'}, + 'public-hostname': 'sm-foo-test.novalocal', + 'public-ipv4': '', + 'public-keys': {'0': {'openssh-key': PUBKEY}}, + 'reservation-id': 'r-iru5qm4m', + 'security-groups': ['default'] +} +USER_DATA = b'#!/bin/sh\necho This is user data\n' +OSTACK_META = { + 'availability_zone': 'nova', + 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, + {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}], + 'hostname': 'sm-foo-test.novalocal', + 'meta': {'dsmode': 'local', 'my-meta': 'my-value'}, + 'name': 'sm-foo-test', + 'public_keys': {'mykey': PUBKEY}, + 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} + +CONTENT_0 = b'This is contents of /etc/foo.cfg\n' +CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' +NETWORK_DATA = { + 'services': [ + {'type': 'dns', 'address': '199.204.44.24'}, + {'type': 'dns', 'address': '199.204.47.54'} + ], + 'links': [ + {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', + 'ethernet_mac_address': 'fa:16:3e:69:b0:58', + 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, + {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', + 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', + 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, + {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc', + 'ethernet_mac_address': 'fa:16:3e:05:30:fe', + 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'} + ], + 'networks': [ + {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp', + 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', + 'id': 'network0'}, + {'link': 'tap2f88d109-5b', 'type': 'ipv4_dhcp', + 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', + 'id': 'network1'}, + {'link': 'tap1a5382f8-04', 'type': 'ipv4_dhcp', + 'network_id': 'dab2ba57-cae2-4311-a5ed-010b263891f5', + 'id': 'network2'} + ] +} + +NETWORK_DATA_2 = { + "services": [ + {"type": "dns", "address": "1.1.1.191"}, + {"type": "dns", "address": "1.1.1.4"}], + "networks": [ + {"network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", "type": "ipv4", + "netmask": "255.255.255.248", "link": "eth0", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "2.2.2.9"}], + "ip_address": "2.2.2.10", "id": "network0-ipv4"}, + {"network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", "type": "ipv4", + "netmask": "255.255.255.224", "link": "eth1", + "routes": [], "ip_address": "3.3.3.24", "id": "network1-ipv4"}], + "links": [ + {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": 1500, + "type": "vif", "id": "eth0", "vif_id": "vif-foo1"}, + {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": 1500, + "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}] +} + +# This network 
data has 'tap' or null type for a link. +NETWORK_DATA_3 = { + "services": [{"type": "dns", "address": "172.16.36.11"}, + {"type": "dns", "address": "172.16.36.12"}], + "networks": [ + {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", + "type": "ipv4", "netmask": "255.255.255.128", + "link": "tap77a0dc5b-72", "ip_address": "172.17.48.18", + "id": "network0", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "172.17.48.1"}]}, + {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", + "type": "ipv6", "netmask": "ffff:ffff:ffff:ffff::", + "link": "tap77a0dc5b-72", + "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d", + "id": "network1", + "routes": [{"netmask": "::", "network": "::", + "gateway": "fdb8:52d0:9d14::1"}]}, + {"network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54", + "type": "ipv4", "netmask": "255.255.255.128", + "link": "tap7d6b7bec-93", "ip_address": "172.16.48.13", + "id": "network2", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "172.16.48.1"}, + {"netmask": "255.255.0.0", "network": "172.16.0.0", + "gateway": "172.16.48.1"}]}], + "links": [ + {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": None, + "type": "tap", "id": "tap77a0dc5b-72", + "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48"}, + {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": None, + "type": None, "id": "tap7d6b7bec-93", + "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5"} + ] +} + +BOND_MAC = "fa:16:3e:b3:72:36" +NETWORK_DATA_BOND = { + "services": [ + {"type": "dns", "address": "1.1.1.191"}, + {"type": "dns", "address": "1.1.1.4"}, + ], + "networks": [ + {"id": "network2-ipv4", "ip_address": "2.2.2.13", + "link": "vlan2", "netmask": "255.255.255.248", + "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117", + "type": "ipv4", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "2.2.2.9"}]}, + {"id": "network3-ipv4", "ip_address": "10.0.1.5", + "link": "vlan3", "netmask": "255.255.255.248", + "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", + "type": "ipv4", + "routes": [{"netmask": "255.255.255.255", + "network": "192.168.1.0", "gateway": "10.0.1.1"}]} + ], + "links": [ + {"ethernet_mac_address": "0c:c4:7a:34:6e:3c", + "id": "eth0", "mtu": 1500, "type": "phy"}, + {"ethernet_mac_address": "0c:c4:7a:34:6e:3d", + "id": "eth1", "mtu": 1500, "type": "phy"}, + {"bond_links": ["eth0", "eth1"], + "bond_miimon": 100, "bond_mode": "4", + "bond_xmit_hash_policy": "layer3+4", + "ethernet_mac_address": BOND_MAC, + "id": "bond0", "type": "bond"}, + {"ethernet_mac_address": "fa:16:3e:b3:72:30", + "id": "vlan2", "type": "vlan", "vlan_id": 602, + "vlan_link": "bond0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, + {"ethernet_mac_address": "fa:16:3e:66:ab:a6", + "id": "vlan3", "type": "vlan", "vlan_id": 612, "vlan_link": "bond0", + "vlan_mac_address": "fa:16:3e:66:ab:a6"} + ] +} + +NETWORK_DATA_VLAN = { + "services": [{"type": "dns", "address": "1.1.1.191"}], + "networks": [ + {"id": "network1-ipv4", "ip_address": "10.0.1.5", + "link": "vlan1", "netmask": "255.255.255.248", + "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", + "type": "ipv4", + "routes": [{"netmask": "255.255.255.255", + "network": "192.168.1.0", "gateway": "10.0.1.1"}]} + ], + "links": [ + {"ethernet_mac_address": "fa:16:3e:69:b0:58", + "id": "eth0", "mtu": 1500, "type": "phy"}, + {"ethernet_mac_address": "fa:16:3e:b3:72:30", + "id": "vlan1", "type": "vlan", "vlan_id": 602, + "vlan_link": "eth0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, + ] +} + +KNOWN_MACS = {
'fa:16:3e:69:b0:58': 'enp0s1', + 'fa:16:3e:d4:57:ad': 'enp0s2', + 'fa:16:3e:dd:50:9a': 'foo1', + 'fa:16:3e:a8:14:69': 'foo2', + 'fa:16:3e:ed:9a:59': 'foo3', + '0c:c4:7a:34:6e:3d': 'oeth1', + '0c:c4:7a:34:6e:3c': 'oeth0', +} + +CFG_DRIVE_FILES_V2 = { + 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META), + 'ec2/2009-04-04/user-data': USER_DATA, + 'ec2/latest/meta-data.json': json.dumps(EC2_META), + 'ec2/latest/user-data': USER_DATA, + 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META), + 'openstack/2012-08-10/user_data': USER_DATA, + 'openstack/content/0000': CONTENT_0, + 'openstack/content/0001': CONTENT_1, + 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), + 'openstack/latest/user_data': USER_DATA, + 'openstack/latest/network_data.json': json.dumps(NETWORK_DATA), + 'openstack/2015-10-15/meta_data.json': json.dumps(OSTACK_META), + 'openstack/2015-10-15/user_data': USER_DATA, + 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} + +M_PATH = "cloudinit.sources.DataSourceConfigDrive." + + +class TestConfigDriveDataSource(CiTestCase): + + def setUp(self): + super(TestConfigDriveDataSource, self).setUp() + self.add_patch( + M_PATH + "util.find_devs_with", + "m_find_devs_with", return_value=[]) + self.tmp = self.tmp_dir() + + def test_ec2_metadata(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + found = ds.read_config_drive(self.tmp) + self.assertTrue('ec2-metadata' in found) + ec2_md = found['ec2-metadata'] + self.assertEqual(EC2_META, ec2_md) + + def test_dev_os_remap(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + found = ds.read_config_drive(self.tmp) + cfg_ds.metadata = found['metadata'] + name_tests = { + 'ami': '/dev/vda1', + 'root': '/dev/vda1', + 'ephemeral0': '/dev/vda2', + 'swap': '/dev/vda3', + } + for name, dev_name in name_tests.items(): + with ExitStack() as mocks: + provided_name = dev_name[len('/dev/'):] + provided_name = "s" + provided_name[1:] + find_mock = mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + return_value=[provided_name])) + # We want os.path.exists() to return False on its first call, + # and True on its second call. We use a handy generator as + # the mock side effect for this. The mocked function returns + # what the side effect returns. 
+ + def exists_side_effect(): + yield False + yield True + exists_mock = mocks.enter_context( + mock.patch.object(os.path, 'exists', + side_effect=exists_side_effect())) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) + + find_mock.assert_called_once_with(mock.ANY) + self.assertEqual(exists_mock.call_count, 2) + + def test_dev_os_map(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + found = ds.read_config_drive(self.tmp) + os_md = found['metadata'] + cfg_ds.metadata = os_md + name_tests = { + 'ami': '/dev/vda1', + 'root': '/dev/vda1', + 'ephemeral0': '/dev/vda2', + 'swap': '/dev/vda3', + } + for name, dev_name in name_tests.items(): + with ExitStack() as mocks: + find_mock = mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + return_value=[dev_name])) + exists_mock = mocks.enter_context( + mock.patch.object(os.path, 'exists', + return_value=True)) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) + + find_mock.assert_called_once_with(mock.ANY) + exists_mock.assert_called_once_with(mock.ANY) + + def test_dev_ec2_remap(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + found = ds.read_config_drive(self.tmp) + ec2_md = found['ec2-metadata'] + os_md = found['metadata'] + cfg_ds.ec2_metadata = ec2_md + cfg_ds.metadata = os_md + name_tests = { + 'ami': '/dev/vda1', + 'root': '/dev/vda1', + 'ephemeral0': '/dev/vda2', + 'swap': '/dev/vda3', + None: None, + 'bob': None, + 'root2k': None, + } + for name, dev_name in name_tests.items(): + # We want os.path.exists() to return False on its first call, + # and True on its second call. We use a handy generator as + # the mock side effect for this. The mocked function returns + # what the side effect returns. + def exists_side_effect(): + yield False + yield True + with mock.patch.object(os.path, 'exists', + side_effect=exists_side_effect()): + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) + # We don't assert the call count for os.path.exists() because + # not all of the entries in name_tests result in two calls to + # that function. Specifically, 'root2k' doesn't seem to call + # it at all.
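+        # For reference, a generator (or any iterable) passed as
+        # side_effect makes the mock return its successive values, one per
+        # call. An illustrative sketch of the pattern on its own
+        # (hypothetical, not used by these tests):
+        #
+        #   with mock.patch.object(os.path, 'exists',
+        #                          side_effect=iter([False, True])):
+        #       os.path.exists('x')  # first call -> False
+        #       os.path.exists('x')  # second call -> True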
+ + def test_dev_ec2_map(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + found = ds.read_config_drive(self.tmp) + ec2_md = found['ec2-metadata'] + os_md = found['metadata'] + cfg_ds.ec2_metadata = ec2_md + cfg_ds.metadata = os_md + name_tests = { + 'ami': '/dev/sda1', + 'root': '/dev/sda1', + 'ephemeral0': '/dev/sda2', + 'swap': '/dev/sda3', + None: None, + 'bob': None, + 'root2k': None, + } + for name, dev_name in name_tests.items(): + with mock.patch.object(os.path, 'exists', return_value=True): + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) + + def test_dir_valid(self): + """Verify a dir is read as such.""" + + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + + found = ds.read_config_drive(self.tmp) + + expected_md = copy(OSTACK_META) + expected_md['instance-id'] = expected_md['uuid'] + expected_md['local-hostname'] = expected_md['hostname'] + + self.assertEqual(USER_DATA, found['userdata']) + self.assertEqual(expected_md, found['metadata']) + self.assertEqual(NETWORK_DATA, found['networkdata']) + self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0) + self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1) + + def test_seed_dir_valid_extra(self): + """Verify extra files do not affect datasource validity.""" + + data = copy(CFG_DRIVE_FILES_V2) + data["myfoofile.txt"] = "myfoocontent" + data["openstack/latest/random-file.txt"] = "random-content" + + populate_dir(self.tmp, data) + + found = ds.read_config_drive(self.tmp) + + expected_md = copy(OSTACK_META) + expected_md['instance-id'] = expected_md['uuid'] + expected_md['local-hostname'] = expected_md['hostname'] + + self.assertEqual(expected_md, found['metadata']) + + def test_seed_dir_bad_json_metadata(self): + """Verify that bad json in metadata raises BrokenMetadata.""" + data = copy(CFG_DRIVE_FILES_V2) + + data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}" + data["openstack/2015-10-15/meta_data.json"] = "non-json garbage {}" + data["openstack/latest/meta_data.json"] = "non-json garbage {}" + + populate_dir(self.tmp, data) + + self.assertRaises(openstack.BrokenMetadata, + ds.read_config_drive, self.tmp) + + def test_seed_dir_no_configdrive(self): + """Verify that no metadata raises NonReadable.""" + + my_d = os.path.join(self.tmp, "non-configdrive") + data = copy(CFG_DRIVE_FILES_V2) + data["myfoofile.txt"] = "myfoocontent" + data["openstack/latest/random-file.txt"] = "random-content" + data["content/foo"] = "foocontent" + + self.assertRaises(openstack.NonReadable, + ds.read_config_drive, my_d) + + def test_seed_dir_missing(self): + """Verify that missing seed_dir raises NonReadable.""" + my_d = os.path.join(self.tmp, "nonexistantdirectory") + self.assertRaises(openstack.NonReadable, + ds.read_config_drive, my_d) + + def test_find_candidates(self): + devs_with_answers = {} + + def my_devs_with(*args, **kwargs): + criteria = args[0] if len(args) else kwargs.pop('criteria', None) + return devs_with_answers.get(criteria, []) + + def my_is_partition(dev): + return dev[-1] in "0123456789" and not dev.startswith("sr") + + try: + orig_find_devs_with = util.find_devs_with + util.find_devs_with = my_devs_with + + orig_is_partition = util.is_partition + util.is_partition = my_is_partition + + devs_with_answers = {"TYPE=vfat": [], + "TYPE=iso9660": ["/dev/vdb"], + "LABEL=config-2": ["/dev/vdb"]} + self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) + + # add a vfat item + # zdd reverse
sorts after vdb, but config-2 label is preferred + devs_with_answers['TYPE=vfat'] = ["/dev/zdd"] + self.assertEqual(["/dev/vdb", "/dev/zdd"], + ds.find_candidate_devs()) + + # verify that partitions that have the correct label are considered. + devs_with_answers = {"TYPE=vfat": ["/dev/sda1"], + "TYPE=iso9660": [], + "LABEL=config-2": ["/dev/vdb3"]} + self.assertEqual(["/dev/vdb3"], + ds.find_candidate_devs()) + + # Verify that uppercase labels are also found. + devs_with_answers = {"TYPE=vfat": [], + "TYPE=iso9660": ["/dev/vdb"], + "LABEL=CONFIG-2": ["/dev/vdb"]} + self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) + + finally: + util.find_devs_with = orig_find_devs_with + util.is_partition = orig_is_partition + + @mock.patch(M_PATH + 'on_first_boot') + def test_pubkeys_v2(self, on_first_boot): + """Verify that public-keys work in config-drive-v2.""" + myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) + self.assertEqual(myds.get_public_ssh_keys(), + [OSTACK_META['public_keys']['mykey']]) + self.assertEqual('configdrive', myds.cloud_name) + self.assertEqual('openstack', myds.platform) + self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform) + + def test_subplatform_config_drive_when_starts_with_dev(self): + """subplatform reports config-drive when source starts with /dev/.""" + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs: + with mock.patch(M_PATH + 'util.mount_cb'): + with mock.patch(M_PATH + 'on_first_boot'): + m_find_devs.return_value = ['/dev/anything'] + self.assertEqual(True, cfg_ds.get_data()) + self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) + + +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) +class TestNetJson(CiTestCase): + def setUp(self): + super(TestNetJson, self).setUp() + self.tmp = self.tmp_dir() + self.maxDiff = None + + @mock.patch(M_PATH + 'on_first_boot') + def test_network_data_is_found(self, on_first_boot): + """Verify that network_data is present in ds in config-drive-v2.""" + myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) + self.assertIsNotNone(myds.network_json) + + @mock.patch(M_PATH + 'on_first_boot') + def test_network_config_is_converted(self, on_first_boot): + """Verify that network_data is converted and present on ds object.""" + myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) + network_config = openstack.convert_net_json(NETWORK_DATA, + known_macs=KNOWN_MACS) + self.assertEqual(myds.network_config, network_config) + + def test_network_config_conversion_dhcp6(self): + """Test some ipv6 input network json and check the expected + conversions.""" + in_data = { + 'links': [ + {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', + 'ethernet_mac_address': 'fa:16:3e:69:b0:58', + 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, + {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', + 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', + 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, + ], + 'networks': [ + {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless', + 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', + 'id': 'network0'}, + {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful', + 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', + 'id': 'network1'}, + ] + } + out_data = { + 'version': 1, + 'config': [ + {'mac_address': 'fa:16:3e:69:b0:58', + 'mtu': None, + 'name': 'enp0s1', + 'subnets': [{'type':
'ipv6_dhcpv6-stateless'}], + 'type': 'physical'}, + {'mac_address': 'fa:16:3e:d4:57:ad', + 'mtu': None, + 'name': 'enp0s2', + 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], + 'type': 'physical', + 'accept-ra': True} + ], + } + conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) + self.assertEqual(out_data, conv_data) + + def test_network_config_conversions(self): + """Tests a bunch of input network json and checks the + expected conversions.""" + in_datas = [ + NETWORK_DATA, + { + 'services': [{'type': 'dns', 'address': '172.19.0.12'}], + 'networks': [{ + 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4', + 'type': 'ipv4', + 'netmask': '255.255.252.0', + 'link': 'tap1a81968a-79', + 'routes': [{ + 'netmask': '0.0.0.0', + 'network': '0.0.0.0', + 'gateway': '172.19.3.254', + }], + 'ip_address': '172.19.1.34', + 'id': 'network0', + }], + 'links': [{ + 'type': 'bridge', + 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f', + 'ethernet_mac_address': 'fa:16:3e:ed:9a:59', + 'id': 'tap1a81968a-79', + 'mtu': None, + }], + }, + ] + out_datas = [ + { + 'version': 1, + 'config': [ + { + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical', + 'mac_address': 'fa:16:3e:69:b0:58', + 'name': 'enp0s1', + 'mtu': None, + }, + { + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical', + 'mac_address': 'fa:16:3e:d4:57:ad', + 'name': 'enp0s2', + 'mtu': None, + }, + { + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical', + 'mac_address': 'fa:16:3e:05:30:fe', + 'name': 'nic0', + 'mtu': None, + }, + { + 'type': 'nameserver', + 'address': '199.204.44.24', + }, + { + 'type': 'nameserver', + 'address': '199.204.47.54', + } + ], + + }, + { + 'version': 1, + 'config': [ + { + 'name': 'foo3', + 'mac_address': 'fa:16:3e:ed:9a:59', + 'mtu': None, + 'type': 'physical', + 'subnets': [ + { + 'address': '172.19.1.34', + 'netmask': '255.255.252.0', + 'type': 'static', + 'ipv4': True, + 'routes': [{ + 'gateway': '172.19.3.254', + 'netmask': '0.0.0.0', + 'network': '0.0.0.0', + }], + } + ] + }, + { + 'type': 'nameserver', + 'address': '172.19.0.12', + } + ], + }, + ] + for in_data, out_data in zip(in_datas, out_datas): + conv_data = openstack.convert_net_json(in_data, + known_macs=KNOWN_MACS) + self.assertEqual(out_data, conv_data) + + +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) +class TestConvertNetworkData(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestConvertNetworkData, self).setUp() + self.tmp = self.tmp_dir() + + def _getnames_in_config(self, ncfg): + return set([n['name'] for n in ncfg['config'] + if n['type'] == 'physical']) + + def test_conversion_fills_names(self): + ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS) + expected = set(['nic0', 'enp0s1', 'enp0s2']) + found = self._getnames_in_config(ncfg) + self.assertEqual(found, expected) + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac): + macs = KNOWN_MACS.copy() + macs.update({'fa:16:3e:05:30:fe': 'foonic1', + 'fa:16:3e:69:b0:58': 'ens1'}) + get_interfaces_by_mac.return_value = macs + + ncfg = openstack.convert_net_json(NETWORK_DATA) + expected = set(['nic0', 'ens1', 'enp0s2']) + found = self._getnames_in_config(ncfg) + self.assertEqual(found, expected) + + def test_convert_raises_value_error_on_missing_name(self): + macs = {'aa:aa:aa:aa:aa:00': 'ens1'} + self.assertRaises(ValueError, openstack.convert_net_json, + NETWORK_DATA, known_macs=macs) + + def 
test_conversion_with_route(self): + ncfg = openstack.convert_net_json(NETWORK_DATA_2, + known_macs=KNOWN_MACS) + # not the best test, but see that we get a route in the + # network config and that it gets rendered to an ENI file + routes = [] + for n in ncfg['config']: + for s in n.get('subnets', []): + routes.extend(s.get('routes', [])) + self.assertIn( + {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'}, + routes) + eni_renderer = eni.Renderer() + eni_renderer.render_network_state( + network_state.parse_net_config_data(ncfg), target=self.tmp) + with open(os.path.join(self.tmp, "etc", + "network", "interfaces"), 'r') as f: + eni_rendering = f.read() + self.assertIn("route add default gw 2.2.2.9", eni_rendering) + + def test_conversion_with_tap(self): + ncfg = openstack.convert_net_json(NETWORK_DATA_3, + known_macs=KNOWN_MACS) + physicals = set() + for i in ncfg['config']: + if i.get('type') == "physical": + physicals.add(i['name']) + self.assertEqual(physicals, set(('foo1', 'foo2'))) + + def test_bond_conversion(self): + # light testing of bond conversion and eni rendering of bond + ncfg = openstack.convert_net_json(NETWORK_DATA_BOND, + known_macs=KNOWN_MACS) + eni_renderer = eni.Renderer() + + eni_renderer.render_network_state( + network_state.parse_net_config_data(ncfg), target=self.tmp) + with open(os.path.join(self.tmp, "etc", + "network", "interfaces"), 'r') as f: + eni_rendering = f.read() + + # Verify there are expected interfaces in the net config. + interfaces = sorted( + [i['name'] for i in ncfg['config'] + if i['type'] in ('vlan', 'bond', 'physical')]) + self.assertEqual( + sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]), + interfaces) + + words = eni_rendering.split() + # 'eth0' and 'eth1' are the ids. Because their mac addresses + # map to other names, we should not see them in the ENI + self.assertNotIn('eth0', words) + self.assertNotIn('eth1', words) + + # oeth0 and oeth1 are the interface names for eni. + # bond0 will be generated for the bond. Each should be auto.
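+        # (KNOWN_MACS maps 0c:c4:7a:34:6e:3c -> oeth0 and
+        # 0c:c4:7a:34:6e:3d -> oeth1, which is why the rendering is checked
+        # for those names rather than for the 'eth0'/'eth1' link ids.)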
+ self.assertIn("auto oeth0", eni_rendering) + self.assertIn("auto oeth1", eni_rendering) + self.assertIn("auto bond0", eni_rendering) + # The bond should have the given mac address + pos = eni_rendering.find("auto bond0") + self.assertIn(BOND_MAC, eni_rendering[pos:]) + + def test_vlan(self): + # light testing of vlan config conversion and eni rendering + ncfg = openstack.convert_net_json(NETWORK_DATA_VLAN, + known_macs=KNOWN_MACS) + eni_renderer = eni.Renderer() + eni_renderer.render_network_state( + network_state.parse_net_config_data(ncfg), target=self.tmp) + with open(os.path.join(self.tmp, "etc", + "network", "interfaces"), 'r') as f: + eni_rendering = f.read() + + self.assertIn("iface enp0s1", eni_rendering) + self.assertIn("address 10.0.1.5", eni_rendering) + self.assertIn("auto enp0s1.602", eni_rendering) + + def test_mac_addrs_can_be_upper_case(self): + # input mac addresses on rackspace may be upper case + my_netdata = deepcopy(NETWORK_DATA) + for link in my_netdata['links']: + link['ethernet_mac_address'] = link['ethernet_mac_address'].upper() + + ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) + config_name2mac = {} + for n in ncfg['config']: + if n['type'] == 'physical': + config_name2mac[n['name']] = n['mac_address'] + + expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', + 'enp0s2': 'fa:16:3e:d4:57:ad'} + self.assertEqual(expected, config_name2mac) + + def test_unknown_device_types_accepted(self): + # If we don't recognise a link, we should treat it as physical for a + # best-effort boot + my_netdata = deepcopy(NETWORK_DATA) + my_netdata['links'][0]['type'] = 'my-special-link-type' + + ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) + config_name2mac = {} + for n in ncfg['config']: + if n['type'] == 'physical': + config_name2mac[n['name']] = n['mac_address'] + + expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', + 'enp0s2': 'fa:16:3e:d4:57:ad'} + self.assertEqual(expected, config_name2mac) + + # We should, however, warn the user that we don't recognise the type + self.assertIn('Unknown network_data link type (my-special-link-type)', + self.logs.getvalue()) + + +def cfg_ds_from_dir(base_d, files=None): + run = os.path.join(base_d, "run") + os.mkdir(run) + cfg_ds = ds.DataSourceConfigDrive( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': run})) + cfg_ds.seed_dir = os.path.join(base_d, "seed") + if files: + populate_dir(cfg_ds.seed_dir, files) + cfg_ds.known_macs = KNOWN_MACS.copy() + if not cfg_ds.get_data(): + raise RuntimeError("Data source did not extract itself from" + " seed directory %s" % cfg_ds.seed_dir) + return cfg_ds + + +def populate_ds_from_read_config(cfg_ds, source, results): + """Patch the DataSourceConfigDrive from the results of + read_config_drive_dir hopefully in line with what it would have + if cfg_ds.get_data had been successfully called""" + cfg_ds.source = source + cfg_ds.metadata = results.get('metadata') + cfg_ds.ec2_metadata = results.get('ec2-metadata') + cfg_ds.userdata_raw = results.get('userdata') + cfg_ds.version = results.get('version') + cfg_ds.network_json = results.get('networkdata') + cfg_ds._network_config = openstack.convert_net_json( + cfg_ds.network_json, known_macs=KNOWN_MACS) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py new file mode 100644 index 00000000..351bf7ba --- /dev/null +++ b/tests/unittests/sources/test_digitalocean.py @@ -0,0 +1,372 @@ +# Copyright 
(C) 2014 Neal Shrader +# +# Author: Neal Shrader +# Author: Ben Howard +# Author: Scott Moser +# +# This file is part of cloud-init. See LICENSE file for license information. + +import json + +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import DataSourceDigitalOcean +from cloudinit.sources.helpers import digitalocean + +from tests.unittests.helpers import mock, CiTestCase + +DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"] +DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co" + +# the following JSON was taken from a droplet (that's why it's a string) +DO_META = json.loads(""" +{ + "droplet_id": "22532410", + "hostname": "utl-96268", + "vendor_data": "vendordata goes here", + "user_data": "userdata goes here", + "public_keys": "", + "auth_key": "authorization_key", + "region": "nyc3", + "interfaces": { + "private": [ + { + "ipv4": { + "ip_address": "10.132.6.205", + "netmask": "255.255.0.0", + "gateway": "10.132.0.1" + }, + "mac": "04:01:57:d1:9e:02", + "type": "private" + } + ], + "public": [ + { + "ipv4": { + "ip_address": "192.0.0.20", + "netmask": "255.255.255.0", + "gateway": "104.236.0.1" + }, + "ipv6": { + "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000", + "cidr": 64, + "gateway": "2604:A880:0800:0000:0000:0000:0000:0001" + }, + "anchor_ipv4": { + "ip_address": "10.0.0.5", + "netmask": "255.255.0.0", + "gateway": "10.0.0.1" + }, + "mac": "04:01:57:d1:9e:01", + "type": "public" + } + ] + }, + "floating_ip": { + "ipv4": { + "active": false + } + }, + "dns": { + "nameservers": [ + "2001:4860:4860::8844", + "2001:4860:4860::8888", + "8.8.8.8" + ] + } +} +""") + +# This has no private interface +DO_META_2 = { + "droplet_id": 27223699, + "hostname": "smtest1", + "vendor_data": "\n".join([ + ('"Content-Type: multipart/mixed; ' + 'boundary=\"===============8645434374073493512==\"'), + 'MIME-Version: 1.0', + '', + '--===============8645434374073493512==', + 'MIME-Version: 1.0' + 'Content-Type: text/cloud-config; charset="us-ascii"' + 'Content-Transfer-Encoding: 7bit' + 'Content-Disposition: attachment; filename="cloud-config"' + '', + '#cloud-config', + 'disable_root: false', + 'manage_etc_hosts: true', + '', + '', + '--===============8645434374073493512==' + ]), + "public_keys": [ + "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies" + ], + "auth_key": "88888888888888888888888888888888", + "region": "nyc3", + "interfaces": { + "public": [{ + "ipv4": { + "ip_address": "45.55.249.133", + "netmask": "255.255.192.0", + "gateway": "45.55.192.1" + }, + "anchor_ipv4": { + "ip_address": "10.17.0.5", + "netmask": "255.255.0.0", + "gateway": "10.17.0.1" + }, + "mac": "ae:cc:08:7c:88:00", + "type": "public" + }] + }, + "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}}, + "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, + "tags": None, +} + +DO_META['public_keys'] = DO_SINGLE_KEY + +MD_URL = 'http://169.254.169.254/metadata/v1.json' + + +def _mock_dmi(): + return (True, DO_META.get('id')) + + +class TestDataSourceDigitalOcean(CiTestCase): + """ + Test reading the meta-data + """ + def setUp(self): + super(TestDataSourceDigitalOcean, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self, get_sysinfo=_mock_dmi): + ds = DataSourceDigitalOcean.DataSourceDigitalOcean( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds.use_ip4LL = False + if get_sysinfo is not None: + ds._get_sysinfo = get_sysinfo + return ds + +
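+    # Typical flow in the tests below: build the datasource via get_ds()
+    # (with sysinfo mocked so it believes it runs on a droplet), mock
+    # digitalocean.read_metadata to return a DO_META variant, and then
+    # assert on what ds.get_data() populated.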
@mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo') + def test_returns_false_not_on_docean(self, m_read_sysinfo): + m_read_sysinfo.return_value = (False, None) + ds = self.get_ds(get_sysinfo=None) + self.assertEqual(False, ds.get_data()) + self.assertTrue(m_read_sysinfo.called) + + @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') + def test_metadata(self, mock_readmd): + mock_readmd.return_value = DO_META.copy() + + ds = self.get_ds() + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw()) + self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw()) + self.assertEqual(DO_META.get('region'), ds.availability_zone) + self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id()) + self.assertEqual(DO_META.get('hostname'), ds.get_hostname()) + + # Single key + self.assertEqual([DO_META.get('public_keys')], + ds.get_public_ssh_keys()) + + self.assertIsInstance(ds.get_public_ssh_keys(), list) + + @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') + def test_multiple_ssh_keys(self, mock_readmd): + metadata = DO_META.copy() + metadata['public_keys'] = DO_MULTIPLE_KEYS + mock_readmd.return_value = metadata.copy() + + ds = self.get_ds() + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + # Multiple keys + self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys()) + self.assertIsInstance(ds.get_public_ssh_keys(), list) + + +class TestNetworkConvert(CiTestCase): + + def _get_networking(self): + self.m_get_by_mac.return_value = { + '04:01:57:d1:9e:01': 'ens1', + '04:01:57:d1:9e:02': 'ens2', + 'b8:ae:ed:75:5f:9a': 'enp0s25', + 'ae:cc:08:7c:88:00': 'meta2p1'} + netcfg = digitalocean.convert_network_configuration( + DO_META['interfaces'], DO_META['dns']['nameservers']) + self.assertIn('config', netcfg) + return netcfg + + def setUp(self): + super(TestNetworkConvert, self).setUp() + self.add_patch('cloudinit.net.get_interfaces_by_mac', 'm_get_by_mac') + + def test_networking_defined(self): + netcfg = self._get_networking() + self.assertIsNotNone(netcfg) + dns_defined = False + + for part in netcfg.get('config'): + n_type = part.get('type') + print("testing part ", n_type, "\n", json.dumps(part, indent=3)) + + if n_type == 'nameserver': + n_address = part.get('address') + self.assertIsNotNone(n_address) + self.assertEqual(len(n_address), 3) + + dns_resolvers = DO_META["dns"]["nameservers"] + for x in n_address: + self.assertIn(x, dns_resolvers) + dns_defined = True + + else: + n_subnets = part.get('subnets') + n_name = part.get('name') + n_mac = part.get('mac_address') + + self.assertIsNotNone(n_type) + self.assertIsNotNone(n_subnets) + self.assertIsNotNone(n_name) + self.assertIsNotNone(n_mac) + + self.assertTrue(dns_defined) + + def _get_nic_definition(self, int_type, expected_name): + """helper function to return if_type (i.e.
public) and the expected + name used by cloud-init (i.e. eth0)""" + netcfg = self._get_networking() + meta_def = (DO_META.get('interfaces')).get(int_type)[0] + + self.assertEqual(int_type, meta_def.get('type')) + + for nic_def in netcfg.get('config'): + print(nic_def) + if nic_def.get('name') == expected_name: + return nic_def, meta_def + + def _get_match_subn(self, subnets, ip_addr): + """get the matching subnet definition based on ip address""" + for subn in subnets: + address = subn.get('address') + self.assertIsNotNone(address) + + # equals won't work because of ipv6 addressing being in + # cidr notation, i.e. fe00::1/64 + if ip_addr in address: + print(json.dumps(subn, indent=3)) + return subn + + def test_correct_gateways_defined(self): + """test to make sure the eth0 ipv4 and ipv6 gateways are defined""" + netcfg = self._get_networking() + gateways = [] + for nic_def in netcfg.get('config'): + if nic_def.get('type') != 'physical': + continue + for subn in nic_def.get('subnets'): + if 'gateway' in subn: + gateways.append(subn.get('gateway')) + + # we should have two gateways, one ipv4 and one ipv6 + self.assertEqual(len(gateways), 2) + + # make sure the ipv4 gateway is there + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv4_def = meta_def.get('ipv4') + self.assertIn(ipv4_def.get('gateway'), gateways) + + # make sure the ipv6 gateway is there + ipv6_def = meta_def.get('ipv6') + self.assertIn(ipv6_def.get('gateway'), gateways) + + def test_public_interface_defined(self): + """test that the public interface is defined as eth0""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + self.assertEqual('eth0', nic_def.get('name')) + self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) + self.assertEqual('physical', nic_def.get('type')) + + def test_private_interface_defined(self): + """test that the private interface is defined as eth1""" + (nic_def, meta_def) = self._get_nic_definition('private', 'eth1') + self.assertEqual('eth1', nic_def.get('name')) + self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) + self.assertEqual('physical', nic_def.get('type')) + + def test_public_interface_ipv6(self): + """test public ipv6 addressing""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv6_def = meta_def.get('ipv6') + self.assertIsNotNone(ipv6_def) + + subn_def = self._get_match_subn(nic_def.get('subnets'), + ipv6_def.get('ip_address')) + + cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'), + ipv6_def.get('cidr')) + + self.assertEqual(cidr_notated_address, subn_def.get('address')) + self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway')) + + def test_public_interface_ipv4(self): + """test public ipv4 addressing""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv4_def = meta_def.get('ipv4') + self.assertIsNotNone(ipv4_def) + + subn_def = self._get_match_subn(nic_def.get('subnets'), + ipv4_def.get('ip_address')) + + self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) + self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway')) + + def test_public_interface_anchor_ipv4(self): + """test public anchor ipv4 addressing""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv4_def = meta_def.get('anchor_ipv4') + self.assertIsNotNone(ipv4_def) + + subn_def = self._get_match_subn(nic_def.get('subnets'), + ipv4_def.get('ip_address')) + + self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) + self.assertNotIn('gateway',
subn_def) + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_convert_without_private(self, m_get_by_mac): + m_get_by_mac.return_value = { + 'b8:ae:ed:75:5f:9a': 'enp0s25', + 'ae:cc:08:7c:88:00': 'meta2p1'} + netcfg = digitalocean.convert_network_configuration( + DO_META_2['interfaces'], DO_META_2['dns']['nameservers']) + + # print(netcfg) + byname = {} + for i in netcfg['config']: + if 'name' in i: + if i['name'] in byname: + raise ValueError("name '%s' in config twice: %s" % + (i['name'], netcfg)) + byname[i['name']] = i + self.assertTrue('eth0' in byname) + self.assertTrue('subnets' in byname['eth0']) + eth0 = byname['eth0'] + self.assertEqual( + sorted(['45.55.249.133', '10.17.0.5']), + sorted([i['address'] for i in eth0['subnets']])) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py new file mode 100644 index 00000000..19c2bbcd --- /dev/null +++ b/tests/unittests/sources/test_ec2.py @@ -0,0 +1,978 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import copy +import httpretty +import json +import requests +from unittest import mock + +from cloudinit import helpers +from cloudinit.sources import DataSourceEc2 as ec2 +from tests.unittests import helpers as test_helpers + + +DYNAMIC_METADATA = { + "instance-identity": { + "document": json.dumps({ + "devpayProductCodes": None, + "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], + "availabilityZone": "us-west-2b", + "privateIp": "10.158.112.84", + "version": "2017-09-30", + "instanceId": "my-identity-id", + "billingProducts": None, + "instanceType": "t2.micro", + "accountId": "123456789012", + "imageId": "ami-5fb8c835", + "pendingTime": "2016-11-19T16:32:11Z", + "architecture": "x86_64", + "kernelId": None, + "ramdiskId": None, + "region": "us-west-2" + }) + } +} + + +# collected from api version 2016-09-02/ with +# python3 -c 'import json +# from cloudinit.ec2_utils import get_instance_metadata as gm +# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))' +# Note that the MAC addresses have been modified to sort in the opposite order +# to the device-number attribute, to test LP: #1876312 +DEFAULT_METADATA = { + "ami-id": "ami-8b92b4ee", + "ami-launch-index": "0", + "ami-manifest-path": "(unknown)", + "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, + "hostname": "ip-172-31-31-158.us-east-2.compute.internal", + "instance-action": "none", + "instance-id": "i-0a33f80f09c96477f", + "instance-type": "t2.small", + "local-hostname": "ip-172-3-3-15.us-east-2.compute.internal", + "local-ipv4": "172.3.3.15", + "mac": "06:17:04:d7:26:09", + "metrics": {"vhostmd": ""}, + "network": { + "interfaces": { + "macs": { + "06:17:04:d7:26:09": { + "device-number": "0", + "interface-id": "eni-e44ef49e", + "ipv4-associations": {"13.59.77.202": "172.3.3.15"}, + "ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc", + "local-hostname": ("ip-172-3-3-15.us-east-2." + "compute.internal"), + "local-ipv4s": "172.3.3.15", + "mac": "06:17:04:d7:26:09", + "owner-id": "950047163771", + "public-hostname": ("ec2-13-59-77-202.us-east-2." 
+ "compute.amazonaws.com"), + "public-ipv4s": "13.59.77.202", + "security-group-ids": "sg-5a61d333", + "security-groups": "wide-open", + "subnet-id": "subnet-20b8565b", + "subnet-ipv4-cidr-block": "172.31.16.0/20", + "subnet-ipv6-cidr-blocks": "2600:1f16:aeb:b20b::/64", + "vpc-id": "vpc-87e72bee", + "vpc-ipv4-cidr-block": "172.31.0.0/16", + "vpc-ipv4-cidr-blocks": "172.31.0.0/16", + "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56" + }, + "06:17:04:d7:26:08": { + "device-number": "1", # Only IPv4 local config + "interface-id": "eni-e44ef49f", + "ipv4-associations": {"": "172.3.3.16"}, + "ipv6s": "", # No IPv6 config + "local-hostname": ("ip-172-3-3-16.us-east-2." + "compute.internal"), + "local-ipv4s": "172.3.3.16", + "mac": "06:17:04:d7:26:08", + "owner-id": "950047163771", + "public-hostname": ("ec2-172-3-3-16.us-east-2." + "compute.amazonaws.com"), + "public-ipv4s": "", # No public ipv4 config + "security-group-ids": "sg-5a61d333", + "security-groups": "wide-open", + "subnet-id": "subnet-20b8565b", + "subnet-ipv4-cidr-block": "172.31.16.0/20", + "subnet-ipv6-cidr-blocks": "", + "vpc-id": "vpc-87e72bee", + "vpc-ipv4-cidr-block": "172.31.0.0/16", + "vpc-ipv4-cidr-blocks": "172.31.0.0/16", + "vpc-ipv6-cidr-blocks": "" + } + } + } + }, + "placement": {"availability-zone": "us-east-2b"}, + "profile": "default-hvm", + "public-hostname": "ec2-13-59-77-202.us-east-2.compute.amazonaws.com", + "public-ipv4": "13.59.77.202", + "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, + "reservation-id": "r-01efbc9996bac1bd6", + "security-groups": "my-wide-open", + "services": {"domain": "amazonaws.com", "partition": "aws"}, +} + +# collected from api version 2018-09-24/ with +# python3 -c 'import json +# from cloudinit.ec2_utils import get_instance_metadata as gm +# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))' + +NIC1_MD_IPV4_IPV6_MULTI_IP = { + "device-number": "0", + "interface-id": "eni-0d6335689899ce9cc", + "ipv4-associations": { + "18.218.219.181": "172.31.44.13" + }, + "ipv6s": [ + "2600:1f16:292:100:c187:593c:4349:136", + "2600:1f16:292:100:f153:12a3:c37c:11f9", + "2600:1f16:292:100:f152:2222:3333:4444" + ], + "local-hostname": ("ip-172-31-44-13.us-east-2." + "compute.internal"), + "local-ipv4s": [ + "172.31.44.13", + "172.31.45.70" + ], + "mac": "0a:07:84:3d:6e:38", + "owner-id": "329910648901", + "public-hostname": ("ec2-18-218-219-181.us-east-2." 
+ "compute.amazonaws.com"), + "public-ipv4s": "18.218.219.181", + "security-group-ids": "sg-0c387755222ba8d2e", + "security-groups": "launch-wizard-4", + "subnet-id": "subnet-9d7ba0d1", + "subnet-ipv4-cidr-block": "172.31.32.0/20", + "subnet_ipv6_cidr_blocks": "2600:1f16:292:100::/64", + "vpc-id": "vpc-a07f62c8", + "vpc-ipv4-cidr-block": "172.31.0.0/16", + "vpc-ipv4-cidr-blocks": "172.31.0.0/16", + "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56" +} + +NIC2_MD = { + "device-number": "1", + "interface-id": "eni-043cdce36ded5e79f", + "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", + "local-ipv4s": "172.31.47.221", + "mac": "0a:75:69:92:e2:16", + "owner-id": "329910648901", + "security-group-ids": "sg-0d68fef37d8cc9b77", + "security-groups": "launch-wizard-17", + "subnet-id": "subnet-9d7ba0d1", + "subnet-ipv4-cidr-block": "172.31.32.0/20", + "vpc-id": "vpc-a07f62c8", + "vpc-ipv4-cidr-block": "172.31.0.0/16", + "vpc-ipv4-cidr-blocks": "172.31.0.0/16" +} + +SECONDARY_IP_METADATA_2018_09_24 = { + "ami-id": "ami-0986c2ac728528ac2", + "ami-launch-index": "0", + "ami-manifest-path": "(unknown)", + "block-device-mapping": { + "ami": "/dev/sda1", + "root": "/dev/sda1" + }, + "events": { + "maintenance": { + "history": "[]", + "scheduled": "[]" + } + }, + "hostname": "ip-172-31-44-13.us-east-2.compute.internal", + "identity-credentials": { + "ec2": { + "info": { + "AccountId": "329910648901", + "Code": "Success", + "LastUpdated": "2019-07-06T14:22:56Z" + } + } + }, + "instance-action": "none", + "instance-id": "i-069e01e8cc43732f8", + "instance-type": "t2.micro", + "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", + "local-ipv4": "172.31.44.13", + "mac": "0a:07:84:3d:6e:38", + "metrics": { + "vhostmd": "" + }, + "network": { + "interfaces": { + "macs": { + "0a:07:84:3d:6e:38": NIC1_MD_IPV4_IPV6_MULTI_IP, + } + } + }, + "placement": { + "availability-zone": "us-east-2c" + }, + "profile": "default-hvm", + "public-hostname": ( + "ec2-18-218-219-181.us-east-2.compute.amazonaws.com"), + "public-ipv4": "18.218.219.181", + "public-keys": { + "yourkeyname,e": [ + "ssh-rsa AAAAW...DZ yourkeyname" + ] + }, + "reservation-id": "r-09b4917135cdd33be", + "security-groups": "launch-wizard-4", + "services": { + "domain": "amazonaws.com", + "partition": "aws" + } +} + +M_PATH_NET = 'cloudinit.sources.DataSourceEc2.net.' + + +def _register_ssh_keys(rfunc, base_url, keys_data): + """handle ssh key inconsistencies. + + public-keys in the ec2 metadata is inconsistently formated compared + to other entries. + Given keys_data of {name1: pubkey1, name2: pubkey2} + + This registers the following urls: + base_url 0={name1}\n1={name2} # (for each name) + base_url/ 0={name1}\n1={name2} # (for each name) + base_url/0 openssh-key + base_url/0/ openssh-key + base_url/0/openssh-key {pubkey1} + base_url/0/openssh-key/ {pubkey1} + ... + """ + + base_url = base_url.rstrip("/") + odd_index = '\n'.join( + ["{0}={1}".format(n, name) + for n, name in enumerate(sorted(keys_data))]) + + rfunc(base_url, odd_index) + rfunc(base_url + "/", odd_index) + + for n, name in enumerate(sorted(keys_data)): + val = keys_data[name] + if isinstance(val, list): + val = '\n'.join(val) + burl = base_url + "/%s" % n + rfunc(burl, "openssh-key") + rfunc(burl + "/", "openssh-key") + rfunc(burl + "/%s/openssh-key" % name, val) + rfunc(burl + "/%s/openssh-key/" % name, val) + + +def register_mock_metaserver(base_url, data): + """Register with httpretty a ec2 metadata like service serving 'data'. 
+ + If given a dictionary, it will populate urls under base_url for + that dictionary. For example, input of + {"instance-id": "i-abc", "mac": "00:16:3e:00:00:00"} + populates + base_url with 'instance-id\nmac' + base_url/ with 'instance-id\nmac' + base_url/instance-id with i-abc + base_url/mac with 00:16:3e:00:00:00 + In the index, references to lists or dictionaries have a trailing /. + """ + def register_helper(register, base_url, body): + if not isinstance(base_url, str): + register(base_url, body) + return + base_url = base_url.rstrip("/") + if isinstance(body, str): + register(base_url, body) + elif isinstance(body, list): + register(base_url, '\n'.join(body) + '\n') + register(base_url + '/', '\n'.join(body) + '\n') + elif isinstance(body, dict): + vals = [] + for k, v in body.items(): + if k == 'public-keys': + _register_ssh_keys( + register, base_url + '/public-keys/', v) + continue + suffix = k.rstrip("/") + if not isinstance(v, (str, list)): + suffix += "/" + vals.append(suffix) + url = base_url + '/' + suffix + register_helper(register, url, v) + register(base_url, '\n'.join(vals) + '\n') + register(base_url + '/', '\n'.join(vals) + '\n') + elif body is None: + register(base_url, 'not found', status=404) + + def myreg(*argc, **kwargs): + url = argc[0] + method = httpretty.PUT if ec2.API_TOKEN_ROUTE in url else httpretty.GET + return httpretty.register_uri(method, *argc, **kwargs) + + register_helper(myreg, base_url, data) + + +class TestEc2(test_helpers.HttprettyTestCase): + with_logs = True + maxDiff = None + + valid_platform_data = { + 'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', + 'uuid_source': 'dmi', + 'serial': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', + } + + def setUp(self): + super(TestEc2, self).setUp() + self.datasource = ec2.DataSourceEc2 + self.metadata_addr = self.datasource.metadata_urls[0] + self.tmp = self.tmp_dir() + + def data_url(self, version, data_item='meta-data'): + """Return a metadata url based on the version provided.""" + return '/'.join([self.metadata_addr, version, data_item]) + + def _patch_add_cleanup(self, mpath, *args, **kwargs): + p = mock.patch(mpath, *args, **kwargs) + p.start() + self.addCleanup(p.stop) + + def _setup_ds(self, sys_cfg, platform_data, md, md_version=None): + self.uris = [] + distro = {} + paths = helpers.Paths({'run_dir': self.tmp}) + if sys_cfg is None: + sys_cfg = {} + ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) + if not md_version: + md_version = ds.min_metadata_version + if platform_data is not None: + self._patch_add_cleanup( + "cloudinit.sources.DataSourceEc2._collect_platform_data", + return_value=platform_data) + + if md: + all_versions = ( + [ds.min_metadata_version] + ds.extended_metadata_versions) + token_url = self.data_url('latest', data_item='api/token') + register_mock_metaserver(token_url, 'API-TOKEN') + for version in all_versions: + metadata_url = self.data_url(version) + '/' + if version == md_version: + # Register all metadata for desired version + register_mock_metaserver( + metadata_url, md.get('md', DEFAULT_METADATA)) + userdata_url = self.data_url( + version, data_item='user-data') + register_mock_metaserver(userdata_url, md.get('ud', '')) + identity_url = self.data_url( + version, data_item='dynamic/instance-identity') + register_mock_metaserver( + identity_url, md.get('id', DYNAMIC_METADATA)) + else: + instance_id_url = metadata_url + 'instance-id' + if version == ds.min_metadata_version: + # Add min_metadata_version service availability check + register_mock_metaserver( + 
instance_id_url, DEFAULT_METADATA['instance-id']) + else: + # Register 404s for all unrequested extended versions + register_mock_metaserver(instance_id_url, None) + return ds + + def test_network_config_property_returns_version_2_network_data(self): + """network_config property returns network version 2 for metadata""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': DEFAULT_METADATA}) + find_fallback_path = M_PATH_NET + 'find_fallback_nic' + with mock.patch(find_fallback_path) as m_find_fallback: + m_find_fallback.return_value = 'eth9' + ds.get_data() + + mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': '06:17:04:d7:26:09'}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': True}}} + patch_path = M_PATH_NET + 'get_interfaces_by_mac' + get_interface_mac_path = M_PATH_NET + 'get_interface_mac' + with mock.patch(patch_path) as m_get_interfaces_by_mac: + with mock.patch(find_fallback_path) as m_find_fallback: + with mock.patch(get_interface_mac_path) as m_get_mac: + m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} + m_find_fallback.return_value = 'eth9' + m_get_mac.return_value = mac1 + self.assertEqual(expected, ds.network_config) + + def test_network_config_property_set_dhcp4(self): + """network_config property configures dhcp4 on nics with local-ipv4s. + + Only one device is configured based on get_interfaces_by_mac even when + multiple MACs exist in metadata. + """ + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': DEFAULT_METADATA}) + find_fallback_path = M_PATH_NET + 'find_fallback_nic' + with mock.patch(find_fallback_path) as m_find_fallback: + m_find_fallback.return_value = 'eth9' + ds.get_data() + + mac1 = '06:17:04:d7:26:08' # IPv4 only in DEFAULT_METADATA + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': mac1.lower()}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': False}}} + patch_path = M_PATH_NET + 'get_interfaces_by_mac' + get_interface_mac_path = M_PATH_NET + 'get_interface_mac' + with mock.patch(patch_path) as m_get_interfaces_by_mac: + with mock.patch(find_fallback_path) as m_find_fallback: + with mock.patch(get_interface_mac_path) as m_get_mac: + m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} + m_find_fallback.return_value = 'eth9' + m_get_mac.return_value = mac1 + self.assertEqual(expected, ds.network_config) + + def test_network_config_property_secondary_private_ips(self): + """network_config property configures any secondary ipv4 addresses. + + Only one device is configured based on get_interfaces_by_mac even when + multiple MACs exist in metadata. 
+ """ + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': SECONDARY_IP_METADATA_2018_09_24}) + find_fallback_path = M_PATH_NET + 'find_fallback_nic' + with mock.patch(find_fallback_path) as m_find_fallback: + m_find_fallback.return_value = 'eth9' + ds.get_data() + + mac1 = '0a:07:84:3d:6e:38' # 1 secondary IPv4 and 2 secondary IPv6 + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': mac1}, 'set-name': 'eth9', + 'addresses': ['172.31.45.70/20', + '2600:1f16:292:100:f152:2222:3333:4444/128', + '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], + 'dhcp4': True, 'dhcp6': True}}} + patch_path = M_PATH_NET + 'get_interfaces_by_mac' + get_interface_mac_path = M_PATH_NET + 'get_interface_mac' + with mock.patch(patch_path) as m_get_interfaces_by_mac: + with mock.patch(find_fallback_path) as m_find_fallback: + with mock.patch(get_interface_mac_path) as m_get_mac: + m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} + m_find_fallback.return_value = 'eth9' + m_get_mac.return_value = mac1 + self.assertEqual(expected, ds.network_config) + + def test_network_config_property_is_cached_in_datasource(self): + """network_config property is cached in DataSourceEc2.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': DEFAULT_METADATA}) + ds._network_config = {'cached': 'data'} + self.assertEqual({'cached': 'data'}, ds.network_config) + + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): + """Refresh the network_config Ec2 cache if network key is absent. + + This catches an upgrade issue where obj.pkl contained stale metadata + which lacked newly required network key. + """ + old_metadata = copy.deepcopy(DEFAULT_METADATA) + old_metadata.pop('network') + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': old_metadata}) + self.assertTrue(ds.get_data()) + # Provide new revision of metadata that contains network data + register_mock_metaserver( + 'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA) + mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA + get_interface_mac_path = M_PATH_NET + 'get_interfaces_by_mac' + ds.fallback_nic = 'eth9' + with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac: + m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} + nc = ds.network_config # Will re-crawl network metadata + self.assertIsNotNone(nc) + self.assertIn( + 'Refreshing stale metadata from prior to upgrade', + self.logs.getvalue()) + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': True}}} + self.assertEqual(expected, ds.network_config) + + def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): + """get_instance-id gets DataSourceEc2Local.identity if not present. + + This handles an upgrade case where the old pickled datasource didn't + set up self.identity, but 'systemctl cloud-init init' runs + get_instance_id which traces on missing self.identity. lp:1748354. 
+ """ + self.datasource = ec2.DataSourceEc2Local + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + # Mock 404s on all versions except latest + all_versions = ( + [ds.min_metadata_version] + ds.extended_metadata_versions) + for ver in all_versions[:-1]: + register_mock_metaserver( + 'http://169.254.169.254/{0}/meta-data/instance-id'.format(ver), + None) + ds.metadata_address = 'http://169.254.169.254' + register_mock_metaserver( + '{0}/{1}/meta-data/'.format(ds.metadata_address, all_versions[-1]), + DEFAULT_METADATA) + # Register dynamic/instance-identity document which we now read. + register_mock_metaserver( + '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]), + DYNAMIC_METADATA) + ds._cloud_name = ec2.CloudNames.AWS + # Setup cached metadata on the Datasource + ds.metadata = DEFAULT_METADATA + self.assertEqual('my-identity-id', ds.get_instance_id()) + + def test_classic_instance_true(self): + """If no vpc-id in metadata, is_classic_instance must return true.""" + md_copy = copy.deepcopy(DEFAULT_METADATA) + ifaces_md = md_copy.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + del mac_data['vpc-id'] + + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': md_copy}) + self.assertTrue(ds.get_data()) + self.assertTrue(ds.is_classic_instance()) + + def test_classic_instance_false(self): + """If vpc-id in metadata, is_classic_instance must return false.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + self.assertFalse(ds.is_classic_instance()) + + def test_aws_inaccessible_imds_service_fails_with_retries(self): + """Inaccessibility of http://169.254.169.254 are retried.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md=None) + + conn_error = requests.exceptions.ConnectionError( + '[Errno 113] no route to host' + ) + + mock_success = mock.MagicMock(contents=b'fakesuccess') + mock_success.ok.return_value = True + + with mock.patch('cloudinit.url_helper.readurl') as m_readurl: + m_readurl.side_effect = (conn_error, conn_error, mock_success) + with mock.patch('cloudinit.url_helper.time.sleep'): + self.assertTrue(ds.wait_for_metadata_service()) + + # Just one /latest/api/token request + self.assertEqual(3, len(m_readurl.call_args_list)) + for readurl_call in m_readurl.call_args_list: + self.assertIn('latest/api/token', readurl_call[0][0]) + + def test_aws_token_403_fails_without_retries(self): + """Verify that 403s fetching AWS tokens are not retried.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md=None) + token_url = self.data_url('latest', data_item='api/token') + httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403) + self.assertFalse(ds.get_data()) + # Just one /latest/api/token request + logs = self.logs.getvalue() + failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0' + expected_logs = [ + 'WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is' + ' disabled. 
Aborting.', + "WARNING: IMDS's HTTP endpoint is probably disabled", + failed_put_log + ] + for log in expected_logs: + self.assertIn(log, logs) + self.assertEqual( + 1, + len([line for line in logs.splitlines() if failed_put_log in line]) + ) + + def test_aws_token_redacted(self): + """Verify that aws tokens are redacted when logged.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + all_logs = self.logs.getvalue().splitlines() + REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" + REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" + logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] + logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] + logs_with_token = [log for log in all_logs if 'API-TOKEN' in log] + self.assertEqual(1, len(logs_with_redacted_ttl)) + self.assertEqual(81, len(logs_with_redacted)) + self.assertEqual(0, len(logs_with_token)) + + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_valid_platform_with_strict_true(self, m_dhcp): + """Valid platform data should return true with strict_id true.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': DEFAULT_METADATA}) + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(0, m_dhcp.call_count) + self.assertEqual('aws', ds.cloud_name) + self.assertEqual('ec2', ds.platform_type) + self.assertEqual('metadata (%s)' % ds.metadata_address, ds.subplatform) + + def test_valid_platform_with_strict_false(self): + """Valid platform data should return true with strict_id false.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + ret = ds.get_data() + self.assertTrue(ret) + + def test_unknown_platform_with_strict_true(self): + """Unknown platform data with strict_id true should return False.""" + uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' + ds = self._setup_ds( + platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': DEFAULT_METADATA}) + ret = ds.get_data() + self.assertFalse(ret) + + def test_unknown_platform_with_strict_false(self): + """Unknown platform data with strict_id false should return True.""" + uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' + ds = self._setup_ds( + platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + ret = ds.get_data() + self.assertTrue(ret) + + def test_ec2_local_returns_false_on_non_aws(self): + """DataSourceEc2Local returns False when platform is not AWS.""" + self.datasource = ec2.DataSourceEc2Local + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + platform_attrs = [ + attr for attr in ec2.CloudNames.__dict__.keys() + if not attr.startswith('__')] + for attr_name in platform_attrs: + platform_name = getattr(ec2.CloudNames, attr_name) + if platform_name != 'aws': + ds._cloud_name = platform_name + ret = ds.get_data() + self.assertEqual('ec2', ds.platform_type) + self.assertFalse(ret) + message = ( + "Local Ec2 mode only supported on ('aws',)," + ' not {0}'.format(platform_name)) + self.assertIn(message, 
self.logs.getvalue()) + + @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') + def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): + """DataSourceEc2Local returns False on BSD. + + FreeBSD dhclient doesn't support dhclient -sf to run in a sandbox. + """ + m_is_freebsd.return_value = True + self.datasource = ec2.DataSourceEc2Local + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + ret = ds.get_data() + self.assertFalse(ret) + self.assertIn( + "FreeBSD doesn't support running dhclient with -sf", + self.logs.getvalue()) + + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') + def test_ec2_local_performs_dhcp_on_non_bsd(self, m_is_bsd, m_dhcp, + m_fallback_nic, m_net): + """Ec2Local returns True for valid platform data on non-BSD with dhcp. + + DataSourceEc2Local will setup initial IPv4 network via dhcp discovery. + Then the metadata services is crawled for more network config info. + When the platform data is valid, return True. + """ + + m_fallback_nic.return_value = 'eth9' + m_is_bsd.return_value = False + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'broadcast-address': '192.168.2.255'}] + self.datasource = ec2.DataSourceEc2Local + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + + ret = ds.get_data() + self.assertTrue(ret) + m_dhcp.assert_called_once_with('eth9', None) + m_net.assert_called_once_with( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) + self.assertIn('Crawl of metadata service took', self.logs.getvalue()) + + +class TestGetSecondaryAddresses(test_helpers.CiTestCase): + + mac = '06:17:04:d7:26:ff' + with_logs = True + + def test_md_with_no_secondary_addresses(self): + """Empty list is returned when nic metadata contains no secondary ip""" + self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac)) + + def test_md_with_secondary_v4_and_v6_addresses(self): + """All secondary addresses are returned from nic metadata""" + self.assertEqual( + ['172.31.45.70/20', '2600:1f16:292:100:f152:2222:3333:4444/128', + '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], + ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac)) + + def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self): + """Any invalid subnet-ipv(4|6)-cidr-block values use defaults""" + invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP) + invalid_cidr_md['subnet-ipv4-cidr-block'] = "something-unexpected" + invalid_cidr_md['subnet-ipv6-cidr-block'] = "not/sure/what/this/is" + self.assertEqual( + ['172.31.45.70/24', '2600:1f16:292:100:f152:2222:3333:4444/128', + '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], + ec2.get_secondary_addresses(invalid_cidr_md, self.mac)) + expected_logs = [ + "WARNING: Could not parse subnet-ipv4-cidr-block" + " something-unexpected for mac 06:17:04:d7:26:ff." + " ipv4 network config prefix defaults to /24", + "WARNING: Could not parse subnet-ipv6-cidr-block" + " not/sure/what/this/is for mac 06:17:04:d7:26:ff." 
+ " ipv6 network config prefix defaults to /128" + ] + logs = self.logs.getvalue() + for log in expected_logs: + self.assertIn(log, logs) + + +class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): + + def setUp(self): + super(TestConvertEc2MetadataNetworkConfig, self).setUp() + self.mac1 = '06:17:04:d7:26:09' + interface_dict = copy.deepcopy( + DEFAULT_METADATA['network']['interfaces']['macs'][self.mac1]) + # These tests are written assuming the base interface doesn't have IPv6 + interface_dict.pop('ipv6s') + self.network_metadata = { + 'interfaces': {'macs': {self.mac1: interface_dict}}} + + def test_convert_ec2_metadata_network_config_skips_absent_macs(self): + """Any mac absent from metadata is skipped by network config.""" + macs_to_nics = {self.mac1: 'eth9', 'DE:AD:BE:EF:FF:FF': 'vitualnic2'} + + # DE:AD:BE:EF:FF:FF represented by OS but not in metadata + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': False}}} + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + self.network_metadata, macs_to_nics)) + + def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self): + """Config dhcp6 when ipv6s is in metadata for a mac.""" + macs_to_nics = {self.mac1: 'eth9'} + network_metadata_ipv6 = copy.deepcopy(self.network_metadata) + nic1_metadata = ( + network_metadata_ipv6['interfaces']['macs'][self.mac1]) + nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' + nic1_metadata.pop('public-ipv4s') + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': True}}} + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + network_metadata_ipv6, macs_to_nics)) + + def test_convert_ec2_metadata_network_config_local_only_dhcp4(self): + """Config dhcp4 when there are no public addresses in public-ipv4s.""" + macs_to_nics = {self.mac1: 'eth9'} + network_metadata_ipv6 = copy.deepcopy(self.network_metadata) + nic1_metadata = ( + network_metadata_ipv6['interfaces']['macs'][self.mac1]) + nic1_metadata['local-ipv4s'] = '172.3.3.15' + nic1_metadata.pop('public-ipv4s') + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': False}}} + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + network_metadata_ipv6, macs_to_nics)) + + def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): + """Config dhcp4 on fallback_nic when there are no ipv4 addresses.""" + macs_to_nics = {self.mac1: 'eth9'} + network_metadata_ipv6 = copy.deepcopy(self.network_metadata) + nic1_metadata = ( + network_metadata_ipv6['interfaces']['macs'][self.mac1]) + nic1_metadata['public-ipv4s'] = '' + + # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config. 
+ expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': False}}} + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + network_metadata_ipv6, macs_to_nics, fallback_nic='eth9')) + + def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): + """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4.""" + macs_to_nics = {self.mac1: 'eth9'} + network_metadata_both = copy.deepcopy(self.network_metadata) + nic1_metadata = ( + network_metadata_both['interfaces']['macs'][self.mac1]) + nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' + nic1_metadata.pop('public-ipv4s') + nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': True}}} + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + network_metadata_both, macs_to_nics)) + + def test_convert_ec2_metadata_network_config_handles_multiple_nics(self): + """DHCP route-metric increases on secondary NICs for IPv4 and IPv6.""" + mac2 = '06:17:04:d7:26:08' + macs_to_nics = {self.mac1: 'eth9', mac2: 'eth10'} + network_metadata_both = copy.deepcopy(self.network_metadata) + # Add 2nd nic info + network_metadata_both['interfaces']['macs'][mac2] = NIC2_MD + nic1_metadata = ( + network_metadata_both['interfaces']['macs'][self.mac1]) + nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' + nic1_metadata.pop('public-ipv4s') # No public-ipv4 IPs in cfg + nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc + expected = {'version': 2, 'ethernets': { + 'eth9': { + 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, 'dhcp6-overrides': {'route-metric': 100}}, + 'eth10': { + 'match': {'macaddress': mac2}, 'set-name': 'eth10', + 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200}, + 'dhcp6': False}}} + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + network_metadata_both, macs_to_nics)) + + def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): + """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists.""" + macs_to_nics = {self.mac1: 'eth9'} + network_metadata_both = copy.deepcopy(self.network_metadata) + nic1_metadata = ( + network_metadata_both['interfaces']['macs'][self.mac1]) + nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': True}}} + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + network_metadata_both, macs_to_nics)) + + def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self): + """Convert Ec2 Metadata calls get_interfaces_by_mac by default.""" + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': self.mac1}, + 'set-name': 'eth9', 'dhcp4': True, 'dhcp6': False}}} + patch_path = M_PATH_NET + 'get_interfaces_by_mac' + with mock.patch(patch_path) as m_get_interfaces_by_mac: + m_get_interfaces_by_mac.return_value = {self.mac1: 'eth9'} + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config(self.network_metadata)) + + +class TesIdentifyPlatform(test_helpers.CiTestCase): + + def collmock(self, **kwargs): + """return non-special 
_collect_platform_data updated with changes."""
+        unspecial = {
+            'asset_tag': '3857-0037-2746-7462-1818-3997-77',
+            'serial': 'H23-C4J3JV-R6',
+            'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2',
+            'uuid_source': 'dmi',
+            'vendor': 'tothecloud',
+        }
+        unspecial.update(**kwargs)
+        return unspecial
+
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_zstack(self, m_collect):
+        """zstack should be identified if chassis-asset-tag ends in .zstack.io
+        """
+        m_collect.return_value = self.collmock(asset_tag='123456.zstack.io')
+        self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform())
+
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_zstack_full_domain_only(self, m_collect):
+        """zstack asset-tag matching should match only on full domain boundary.
+        """
+        m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io')
+        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_e24cloud(self, m_collect):
+        """e24cloud identified if vendor is e24cloud"""
+        m_collect.return_value = self.collmock(vendor='e24cloud')
+        self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform())
+
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_e24cloud_negative(self, m_collect):
+        """e24cloud not identified if vendor is not exactly 'e24cloud'."""
+        m_collect.return_value = self.collmock(vendor='e24cloudyday')
+        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_exoscale.py b/tests/unittests/sources/test_exoscale.py
new file mode 100644
index 00000000..b0ffb7a5
--- /dev/null
+++ b/tests/unittests/sources/test_exoscale.py
@@ -0,0 +1,211 @@
+# Author: Mathieu Corbin
+# Author: Christopher Glass
+#
+# This file is part of cloud-init. See LICENSE file for license information.
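+#
+# These tests exercise DataSourceExoscale entirely against httpretty-mocked
+# endpoints for user-data, meta-data and the password server; nothing here
+# contacts a real Exoscale metadata service.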
+from cloudinit import helpers +from cloudinit.sources.DataSourceExoscale import ( + API_VERSION, + DataSourceExoscale, + METADATA_URL, + get_password, + PASSWORD_SERVER_PORT, + read_metadata) +from tests.unittests.helpers import HttprettyTestCase, mock +from cloudinit import util + +import httpretty +import os +import requests + + +TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL, + PASSWORD_SERVER_PORT, + API_VERSION) + +TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, + API_VERSION) + +TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, + API_VERSION) + + +@httpretty.activate +class TestDatasourceExoscale(HttprettyTestCase): + + def setUp(self): + super(TestDatasourceExoscale, self).setUp() + self.tmp = self.tmp_dir() + self.password_url = TEST_PASSWORD_URL + self.metadata_url = TEST_METADATA_URL + self.userdata_url = TEST_USERDATA_URL + + def test_password_saved(self): + """The password is not set when it is not found + in the metadata service.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="saved_password") + self.assertFalse(get_password()) + + def test_password_empty(self): + """No password is set if the metadata service returns + an empty string.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="") + self.assertFalse(get_password()) + + def test_password(self): + """The password is set to what is found in the metadata + service.""" + expected_password = "p@ssw0rd" + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + password = get_password() + self.assertEqual(expected_password, password) + + def test_activate_removes_set_passwords_semaphore(self): + """Allow set_passwords to run every boot by removing the semaphore.""" + path = helpers.Paths({'cloud_dir': self.tmp}) + sem_dir = self.tmp_path('instance/sem', dir=self.tmp) + util.ensure_dir(sem_dir) + sem_file = os.path.join(sem_dir, 'config_set_passwords') + with open(sem_file, 'w') as stream: + stream.write('') + ds = DataSourceExoscale({}, None, path) + ds.activate(None, None) + self.assertFalse(os.path.exists(sem_file)) + + def test_get_data(self): + """The datasource conforms to expected behavior when supplied + full test data.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_password = "p@ssw0rd" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'ssh_pwauth': True, + 'password': expected_password, + 'chpasswd': { + 'expire': False, + }}) + + def test_get_data_saved_password(self): + """The datasource conforms to expected behavior when saved_password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, 
None, path) + ds._is_platform_viable = lambda: True + expected_answer = "saved_password" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), {}) + + def test_get_data_no_password(self): + """The datasource conforms to expected behavior when no password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), {}) + + @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') + def test_read_metadata_when_password_server_unreachable(self, m_password): + """The read_metadata function returns partial results in case the + password server (only) is unreachable.""" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + + m_password.side_effect = requests.Timeout('Fake Connection Timeout') + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + + result = read_metadata() + + self.assertIsNone(result.get("password")) + self.assertEqual(result.get("user-data").decode("utf-8"), + expected_userdata) + + def test_non_viable_platform(self): + """The datasource fails fast when the platform is not viable.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: False + self.assertFalse(ds._get_data()) diff --git a/tests/unittests/sources/test_gce.py b/tests/unittests/sources/test_gce.py new file mode 100644 index 00000000..dc768e99 --- /dev/null +++ b/tests/unittests/sources/test_gce.py @@ -0,0 +1,388 @@ +# Copyright (C) 2014 Vaidas Jablonskis 
+# +# Author: Vaidas Jablonskis +# +# This file is part of cloud-init. See LICENSE file for license information. + +import datetime +import httpretty +import json +import re +from unittest import mock +from urllib.parse import urlparse + +from base64 import b64encode, b64decode + +from cloudinit import distros +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import DataSourceGCE + +from tests.unittests import helpers as test_helpers + + +GCE_META = { + 'instance/id': '123', + 'instance/zone': 'foo/bar', + 'instance/hostname': 'server.project-foo.local', +} + +GCE_META_PARTIAL = { + 'instance/id': '1234', + 'instance/hostname': 'server.project-bar.local', + 'instance/zone': 'bar/baz', +} + +GCE_META_ENCODING = { + 'instance/id': '12345', + 'instance/hostname': 'server.project-baz.local', + 'instance/zone': 'baz/bang', + 'instance/attributes': { + 'user-data': b64encode(b'#!/bin/echo baz\n').decode('utf-8'), + 'user-data-encoding': 'base64', + } +} + +GCE_USER_DATA_TEXT = { + 'instance/id': '12345', + 'instance/hostname': 'server.project-baz.local', + 'instance/zone': 'baz/bang', + 'instance/attributes': { + 'user-data': '#!/bin/sh\necho hi mom\ntouch /run/up-now\n', + } +} + +HEADERS = {'Metadata-Flavor': 'Google'} +MD_URL_RE = re.compile( + r'http://metadata.google.internal/computeMetadata/v1/.*') +GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' + 'v1/instance/guest-attributes/hostkeys/') + + +def _set_mock_metadata(gce_meta=None): + if gce_meta is None: + gce_meta = GCE_META + + def _request_callback(method, uri, headers): + url_path = urlparse(uri).path + if url_path.startswith('/computeMetadata/v1/'): + path = url_path.split('/computeMetadata/v1/')[1:][0] + recursive = path.endswith('/') + path = path.rstrip('/') + else: + path = None + if path in gce_meta: + response = gce_meta.get(path) + if recursive: + response = json.dumps(response) + return (200, headers, response) + else: + return (404, headers, '') + + # reset is needed. 
https://github.com/gabrielfalcao/HTTPretty/issues/316 + httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback) + + +@httpretty.activate +class TestDataSourceGCE(test_helpers.HttprettyTestCase): + + def _make_distro(self, dtype, def_user=None): + cfg = dict(settings.CFG_BUILTIN) + cfg['system_info']['distro'] = dtype + paths = helpers.Paths(cfg['system_info']['paths']) + distro_cls = distros.fetch(dtype) + if def_user: + cfg['system_info']['default_user'] = def_user.copy() + distro = distro_cls(dtype, cfg['system_info'], paths) + return distro + + def setUp(self): + tmp = self.tmp_dir() + self.ds = DataSourceGCE.DataSourceGCE( + settings.CFG_BUILTIN, None, + helpers.Paths({'run_dir': tmp})) + ppatch = self.m_platform_reports_gce = mock.patch( + 'cloudinit.sources.DataSourceGCE.platform_reports_gce') + self.m_platform_reports_gce = ppatch.start() + self.m_platform_reports_gce.return_value = True + self.addCleanup(ppatch.stop) + self.add_patch('time.sleep', 'm_sleep') # just to speed up tests + super(TestDataSourceGCE, self).setUp() + + def test_connection(self): + _set_mock_metadata() + success = self.ds.get_data() + self.assertTrue(success) + + req_header = httpretty.last_request().headers + for header_name, expected_value in HEADERS.items(): + self.assertEqual(expected_value, req_header.get(header_name)) + + def test_metadata(self): + # UnicodeDecodeError if set to ds.userdata instead of userdata_raw + meta = GCE_META.copy() + meta['instance/attributes/user-data'] = b'/bin/echo \xff\n' + + _set_mock_metadata() + self.ds.get_data() + + shostname = GCE_META.get('instance/hostname').split('.')[0] + self.assertEqual(shostname, + self.ds.get_hostname()) + + self.assertEqual(GCE_META.get('instance/id'), + self.ds.get_instance_id()) + + self.assertEqual(GCE_META.get('instance/attributes/user-data'), + self.ds.get_userdata_raw()) + + # test partial metadata (missing user-data in particular) + def test_metadata_partial(self): + _set_mock_metadata(GCE_META_PARTIAL) + self.ds.get_data() + + self.assertEqual(GCE_META_PARTIAL.get('instance/id'), + self.ds.get_instance_id()) + + shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0] + self.assertEqual(shostname, self.ds.get_hostname()) + + def test_userdata_no_encoding(self): + """check that user-data is read.""" + _set_mock_metadata(GCE_USER_DATA_TEXT) + self.ds.get_data() + self.assertEqual( + GCE_USER_DATA_TEXT['instance/attributes']['user-data'].encode(), + self.ds.get_userdata_raw()) + + def test_metadata_encoding(self): + """user-data is base64 encoded if user-data-encoding is 'base64'.""" + _set_mock_metadata(GCE_META_ENCODING) + self.ds.get_data() + + instance_data = GCE_META_ENCODING.get('instance/attributes') + decoded = b64decode(instance_data.get('user-data')) + self.assertEqual(decoded, self.ds.get_userdata_raw()) + + def test_missing_required_keys_return_false(self): + for required_key in ['instance/id', 'instance/zone', + 'instance/hostname']: + meta = GCE_META_PARTIAL.copy() + del meta[required_key] + _set_mock_metadata(meta) + self.assertEqual(False, self.ds.get_data()) + httpretty.reset() + + def test_no_ssh_keys_metadata(self): + _set_mock_metadata() + self.ds.get_data() + self.assertEqual([], self.ds.get_public_ssh_keys()) + + def test_cloudinit_ssh_keys(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(0)), + 'user:{0}'.format(invalid_key.format(0)), + ]), + 'ssh-keys': '\n'.join([ + 
'cloudinit:{0}'.format(valid_key.format(1)), + 'user:{0}'.format(invalid_key.format(1)), + ]), + } + instance_attributes = { + 'ssh-keys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(2)), + 'user:{0}'.format(invalid_key.format(2)), + ]), + 'block-project-ssh-keys': 'False', + } + + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + self.ds.get_data() + + expected = [valid_key.format(key) for key in range(3)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + @mock.patch("cloudinit.sources.DataSourceGCE.ug_util") + def test_default_user_ssh_keys(self, mock_ug_util): + mock_ug_util.normalize_users_groups.return_value = None, None + mock_ug_util.extract_default.return_value = 'ubuntu', None + ubuntu_ds = DataSourceGCE.DataSourceGCE( + settings.CFG_BUILTIN, self._make_distro('ubuntu'), + helpers.Paths({'run_dir': self.tmp_dir()})) + + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(0)), + 'user:{0}'.format(invalid_key.format(0)), + ]), + 'ssh-keys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(1)), + 'user:{0}'.format(invalid_key.format(1)), + ]), + } + instance_attributes = { + 'ssh-keys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(2)), + 'user:{0}'.format(invalid_key.format(2)), + ]), + 'block-project-ssh-keys': 'False', + } + + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + ubuntu_ds.get_data() + + expected = [valid_key.format(key) for key in range(3)] + self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys())) + + def test_instance_ssh_keys_override(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + } + instance_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(valid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(1)), + 'block-project-ssh-keys': 'False', + } + + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + self.ds.get_data() + + expected = [valid_key.format(key) for key in range(2)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + def test_block_project_ssh_keys_override(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + } + instance_attributes = { + 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(0)), + 'block-project-ssh-keys': 'True', + } + + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + self.ds.get_data() + + expected = [valid_key.format(0)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + def test_only_last_part_of_zone_used_for_availability_zone(self): + _set_mock_metadata() + r = self.ds.get_data() + self.assertEqual(True, r) + self.assertEqual('bar', self.ds.availability_zone) + + @mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher") + def 
test_get_data_returns_false_if_not_on_gce(self, m_fetcher): + self.m_platform_reports_gce.return_value = False + ret = self.ds.get_data() + self.assertEqual(False, ret) + m_fetcher.assert_not_called() + + def test_has_expired(self): + + def _get_timestamp(days): + format_str = '%Y-%m-%dT%H:%M:%S+0000' + today = datetime.datetime.now() + timestamp = today + datetime.timedelta(days=days) + return timestamp.strftime(format_str) + + past = _get_timestamp(-1) + future = _get_timestamp(1) + ssh_keys = { + None: False, + '': False, + 'Invalid': False, + 'user:ssh-rsa key user@domain.com': False, + 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False, + 'user:ssh-rsa key google-ssh': False, + 'user:ssh-rsa key google-ssh {invalid:json}': False, + 'user:ssh-rsa key google-ssh {"userName":"user"}': False, + 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False, + 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False, + 'user:xyz key google-ssh {"expireOn":"%s"}' % past: True, + } + + for key, expired in ssh_keys.items(): + self.assertEqual(DataSourceGCE._has_expired(key), expired) + + def test_parse_public_keys_non_ascii(self): + public_key_data = [ + 'cloudinit:rsa ssh-ke%s invalid' % chr(165), + 'use%sname:rsa ssh-key' % chr(174), + 'cloudinit:test 1', + 'default:test 2', + 'user:test 3', + ] + expected = ['test 1', 'test 2'] + found = DataSourceGCE._parse_public_keys( + public_key_data, default_user='default') + self.assertEqual(sorted(found), sorted(expected)) + + @mock.patch("cloudinit.url_helper.readurl") + def test_publish_host_keys(self, m_readurl): + hostkeys = [('ssh-rsa', 'asdfasdf'), + ('ssh-ed25519', 'qwerqwer')] + readurl_expected_calls = [ + mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS, + request_method='PUT', + url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')), + mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS, + request_method='PUT', + url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')), + ] + self.ds.publish_host_keys(hostkeys) + m_readurl.assert_has_calls(readurl_expected_calls, any_order=True) + + @mock.patch( + "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4", + autospec=True, + ) + @mock.patch( + "cloudinit.sources.DataSourceGCE.DataSourceGCELocal.fallback_interface" + ) + def test_local_datasource_uses_ephemeral_dhcp(self, _m_fallback, m_dhcp): + _set_mock_metadata() + ds = DataSourceGCE.DataSourceGCELocal( + sys_cfg={}, distro=None, paths=None + ) + ds._get_data() + assert m_dhcp.call_count == 1 + + @mock.patch( + "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4", + autospec=True, + ) + def test_datasource_doesnt_use_ephemeral_dhcp(self, m_dhcp): + _set_mock_metadata() + ds = DataSourceGCE.DataSourceGCE(sys_cfg={}, distro=None, paths=None) + ds._get_data() + assert m_dhcp.call_count == 0 + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_hetzner.py b/tests/unittests/sources/test_hetzner.py new file mode 100644 index 00000000..5af0f3db --- /dev/null +++ b/tests/unittests/sources/test_hetzner.py @@ -0,0 +1,142 @@ +# Copyright (C) 2018 Jonas Keidel +# +# Author: Jonas Keidel +# +# This file is part of cloud-init. See LICENSE file for license information. 
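+#
+# These tests feed canned YAML metadata and user-data through mocked helpers
+# (read_metadata, read_userdata, get_hcloud_data); no Hetzner metadata
+# service is contacted.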
+ +from cloudinit.sources import DataSourceHetzner +import cloudinit.sources.helpers.hetzner as hc_helper +from cloudinit import util, settings, helpers + +from tests.unittests.helpers import mock, CiTestCase + +import base64 +import pytest + +METADATA = util.load_yaml(""" +hostname: cloudinit-test +instance-id: 123456 +local-ipv4: '' +network-config: + config: + - mac_address: 96:00:00:08:19:da + name: eth0 + subnets: + - dns_nameservers: + - 213.133.99.99 + - 213.133.100.100 + - 213.133.98.98 + ipv4: true + type: dhcp + type: physical + - name: eth0:0 + subnets: + - address: 2a01:4f8:beef:beef::1/64 + gateway: fe80::1 + ipv6: true + routes: + - gateway: fe80::1%eth0 + netmask: 0 + network: '::' + type: static + type: physical + version: 1 +network-sysconfig: "DEVICE='eth0'\nTYPE=Ethernet\nBOOTPROTO=dhcp\n\ + ONBOOT='yes'\nHWADDR=96:00:00:08:19:da\n\ + IPV6INIT=yes\nIPV6ADDR=2a01:4f8:beef:beef::1/64\n\ + IPV6_DEFAULTGW=fe80::1%eth0\nIPV6_AUTOCONF=no\n\ + DNS1=213.133.99.99\nDNS2=213.133.100.100\n" +public-ipv4: 192.168.0.1 +public-keys: +- ssh-ed25519 \ + AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh \ + test-key@workstation +vendor_data: "test" +""") + +USERDATA = b"""#cloud-config +runcmd: +- [touch, /root/cloud-init-worked ] +""" + + +class TestDataSourceHetzner(CiTestCase): + """ + Test reading the meta-data + """ + def setUp(self): + super(TestDataSourceHetzner, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self): + ds = DataSourceHetzner.DataSourceHetzner( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + return ds + + @mock.patch('cloudinit.net.EphemeralIPv4Network') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') + @mock.patch('cloudinit.sources.helpers.hetzner.read_userdata') + @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data') + def test_read_data(self, m_get_hcloud_data, m_usermd, m_readmd, + m_fallback_nic, m_net): + m_get_hcloud_data.return_value = (True, + str(METADATA.get('instance-id'))) + m_readmd.return_value = METADATA.copy() + m_usermd.return_value = USERDATA + m_fallback_nic.return_value = 'eth0' + + ds = self.get_ds() + ret = ds.get_data() + self.assertTrue(ret) + + m_net.assert_called_once_with( + 'eth0', '169.254.0.1', + 16, '169.254.255.255' + ) + + self.assertTrue(m_readmd.called) + + self.assertEqual(METADATA.get('hostname'), ds.get_hostname()) + + self.assertEqual(METADATA.get('public-keys'), + ds.get_public_ssh_keys()) + + self.assertIsInstance(ds.get_public_ssh_keys(), list) + self.assertEqual(ds.get_userdata_raw(), USERDATA) + self.assertEqual(ds.get_vendordata_raw(), METADATA.get('vendor_data')) + + @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data') + def test_not_on_hetzner_returns_false(self, m_get_hcloud_data, + m_find_fallback, m_read_md): + """If helper 'get_hcloud_data' returns False, + return False from get_data.""" + m_get_hcloud_data.return_value = (False, None) + ds = self.get_ds() + ret = ds.get_data() + + self.assertFalse(ret) + # These are a white box attempt to ensure it did not search. 
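+        # get_hcloud_data returning (False, None) signals the platform check
+        # failed, so neither nic discovery nor the metadata/userdata helpers
+        # should be reached at all.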
+ m_find_fallback.assert_not_called() + m_read_md.assert_not_called() + + +class TestMaybeB64Decode: + """Test the maybe_b64decode helper function.""" + + @pytest.mark.parametrize("invalid_input", (str("not bytes"), int(4))) + def test_raises_error_on_non_bytes(self, invalid_input): + """maybe_b64decode should raise error if data is not bytes.""" + with pytest.raises(TypeError): + hc_helper.maybe_b64decode(invalid_input) + + @pytest.mark.parametrize("in_data,expected", [ + # If data is not b64 encoded, then return value should be the same. + (b"this is my data", b"this is my data"), + # If data is b64 encoded, then return value should be decoded. + (base64.b64encode(b"data"), b"data"), + ]) + def test_happy_path(self, in_data, expected): + assert expected == hc_helper.maybe_b64decode(in_data) diff --git a/tests/unittests/sources/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py new file mode 100644 index 00000000..38e8e892 --- /dev/null +++ b/tests/unittests/sources/test_ibmcloud.py @@ -0,0 +1,343 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.helpers import Paths +from cloudinit.sources import DataSourceIBMCloud as ibm +from tests.unittests import helpers as test_helpers +from cloudinit import util + +import base64 +import copy +import json +from textwrap import dedent + +mock = test_helpers.mock + +D_PATH = "cloudinit.sources.DataSourceIBMCloud." + + +@mock.patch(D_PATH + "_is_xen", return_value=True) +@mock.patch(D_PATH + "_is_ibm_provisioning") +@mock.patch(D_PATH + "util.blkid") +class TestGetIBMPlatform(test_helpers.CiTestCase): + """Test the get_ibm_platform helper.""" + + blkid_base = { + "/dev/xvda1": { + "DEVNAME": "/dev/xvda1", "LABEL": "cloudimg-bootfs", + "TYPE": "ext3"}, + "/dev/xvda2": { + "DEVNAME": "/dev/xvda2", "LABEL": "cloudimg-rootfs", + "TYPE": "ext4"}, + } + + blkid_metadata_disk = { + "/dev/xvdh1": { + "DEVNAME": "/dev/xvdh1", "LABEL": "METADATA", "TYPE": "vfat", + "SEC_TYPE": "msdos", "UUID": "681B-8C5D", + "PARTUUID": "3d631e09-01"}, + } + + blkid_oscode_disk = { + "/dev/xvdh": { + "DEVNAME": "/dev/xvdh", "LABEL": "config-2", "TYPE": "vfat", + "SEC_TYPE": "msdos", "UUID": ibm.IBM_CONFIG_UUID} + } + + def setUp(self): + self.blkid_metadata = copy.deepcopy(self.blkid_base) + self.blkid_metadata.update(copy.deepcopy(self.blkid_metadata_disk)) + + self.blkid_oscode = copy.deepcopy(self.blkid_base) + self.blkid_oscode.update(copy.deepcopy(self.blkid_oscode_disk)) + + def test_id_template_live_metadata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_LIVE_METADATA.""" + m_blkid.return_value = self.blkid_metadata + m_is_prov.return_value = False + self.assertEqual( + (ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"), + ibm.get_ibm_platform()) + + def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_PROVISIONING_METADATA.""" + m_blkid.return_value = self.blkid_metadata + m_is_prov.return_value = True + self.assertEqual( + (ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"), + ibm.get_ibm_platform()) + + def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_PROVISIONING_NODATA.""" + m_blkid.return_value = self.blkid_base + m_is_prov.return_value = True + self.assertEqual( + (ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None), + ibm.get_ibm_platform()) + + def test_id_os_code(self, m_blkid, m_is_prov, _m_xen): + """Identify OS_CODE.""" + m_blkid.return_value = self.blkid_oscode + m_is_prov.return_value = False + 
self.assertEqual((ibm.Platforms.OS_CODE, "/dev/xvdh"), + ibm.get_ibm_platform()) + + def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen): + """Test against false positive on openstack with non-ibm UUID.""" + blkid = self.blkid_oscode + blkid["/dev/xvdh"]["UUID"] = "9999-9999" + m_blkid.return_value = blkid + m_is_prov.return_value = False + self.assertEqual((None, None), ibm.get_ibm_platform()) + + +@mock.patch(D_PATH + "_read_system_uuid", return_value=None) +@mock.patch(D_PATH + "get_ibm_platform") +class TestReadMD(test_helpers.CiTestCase): + """Test the read_datasource helper.""" + + template_md = { + "files": [], + "network_config": {"content_path": "/content/interfaces"}, + "hostname": "ci-fond-ram", + "name": "ci-fond-ram", + "domain": "testing.ci.cloud-init.org", + "meta": {"dsmode": "net"}, + "uuid": "8e636730-9f5d-c4a5-327c-d7123c46e82f", + "public_keys": {"1091307": "ssh-rsa AAAAB3NzaC1...Hw== ci-pubkey"}, + } + + oscode_md = { + "hostname": "ci-grand-gannet.testing.ci.cloud-init.org", + "name": "ci-grand-gannet", + "uuid": "2f266908-8e6c-4818-9b5c-42e9cc66a785", + "random_seed": "bm90LXJhbmRvbQo=", + "crypt_key": "ssh-rsa AAAAB3NzaC1yc2..n6z/", + "configuration_token": "eyJhbGciOi..M3ZA", + "public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"}, + } + + content_interfaces = dedent("""\ + auto lo + iface lo inet loopback + + auto eth0 + allow-hotplug eth0 + iface eth0 inet static + address 10.82.43.5 + netmask 255.255.255.192 + """) + + userdata = b"#!/bin/sh\necho hi mom\n" + # meta.js file gets json encoded userdata as a list. + meta_js = '["#!/bin/sh\necho hi mom\n"]' + vendor_data = { + "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"} + + network_data = { + "links": [ + {"id": "interface_29402281", "name": "eth0", "mtu": None, + "type": "phy", "ethernet_mac_address": "06:00:f1:bd:da:25"}, + {"id": "interface_29402279", "name": "eth1", "mtu": None, + "type": "phy", "ethernet_mac_address": "06:98:5e:d0:7f:86"} + ], + "networks": [ + {"id": "network_109887563", "link": "interface_29402281", + "type": "ipv4", "ip_address": "10.82.43.2", + "netmask": "255.255.255.192", + "routes": [ + {"network": "10.0.0.0", "netmask": "255.0.0.0", + "gateway": "10.82.43.1"}, + {"network": "161.26.0.0", "netmask": "255.255.0.0", + "gateway": "10.82.43.1"}]}, + {"id": "network_109887551", "link": "interface_29402279", + "type": "ipv4", "ip_address": "108.168.194.252", + "netmask": "255.255.255.248", + "routes": [ + {"network": "0.0.0.0", "netmask": "0.0.0.0", + "gateway": "108.168.194.249"}]} + ], + "services": [ + {"type": "dns", "address": "10.0.80.11"}, + {"type": "dns", "address": "10.0.80.12"} + ], + } + + sysuuid = '7f79ebf5-d791-43c3-a723-854e8389d59f' + + def _get_expected_metadata(self, os_md): + """return expected 'metadata' for data loaded from meta_data.json.""" + os_md = copy.deepcopy(os_md) + renames = ( + ('hostname', 'local-hostname'), + ('uuid', 'instance-id'), + ('public_keys', 'public-keys')) + ret = {} + for osname, mdname in renames: + if osname in os_md: + ret[mdname] = os_md[osname] + if 'random_seed' in os_md: + ret['random_seed'] = base64.b64decode(os_md['random_seed']) + + return ret + + def test_provisioning_md(self, m_platform, m_sysuuid): + """Provisioning env with a metadata disk should return None.""" + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh") + self.assertIsNone(ibm.read_md()) + + def test_provisioning_no_metadata(self, m_platform, m_sysuuid): + """Provisioning env with no 
metadata disk should return None.""" + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None) + self.assertIsNone(ibm.read_md()) + + def test_provisioning_not_ibm(self, m_platform, m_sysuuid): + """Provisioning env but not identified as IBM should return None.""" + m_platform.return_value = (None, None) + self.assertIsNone(ibm.read_md()) + + def test_template_live(self, m_platform, m_sysuuid): + """Template live environment should be identified.""" + tmpdir = self.tmp_dir() + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_LIVE_METADATA, tmpdir) + m_sysuuid.return_value = self.sysuuid + + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.template_md), + 'openstack/latest/user_data': self.userdata, + 'openstack/content/interfaces': self.content_interfaces, + 'meta.js': self.meta_js}) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, + ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertEqual(self.userdata, ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.template_md), + ret['metadata']) + self.assertEqual(self.sysuuid, ret['system-uuid']) + + def test_os_code_live(self, m_platform, m_sysuuid): + """Verify an os_code metadata path.""" + tmpdir = self.tmp_dir() + m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) + netdata = json.dumps(self.network_data) + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), + 'openstack/latest/user_data': self.userdata, + 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), + 'openstack/latest/network_data.json': netdata, + }) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertEqual(self.userdata, ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.oscode_md), + ret['metadata']) + + def test_os_code_live_no_userdata(self, m_platform, m_sysuuid): + """Verify os_code without user-data.""" + tmpdir = self.tmp_dir() + m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), + 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), + }) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertIsNone(ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.oscode_md), + ret['metadata']) + + +class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase): + """Test the _is_ibm_provisioning method.""" + inst_log = "/root/swinstall.log" + prov_cfg = "/root/provisioningConfiguration.cfg" + boot_ref = "/proc/1/environ" + with_logs = True + + def _call_with_root(self, rootd): + self.reRoot(rootd) + return ibm._is_ibm_provisioning() + + def test_no_config(self): + """No provisioning config means not provisioning.""" + self.assertFalse(self._call_with_root(self.tmp_dir())) + + def test_config_only(self): + """A provisioning config without a log means provisioning.""" + rootd = self.tmp_dir() + test_helpers.populate_dir(rootd, {self.prov_cfg: "key=value"}) + self.assertTrue(self._call_with_root(rootd)) + + def test_config_with_old_log(self): + """A config with a log from previous boot is not provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", -30), + self.boot_ref: ("PWD=/", 0)} 
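+        # populate_dir_with_ts takes {path: (content, mtime_offset)} with
+        # offsets (in seconds, relative to now); the install log (-30)
+        # predates the boot reference /proc/1/environ (0), so it reads as a
+        # previous-boot log and provisioning is not detected.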
+ test_helpers.populate_dir_with_ts(rootd, data) + self.assertFalse(self._call_with_root(rootd=rootd)) + self.assertIn("from previous boot", self.logs.getvalue()) + + def test_config_with_new_log(self): + """A config with a log from this boot is provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", 30), + self.boot_ref: ("PWD=/", 0)} + test_helpers.populate_dir_with_ts(rootd, data) + self.assertTrue(self._call_with_root(rootd=rootd)) + self.assertIn("from current boot", self.logs.getvalue()) + + def test_config_and_log_no_reference(self): + """If the config and log existed, but no reference, assume not.""" + rootd = self.tmp_dir() + test_helpers.populate_dir( + rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"}) + self.assertFalse(self._call_with_root(rootd=rootd)) + self.assertIn("no reference file", self.logs.getvalue()) + + +class TestDataSourceIBMCloud(test_helpers.CiTestCase): + + def setUp(self): + super(TestDataSourceIBMCloud, self).setUp() + self.tmp = self.tmp_dir() + self.cloud_dir = self.tmp_path('cloud', dir=self.tmp) + util.ensure_dir(self.cloud_dir) + paths = Paths({'run_dir': self.tmp, 'cloud_dir': self.cloud_dir}) + self.ds = ibm.DataSourceIBMCloud( + sys_cfg={}, distro=None, paths=paths) + + def test_get_data_false(self): + """When read_md returns None, get_data returns False.""" + with mock.patch(D_PATH + 'read_md', return_value=None): + self.assertFalse(self.ds.get_data()) + + def test_get_data_processes_read_md(self): + """get_data processes and caches content returned by read_md.""" + md = { + 'metadata': {}, 'networkdata': 'net', 'platform': 'plat', + 'source': 'src', 'system-uuid': 'uuid', 'userdata': 'ud', + 'vendordata': 'vd'} + with mock.patch(D_PATH + 'read_md', return_value=md): + self.assertTrue(self.ds.get_data()) + self.assertEqual('src', self.ds.source) + self.assertEqual('plat', self.ds.platform) + self.assertEqual({}, self.ds.metadata) + self.assertEqual('ud', self.ds.userdata_raw) + self.assertEqual('net', self.ds.network_json) + self.assertEqual('vd', self.ds.vendordata_pure) + self.assertEqual('uuid', self.ds.system_uuid) + self.assertEqual('ibmcloud', self.ds.cloud_name) + self.assertEqual('ibmcloud', self.ds.platform_type) + self.assertEqual('plat (src)', self.ds.subplatform) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py new file mode 100644 index 00000000..a1d19518 --- /dev/null +++ b/tests/unittests/sources/test_init.py @@ -0,0 +1,771 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
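+#
+# These tests exercise the DataSource base class directly: ds_cfg resolution,
+# get_url_params bounds and overrides, fallback interface discovery and
+# hostname handling, driven by the DataSourceTestSubclassNet stub below.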
+ +import copy +import inspect +import os +import stat + +from cloudinit.event import EventScope, EventType +from cloudinit.helpers import Paths +from cloudinit import importer +from cloudinit.sources import ( + EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, + METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource, + canonical_cloud_id, redact_sensitive_keys) +from tests.unittests.helpers import CiTestCase, mock +from cloudinit.user_data import UserDataProcessor +from cloudinit import util + + +class DataSourceTestSubclassNet(DataSource): + + dsname = 'MyTestSubclass' + url_max_wait = 55 + + def __init__(self, sys_cfg, distro, paths, custom_metadata=None, + custom_userdata=None, get_data_retval=True): + super(DataSourceTestSubclassNet, self).__init__( + sys_cfg, distro, paths) + self._custom_userdata = custom_userdata + self._custom_metadata = custom_metadata + self._get_data_retval = get_data_retval + + def _get_cloud_name(self): + return 'SubclassCloudName' + + def _get_data(self): + if self._custom_metadata: + self.metadata = self._custom_metadata + else: + self.metadata = {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'} + if self._custom_userdata: + self.userdata_raw = self._custom_userdata + else: + self.userdata_raw = 'userdata_raw' + self.vendordata_raw = 'vendordata_raw' + return self._get_data_retval + + +class InvalidDataSourceTestSubclassNet(DataSource): + pass + + +class TestDataSource(CiTestCase): + + with_logs = True + maxDiff = None + + def setUp(self): + super(TestDataSource, self).setUp() + self.sys_cfg = {'datasource': {'_undef': {'key1': False}}} + self.distro = 'distrotest' # generally should be a Distro object + self.paths = Paths({}) + self.datasource = DataSource(self.sys_cfg, self.distro, self.paths) + + def test_datasource_init(self): + """DataSource initializes metadata attributes, ds_cfg and ud_proc.""" + self.assertEqual(self.paths, self.datasource.paths) + self.assertEqual(self.sys_cfg, self.datasource.sys_cfg) + self.assertEqual(self.distro, self.datasource.distro) + self.assertIsNone(self.datasource.userdata) + self.assertEqual({}, self.datasource.metadata) + self.assertIsNone(self.datasource.userdata_raw) + self.assertIsNone(self.datasource.vendordata) + self.assertIsNone(self.datasource.vendordata_raw) + self.assertEqual({'key1': False}, self.datasource.ds_cfg) + self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor) + + def test_datasource_init_gets_ds_cfg_using_dsname(self): + """Init uses DataSource.dsname for sourcing ds_cfg.""" + sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} + distro = 'distrotest' # generally should be a Distro object + datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) + self.assertEqual({'key2': False}, datasource.ds_cfg) + + def test_str_is_classname(self): + """The string representation of the datasource is the classname.""" + self.assertEqual('DataSource', str(self.datasource)) + self.assertEqual( + 'DataSourceTestSubclassNet', + str(DataSourceTestSubclassNet('', '', self.paths))) + + def test_datasource_get_url_params_defaults(self): + """get_url_params default url config settings for the datasource.""" + params = self.datasource.get_url_params() + self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait) + self.assertEqual(params.timeout_seconds, self.datasource.url_timeout) + self.assertEqual(params.num_retries, self.datasource.url_retries) + self.assertEqual(params.sec_between_retries, + 
self.datasource.url_sec_between_retries) + + def test_datasource_get_url_params_subclassed(self): + """Subclasses can override get_url_params defaults.""" + sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} + distro = 'distrotest' # generally should be a Distro object + datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) + expected = (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries, datasource.url_sec_between_retries) + url_params = datasource.get_url_params() + self.assertNotEqual(self.datasource.get_url_params(), url_params) + self.assertEqual(expected, url_params) + + def test_datasource_get_url_params_ds_config_override(self): + """Datasource configuration options can override url param defaults.""" + sys_cfg = { + 'datasource': { + 'MyTestSubclass': { + 'max_wait': '1', 'timeout': '2', + 'retries': '3', 'sec_between_retries': 4 + }}} + datasource = DataSourceTestSubclassNet( + sys_cfg, self.distro, self.paths) + expected = (1, 2, 3, 4) + url_params = datasource.get_url_params() + self.assertNotEqual( + (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries, datasource.url_sec_between_retries), + url_params) + self.assertEqual(expected, url_params) + + def test_datasource_get_url_params_is_zero_or_greater(self): + """get_url_params ignores timeouts with a value below 0.""" + # Set an override that is below 0 which gets ignored. + sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}} + datasource = DataSource(sys_cfg, self.distro, self.paths) + (_max_wait, timeout, _retries, + _sec_between_retries) = datasource.get_url_params() + self.assertEqual(0, timeout) + + def test_datasource_get_url_uses_defaults_on_errors(self): + """On invalid system config values for url_params defaults are used.""" + # All invalid values should be logged + sys_cfg = {'datasource': { + '_undef': { + 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}} + datasource = DataSource(sys_cfg, self.distro, self.paths) + url_params = datasource.get_url_params() + expected = (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries, datasource.url_sec_between_retries) + self.assertEqual(expected, url_params) + logs = self.logs.getvalue() + expected_logs = [ + "Config max_wait 'nope' is not an int, using default '-1'", + "Config timeout 'bug' is not an int, using default '10'", + "Config retries 'nonint' is not an int, using default '5'", + ] + for log in expected_logs: + self.assertIn(log, logs) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_fallback_interface_is_discovered(self, m_get_fallback_nic): + """The fallback_interface is discovered via find_fallback_nic.""" + m_get_fallback_nic.return_value = 'nic9' + self.assertEqual('nic9', self.datasource.fallback_interface) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic): + """Log a warning when fallback_interface can not discover the nic.""" + self.datasource._cloud_name = 'MySupahCloud' + m_get_fallback_nic.return_value = None # Couldn't discover nic + self.assertIsNone(self.datasource.fallback_interface) + self.assertEqual( + 'WARNING: Did not find a fallback interface on MySupahCloud.\n', + self.logs.getvalue()) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic): + """The fallback_interface is cached and won't be rediscovered.""" + self.datasource._fallback_interface = 'nic10' + 
self.assertEqual('nic10', self.datasource.fallback_interface)
+        m_get_fallback_nic.assert_not_called()
+
+    def test__get_data_unimplemented(self):
+        """Raise an error when _get_data is not implemented."""
+        with self.assertRaises(NotImplementedError) as context_manager:
+            self.datasource.get_data()
+        self.assertIn(
+            'Subclasses of DataSource must implement _get_data',
+            str(context_manager.exception))
+        datasource2 = InvalidDataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, self.paths)
+        with self.assertRaises(NotImplementedError) as context_manager:
+            datasource2.get_data()
+        self.assertIn(
+            'Subclasses of DataSource must implement _get_data',
+            str(context_manager.exception))
+
+    def test_get_data_calls_subclass__get_data(self):
+        """Datasource.get_data uses the subclass' version of _get_data."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        self.assertTrue(datasource.get_data())
+        self.assertEqual(
+            {'availability_zone': 'myaz',
+             'local-hostname': 'test-subclass-hostname',
+             'region': 'myregion'},
+            datasource.metadata)
+        self.assertEqual('userdata_raw', datasource.userdata_raw)
+        self.assertEqual('vendordata_raw', datasource.vendordata_raw)
+
+    def test_get_hostname_strips_local_hostname_without_domain(self):
+        """Datasource.get_hostname strips metadata local-hostname of domain."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        self.assertTrue(datasource.get_data())
+        self.assertEqual(
+            'test-subclass-hostname', datasource.metadata['local-hostname'])
+        self.assertEqual('test-subclass-hostname', datasource.get_hostname())
+        datasource.metadata['local-hostname'] = 'hostname.my.domain.com'
+        self.assertEqual('hostname', datasource.get_hostname())
+
+    def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self):
+        """Datasource.get_hostname with fqdn set gets qualified hostname."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        self.assertTrue(datasource.get_data())
+        datasource.metadata['local-hostname'] = 'hostname.my.domain.com'
+        self.assertEqual(
+            'hostname.my.domain.com', datasource.get_hostname(fqdn=True))
+
+    def test_get_hostname_without_metadata_uses_system_hostname(self):
+        """Datasource.get_hostname runs util.get_hostname when no metadata."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        self.assertEqual({}, datasource.metadata)
+        mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
+        with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
+            with mock.patch(mock_fqdn) as m_fqdn:
+                m_gethost.return_value = 'systemhostname.domain.com'
+                m_fqdn.return_value = None  # No matching fqdn in /etc/hosts
+                self.assertEqual('systemhostname', datasource.get_hostname())
+                self.assertEqual(
+                    'systemhostname.domain.com',
+                    datasource.get_hostname(fqdn=True))
+
+    def test_get_hostname_without_metadata_returns_none(self):
+        """Datasource.get_hostname returns None when metadata_only, no MD."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        self.assertEqual({}, datasource.metadata)
+        mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
+        with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
+            with mock.patch(mock_fqdn) as m_fqdn:
+
self.assertIsNone(datasource.get_hostname(metadata_only=True)) + self.assertIsNone( + datasource.get_hostname(fqdn=True, metadata_only=True)) + self.assertEqual([], m_gethost.call_args_list) + self.assertEqual([], m_fqdn.call_args_list) + + def test_get_hostname_without_metadata_prefers_etc_hosts(self): + """Datasource.gethostname prefers /etc/hosts to util.get_hostname.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertEqual({}, datasource.metadata) + mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' + with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: + with mock.patch(mock_fqdn) as m_fqdn: + m_gethost.return_value = 'systemhostname.domain.com' + m_fqdn.return_value = 'fqdnhostname.domain.com' + self.assertEqual('fqdnhostname', datasource.get_hostname()) + self.assertEqual('fqdnhostname.domain.com', + datasource.get_hostname(fqdn=True)) + + def test_get_data_does_not_write_instance_data_on_failure(self): + """get_data does not write INSTANCE_JSON_FILE on get_data False.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + get_data_retval=False) + self.assertFalse(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + self.assertFalse( + os.path.exists(json_file), 'Found unexpected file %s' % json_file) + + def test_get_data_writes_json_instance_data_on_success(self): + """get_data writes INSTANCE_JSON_FILE to run_dir as world readable.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + sys_info = { + "python": "3.7", + "platform": + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", + "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", + "x86_64"], + "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + with mock.patch("cloudinit.util.system_info", return_value=sys_info): + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + expected = { + 'base64_encoded_keys': [], + 'merged_cfg': REDACT_SENSITIVE_VALUE, + 'sensitive_keys': ['merged_cfg'], + 'sys_info': sys_info, + 'v1': { + '_beta_keys': ['subplatform'], + 'availability-zone': 'myaz', + 'availability_zone': 'myaz', + 'cloud-name': 'subclasscloudname', + 'cloud_name': 'subclasscloudname', + 'distro': 'ubuntu', + 'distro_release': 'focal', + 'distro_version': '20.04', + 'instance-id': 'iid-datasource', + 'instance_id': 'iid-datasource', + 'local-hostname': 'test-subclass-hostname', + 'local_hostname': 'test-subclass-hostname', + 'kernel_release': '5.4.0-24-generic', + 'machine': 'x86_64', + 'platform': 'mytestsubclass', + 'public_ssh_keys': [], + 'python_version': '3.7', + 'region': 'myregion', + 'system_platform': + 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', + 'subplatform': 'unknown', + 'variant': 'ubuntu'}, + 'ds': { + + '_doc': EXPERIMENTAL_TEXT, + 'meta_data': {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'}}} + self.assertEqual(expected, util.load_json(content)) + file_stat = os.stat(json_file) + self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + self.assertEqual(expected, util.load_json(content)) + + def test_get_data_writes_redacted_public_json_instance_data(self): + """get_data writes redacted content to public INSTANCE_JSON_FILE.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + 
self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_metadata={ + 'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion', + 'some': {'security-credentials': { + 'cred1': 'sekret', 'cred2': 'othersekret'}}}) + self.assertCountEqual( + ('merged_cfg', 'security-credentials',), + datasource.sensitive_metadata_keys) + sys_info = { + "python": "3.7", + "platform": + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", + "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", + "x86_64"], + "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + with mock.patch("cloudinit.util.system_info", return_value=sys_info): + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + redacted = util.load_json(util.load_file(json_file)) + expected = { + 'base64_encoded_keys': [], + 'merged_cfg': REDACT_SENSITIVE_VALUE, + 'sensitive_keys': [ + 'ds/meta_data/some/security-credentials', 'merged_cfg'], + 'sys_info': sys_info, + 'v1': { + '_beta_keys': ['subplatform'], + 'availability-zone': 'myaz', + 'availability_zone': 'myaz', + 'cloud-name': 'subclasscloudname', + 'cloud_name': 'subclasscloudname', + 'distro': 'ubuntu', + 'distro_release': 'focal', + 'distro_version': '20.04', + 'instance-id': 'iid-datasource', + 'instance_id': 'iid-datasource', + 'local-hostname': 'test-subclass-hostname', + 'local_hostname': 'test-subclass-hostname', + 'kernel_release': '5.4.0-24-generic', + 'machine': 'x86_64', + 'platform': 'mytestsubclass', + 'public_ssh_keys': [], + 'python_version': '3.7', + 'region': 'myregion', + 'system_platform': + 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', + 'subplatform': 'unknown', + 'variant': 'ubuntu'}, + 'ds': { + '_doc': EXPERIMENTAL_TEXT, + 'meta_data': { + 'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion', + 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}} + } + self.assertCountEqual(expected, redacted) + file_stat = os.stat(json_file) + self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + + def test_get_data_writes_json_instance_data_sensitive(self): + """ + get_data writes unmodified data to sensitive file as root-readonly. 
+ """ + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_metadata={ + 'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion', + 'some': {'security-credentials': { + 'cred1': 'sekret', 'cred2': 'othersekret'}}}) + sys_info = { + "python": "3.7", + "platform": + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", + "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", + "x86_64"], + "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + + self.assertCountEqual( + ('merged_cfg', 'security-credentials',), + datasource.sensitive_metadata_keys) + with mock.patch("cloudinit.util.system_info", return_value=sys_info): + datasource.get_data() + sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp) + content = util.load_file(sensitive_json_file) + expected = { + 'base64_encoded_keys': [], + 'merged_cfg': { + '_doc': ( + 'Merged cloud-init system config from ' + '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/' + ), + 'datasource': {'_undef': {'key1': False}}}, + 'sensitive_keys': [ + 'ds/meta_data/some/security-credentials', 'merged_cfg'], + 'sys_info': sys_info, + 'v1': { + '_beta_keys': ['subplatform'], + 'availability-zone': 'myaz', + 'availability_zone': 'myaz', + 'cloud-name': 'subclasscloudname', + 'cloud_name': 'subclasscloudname', + 'distro': 'ubuntu', + 'distro_release': 'focal', + 'distro_version': '20.04', + 'instance-id': 'iid-datasource', + 'instance_id': 'iid-datasource', + 'kernel_release': '5.4.0-24-generic', + 'local-hostname': 'test-subclass-hostname', + 'local_hostname': 'test-subclass-hostname', + 'machine': 'x86_64', + 'platform': 'mytestsubclass', + 'public_ssh_keys': [], + 'python_version': '3.7', + 'region': 'myregion', + 'subplatform': 'unknown', + 'system_platform': + 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', + 'variant': 'ubuntu'}, + 'ds': { + '_doc': EXPERIMENTAL_TEXT, + 'meta_data': { + 'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion', + 'some': { + 'security-credentials': + {'cred1': 'sekret', 'cred2': 'othersekret'}}}} + } + self.assertCountEqual(expected, util.load_json(content)) + file_stat = os.stat(sensitive_json_file) + self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) + self.assertEqual(expected, util.load_json(content)) + + def test_get_data_handles_redacted_unserializable_content(self): + """get_data warns unserializable content in INSTANCE_JSON_FILE.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}}) + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + expected_metadata = { + 'key1': 'val1', + 'key2': { + 'key2.1': "Warning: redacted unserializable type "}} + instance_json = util.load_json(content) + self.assertEqual( + expected_metadata, instance_json['ds']['meta_data']) + + def test_persist_instance_data_writes_ec2_metadata_when_set(self): + """When ec2_metadata class attribute is set, persist to json.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + datasource.ec2_metadata = UNSET + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + instance_data = util.load_json(util.load_file(json_file)) + self.assertNotIn('ec2_metadata', 
instance_data['ds']) + datasource.ec2_metadata = {'ec2stuff': 'is good'} + datasource.persist_instance_data() + instance_data = util.load_json(util.load_file(json_file)) + self.assertEqual( + {'ec2stuff': 'is good'}, + instance_data['ds']['ec2_metadata']) + + def test_persist_instance_data_writes_network_json_when_set(self): + """When network_data.json class attribute is set, persist to json.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + instance_data = util.load_json(util.load_file(json_file)) + self.assertNotIn('network_json', instance_data['ds']) + datasource.network_json = {'network_json': 'is good'} + datasource.persist_instance_data() + instance_data = util.load_json(util.load_file(json_file)) + self.assertEqual( + {'network_json': 'is good'}, + instance_data['ds']['network_json']) + + def test_get_data_base64encodes_unserializable_bytes(self): + """On py3, get_data base64encodes any unserializable content.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + instance_json = util.load_json(content) + self.assertCountEqual( + ['ds/meta_data/key2/key2.1'], + instance_json['base64_encoded_keys']) + self.assertEqual( + {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, + instance_json['ds']['meta_data']) + + def test_get_hostname_subclass_support(self): + """Validate get_hostname signature on all subclasses of DataSource.""" + base_args = inspect.getfullargspec(DataSource.get_hostname) + # Import all DataSource subclasses so we can inspect them. 
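+        # (util.find_modules scans the unit-test tree for module names and
+        # importer.find_module resolves each under cloudinit.sources, so the
+        # DataSource.__subclasses__() walk below sees every datasource.)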
+ modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) + for _loc, name in modules.items(): + mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) + if mod_locs: + importer.import_module(mod_locs[0]) + for child in DataSource.__subclasses__(): + if 'Test' in child.dsname: + continue + self.assertEqual( + base_args, + inspect.getfullargspec(child.get_hostname), + '%s does not implement DataSource.get_hostname params' + % child) + for grandchild in child.__subclasses__(): + self.assertEqual( + base_args, + inspect.getfullargspec(grandchild.get_hostname), + '%s does not implement DataSource.get_hostname params' + % grandchild) + + def test_clear_cached_attrs_resets_cached_attr_class_attributes(self): + """Class attributes listed in cached_attr_defaults are reset.""" + count = 0 + # Setup values for all cached class attributes + for attr, value in self.datasource.cached_attr_defaults: + setattr(self.datasource, attr, count) + count += 1 + self.datasource._dirty_cache = True + self.datasource.clear_cached_attrs() + for attr, value in self.datasource.cached_attr_defaults: + self.assertEqual(value, getattr(self.datasource, attr)) + + def test_clear_cached_attrs_noops_on_clean_cache(self): + """Class attributes listed in cached_attr_defaults are reset.""" + count = 0 + # Setup values for all cached class attributes + for attr, _ in self.datasource.cached_attr_defaults: + setattr(self.datasource, attr, count) + count += 1 + self.datasource._dirty_cache = False # Fake clean cache + self.datasource.clear_cached_attrs() + count = 0 + for attr, _ in self.datasource.cached_attr_defaults: + self.assertEqual(count, getattr(self.datasource, attr)) + count += 1 + + def test_clear_cached_attrs_skips_non_attr_class_attributes(self): + """Skip any cached_attr_defaults which aren't class attributes.""" + self.datasource._dirty_cache = True + self.datasource.clear_cached_attrs() + for attr in ('ec2_metadata', 'network_json'): + self.assertFalse(hasattr(self.datasource, attr)) + + def test_clear_cached_attrs_of_custom_attrs(self): + """Custom attr_values can be passed to clear_cached_attrs.""" + self.datasource._dirty_cache = True + cached_attr_name = self.datasource.cached_attr_defaults[0][0] + setattr(self.datasource, cached_attr_name, 'himom') + self.datasource.myattr = 'orig' + self.datasource.clear_cached_attrs( + attr_defaults=(('myattr', 'updated'),)) + self.assertEqual('himom', getattr(self.datasource, cached_attr_name)) + self.assertEqual('updated', self.datasource.myattr) + + @mock.patch.dict(DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + @mock.patch.dict(DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_update_metadata_only_acts_on_supported_update_events(self): + """update_metadata_if_supported wont get_data on unsupported events.""" + self.assertEqual( + {EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])}, + self.datasource.default_update_events + ) + + def fake_get_data(): + raise Exception('get_data should not be called') + + self.datasource.get_data = fake_get_data + self.assertFalse( + self.datasource.update_metadata_if_supported( + source_event_types=[EventType.BOOT])) + + @mock.patch.dict(DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_update_metadata_returns_true_on_supported_update_event(self): + """update_metadata_if_supported returns get_data on supported events""" + def fake_get_data(): + return True 
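+        # The stub stands in for a real platform crawl; the assertions below
+        # check that update_metadata_if_supported invokes it and returns its
+        # True result for a supported (boot-new-instance) event.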
+
+        self.datasource.get_data = fake_get_data
+        self.datasource._network_config = 'something'
+        self.datasource._dirty_cache = True
+        self.assertTrue(
+            self.datasource.update_metadata_if_supported(
+                source_event_types=[
+                    EventType.BOOT, EventType.BOOT_NEW_INSTANCE]))
+        self.assertEqual(UNSET, self.datasource._network_config)
+
+        self.assertIn(
+            "DEBUG: Update datasource metadata and network config due to"
+            " events: boot-new-instance",
+            self.logs.getvalue()
+        )
+
+
+class TestRedactSensitiveData(CiTestCase):
+
+    def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
+        """When sensitive_keys is absent or empty from metadata do nothing."""
+        md = {'my': 'data'}
+        self.assertEqual(
+            md, redact_sensitive_keys(md, redact_value='redacted'))
+        md['sensitive_keys'] = []
+        self.assertEqual(
+            md, redact_sensitive_keys(md, redact_value='redacted'))
+
+    def test_redact_sensitive_data_redacts_exact_match_name(self):
+        """Only exact matched sensitive_keys are redacted from metadata."""
+        md = {'sensitive_keys': ['md/secure'],
+              'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+        secure_md = copy.deepcopy(md)
+        secure_md['md']['secure'] = 'redacted'
+        self.assertEqual(
+            secure_md,
+            redact_sensitive_keys(md, redact_value='redacted'))
+
+    def test_redact_sensitive_data_redacts_with_default_string(self):
+        """When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
+        md = {'sensitive_keys': ['md/secure'],
+              'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+        secure_md = copy.deepcopy(md)
+        secure_md['md']['secure'] = 'redacted for non-root user'
+        self.assertEqual(
+            secure_md,
+            redact_sensitive_keys(md))
+
+
+class TestCanonicalCloudID(CiTestCase):
+
+    def test_cloud_id_returns_platform_on_unknowns(self):
+        """When region and cloud_name are unknown, return platform."""
+        self.assertEqual(
+            'platform',
+            canonical_cloud_id(cloud_name=METADATA_UNKNOWN,
+                               region=METADATA_UNKNOWN,
+                               platform='platform'))
+
+    def test_cloud_id_returns_platform_on_none(self):
+        """When region and cloud_name are unknown, return platform."""
+        self.assertEqual(
+            'platform',
+            canonical_cloud_id(cloud_name=None,
+                               region=None,
+                               platform='platform'))
+
+    def test_cloud_id_returns_cloud_name_on_unknown_region(self):
+        """When region is unknown, return cloud_name."""
+        for region in (None, METADATA_UNKNOWN):
+            self.assertEqual(
+                'cloudname',
+                canonical_cloud_id(cloud_name='cloudname',
+                                   region=region,
+                                   platform='platform'))
+
+    def test_cloud_id_returns_platform_on_unknown_cloud_name(self):
+        """When region is set but cloud_name is unknown, return platform."""
+        self.assertEqual(
+            'platform',
+            canonical_cloud_id(cloud_name=METADATA_UNKNOWN,
+                               region='region',
+                               platform='platform'))
+
+    def test_cloud_id_aws_based_on_region_and_cloud_name(self):
+        """When cloud_name is aws, return proper cloud-id based on region."""
+        self.assertEqual(
+            'aws-china',
+            canonical_cloud_id(cloud_name='aws',
+                               region='cn-north-1',
+                               platform='platform'))
+        self.assertEqual(
+            'aws',
+            canonical_cloud_id(cloud_name='aws',
+                               region='us-east-1',
+                               platform='platform'))
+        self.assertEqual(
+            'aws-gov',
+            canonical_cloud_id(cloud_name='aws',
+                               region='us-gov-1',
+                               platform='platform'))
+        self.assertEqual(  # Overridden non-aws cloud_name is returned
+            '!aws',
+            canonical_cloud_id(cloud_name='!aws',
+                               region='us-gov-1',
+                               platform='platform'))
+
+    def test_cloud_id_azure_based_on_region_and_cloud_name(self):
+        """Report cloud-id when cloud_name is azure and region is in china."""
+        self.assertEqual(
+
'azure-china', + canonical_cloud_id(cloud_name='azure', + region='chinaeast', + platform='platform')) + self.assertEqual( + 'azure', + canonical_cloud_id(cloud_name='azure', + region='!chinaeast', + platform='platform')) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py new file mode 100644 index 00000000..a6e51f3b --- /dev/null +++ b/tests/unittests/sources/test_lxd.py @@ -0,0 +1,376 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from collections import namedtuple +from copy import deepcopy +import json +import re +import stat +from unittest import mock +import yaml + +import pytest + +from cloudinit.sources import ( + DataSourceLXD as lxd, InvalidMetaDataException, UNSET +) +DS_PATH = "cloudinit.sources.DataSourceLXD." + + +LStatResponse = namedtuple("lstatresponse", "st_mode") + + +NETWORK_V1 = { + "version": 1, + "config": [ + { + "type": "physical", "name": "eth0", + "subnets": [{"type": "dhcp", "control": "auto"}] + } + ] +} + + +def _add_network_v1_device(devname) -> dict: + """Helper to inject device name into default network v1 config.""" + network_cfg = deepcopy(NETWORK_V1) + network_cfg["config"][0]["name"] = devname + return network_cfg + + +LXD_V1_METADATA = { + "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", + "network-config": NETWORK_V1, + "user-data": "#cloud-config\npackages: [sl]\n", + "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n", + "config": { + "user.user-data": + "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", + "user.vendor-data": + "#cloud-config\nruncmd: ['echo vendor-data']\n", + "user.network-config": yaml.safe_dump(NETWORK_V1), + } +} + + +@pytest.fixture +def lxd_metadata(): + return LXD_V1_METADATA + + +@pytest.yield_fixture +def lxd_ds(request, paths, lxd_metadata): + """ + Return an instantiated DataSourceLXD. 
+
+    This also performs the mocking required for the default test case:
+        * ``is_platform_viable`` returns True,
+        * ``read_metadata`` returns ``LXD_V1_METADATA``
+
+    (This uses the paths fixture for the required helpers.Paths object)
+    """
+    with mock.patch(DS_PATH + "is_platform_viable", return_value=True):
+        with mock.patch(DS_PATH + "read_metadata", return_value=lxd_metadata):
+            yield lxd.DataSourceLXD(
+                sys_cfg={}, distro=mock.Mock(), paths=paths
+            )
+
+
+class TestGenerateFallbackNetworkConfig:
+
+    @pytest.mark.parametrize(
+        "uname_machine,systemd_detect_virt,expected", (
+            # When systemd-detect-virt is unavailable (None), the v1
+            # default config is returned unchanged
+            ({}, None, NETWORK_V1),
+            ({}, None, NETWORK_V1),
+            ("anything", "lxc\n", NETWORK_V1),
+            # `uname -m` on kvm determines devname
+            ("x86_64", "kvm\n", _add_network_v1_device("enp5s0")),
+            ("ppc64le", "kvm\n", _add_network_v1_device("enp0s5")),
+            ("s390x", "kvm\n", _add_network_v1_device("enc9"))
+        )
+    )
+    @mock.patch(DS_PATH + "util.system_info")
+    @mock.patch(DS_PATH + "subp.subp")
+    @mock.patch(DS_PATH + "subp.which")
+    def test_net_v2_based_on_network_mode_virt_type_and_uname_machine(
+        self,
+        m_which,
+        m_subp,
+        m_system_info,
+        uname_machine,
+        systemd_detect_virt,
+        expected,
+    ):
+        """Return network config v1 based on uname -m, systemd-detect-virt."""
+        if systemd_detect_virt is None:
+            m_which.return_value = None
+        m_system_info.return_value = {"uname": ["", "", "", "", uname_machine]}
+        m_subp.return_value = (systemd_detect_virt, "")
+        assert expected == lxd.generate_fallback_network_config()
+        if systemd_detect_virt is None:
+            assert 0 == m_subp.call_count
+            assert 0 == m_system_info.call_count
+        else:
+            assert [
+                mock.call(["systemd-detect-virt"])
+            ] == m_subp.call_args_list
+            if systemd_detect_virt != "kvm\n":
+                assert 0 == m_system_info.call_count
+            else:
+                assert 1 == m_system_info.call_count
+
+
+class TestDataSourceLXD:
+    def test_platform_info(self, lxd_ds):
+        assert "LXD" == lxd_ds.dsname
+        assert "lxd" == lxd_ds.cloud_name
+        assert "lxd" == lxd_ds.platform_type
+
+    def test_subplatform(self, lxd_ds):
+        assert "LXD socket API v.
1.0 (/dev/lxd/sock)" == lxd_ds.subplatform
+
+    def test__get_data(self, lxd_ds):
+        """get_data calls read_metadata, setting appropriate instance attrs."""
+        assert UNSET == lxd_ds._crawled_metadata
+        assert UNSET == lxd_ds._network_config
+        assert None is lxd_ds.userdata_raw
+        assert True is lxd_ds._get_data()
+        assert LXD_V1_METADATA == lxd_ds._crawled_metadata
+        # network-config is dumped from YAML
+        assert NETWORK_V1 == lxd_ds._network_config
+        # Any user-data and vendor-data are saved as raw
+        assert LXD_V1_METADATA["user-data"] == lxd_ds.userdata_raw
+        assert LXD_V1_METADATA["vendor-data"] == lxd_ds.vendordata_raw
+
+
+class TestIsPlatformViable:
+    @pytest.mark.parametrize(
+        "exists,lstat_mode,expected", (
+            (False, None, False),
+            (True, stat.S_IFREG, False),
+            (True, stat.S_IFSOCK, True),
+        )
+    )
+    @mock.patch(DS_PATH + "os.lstat")
+    @mock.patch(DS_PATH + "os.path.exists")
+    def test_expected_viable(
+        self, m_exists, m_lstat, exists, lstat_mode, expected
+    ):
+        """Return True only when LXD_SOCKET_PATH exists and is a socket."""
+        m_exists.return_value = exists
+        m_lstat.return_value = LStatResponse(lstat_mode)
+        assert expected is lxd.is_platform_viable()
+        m_exists.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)])
+        if exists:
+            m_lstat.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)])
+        else:
+            assert 0 == m_lstat.call_count
+
+
+class TestReadMetadata:
+    @pytest.mark.parametrize(
+        "url_responses,expected,logs", (
+            (  # Assert non-JSON format from config route
+                {
+                    "http://lxd/1.0/meta-data": "local-hostname: md\n",
+                    "http://lxd/1.0/config": "[NOT_JSON",
+                },
+                InvalidMetaDataException(
+                    "Unable to determine cloud-init config from"
+                    " http://lxd/1.0/config. Expected JSON but found:"
+                    " [NOT_JSON"),
+                ["[GET] [HTTP:200] http://lxd/1.0/meta-data",
+                 "[GET] [HTTP:200] http://lxd/1.0/config"],
+            ),
+            (  # Assert success on just meta-data
+                {
+                    "http://lxd/1.0/meta-data": "local-hostname: md\n",
+                    "http://lxd/1.0/config": "[]",
+                },
+                {
+                    "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+                    "config": {}, "meta-data": "local-hostname: md\n"
+                },
+                ["[GET] [HTTP:200] http://lxd/1.0/meta-data",
+                 "[GET] [HTTP:200] http://lxd/1.0/config"],
+            ),
+            (  # Assert 404s for config routes log skipping
+                {
+                    "http://lxd/1.0/meta-data": "local-hostname: md\n",
+                    "http://lxd/1.0/config":
+                        '["/1.0/config/user.custom1",'
+                        ' "/1.0/config/user.meta-data",'
+                        ' "/1.0/config/user.network-config",'
+                        ' "/1.0/config/user.user-data",'
+                        ' "/1.0/config/user.vendor-data"]',
+                    "http://lxd/1.0/config/user.custom1": "custom1",
+                    "http://lxd/1.0/config/user.meta-data": "",  # 404
+                    "http://lxd/1.0/config/user.network-config": "net-config",
+                    "http://lxd/1.0/config/user.user-data": "",  # 404
+                    "http://lxd/1.0/config/user.vendor-data": "",  # 404
+                },
+                {
+                    "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+                    "config": {
+                        "user.custom1": "custom1",  # Not promoted
+                        "user.network-config": "net-config",
+                    },
+                    "meta-data": "local-hostname: md\n",
+                    "network-config": "net-config",
+                },
+                [
+                    "Skipping http://lxd/1.0/config/user.vendor-data on"
+                    " [HTTP:404]",
+                    "Skipping http://lxd/1.0/config/user.meta-data on"
+                    " [HTTP:404]",
+                    "Skipping http://lxd/1.0/config/user.user-data on"
+                    " [HTTP:404]",
+                    "[GET] [HTTP:200] http://lxd/1.0/config",
+                    "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1",
+                    "[GET] [HTTP:200]"
+                    " http://lxd/1.0/config/user.network-config",
+                ],
+            ),
+            (  # Assert all CONFIG_KEY_ALIASES promoted to top-level keys
+                {
+                    "http://lxd/1.0/meta-data": "local-hostname: md\n",
+
"http://lxd/1.0/config": + '["/1.0/config/user.custom1",' + ' "/1.0/config/user.meta-data",' + ' "/1.0/config/user.network-config",' + ' "/1.0/config/user.user-data",' + ' "/1.0/config/user.vendor-data"]', + "http://lxd/1.0/config/user.custom1": "custom1", + "http://lxd/1.0/config/user.meta-data": "meta-data", + "http://lxd/1.0/config/user.network-config": "net-config", + "http://lxd/1.0/config/user.user-data": "user-data", + "http://lxd/1.0/config/user.vendor-data": "vendor-data", + }, + { + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": { + "user.custom1": "custom1", # Not promoted + "user.meta-data": "meta-data", + "user.network-config": "net-config", + "user.user-data": "user-data", + "user.vendor-data": "vendor-data", + }, + "meta-data": "local-hostname: md\n", + "network-config": "net-config", + "user-data": "user-data", + "vendor-data": "vendor-data", + }, + [ + "[GET] [HTTP:200] http://lxd/1.0/meta-data", + "[GET] [HTTP:200] http://lxd/1.0/config", + "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1", + "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data", + "[GET] [HTTP:200]" + " http://lxd/1.0/config/user.network-config", + "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data", + "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data", + ], + ), + ( # Assert cloud-init.* config key values prefered over user.* + { + "http://lxd/1.0/meta-data": "local-hostname: md\n", + "http://lxd/1.0/config": + '["/1.0/config/user.meta-data",' + ' "/1.0/config/user.network-config",' + ' "/1.0/config/user.user-data",' + ' "/1.0/config/user.vendor-data",' + ' "/1.0/config/cloud-init.network-config",' + ' "/1.0/config/cloud-init.user-data",' + ' "/1.0/config/cloud-init.vendor-data"]', + "http://lxd/1.0/config/user.meta-data": "user.meta-data", + "http://lxd/1.0/config/user.network-config": + "user.network-config", + "http://lxd/1.0/config/user.user-data": "user.user-data", + "http://lxd/1.0/config/user.vendor-data": + "user.vendor-data", + "http://lxd/1.0/config/cloud-init.meta-data": + "cloud-init.meta-data", + "http://lxd/1.0/config/cloud-init.network-config": + "cloud-init.network-config", + "http://lxd/1.0/config/cloud-init.user-data": + "cloud-init.user-data", + "http://lxd/1.0/config/cloud-init.vendor-data": + "cloud-init.vendor-data", + }, + { + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": { + "user.meta-data": "user.meta-data", + "user.network-config": "user.network-config", + "user.user-data": "user.user-data", + "user.vendor-data": "user.vendor-data", + "cloud-init.network-config": + "cloud-init.network-config", + "cloud-init.user-data": "cloud-init.user-data", + "cloud-init.vendor-data": + "cloud-init.vendor-data", + }, + "meta-data": "local-hostname: md\n", + "network-config": "cloud-init.network-config", + "user-data": "cloud-init.user-data", + "vendor-data": "cloud-init.vendor-data", + }, + [ + "[GET] [HTTP:200] http://lxd/1.0/meta-data", + "[GET] [HTTP:200] http://lxd/1.0/config", + "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data", + "[GET] [HTTP:200]" + " http://lxd/1.0/config/user.network-config", + "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data", + "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data", + "[GET] [HTTP:200]" + " http://lxd/1.0/config/cloud-init.network-config", + "[GET] [HTTP:200]" + " http://lxd/1.0/config/cloud-init.user-data", + "[GET] [HTTP:200]" + " http://lxd/1.0/config/cloud-init.vendor-data", + "Ignoring LXD config user.user-data in favor of" + " cloud-init.user-data value.", + "Ignoring LXD 
config user.network-config in favor of"
+                    " cloud-init.network-config value.",
+                    "Ignoring LXD config user.vendor-data in favor of"
+                    " cloud-init.vendor-data value.",
+                ],
+            ),
+        )
+    )
+    @mock.patch.object(lxd.requests.Session, 'get')
+    def test_read_metadata_handles_unexpected_content_or_http_status(
+        self, session_get, url_responses, expected, logs, caplog
+    ):
+        """read_metadata handles valid and invalid content and status codes."""
+
+        def fake_get(url):
+            """Mock Response json, ok, status_code, text from url_responses."""
+            m_resp = mock.MagicMock()
+            content = url_responses.get(url, '')
+            m_resp.json.side_effect = lambda: json.loads(content)
+            if content:
+                mock_ok = mock.PropertyMock(return_value=True)
+                mock_status_code = mock.PropertyMock(return_value=200)
+            else:
+                mock_ok = mock.PropertyMock(return_value=False)
+                mock_status_code = mock.PropertyMock(return_value=404)
+            type(m_resp).ok = mock_ok
+            type(m_resp).status_code = mock_status_code
+            mock_text = mock.PropertyMock(return_value=content)
+            type(m_resp).text = mock_text
+            return m_resp
+
+        session_get.side_effect = fake_get
+
+        if isinstance(expected, Exception):
+            with pytest.raises(type(expected), match=re.escape(str(expected))):
+                lxd.read_metadata()
+        else:
+            assert expected == lxd.read_metadata()
+        caplogs = caplog.text
+        for log in logs:
+            assert log in caplogs
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_maas.py b/tests/unittests/sources/test_maas.py
new file mode 100644
index 00000000..34b79587
--- /dev/null
+++ b/tests/unittests/sources/test_maas.py
@@ -0,0 +1,200 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from copy import copy
+import os
+import shutil
+import tempfile
+import yaml
+from unittest import mock
+
+from cloudinit.sources import DataSourceMAAS
+from cloudinit import url_helper
+from tests.unittests.helpers import CiTestCase, populate_dir
+
+
+class TestMAASDataSource(CiTestCase):
+
+    def setUp(self):
+        super(TestMAASDataSource, self).setUp()
+        # Make a temp directory for tests to use.
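+        # (addCleanup, rather than tearDown, guarantees removal even when
+        # setUp itself fails partway through.)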
+
+        self.tmp = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.tmp)
+
+    def test_seed_dir_valid(self):
+        """Verify a valid seed dir is read as such."""
+
+        userdata = b'valid01-userdata'
+        data = {'meta-data/instance-id': 'i-valid01',
+                'meta-data/local-hostname': 'valid01-hostname',
+                'user-data': userdata,
+                'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
+
+        my_d = os.path.join(self.tmp, "valid")
+        populate_dir(my_d, data)
+
+        ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)
+
+        self.assertEqual(userdata, ud)
+        for key in ('instance-id', 'local-hostname'):
+            self.assertEqual(data["meta-data/" + key], md[key])
+
+        # verify that 'userdata' is not returned as part of the metadata
+        self.assertFalse(('user-data' in md))
+        self.assertIsNone(vd)
+
+    def test_seed_dir_valid_extra(self):
+        """Verify extra files do not affect seed_dir validity."""
+
+        userdata = b'valid-extra-userdata'
+        data = {'meta-data/instance-id': 'i-valid-extra',
+                'meta-data/local-hostname': 'valid-extra-hostname',
+                'user-data': userdata, 'foo': 'bar'}
+
+        my_d = os.path.join(self.tmp, "valid_extra")
+        populate_dir(my_d, data)
+
+        ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d)
+
+        self.assertEqual(userdata, ud)
+        for key in ('instance-id', 'local-hostname'):
+            self.assertEqual(data['meta-data/' + key], md[key])
+
+        # additional files should not just appear as keys in metadata atm
+        self.assertFalse(('foo' in md))
+
+    def test_seed_dir_invalid(self):
+        """Verify that an invalid seed_dir raises MAASSeedDirMalformed."""
+
+        valid = {'instance-id': 'i-instanceid',
+                 'local-hostname': 'test-hostname', 'user-data': ''}
+
+        my_based = os.path.join(self.tmp, "valid_extra")
+
+        # missing 'userdata' file
+        my_d = "%s-01" % my_based
+        invalid_data = copy(valid)
+        del invalid_data['local-hostname']
+        populate_dir(my_d, invalid_data)
+        self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
+                          DataSourceMAAS.read_maas_seed_dir, my_d)
+
+        # missing 'instance-id'
+        my_d = "%s-02" % my_based
+        invalid_data = copy(valid)
+        del invalid_data['instance-id']
+        populate_dir(my_d, invalid_data)
+        self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
+                          DataSourceMAAS.read_maas_seed_dir, my_d)
+
+    def test_seed_dir_none(self):
+        """Verify that an empty seed_dir raises MAASSeedDirNone."""
+
+        my_d = os.path.join(self.tmp, "valid_empty")
+        self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
+                          DataSourceMAAS.read_maas_seed_dir, my_d)
+
+    def test_seed_dir_missing(self):
+        """Verify that a missing seed_dir raises MAASSeedDirNone."""
+        self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
+                          DataSourceMAAS.read_maas_seed_dir,
+                          os.path.join(self.tmp, "nonexistantdirectory"))
+
+    def mock_read_maas_seed_url(self, data, seed, version="19991231"):
+        """Mock readurl so it acts as a web server at `seed` serving `data`;
+        return what read_maas_seed_url returns."""
+        def my_readurl(*args, **kwargs):
+            if len(args):
+                url = args[0]
+            else:
+                url = kwargs['url']
+            prefix = "%s/%s/" % (seed, version)
+            if not url.startswith(prefix):
+                raise ValueError("unexpected call %s" % url)
+
+            short = url[len(prefix):]
+            if short not in data:
+                raise url_helper.UrlError("not found", code=404, url=url)
+            return url_helper.StringResponse(data[short])
+
+        # Now do the actual call of the code under test.
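+        # (Patching cloudinit.url_helper.readurl keeps read_maas_seed_url off
+        # the network: my_readurl answers from the in-memory `data` dict and
+        # raises a 404 UrlError for anything missing.)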
+ with mock.patch("cloudinit.url_helper.readurl") as mock_readurl: + mock_readurl.side_effect = my_readurl + return DataSourceMAAS.read_maas_seed_url(seed, version=version) + + def test_seed_url_valid(self): + """Verify that valid seed_url is read as such.""" + valid = { + 'meta-data/instance-id': 'i-instanceid', + 'meta-data/local-hostname': 'test-hostname', + 'meta-data/public-keys': 'test-hostname', + 'meta-data/vendor-data': b'my-vendordata', + 'user-data': b'foodata', + } + my_seed = "http://example.com/xmeta" + my_ver = "1999-99-99" + ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver) + + self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) + self.assertEqual( + valid['meta-data/local-hostname'], md['local-hostname']) + self.assertEqual(valid['meta-data/public-keys'], md['public-keys']) + self.assertEqual(valid['user-data'], ud) + # vendor-data is yaml, which decodes a string + self.assertEqual(valid['meta-data/vendor-data'].decode(), vd) + + def test_seed_url_vendor_data_dict(self): + expected_vd = {'key1': 'value1'} + valid = { + 'meta-data/instance-id': 'i-instanceid', + 'meta-data/local-hostname': 'test-hostname', + 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(), + } + _ud, md, vd = self.mock_read_maas_seed_url( + valid, "http://example.com/foo") + + self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) + self.assertEqual(expected_vd, vd) + + +@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper") +class TestGetOauthHelper(CiTestCase): + base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY', + 'token_key': 'FAKE_TOKEN_KEY', + 'token_secret': 'FAKE_TOKEN_SECRET', + 'consumer_secret': None} + + def test_all_required(self, m_helper): + """Valid config as expected.""" + DataSourceMAAS.get_oauth_helper(self.base_cfg.copy()) + m_helper.assert_has_calls([mock.call(**self.base_cfg)]) + + def test_other_fields_not_passed_through(self, m_helper): + """Only relevant fields are passed through.""" + mycfg = self.base_cfg.copy() + mycfg['unrelated_field'] = 'unrelated' + DataSourceMAAS.get_oauth_helper(mycfg) + m_helper.assert_has_calls([mock.call(**self.base_cfg)]) + + +class TestGetIdHash(CiTestCase): + v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY', + 'token_secret': 'TSEC'} + v1_id = ( + 'v1:' + '403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392') + + def test_v1_expected(self): + """Test v1 id generated as expected working behavior from config.""" + result = DataSourceMAAS.get_id_from_ds_cfg(self.v1_cfg.copy()) + self.assertEqual(self.v1_id, result) + + def test_v1_extra_fields_are_ignored(self): + """Test v1 id ignores unused entries in config.""" + cfg = self.v1_cfg.copy() + cfg['consumer_secret'] = "BOO" + cfg['unrelated'] = "HI MOM" + result = DataSourceMAAS.get_id_from_ds_cfg(cfg) + self.assertEqual(self.v1_id, result) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_nocloud.py b/tests/unittests/sources/test_nocloud.py new file mode 100644 index 00000000..26f91054 --- /dev/null +++ b/tests/unittests/sources/test_nocloud.py @@ -0,0 +1,393 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
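The NoCloud tests that follow repeatedly seed a directory and then point the datasource at it. As a hedged sketch of that pattern, assuming the populate_dir helper and Paths layout used in the tests below (the literal paths and values are illustrative, not from this patch):

    # Hedged sketch of the seeding pattern used throughout this file.
    import os
    import yaml
    from cloudinit import helpers
    from tests.unittests.helpers import populate_dir

    paths = helpers.Paths({'cloud_dir': '/tmp/ci', 'run_dir': '/tmp/ci'})
    seed_dir = os.path.join(paths.seed_dir, 'nocloud')
    populate_dir(seed_dir, {
        'meta-data': yaml.safe_dump({'instance-id': 'IID'}),
        'user-data': b'#cloud-config\n',
    })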
+
+from cloudinit import dmi
+from cloudinit import helpers
+from cloudinit.sources.DataSourceNoCloud import (
+    DataSourceNoCloud as dsNoCloud,
+    _maybe_remove_top_network,
+    parse_cmdline_data)
+from cloudinit import util
+from tests.unittests.helpers import CiTestCase, populate_dir, mock, ExitStack
+
+import os
+import textwrap
+import yaml
+
+
+@mock.patch('cloudinit.sources.DataSourceNoCloud.util.is_lxd')
+class TestNoCloudDataSource(CiTestCase):
+
+    def setUp(self):
+        super(TestNoCloudDataSource, self).setUp()
+        self.tmp = self.tmp_dir()
+        self.paths = helpers.Paths(
+            {'cloud_dir': self.tmp, 'run_dir': self.tmp})
+
+        self.cmdline = "root=TESTCMDLINE"
+
+        self.mocks = ExitStack()
+        self.addCleanup(self.mocks.close)
+
+        self.mocks.enter_context(
+            mock.patch.object(util, 'get_cmdline', return_value=self.cmdline))
+        self.mocks.enter_context(
+            mock.patch.object(dmi, 'read_dmi_data', return_value=None))
+
+    def _test_fs_config_is_read(self, fs_label, fs_label_to_search):
+        vfat_device = 'device-1'
+
+        def m_mount_cb(device, callback, mtype):
+            if (device == vfat_device):
+                return {'meta-data': yaml.dump({'instance-id': 'IID'})}
+            else:
+                return {}
+
+        def m_find_devs_with(query='', path=''):
+            if 'TYPE=vfat' == query:
+                return [vfat_device]
+            elif 'LABEL={}'.format(fs_label) == query:
+                return [vfat_device]
+            else:
+                return []
+
+        self.mocks.enter_context(
+            mock.patch.object(util, 'find_devs_with',
+                              side_effect=m_find_devs_with))
+        self.mocks.enter_context(
+            mock.patch.object(util, 'mount_cb',
+                              side_effect=m_mount_cb))
+        sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}}
+        dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+        ret = dsrc.get_data()
+
+        self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
+        self.assertTrue(ret)
+
+    def test_nocloud_seed_dir_on_lxd(self, m_is_lxd):
+        md = {'instance-id': 'IID', 'dsmode': 'local'}
+        ud = b"USER_DATA_HERE"
+        seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
+        populate_dir(seed_dir,
+                     {'user-data': ud, 'meta-data': yaml.safe_dump(md)})
+
+        sys_cfg = {
+            'datasource': {'NoCloud': {'fs_label': None}}
+        }
+
+        dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+        ret = dsrc.get_data()
+        self.assertEqual(dsrc.userdata_raw, ud)
+        self.assertEqual(dsrc.metadata, md)
+        self.assertEqual(dsrc.platform_type, 'lxd')
+        self.assertEqual(
+            dsrc.subplatform, 'seed-dir (%s)' % seed_dir)
+        self.assertTrue(ret)
+
+    def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd):
+        """Non-lxd environments will list nocloud as the platform."""
+        m_is_lxd.return_value = False
+        md = {'instance-id': 'IID', 'dsmode': 'local'}
+        seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
+        populate_dir(seed_dir,
+                     {'user-data': '', 'meta-data': yaml.safe_dump(md)})
+
+        sys_cfg = {
+            'datasource': {'NoCloud': {'fs_label': None}}
+        }
+
+        dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+        self.assertTrue(dsrc.get_data())
+        self.assertEqual(dsrc.platform_type, 'nocloud')
+        self.assertEqual(
+            dsrc.subplatform, 'seed-dir (%s)' % seed_dir)
+
+    def test_fs_label(self, m_is_lxd):
+        # find_devs_with should not be called if fs_label is None
+        class PseudoException(Exception):
+            pass
+
+        self.mocks.enter_context(
+            mock.patch.object(util, 'find_devs_with',
+                              side_effect=PseudoException))
+
+        # by default, NoCloud should search for filesystems by label
+        sys_cfg = {'datasource': {'NoCloud': {}}}
+        dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+        self.assertRaises(PseudoException,
dsrc.get_data) + + # but disabling searching should just end up with None found + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertFalse(ret) + + def test_fs_config_lowercase_label(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'cidata') + + def test_fs_config_uppercase_label(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'cidata') + + def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'CIDATA') + + def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'CIDATA') + + def test_no_datasource_expected(self, m_is_lxd): + # no source should be found if no cmdline, config, and fs_label=None + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + self.assertFalse(dsrc.get_data()) + + def test_seed_in_config(self, m_is_lxd): + data = { + 'fs_label': None, + 'meta-data': yaml.safe_dump({'instance-id': 'IID'}), + 'user-data': b"USER_DATA_RAW", + } + + sys_cfg = {'datasource': {'NoCloud': data}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW") + self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertTrue(ret) + + def test_nocloud_seed_with_vendordata(self, m_is_lxd): + md = {'instance-id': 'IID', 'dsmode': 'local'} + ud = b"USER_DATA_HERE" + vd = b"THIS IS MY VENDOR_DATA" + + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': ud, 'meta-data': yaml.safe_dump(md), + 'vendor-data': vd}) + + sys_cfg = { + 'datasource': {'NoCloud': {'fs_label': None}} + } + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertEqual(dsrc.userdata_raw, ud) + self.assertEqual(dsrc.metadata, md) + self.assertEqual(dsrc.vendordata_raw, vd) + self.assertTrue(ret) + + def test_nocloud_no_vendordata(self, m_is_lxd): + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertEqual(dsrc.userdata_raw, b"ud") + self.assertFalse(dsrc.vendordata) + self.assertTrue(ret) + + def test_metadata_network_interfaces(self, m_is_lxd): + gateway = "103.225.10.1" + md = { + 'instance-id': 'i-abcd', + 'local-hostname': 'hostname1', + 'network-interfaces': textwrap.dedent("""\ + auto eth0 + iface eth0 inet static + hwaddr 00:16:3e:70:e1:04 + address 103.225.10.12 + netmask 255.255.255.0 + gateway """ + gateway + """ + dns-servers 8.8.8.8""")} + + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", + 'meta-data': yaml.dump(md) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + # very simple check just for the strings above + self.assertIn(gateway, str(dsrc.network_config)) + + def test_metadata_network_config(self, m_is_lxd): + # network-config needs to get into network_config + netconf = {'version': 1, + 'config': [{'type': 'physical', 'name': 'interface0', + 'subnets': [{'type': 'dhcp'}]}]} + populate_dir( + os.path.join(self.paths.seed_dir, 
"nocloud"), + {'user-data': b"ud", + 'meta-data': "instance-id: IID\n", + 'network-config': yaml.dump(netconf) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(netconf, dsrc.network_config) + + def test_metadata_network_config_with_toplevel_network(self, m_is_lxd): + """network-config may have 'network' top level key.""" + netconf = {'config': 'disabled'} + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", + 'meta-data': "instance-id: IID\n", + 'network-config': yaml.dump({'network': netconf}) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(netconf, dsrc.network_config) + + def test_metadata_network_config_over_interfaces(self, m_is_lxd): + # network-config should override meta-data/network-interfaces + gateway = "103.225.10.1" + md = { + 'instance-id': 'i-abcd', + 'local-hostname': 'hostname1', + 'network-interfaces': textwrap.dedent("""\ + auto eth0 + iface eth0 inet static + hwaddr 00:16:3e:70:e1:04 + address 103.225.10.12 + netmask 255.255.255.0 + gateway """ + gateway + """ + dns-servers 8.8.8.8""")} + + netconf = {'version': 1, + 'config': [{'type': 'physical', 'name': 'interface0', + 'subnets': [{'type': 'dhcp'}]}]} + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", + 'meta-data': yaml.dump(md) + "\n", + 'network-config': yaml.dump(netconf) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(netconf, dsrc.network_config) + self.assertNotIn(gateway, str(dsrc.network_config)) + + @mock.patch("cloudinit.util.blkid") + def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid): + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + self.mocks.enter_context( + mock.patch.object(util, 'is_FreeBSD', return_value=True)) + + def _mfind_devs_with_freebsd( + criteria=None, oformat='device', + tag=None, no_cache=False, path=None): + if not criteria: + return ["/dev/msdosfs/foo", "/dev/iso9660/foo"] + if criteria.startswith("LABEL="): + return ["/dev/msdosfs/foo", "/dev/iso9660/foo"] + elif criteria == "TYPE=vfat": + return ["/dev/msdosfs/foo"] + elif criteria == "TYPE=iso9660": + return ["/dev/iso9660/foo"] + return [] + + self.mocks.enter_context( + mock.patch.object( + util, 'find_devs_with_freebsd', + side_effect=_mfind_devs_with_freebsd)) + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc._get_devices('foo') + self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret) + fake_blkid.assert_not_called() + + +class TestParseCommandLineData(CiTestCase): + + def test_parse_cmdline_data_valid(self): + ds_id = "ds=nocloud" + pairs = ( + ("root=/dev/sda1 %(ds_id)s", {}), + ("%(ds_id)s; root=/dev/foo", {}), + ("%(ds_id)s", {}), + ("%(ds_id)s;", {}), + ("%(ds_id)s;s=SEED", {'seedfrom': 'SEED'}), + ("%(ds_id)s;seedfrom=SEED;local-hostname=xhost", + {'seedfrom': 'SEED', 'local-hostname': 'xhost'}), + ("%(ds_id)s;h=xhost", + {'local-hostname': 'xhost'}), + ("%(ds_id)s;h=xhost;i=IID", + {'local-hostname': 
'xhost', 'instance-id': 'IID'}), + ) + + for (fmt, expected) in pairs: + fill = {} + cmdline = fmt % {'ds_id': ds_id} + ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) + self.assertEqual(expected, fill) + self.assertTrue(ret) + + def test_parse_cmdline_data_none(self): + ds_id = "ds=foo" + cmdlines = ( + "root=/dev/sda1 ro", + "console=/dev/ttyS0 root=/dev/foo", + "", + "ds=foocloud", + "ds=foo-net", + "ds=nocloud;s=SEED", + ) + + for cmdline in cmdlines: + fill = {} + ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) + self.assertEqual(fill, {}) + self.assertFalse(ret) + + +class TestMaybeRemoveToplevelNetwork(CiTestCase): + """test _maybe_remove_top_network function.""" + basecfg = [{'type': 'physical', 'name': 'interface0', + 'subnets': [{'type': 'dhcp'}]}] + + def test_should_remove_safely(self): + mcfg = {'config': self.basecfg, 'version': 1} + self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) + + def test_no_remove_if_other_keys(self): + """should not shift if other keys at top level.""" + mcfg = {'network': {'config': self.basecfg, 'version': 1}, + 'unknown_keyname': 'keyval'} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + def test_no_remove_if_non_dict(self): + """should not shift if not a dict.""" + mcfg = {'network': '"content here'} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + def test_no_remove_if_missing_config_or_version(self): + """should not shift unless network entry has config and version.""" + mcfg = {'network': {'config': self.basecfg}} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + mcfg = {'network': {'version': 1}} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + def test_remove_with_config_disabled(self): + """network/config=disabled should be shifted.""" + mcfg = {'config': 'disabled'} + self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_opennebula.py b/tests/unittests/sources/test_opennebula.py new file mode 100644 index 00000000..e5963f5a --- /dev/null +++ b/tests/unittests/sources/test_opennebula.py @@ -0,0 +1,977 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
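The OpenNebula cases below center on a context.sh file of shell variable assignments, which read_context_disk_dir sources with bash. As a rough sketch of what the populate_context_dir helper (used in these tests, defined later in this file) might write; the exact quoting here is an assumption, not the helper's real implementation:

    # Illustrative only; real quoting is handled by the helper itself.
    import os

    def write_context(seed_dir, variables):
        os.makedirs(seed_dir, exist_ok=True)
        lines = ["%s='%s'" % (key, val) for key, val in variables.items()]
        with open(os.path.join(seed_dir, 'context.sh'), 'w') as stream:
            stream.write('\n'.join(lines) + '\n')

    write_context('/tmp/seed/opennebula', {'KEY1': 'val1'})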
+ +from cloudinit import helpers +from cloudinit.sources import DataSourceOpenNebula as ds +from cloudinit import util +from tests.unittests.helpers import mock, populate_dir, CiTestCase + +import os +import pwd +import unittest + +import pytest + + +TEST_VARS = { + 'VAR1': 'single', + 'VAR2': 'double word', + 'VAR3': 'multi\nline\n', + 'VAR4': "'single'", + 'VAR5': "'double word'", + 'VAR6': "'multi\nline\n'", + 'VAR7': 'single\\t', + 'VAR8': 'double\\tword', + 'VAR9': 'multi\\t\nline\n', + 'VAR10': '\\', # expect '\' + 'VAR11': '\'', # expect ' + 'VAR12': '$', # expect $ +} + +INVALID_CONTEXT = ';' +USER_DATA = '#cloud-config\napt_upgrade: true' +SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i' +HOSTNAME = 'foo.example.com' +PUBLIC_IP = '10.0.0.3' +MACADDR = '02:00:0a:12:01:01' +IP_BY_MACADDR = '10.18.1.1' +IP4_PREFIX = '24' +IP6_GLOBAL = '2001:db8:1:0:400:c0ff:fea8:1ba' +IP6_ULA = 'fd01:dead:beaf:0:400:c0ff:fea8:1ba' +IP6_GW = '2001:db8:1::ffff' +IP6_PREFIX = '48' + +DS_PATH = "cloudinit.sources.DataSourceOpenNebula" + + +class TestOpenNebulaDataSource(CiTestCase): + parsed_user = None + allowed_subp = ['bash'] + + def setUp(self): + super(TestOpenNebulaDataSource, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + + # defaults for a few tests + self.ds = ds.DataSourceOpenNebula + self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula") + self.sys_cfg = {'datasource': {'OpenNebula': {'dsmode': 'local'}}} + + # we don't want 'sudo' called in tests, so we patch switch_user_cmd + def my_switch_user_cmd(user): + self.parsed_user = user + return [] + + self.switch_user_cmd_real = ds.switch_user_cmd + ds.switch_user_cmd = my_switch_user_cmd + + def tearDown(self): + ds.switch_user_cmd = self.switch_user_cmd_real + super(TestOpenNebulaDataSource, self).tearDown() + + def test_get_data_non_contextdisk(self): + orig_find_devs_with = util.find_devs_with + try: + # don't try to look up CDs + util.find_devs_with = lambda n: [] + dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertFalse(ret) + finally: + util.find_devs_with = orig_find_devs_with + + def test_get_data_broken_contextdisk(self): + orig_find_devs_with = util.find_devs_with + try: + # don't try to look up CDs + util.find_devs_with = lambda n: [] + populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT}) + dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) + self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data) + finally: + util.find_devs_with = orig_find_devs_with + + def test_get_data_invalid_identity(self): + orig_find_devs_with = util.find_devs_with + try: + # generate non-existing system user name + sys_cfg = self.sys_cfg + invalid_user = 'invalid' + while not sys_cfg['datasource']['OpenNebula'].get('parseuser'): + try: + pwd.getpwnam(invalid_user) + invalid_user += 'X' + except KeyError: + sys_cfg['datasource']['OpenNebula']['parseuser'] = \ + invalid_user + + # don't try to look up CDs + util.find_devs_with = lambda n: [] + populate_context_dir(self.seed_dir, {'KEY1': 'val1'}) + dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data) + finally: + util.find_devs_with = orig_find_devs_with + + def test_get_data(self): + orig_find_devs_with = util.find_devs_with + try: + # don't try to look up CDs + util.find_devs_with = lambda n: [] + populate_context_dir(self.seed_dir, 
{'KEY1': 'val1'}) + dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + finally: + util.find_devs_with = orig_find_devs_with + self.assertEqual('opennebula', dsrc.cloud_name) + self.assertEqual('opennebula', dsrc.platform_type) + self.assertEqual( + 'seed-dir (%s/seed/opennebula)' % self.tmp, dsrc.subplatform) + + def test_seed_dir_non_contextdisk(self): + self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir, + self.seed_dir, mock.Mock()) + + def test_seed_dir_empty1_context(self): + populate_dir(self.seed_dir, {'context.sh': ''}) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertIsNone(results['userdata']) + self.assertEqual(results['metadata'], {}) + + def test_seed_dir_empty2_context(self): + populate_context_dir(self.seed_dir, {}) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertIsNone(results['userdata']) + self.assertEqual(results['metadata'], {}) + + def test_seed_dir_broken_context(self): + populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT}) + + self.assertRaises(ds.BrokenContextDiskDir, + ds.read_context_disk_dir, + self.seed_dir, mock.Mock()) + + def test_context_parser(self): + populate_context_dir(self.seed_dir, TEST_VARS) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('metadata' in results) + self.assertEqual(TEST_VARS, results['metadata']) + + def test_ssh_key(self): + public_keys = ['first key', 'second key'] + for c in range(4): + for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'): + my_d = os.path.join(self.tmp, "%s-%i" % (k, c)) + populate_context_dir(my_d, {k: '\n'.join(public_keys)}) + results = ds.read_context_disk_dir(my_d, mock.Mock()) + + self.assertTrue('metadata' in results) + self.assertTrue('public-keys' in results['metadata']) + self.assertEqual(public_keys, + results['metadata']['public-keys']) + + public_keys.append(SSH_KEY % (c + 1,)) + + def test_user_data_plain(self): + for k in ('USER_DATA', 'USERDATA'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: USER_DATA, + 'USERDATA_ENCODING': ''}) + results = ds.read_context_disk_dir(my_d, mock.Mock()) + + self.assertTrue('userdata' in results) + self.assertEqual(USER_DATA, results['userdata']) + + def test_user_data_encoding_required_for_decode(self): + b64userdata = util.b64e(USER_DATA) + for k in ('USER_DATA', 'USERDATA'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: b64userdata}) + results = ds.read_context_disk_dir(my_d, mock.Mock()) + + self.assertTrue('userdata' in results) + self.assertEqual(b64userdata, results['userdata']) + + def test_user_data_base64_encoding(self): + for k in ('USER_DATA', 'USERDATA'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: util.b64e(USER_DATA), + 'USERDATA_ENCODING': 'base64'}) + results = ds.read_context_disk_dir(my_d, mock.Mock()) + + self.assertTrue('userdata' in results) + self.assertEqual(USER_DATA, results['userdata']) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_hostname(self, m_get_phys_by_mac): + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', + 'ETH0_IP'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: PUBLIC_IP}) + results = ds.read_context_disk_dir(my_d, mock.Mock()) + + self.assertTrue('metadata' in results) + self.assertTrue('local-hostname' in results['metadata']) + self.assertEqual( + 
PUBLIC_IP, results['metadata']['local-hostname']) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_network_interfaces(self, m_get_phys_by_mac): + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + + # without ETH0_MAC + # for older OpenNebula? + populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP and ETH0_MAC + populate_context_dir( + self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP with empty string and ETH0_MAC + # in the case of a Virtual Network that contains + # "AR = [ TYPE = ETHER ]" + populate_context_dir( + self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_MASK + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '255.255.0.0' + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP_BY_MACADDR + '/16' in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_MASK with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '' + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP6 + populate_context_dir( + self.seed_dir, { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_MAC': MACADDR, + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP6_GLOBAL + '/64' in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP6_ULA + populate_context_dir( + self.seed_dir, { + 'ETH0_IP6_ULA': IP6_ULA, + 'ETH0_MAC': MACADDR, + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP6_ULA + '/64' in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH + populate_context_dir( + self.seed_dir, { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, + 'ETH0_MAC': MACADDR, + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP6_GLOBAL + '/' + IP6_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': '', + 'ETH0_MAC': MACADDR, + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + 
self.assertTrue( + IP6_GLOBAL + '/64' in + results['network-interfaces']['ethernets'][dev]['addresses']) + + def test_find_candidates(self): + def my_devs_with(criteria): + return { + "LABEL=CONTEXT": ["/dev/sdb"], + "LABEL=CDROM": ["/dev/sr0"], + "TYPE=iso9660": ["/dev/vdb"], + }.get(criteria, []) + + orig_find_devs_with = util.find_devs_with + try: + util.find_devs_with = my_devs_with + self.assertEqual(["/dev/sdb", "/dev/sr0", "/dev/vdb"], + ds.find_candidate_devs()) + finally: + util.find_devs_with = orig_find_devs_with + + +@mock.patch(DS_PATH + '.net.get_interfaces_by_mac', mock.Mock(return_value={})) +class TestOpenNebulaNetwork(unittest.TestCase): + + system_nics = ('eth0', 'ens3') + + def test_context_devname(self): + """Verify context_devname correctly returns mac and name.""" + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH1_MAC': '02:00:0a:12:0f:0f', } + expected = { + '02:00:0a:12:01:01': 'ETH0', + '02:00:0a:12:0f:0f': 'ETH1', } + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(expected, net.context_devname) + + def test_get_nameservers(self): + """ + Verify get_nameservers('device') correctly returns DNS server addresses + and search domains. + """ + context = { + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + expected = { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_nameservers('eth0') + self.assertEqual(expected, val) + + def test_get_mtu(self): + """Verify get_mtu('device') correctly returns MTU size.""" + context = {'ETH0_MTU': '1280'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_mtu('eth0') + self.assertEqual('1280', val) + + def test_get_ip(self): + """Verify get_ip('device') correctly returns IPv4 address.""" + context = {'ETH0_IP': PUBLIC_IP} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip('eth0', MACADDR) + self.assertEqual(PUBLIC_IP, val) + + def test_get_ip_emptystring(self): + """ + Verify get_ip('device') correctly returns IPv4 address. + It returns IP address created by MAC address if ETH0_IP has empty + string. + """ + context = {'ETH0_IP': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip('eth0', MACADDR) + self.assertEqual(IP_BY_MACADDR, val) + + def test_get_ip6(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 address is given by ETH0_IP6. + """ + context = { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_ULA': '', } + expected = [IP6_GLOBAL] + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_ula(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 address is given by ETH0_IP6_ULA. + """ + context = { + 'ETH0_IP6': '', + 'ETH0_IP6_ULA': IP6_ULA, } + expected = [IP6_ULA] + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_dual(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 addresses are given by ETH0_IP6 and ETH0_IP6_ULA. 
+ """ + context = { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_ULA': IP6_ULA, } + expected = [IP6_GLOBAL, IP6_ULA] + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_prefix(self): + """ + Verify get_ip6_prefix('device') correctly returns IPv6 prefix. + """ + context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip6_prefix('eth0') + self.assertEqual(IP6_PREFIX, val) + + def test_get_ip6_prefix_emptystring(self): + """ + Verify get_ip6_prefix('device') correctly returns IPv6 prefix. + It returns default value '64' if ETH0_IP6_PREFIX_LENGTH has empty + string. + """ + context = {'ETH0_IP6_PREFIX_LENGTH': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip6_prefix('eth0') + self.assertEqual('64', val) + + def test_get_gateway(self): + """ + Verify get_gateway('device') correctly returns IPv4 default gateway + address. + """ + context = {'ETH0_GATEWAY': '1.2.3.5'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_gateway('eth0') + self.assertEqual('1.2.3.5', val) + + def test_get_gateway6(self): + """ + Verify get_gateway6('device') correctly returns IPv6 default gateway + address. + """ + for k in ('GATEWAY6', 'IP6_GATEWAY'): + context = {'ETH0_' + k: IP6_GW} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_gateway6('eth0') + self.assertEqual(IP6_GW, val) + + def test_get_mask(self): + """ + Verify get_mask('device') correctly returns IPv4 subnet mask. + """ + context = {'ETH0_MASK': '255.255.0.0'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_mask('eth0') + self.assertEqual('255.255.0.0', val) + + def test_get_mask_emptystring(self): + """ + Verify get_mask('device') correctly returns IPv4 subnet mask. + It returns default value '255.255.255.0' if ETH0_MASK has empty string. + """ + context = {'ETH0_MASK': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_mask('eth0') + self.assertEqual('255.255.255.0', val) + + def test_get_network(self): + """ + Verify get_network('device') correctly returns IPv4 network address. + """ + context = {'ETH0_NETWORK': '1.2.3.0'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_network('eth0', MACADDR) + self.assertEqual('1.2.3.0', val) + + def test_get_network_emptystring(self): + """ + Verify get_network('device') correctly returns IPv4 network address. + It returns network address created by MAC address if ETH0_NETWORK has + empty string. + """ + context = {'ETH0_NETWORK': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_network('eth0', MACADDR) + self.assertEqual('10.18.1.0', val) + + def test_get_field(self): + """ + Verify get_field('device', 'name') returns *context* value. + """ + context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_field('eth9', 'dummy') + self.assertEqual('DUMMY_VALUE', val) + + def test_get_field_withdefaultvalue(self): + """ + Verify get_field('device', 'name', 'default value') returns *context* + value. + """ + context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') + self.assertEqual('DUMMY_VALUE', val) + + def test_get_field_withdefaultvalue_emptycontext(self): + """ + Verify get_field('device', 'name', 'default value') returns *default* + value if context value is empty string. 
+ """ + context = {'ETH9_DUMMY': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') + self.assertEqual('DEFAULT_VALUE', val) + + def test_get_field_emptycontext(self): + """ + Verify get_field('device', 'name') returns None if context value is + empty string. + """ + context = {'ETH9_DUMMY': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_field('eth9', 'dummy') + self.assertEqual(None, val) + + def test_get_field_nonecontext(self): + """ + Verify get_field('device', 'name') returns None if context value is + None. + """ + context = {'ETH9_DUMMY': None} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_field('eth9', 'dummy') + self.assertEqual(None, val) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_gateway(self, m_get_phys_by_mac): + """Test rendering with/without IPv4 gateway""" + self.maxDiff = None + # empty ETH0_GATEWAY + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_GATEWAY + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY': '1.2.3.5', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'gateway4': '1.2.3.5', + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_gateway6(self, m_get_phys_by_mac): + """Test rendering with/without IPv6 gateway""" + self.maxDiff = None + # empty ETH0_GATEWAY6 + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY6': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_GATEWAY6 + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY6': IP6_GW, } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'gateway6': IP6_GW, + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_ipv6address(self, m_get_phys_by_mac): + """Test rendering with/without IPv6 address""" + self.maxDiff = None + # empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_IP6': '', + 'ETH0_IP6_ULA': '', + 'ETH0_IP6_PREFIX_LENGTH': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + # 
set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, + 'ETH0_IP6_ULA': IP6_ULA, } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [ + IP_BY_MACADDR + '/' + IP4_PREFIX, + IP6_GLOBAL + '/' + IP6_PREFIX, + IP6_ULA + '/' + IP6_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_dns(self, m_get_phys_by_mac): + """Test rendering with/without DNS server, search domain""" + self.maxDiff = None + # empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'DNS': '', + 'ETH0_DNS': '', + 'ETH0_SEARCH_DOMAIN': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']}, + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_mtu(self, m_get_phys_by_mac): + """Test rendering with/without MTU""" + self.maxDiff = None + # empty ETH0_MTU + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MTU': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_MTU + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MTU': '1280', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'mtu': '1280', + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_eth0(self, m_get_phys_by_mac): + for nic in self.system_nics: + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork({}, mock.Mock()) + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_distro_passed_through(self, m_get_physical_nics_by_mac): + ds.OpenNebulaNetwork({}, mock.sentinel.distro) + self.assertEqual( + [mock.call(mock.sentinel.distro)], + 
m_get_physical_nics_by_mac.call_args_list, + ) + + def test_eth0_override(self): + self.maxDiff = None + context = { + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_GATEWAY6': '', + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_IP6': '', + 'ETH0_IP6_PREFIX_LENGTH': '', + 'ETH0_IP6_ULA': '', + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MASK': '255.255.0.0', + 'ETH0_MTU': '', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_SEARCH_DOMAIN': '', + } + for nic in self.system_nics: + net = ds.OpenNebulaNetwork(context, mock.Mock(), + system_nics_by_mac={MACADDR: nic}) + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/16'], + 'gateway4': '1.2.3.5', + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}} + + self.assertEqual(expected, net.gen_conf()) + + def test_eth0_v4v6_override(self): + self.maxDiff = None + context = { + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_GATEWAY6': IP6_GW, + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, + 'ETH0_IP6_ULA': IP6_ULA, + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MASK': '255.255.0.0', + 'ETH0_MTU': '1280', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', + } + for nic in self.system_nics: + net = ds.OpenNebulaNetwork(context, mock.Mock(), + system_nics_by_mac={MACADDR: nic}) + + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [ + IP_BY_MACADDR + '/16', + IP6_GLOBAL + '/' + IP6_PREFIX, + IP6_ULA + '/' + IP6_PREFIX], + 'gateway4': '1.2.3.5', + 'gateway6': IP6_GW, + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']}, + 'mtu': '1280'}}} + + self.assertEqual(expected, net.gen_conf()) + + def test_multiple_nics(self): + """Test rendering multiple nics with names that differ from context.""" + self.maxDiff = None + MAC_1 = "02:00:0a:12:01:01" + MAC_2 = "02:00:0a:12:01:02" + context = { + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_GATEWAY6': IP6_GW, + 'ETH0_IP': '10.18.1.1', + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': '', + 'ETH0_IP6_ULA': IP6_ULA, + 'ETH0_MAC': MAC_2, + 'ETH0_MASK': '255.255.0.0', + 'ETH0_MTU': '1280', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_SEARCH_DOMAIN': 'example.com', + 'ETH3_DNS': '10.3.1.2', + 'ETH3_GATEWAY': '10.3.0.1', + 'ETH3_GATEWAY6': '', + 'ETH3_IP': '10.3.1.3', + 'ETH3_IP6': '', + 'ETH3_IP6_PREFIX_LENGTH': '', + 'ETH3_IP6_ULA': '', + 'ETH3_MAC': MAC_1, + 'ETH3_MASK': '255.255.0.0', + 'ETH3_MTU': '', + 'ETH3_NETWORK': '10.3.0.0', + 'ETH3_SEARCH_DOMAIN': 'third.example.com third.example.org', + } + net = ds.OpenNebulaNetwork( + context, + mock.Mock(), + system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'} + ) + + expected = { + 'version': 2, + 'ethernets': { + 'enp1s2': { + 'match': {'macaddress': MAC_2}, + 'addresses': [ + '10.18.1.1/16', + IP6_GLOBAL + '/64', + IP6_ULA + '/64'], + 'gateway4': '1.2.3.5', + 'gateway6': IP6_GW, + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com']}, + 'mtu': '1280'}, + 'enp0s25': { + 'match': {'macaddress': MAC_1}, + 'addresses': ['10.3.1.3/16'], + 'gateway4': '10.3.0.1', + 'nameservers': { + 'addresses': ['10.3.1.2', '1.2.3.8'], + 'search': [ + 'third.example.com', + 'third.example.org']}}}} + + self.assertEqual(expected, net.gen_conf()) + + +class 
TestParseShellConfig: + @pytest.mark.allow_subp_for("bash") + def test_no_seconds(self): + cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"]) + # we could test 'sleep 2', but that would make the test run slower. + ret = ds.parse_shell_config(cfg) + assert ret == {"foo": "bar", "xx": "foo"} + + +class TestGetPhysicalNicsByMac: + @pytest.mark.parametrize( + "interfaces_by_mac,physical_devs,expected_return", + [ + # No interfaces => empty return + ({}, [], {}), + # Only virtual interface => empty return + ({"mac1": "virtual0"}, [], {}), + # Only physical interface => it is returned + ({"mac2": "physical0"}, ["physical0"], {"mac2": "physical0"}), + # Combination of physical and virtual => only physical returned + ( + {"mac3": "physical1", "mac4": "virtual1"}, + ["physical1"], + {"mac3": "physical1"}, + ), + ], + ) + def test(self, interfaces_by_mac, physical_devs, expected_return): + distro = mock.Mock() + distro.networking.is_physical.side_effect = ( + lambda devname: devname in physical_devs + ) + with mock.patch( + DS_PATH + ".net.get_interfaces_by_mac", + return_value=interfaces_by_mac, + ): + assert expected_return == ds.get_physical_nics_by_mac(distro) + + +def populate_context_dir(path, variables): + data = "# Context variables generated by OpenNebula\n" + for k, v in variables.items(): + data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))) + populate_dir(path, {'context.sh': data}) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py new file mode 100644 index 00000000..0d6fb04a --- /dev/null +++ b/tests/unittests/sources/test_openstack.py @@ -0,0 +1,724 @@ +# Copyright (C) 2014 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This file is part of cloud-init. See LICENSE file for license information. 
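+#
+# These tests exercise DataSourceOpenStack against an httpretty-mocked
+# metadata service: _register_uris() (below) answers both the native
+# /openstack/... paths and the EC2-compatible /latest/meta-data/ paths
+# that the datasource crawls, so nothing touches a live 169.254.169.254.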
+ +import copy +import httpretty as hp +import json +import re +from io import StringIO +from urllib.parse import urlparse + +from tests.unittests import helpers as test_helpers + +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET +from cloudinit.sources import DataSourceOpenStack as ds +from cloudinit.sources.helpers import openstack +from cloudinit import util + +BASE_URL = "http://169.254.169.254" +PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' +EC2_META = { + 'ami-id': 'ami-00000001', + 'ami-launch-index': '0', + 'ami-manifest-path': 'FIXME', + 'hostname': 'sm-foo-test.novalocal', + 'instance-action': 'none', + 'instance-id': 'i-00000001', + 'instance-type': 'm1.tiny', + 'local-hostname': 'sm-foo-test.novalocal', + 'local-ipv4': '0.0.0.0', + 'public-hostname': 'sm-foo-test.novalocal', + 'public-ipv4': '0.0.0.1', + 'reservation-id': 'r-iru5qm4m', +} +USER_DATA = b'#!/bin/sh\necho This is user data\n' +VENDOR_DATA = { + 'magic': '', +} +VENDOR_DATA2 = { + 'static': {} +} +OSTACK_META = { + 'availability_zone': 'nova', + 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, + {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}], + 'hostname': 'sm-foo-test.novalocal', + 'meta': {'dsmode': 'local', 'my-meta': 'my-value'}, + 'name': 'sm-foo-test', + 'public_keys': {'mykey': PUBKEY}, + 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c', +} +CONTENT_0 = b'This is contents of /etc/foo.cfg\n' +CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' +OS_FILES = { + 'openstack/content/0000': CONTENT_0, + 'openstack/content/0001': CONTENT_1, + 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), + 'openstack/latest/network_data.json': json.dumps( + {'links': [], 'networks': [], 'services': []}), + 'openstack/latest/user_data': USER_DATA, + 'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA), + 'openstack/latest/vendor_data2.json': json.dumps(VENDOR_DATA2), +} +EC2_FILES = { + 'latest/user-data': USER_DATA, +} +EC2_VERSIONS = [ + 'latest', +] + +MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.' + + +# TODO _register_uris should leverage test_ec2.register_mock_metaserver. 
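+# A typical test below pairs the two module-level helpers, roughly:
+#
+#     _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+#     f = _read_metadata_service()
+#
+# (see TestOpenStackDataSource.test_successful for the full assertions).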
+def _register_uris(version, ec2_files, ec2_meta, os_files): + """Registers a set of url patterns into httpretty that will mimic the + same data returned by the openstack metadata service (and ec2 service).""" + + def match_ec2_url(uri, headers): + path = uri.path.strip("/") + if len(path) == 0: + return (200, headers, "\n".join(EC2_VERSIONS)) + path = uri.path.lstrip("/") + if path in ec2_files: + return (200, headers, ec2_files.get(path)) + if path == 'latest/meta-data/': + buf = StringIO() + for (k, v) in ec2_meta.items(): + if isinstance(v, (list, tuple)): + buf.write("%s/" % (k)) + else: + buf.write("%s" % (k)) + buf.write("\n") + return (200, headers, buf.getvalue()) + if path.startswith('latest/meta-data/'): + value = None + pieces = path.split("/") + if path.endswith("/"): + pieces = pieces[2:-1] + value = util.get_cfg_by_path(ec2_meta, pieces) + else: + pieces = pieces[2:] + value = util.get_cfg_by_path(ec2_meta, pieces) + if value is not None: + return (200, headers, str(value)) + return (404, headers, '') + + def match_os_uri(uri, headers): + path = uri.path.strip("/") + if path == 'openstack': + return (200, headers, "\n".join([openstack.OS_LATEST])) + path = uri.path.lstrip("/") + if path in os_files: + return (200, headers, os_files.get(path)) + return (404, headers, '') + + def get_request_callback(method, uri, headers): + uri = urlparse(uri) + path = uri.path.lstrip("/").split("/") + if path[0] == 'openstack': + return match_os_uri(uri, headers) + return match_ec2_url(uri, headers) + + hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'), + body=get_request_callback) + + +def _read_metadata_service(): + return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1) + + +class TestOpenStackDataSource(test_helpers.HttprettyTestCase): + + with_logs = True + VERSION = 'latest' + + def setUp(self): + super(TestOpenStackDataSource, self).setUp() + self.tmp = self.tmp_dir() + + def test_successful(self): + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + f = _read_metadata_service() + self.assertEqual(VENDOR_DATA, f.get('vendordata')) + self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) + self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertEqual(2, len(f['files'])) + self.assertEqual(USER_DATA, f.get('userdata')) + self.assertEqual(EC2_META, f.get('ec2-metadata')) + self.assertEqual(2, f.get('version')) + metadata = f['metadata'] + self.assertEqual('nova', metadata.get('availability_zone')) + self.assertEqual('sm-foo-test.novalocal', metadata.get('hostname')) + self.assertEqual('sm-foo-test.novalocal', + metadata.get('local-hostname')) + self.assertEqual('sm-foo-test', metadata.get('name')) + self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c', + metadata.get('uuid')) + self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c', + metadata.get('instance-id')) + + def test_no_ec2(self): + _register_uris(self.VERSION, {}, {}, OS_FILES) + f = _read_metadata_service() + self.assertEqual(VENDOR_DATA, f.get('vendordata')) + self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) + self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertEqual(USER_DATA, f.get('userdata')) + self.assertEqual({}, f.get('ec2-metadata')) + self.assertEqual(2, f.get('version')) + + def test_bad_metadata(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + 
os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(openstack.NonReadable, _read_metadata_service) + + def test_bad_uuid(self): + os_files = copy.deepcopy(OS_FILES) + os_meta = copy.deepcopy(OSTACK_META) + os_meta.pop('uuid') + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files[k] = json.dumps(os_meta) + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(BrokenMetadata, _read_metadata_service) + + def test_userdata_empty(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('user_data'): + os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + f = _read_metadata_service() + self.assertEqual(VENDOR_DATA, f.get('vendordata')) + self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) + self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertFalse(f.get('userdata')) + + def test_vendordata_empty(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data.json'): + os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + f = _read_metadata_service() + self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertFalse(f.get('vendordata')) + + def test_vendordata2_empty(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data2.json'): + os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + f = _read_metadata_service() + self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertFalse(f.get('vendordata2')) + + def test_vendordata_invalid(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(BrokenMetadata, _read_metadata_service) + + def test_vendordata2_invalid(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data2.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(BrokenMetadata, _read_metadata_service) + + def test_metadata_invalid(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(BrokenMetadata, _read_metadata_service) + + @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_datasource(self, m_dhcp): + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + self.assertIsNone(ds_os.version) + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() + self.assertTrue(found) + self.assertEqual(2, ds_os.version) + md = dict(ds_os.metadata) + md.pop('instance-id', None) + md.pop('local-hostname', None) + self.assertEqual(OSTACK_META, md) + self.assertEqual(EC2_META, ds_os.ec2_metadata) + self.assertEqual(USER_DATA, ds_os.userdata_raw) + self.assertEqual(2, len(ds_os.files)) + self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) 
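+ # vendordata2 is read from vendor_data2.json (see OS_FILES above)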
+ self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure) + self.assertIsNone(ds_os.vendordata_raw) + m_dhcp.assert_not_called() + + @hp.activate + @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_local_datasource(self, m_dhcp, m_net): + """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data.""" + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + ds_os_local = ds.DataSourceOpenStackLocal( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os_local._fallback_interface = 'eth9' # Monkey patch for dhcp + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'broadcast-address': '192.168.2.255'}] + + self.assertIsNone(ds_os_local.version) + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os_local.get_data() + self.assertTrue(found) + self.assertEqual(2, ds_os_local.version) + md = dict(ds_os_local.metadata) + md.pop('instance-id', None) + md.pop('local-hostname', None) + self.assertEqual(OSTACK_META, md) + self.assertEqual(EC2_META, ds_os_local.ec2_metadata) + self.assertEqual(USER_DATA, ds_os_local.userdata_raw) + self.assertEqual(2, len(ds_os_local.files)) + self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure) + self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure) + self.assertIsNone(ds_os_local.vendordata_raw) + m_dhcp.assert_called_with('eth9', None) + + def test_bad_datasource_meta(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, + None, + helpers.Paths({'run_dir': self.tmp})) + self.assertIsNone(ds_os.version) + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() + self.assertFalse(found) + self.assertIsNone(ds_os.version) + self.assertIn( + 'InvalidMetaDataException: Broken metadata address' + ' http://169.254.169.25', + self.logs.getvalue()) + + def test_no_datasource(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files.pop(k) + _register_uris(self.VERSION, {}, {}, os_files) + ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, + None, + helpers.Paths({'run_dir': self.tmp})) + ds_os.ds_cfg = { + 'max_wait': 0, + 'timeout': 0, + } + self.assertIsNone(ds_os.version) + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() + self.assertFalse(found) + self.assertIsNone(ds_os.version) + + def test_network_config_disabled_by_datasource_config(self): + """The network_config can be disabled from datasource config.""" + mock_path = MOCK_PATH + 'openstack.convert_net_json' + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os.ds_cfg = {'apply_network_config': False} + sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], + 'networks': [], 'services': []} + ds_os.network_json = sample_json # Ignore this content from metadata + with test_helpers.mock.patch(mock_path) as m_convert_json: + 
self.assertIsNone(ds_os.network_config) + m_convert_json.assert_not_called() + + def test_network_config_from_network_json(self): + """The datasource gets network_config from network_data.json.""" + mock_path = MOCK_PATH + 'openstack.convert_net_json' + example_cfg = {'version': 1, 'config': []} + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], + 'networks': [], 'services': []} + ds_os.network_json = sample_json + with test_helpers.mock.patch(mock_path) as m_convert_json: + m_convert_json.return_value = example_cfg + self.assertEqual(example_cfg, ds_os.network_config) + self.assertIn( + 'network config provided via network_json', self.logs.getvalue()) + m_convert_json.assert_called_with(sample_json, known_macs=None) + + def test_network_config_cached(self): + """The datasource caches the network_config property.""" + mock_path = MOCK_PATH + 'openstack.convert_net_json' + example_cfg = {'version': 1, 'config': []} + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os._network_config = example_cfg + with test_helpers.mock.patch(mock_path) as m_convert_json: + self.assertEqual(example_cfg, ds_os.network_config) + m_convert_json.assert_not_called() + + def test_disabled_datasource(self): + os_files = copy.deepcopy(OS_FILES) + os_meta = copy.deepcopy(OSTACK_META) + os_meta['meta'] = { + 'dsmode': 'disabled', + } + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files[k] = json.dumps(os_meta) + _register_uris(self.VERSION, {}, {}, os_files) + ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, + None, + helpers.Paths({'run_dir': self.tmp})) + ds_os.ds_cfg = { + 'max_wait': 0, + 'timeout': 0, + } + self.assertIsNone(ds_os.version) + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() + self.assertFalse(found) + self.assertIsNone(ds_os.version) + + @hp.activate + def test_wb__crawl_metadata_does_not_persist(self): + """_crawl_metadata returns current metadata and does not cache.""" + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + crawled_data = ds_os._crawl_metadata() + self.assertEqual(UNSET, ds_os.ec2_metadata) + self.assertIsNone(ds_os.userdata_raw) + self.assertEqual(0, len(ds_os.files)) + self.assertIsNone(ds_os.vendordata_raw) + self.assertEqual( + ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata', + 'userdata', 'vendordata', 'vendordata2', 'version'], + sorted(crawled_data.keys())) + self.assertEqual('local', crawled_data['dsmode']) + self.assertEqual(EC2_META, crawled_data['ec2-metadata']) + self.assertEqual(2, len(crawled_data['files'])) + md = copy.deepcopy(crawled_data['metadata']) + md.pop('instance-id') + md.pop('local-hostname') + self.assertEqual(OSTACK_META, md) + self.assertEqual( + json.loads(OS_FILES['openstack/latest/network_data.json']), + crawled_data['networkdata']) + self.assertEqual(USER_DATA, crawled_data['userdata']) + self.assertEqual(VENDOR_DATA, crawled_data['vendordata']) + self.assertEqual(VENDOR_DATA2, crawled_data['vendordata2']) + self.assertEqual(2, crawled_data['version']) + + +class TestVendorDataLoading(test_helpers.TestCase): + def cvj(self, data): + return convert_vendordata(data) + + def test_vd_load_none(self): + # non-existent vendor-data should return None + self.assertIsNone(self.cvj(None)) + + def test_vd_load_string(self): + self.assertEqual(self.cvj("foobar"), "foobar") + + def test_vd_load_list(self): + data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])] + self.assertEqual(self.cvj(data), data) + + def test_vd_load_dict_no_ci(self): + self.assertIsNone(self.cvj({'foo': 'bar'})) + + def test_vd_load_dict_ci_dict(self): + self.assertRaises(ValueError, self.cvj, + {'foo': 'bar', 'cloud-init': {'x': 1}}) + + def test_vd_load_dict_ci_string(self): + data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'} + self.assertEqual(self.cvj(data), data['cloud-init']) + + def test_vd_load_dict_ci_list(self): + data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']} + self.assertEqual(self.cvj(data), data['cloud-init']) + + +@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86') +class TestDetectOpenStack(test_helpers.CiTestCase): + + def test_detect_openstack_non_intel_x86(self, m_is_x86): + """Return True on non-Intel platforms because DMI data isn't conclusive.""" + m_is_x86.return_value = False + self.assertTrue( + ds.detect_openstack(), 'Expected detect_openstack == True') + + @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env, + m_is_x86): + """Return False on EC2 platforms.""" + m_is_x86.return_value = True + # No product_name in proc/1/environ + m_proc_env.return_value = {'HOME': '/'} + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' on EC2 + if dmi_key == 'chassis-asset-tag': + return '' # Empty string on EC2 + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertFalse( + ds.detect_openstack(), 'Expected detect_openstack == False on EC2') + m_proc_env.assert_called_with(1) + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_intel_product_name_compute(self, m_dmi, + m_is_x86): + """Return True on OpenStack compute and nova instances.""" + m_is_x86.return_value = True + openstack_product_names = ['OpenStack Nova', 'OpenStack Compute'] + + for product_name in openstack_product_names: + m_dmi.return_value = product_name + self.assertTrue( + ds.detect_openstack(), 'Failed to detect_openstack') + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi, + m_is_x86): + """Return True on OpenStack reporting OpenTelekomCloud asset-tag.""" + m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' on OpenTelekomCloud + if dmi_key == 'chassis-asset-tag': + return 'OpenTelekomCloud' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True on OpenTelekomCloud') + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_sapccloud_chassis_asset_tag(self, m_dmi, + m_is_x86): + """Return True on OpenStack reporting SAP CCloud VM asset-tag.""" + m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'VMware Virtual Platform' # SAP CCloud uses VMware + if dmi_key == 'chassis-asset-tag': + return 'SAP CCloud VM' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = 
fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True on SAP CCloud VM') + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi, + m_is_x86): + """Return True on OpenStack reporting Oracle cloud asset-tag.""" + m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'Standard PC (i440FX + PIIX, 1996)' # No match + if dmi_key == 'chassis-asset-tag': + return 'OracleCloud.com' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(accept_oracle=True), + 'Expected detect_openstack == True on OracleCloud.com') + self.assertFalse( + ds.detect_openstack(accept_oracle=False), + 'Expected detect_openstack == False.') + + def _test_detect_openstack_nova_compute_chassis_asset_tag(self, m_dmi, + m_is_x86, + chassis_tag): + """Return True on OpenStack reporting generic asset-tag.""" + m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'Generic OpenStack Platform' + if dmi_key == 'chassis-asset-tag': + return chassis_tag + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True on Generic OpenStack Platform') + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi, + m_is_x86): + self._test_detect_openstack_nova_compute_chassis_asset_tag( + m_dmi, m_is_x86, 'OpenStack Nova') + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi, + m_is_x86): + self._test_detect_openstack_nova_compute_chassis_asset_tag( + m_dmi, m_is_x86, 'OpenStack Compute') + + @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env, + m_is_x86): + """Return True when nova product_name specified in /proc/1/environ.""" + m_is_x86.return_value = True + # Nova product_name in proc/1/environ + m_proc_env.return_value = { + 'HOME': '/', 'product_name': 'OpenStack Nova'} + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' + if dmi_key == 'chassis-asset-tag': + return '' # Nothing 'openstackish' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True with nova product_name' + ' in /proc/1/environ') + m_proc_env.assert_called_with(1) + + +class TestMetadataReader(test_helpers.HttprettyTestCase): + """Test the MetadataReader.""" + burl = 'http://169.254.169.254/' + md_base = { + 'availability_zone': 'myaz1', + 'hostname': 'sm-foo-test.novalocal', + "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}], + 'launch_index': 0, + 'name': 'sm-foo-test', + 'public_keys': {'mykey': PUBKEY}, + 'project_id': '6a103f813b774b9fb15a4fcd36e1c056', + 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} + + def register(self, path, body=None, status=200): + content = body if not isinstance(body, str) else body.encode('utf-8') + hp.register_uri( + hp.GET, self.burl + "openstack" + path, status=status, + body=content) + + def register_versions(self, versions): + self.register("", '\n'.join(versions)) + self.register("/", 
'\n'.join(versions)) + + def register_version(self, version, data): + content = '\n'.join(sorted(data.keys())) + self.register(version, content) + self.register(version + "/", content) + for path, content in data.items(): + self.register("/%s/%s" % (version, path), content) + if 'user_data' not in data: + self.register("/%s/user_data" % version, "nodata", status=404) + + def test__find_working_version(self): + """Test a working version ignores unsupported.""" + unsup = "2016-11-09" + self.register_versions( + [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup, + openstack.OS_LATEST]) + self.assertEqual( + openstack.OS_LIBERTY, + openstack.MetadataReader(self.burl)._find_working_version()) + + def test__find_working_version_uses_latest(self): + """'latest' should be used if no supported versions.""" + unsup1, unsup2 = ("2016-11-09", '2017-06-06') + self.register_versions([unsup1, unsup2, openstack.OS_LATEST]) + self.assertEqual( + openstack.OS_LATEST, + openstack.MetadataReader(self.burl)._find_working_version()) + + def test_read_v2_os_ocata(self): + """Validate return value of read_v2 for os_ocata data.""" + md = copy.deepcopy(self.md_base) + md['devices'] = [] + network_data = {'links': [], 'networks': [], 'services': []} + vendor_data = {} + vendor_data2 = {"static": {}} + + data = { + 'meta_data.json': json.dumps(md), + 'network_data.json': json.dumps(network_data), + 'vendor_data.json': json.dumps(vendor_data), + 'vendor_data2.json': json.dumps(vendor_data2), + } + + self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST]) + self.register_version(openstack.OS_OCATA, data) + + mock_read_ec2 = test_helpers.mock.MagicMock( + return_value={'instance-id': 'unused-ec2'}) + expected_md = copy.deepcopy(md) + expected_md.update( + {'instance-id': md['uuid'], 'local-hostname': md['hostname']}) + expected = { + 'userdata': '', # Annoying, no user-data results in empty string. + 'version': 2, + 'metadata': expected_md, + 'vendordata': vendor_data, + 'vendordata2': vendor_data2, + 'networkdata': network_data, + 'ec2-metadata': mock_read_ec2.return_value, + 'files': {}, + } + reader = openstack.MetadataReader(self.burl) + reader._read_ec2_metadata = mock_read_ec2 + self.assertEqual(expected, reader.read_v2()) + self.assertEqual(1, mock_read_ec2.call_count) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py new file mode 100644 index 00000000..2aab097c --- /dev/null +++ b/tests/unittests/sources/test_oracle.py @@ -0,0 +1,797 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
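+#
+# Oracle's IMDS serves instance metadata from
+# http://169.254.169.254/opc/v1/ and /opc/v2/; the fixtures below are
+# truncated captures of those responses, and the oracle_ds fixture
+# patches read_opc_metadata() and the platform checks so every test
+# here runs fully offline.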
+ +import base64 +import copy +import json +from contextlib import ExitStack +from unittest import mock + +import pytest + +from cloudinit.sources import DataSourceOracle as oracle +from cloudinit.sources import NetworkConfigSource +from cloudinit.sources.DataSourceOracle import OpcMetadata +from tests.unittests import helpers as test_helpers +from cloudinit.url_helper import UrlError + +DS_PATH = "cloudinit.sources.DataSourceOracle" + +# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Bare Metal Machine +# with a secondary VNIC attached (vnicId truncated for Python line length) +OPC_BM_SECONDARY_VNIC_RESPONSE = """\ +[ { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtyvcucqkhdqmgjszebxe4hrb!!TRUNCATED||", + "privateIp" : "10.0.0.8", + "vlanTag" : 0, + "macAddr" : "90:e2:ba:d4:f1:68", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24", + "nicIndex" : 0 +}, { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtfmkxjdy2sqidndiwrsg63zf!!TRUNCATED||", + "privateIp" : "10.0.4.5", + "vlanTag" : 1, + "macAddr" : "02:00:17:05:CF:51", + "virtualRouterIp" : "10.0.4.1", + "subnetCidrBlock" : "10.0.4.0/24", + "nicIndex" : 0 +} ]""" + +# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Virtual Machine +# with a secondary VNIC attached +OPC_VM_SECONDARY_VNIC_RESPONSE = """\ +[ { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtch72z5pd76cc2636qeqh7z_truncated", + "privateIp" : "10.0.0.230", + "vlanTag" : 1039, + "macAddr" : "02:00:17:05:D1:DB", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24" +}, { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljt4iew3gwmvrwrhhf3bp5drj_truncated", + "privateIp" : "10.0.0.231", + "vlanTag" : 1041, + "macAddr" : "00:00:17:02:2B:B1", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24" +} ]""" + + +# Fetched with `curl http://169.254.169.254/opc/v1/instance/` (and then +# truncated for line length) +OPC_V2_METADATA = """\ +{ + "availabilityDomain" : "qIZq:PHX-AD-1", + "faultDomain" : "FAULT-DOMAIN-2", + "compartmentId" : "ocid1.tenancy.oc1..aaaaaaaao7f7cccogqrg5emjxkxmTRUNCATED", + "displayName" : "instance-20200320-1400", + "hostname" : "instance-20200320-1400", + "id" : "ocid1.instance.oc1.phx.anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED", + "image" : "ocid1.image.oc1.phx.aaaaaaaagmkn4gdhvvx24kiahh2b2qchsicTRUNCATED", + "metadata" : { + "ssh_authorized_keys" : "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated", + "user_data" : "IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v" + }, + "region" : "phx", + "canonicalRegionName" : "us-phoenix-1", + "ociAdName" : "phx-ad-3", + "shape" : "VM.Standard2.1", + "state" : "Running", + "timeCreated" : 1584727285318, + "agentConfig" : { + "monitoringDisabled" : true, + "managementDisabled" : true + } +}""" + +# Just a small meaningless change to differentiate the two metadatas +OPC_V1_METADATA = OPC_V2_METADATA.replace("ocid1.instance", "ocid2.instance") + + +@pytest.fixture +def metadata_version(): + return 2 + + +@pytest.yield_fixture +def oracle_ds(request, fixture_utils, paths, metadata_version): + """ + Return an instantiated DataSourceOracle. + + This also performs the mocking required for the default test case: + * ``_read_system_uuid`` returns something, + * ``_is_platform_viable`` returns True, + * ``_is_iscsi_root`` returns True (the simpler code path), + * ``read_opc_metadata`` returns ``OPC_V2_METADATA`` + + (This uses the paths fixture for the required helpers.Paths object, and the + fixture_utils fixture for fetching markers.) 
+
+
+class TestDataSourceOracle:
+    def test_platform_info(self, oracle_ds):
+        assert "oracle" == oracle_ds.cloud_name
+        assert "oracle" == oracle_ds.platform_type
+
+    def test_subplatform_before_fetch(self, oracle_ds):
+        assert 'unknown' == oracle_ds.subplatform
+
+    def test_platform_info_after_fetch(self, oracle_ds):
+        oracle_ds._get_data()
+        assert 'metadata (http://169.254.169.254/opc/v2/)' == \
+            oracle_ds.subplatform
+
+    @pytest.mark.parametrize('metadata_version', [1])
+    def test_v1_platform_info_after_fetch(self, oracle_ds):
+        oracle_ds._get_data()
+        assert 'metadata (http://169.254.169.254/opc/v1/)' == \
+            oracle_ds.subplatform
+
+    def test_secondary_nics_disabled_by_default(self, oracle_ds):
+        assert not oracle_ds.ds_cfg["configure_secondary_nics"]
+
+    @pytest.mark.ds_sys_cfg(
+        {"datasource": {"Oracle": {"configure_secondary_nics": True}}}
+    )
+    def test_sys_cfg_can_enable_configure_secondary_nics(self, oracle_ds):
+        assert oracle_ds.ds_cfg["configure_secondary_nics"]
+
+
+class TestIsPlatformViable(test_helpers.CiTestCase):
+    @mock.patch(DS_PATH + ".dmi.read_dmi_data",
+                return_value=oracle.CHASSIS_ASSET_TAG)
+    def test_expected_viable(self, m_read_dmi_data):
+        """System with known chassis tag is viable."""
+        self.assertTrue(oracle._is_platform_viable())
+        m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+    @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value=None)
+    def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data):
+        """System without known chassis tag is not viable."""
+        self.assertFalse(oracle._is_platform_viable())
+        m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+    @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value="LetsGoCubs")
+    def test_expected_not_viable_other(self, m_read_dmi_data):
+        """System with unknown chassis tag is not viable."""
+        self.assertFalse(oracle._is_platform_viable())
+        m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+
+@mock.patch(
+    "cloudinit.net.is_openvswitch_internal_interface",
+    mock.Mock(return_value=False)
+)
+class TestNetworkConfigFromOpcImds:
+    def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds):
+        oracle_ds._vnics_data = [{}]
+        # We test this by using a non-dict to ensure that no dict
+        # operations are used; failure would be seen as exceptions
+        oracle_ds._network_config = object()
+        oracle_ds._add_network_config_from_opc_imds()
+
+    def test_bare_metal_machine_skipped(self, oracle_ds, caplog):
+        # nicIndex in the first entry indicates a bare metal machine
+        oracle_ds._vnics_data = json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)
+        # We test this by using a non-dict to ensure that no dict
+        # operations are used
+        oracle_ds._network_config = object()
+        oracle_ds._add_network_config_from_opc_imds()
+        assert 'bare metal machine' in caplog.text
+
+    def test_missing_mac_skipped(self, oracle_ds, caplog):
+        oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+
+        
oracle_ds._network_config = { + 'version': 1, 'config': [{'primary': 'nic'}] + } + with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}): + oracle_ds._add_network_config_from_opc_imds() + + assert 1 == len(oracle_ds.network_config['config']) + assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \ + caplog.text + + def test_missing_mac_skipped_v2(self, oracle_ds, caplog): + oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) + + oracle_ds._network_config = { + 'version': 2, 'ethernets': {'primary': {'nic': {}}} + } + with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}): + oracle_ds._add_network_config_from_opc_imds() + + assert 1 == len(oracle_ds.network_config['ethernets']) + assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \ + caplog.text + + def test_secondary_nic(self, oracle_ds): + oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) + oracle_ds._network_config = { + 'version': 1, 'config': [{'primary': 'nic'}] + } + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + with mock.patch(DS_PATH + ".get_interfaces_by_mac", + return_value={mac_addr: nic_name}): + oracle_ds._add_network_config_from_opc_imds() + + # The input is mutated + assert 2 == len(oracle_ds.network_config['config']) + + secondary_nic_cfg = oracle_ds.network_config['config'][1] + assert nic_name == secondary_nic_cfg['name'] + assert 'physical' == secondary_nic_cfg['type'] + assert mac_addr == secondary_nic_cfg['mac_address'] + assert 9000 == secondary_nic_cfg['mtu'] + + assert 1 == len(secondary_nic_cfg['subnets']) + subnet_cfg = secondary_nic_cfg['subnets'][0] + # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE + assert '10.0.0.231' == subnet_cfg['address'] + + def test_secondary_nic_v2(self, oracle_ds): + oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) + oracle_ds._network_config = { + 'version': 2, 'ethernets': {'primary': {'nic': {}}} + } + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + with mock.patch(DS_PATH + ".get_interfaces_by_mac", + return_value={mac_addr: nic_name}): + oracle_ds._add_network_config_from_opc_imds() + + # The input is mutated + assert 2 == len(oracle_ds.network_config['ethernets']) + + secondary_nic_cfg = oracle_ds.network_config['ethernets']['ens3'] + assert secondary_nic_cfg['dhcp4'] is False + assert secondary_nic_cfg['dhcp6'] is False + assert mac_addr == secondary_nic_cfg['match']['macaddress'] + assert 9000 == secondary_nic_cfg['mtu'] + + assert 1 == len(secondary_nic_cfg['addresses']) + # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE + assert '10.0.0.231' == secondary_nic_cfg['addresses'][0] + + +class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase): + + def setUp(self): + super(TestNetworkConfigFiltersNetFailover, self).setUp() + self.add_patch(DS_PATH + '.get_interfaces_by_mac', + 'm_get_interfaces_by_mac') + self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master') + + def test_ignore_bogus_network_config(self): + netcfg = {'something': 'here'} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + + def test_ignore_network_config_unknown_versions(self): + netcfg = {'something': 'here', 'version': 3} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + + def test_checks_v1_type_physical_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + 
self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr, + 'subnets': [{'type': 'dhcp4'}]}]} + passed_netcfg = copy.copy(netcfg) + self.m_netfail_master.return_value = False + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual([mock.call(nic_name)], + self.m_netfail_master.call_args_list) + + def test_checks_v1_skips_non_phys_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 1, 'config': [ + {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr, + 'subnets': [{'type': 'dhcp4'}]}]} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual(0, self.m_netfail_master.call_count) + + def test_removes_master_mac_property_v1(self): + nic_master, mac_master = 'ens3', self.random_string() + nic_other, mac_other = 'ens7', self.random_string() + nic_extra, mac_extra = 'enp0s1f2', self.random_string() + self.m_get_interfaces_by_mac.return_value = { + mac_master: nic_master, + mac_other: nic_other, + mac_extra: nic_extra, + } + netcfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_master, + 'mac_address': mac_master}, + {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, + {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, + ]} + + def _is_netfail_master(iface): + if iface == 'ens3': + return True + return False + self.m_netfail_master.side_effect = _is_netfail_master + expected_cfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_master}, + {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, + {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, + ]} + oracle._ensure_netfailover_safe(netcfg) + self.assertEqual(expected_cfg, netcfg) + + def test_checks_v2_type_ethernet_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 2, 'ethernets': { + nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, + 'match': {'macaddress': mac_addr}}}} + passed_netcfg = copy.copy(netcfg) + self.m_netfail_master.return_value = False + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual([mock.call(nic_name)], + self.m_netfail_master.call_args_list) + + def test_skips_v2_non_ethernet_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 2, 'wifis': { + nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, + 'match': {'macaddress': mac_addr}}}} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual(0, self.m_netfail_master.call_count) + + def test_removes_master_mac_property_v2(self): + nic_master, mac_master = 'ens3', self.random_string() + nic_other, mac_other = 'ens7', self.random_string() + nic_extra, mac_extra = 'enp0s1f2', self.random_string() + self.m_get_interfaces_by_mac.return_value = { + mac_master: nic_master, + mac_other: nic_other, + mac_extra: nic_extra, + } + netcfg = {'version': 2, 'ethernets': { + nic_extra: {'dhcp4': True, 'set-name': nic_extra, + 
'match': {'macaddress': mac_extra}},
+            nic_other: {'dhcp4': True, 'set-name': nic_other,
+                        'match': {'macaddress': mac_other}},
+            nic_master: {'dhcp4': True, 'set-name': nic_master,
+                         'match': {'macaddress': mac_master}},
+        }}
+
+        def _is_netfail_master(iface):
+            if iface == 'ens3':
+                return True
+            return False
+        self.m_netfail_master.side_effect = _is_netfail_master
+
+        expected_cfg = {'version': 2, 'ethernets': {
+            nic_master: {'dhcp4': True, 'match': {'name': nic_master}},
+            nic_extra: {'dhcp4': True, 'set-name': nic_extra,
+                        'match': {'macaddress': mac_extra}},
+            nic_other: {'dhcp4': True, 'set-name': nic_other,
+                        'match': {'macaddress': mac_other}},
+        }}
+        oracle._ensure_netfailover_safe(netcfg)
+        self.assertEqual(expected_cfg, netcfg)
+
+
+def _mock_v2_urls(httpretty):
+    def instance_callback(request, uri, response_headers):
+        assert request.headers.get("Authorization") == "Bearer Oracle"
+        return [200, response_headers, OPC_V2_METADATA]
+
+    def vnics_callback(request, uri, response_headers):
+        assert request.headers.get("Authorization") == "Bearer Oracle"
+        return [200, response_headers, OPC_BM_SECONDARY_VNIC_RESPONSE]
+
+    httpretty.register_uri(
+        httpretty.GET,
+        "http://169.254.169.254/opc/v2/instance/",
+        body=instance_callback
+    )
+    httpretty.register_uri(
+        httpretty.GET,
+        "http://169.254.169.254/opc/v2/vnics/",
+        body=vnics_callback
+    )
+
+
+def _mock_no_v2_urls(httpretty):
+    httpretty.register_uri(
+        httpretty.GET,
+        "http://169.254.169.254/opc/v2/instance/",
+        status=404,
+    )
+    httpretty.register_uri(
+        httpretty.GET,
+        "http://169.254.169.254/opc/v1/instance/",
+        body=OPC_V1_METADATA
+    )
+    httpretty.register_uri(
+        httpretty.GET,
+        "http://169.254.169.254/opc/v1/vnics/",
+        body=OPC_BM_SECONDARY_VNIC_RESPONSE
+    )
+
+
+class TestReadOpcMetadata:
+    # See https://docs.pytest.org/en/stable/example
+    # /parametrize.html#parametrizing-conditional-raising
+    does_not_raise = ExitStack
+
+    @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+    @pytest.mark.parametrize(
+        'version,setup_urls,instance_data,fetch_vnics,vnics_data', [
+            (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), True,
+             json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+            (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None),
+            (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), True,
+             json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+            (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None),
+        ]
+    )
+    def test_metadata_returned(
+        self, version, setup_urls, instance_data,
+        fetch_vnics, vnics_data, httpretty
+    ):
+        setup_urls(httpretty)
+        metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics)
+
+        assert version == metadata.version
+        assert instance_data == metadata.instance_data
+        assert vnics_data == metadata.vnics_data
+
+    # No need to actually wait between retries in the tests
+    @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+    @pytest.mark.parametrize(
+        "v2_failure_count,v1_failure_count,expected_body,expectation",
+        [
+            (1, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+            (2, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+            (3, 0, json.loads(OPC_V1_METADATA), does_not_raise()),
+            (3, 1, json.loads(OPC_V1_METADATA), does_not_raise()),
+            (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()),
+            (3, 3, None, pytest.raises(UrlError)),
+        ]
+    )
+    def test_retries(self, v2_failure_count, v1_failure_count,
+                     
expected_body, expectation, httpretty): + v2_responses = [httpretty.Response("", status=404)] * v2_failure_count + v2_responses.append(httpretty.Response(OPC_V2_METADATA)) + v1_responses = [httpretty.Response("", status=404)] * v1_failure_count + v1_responses.append(httpretty.Response(OPC_V1_METADATA)) + + httpretty.register_uri( + httpretty.GET, + "http://169.254.169.254/opc/v1/instance/", + responses=v1_responses, + ) + httpretty.register_uri( + httpretty.GET, + "http://169.254.169.254/opc/v2/instance/", + responses=v2_responses, + ) + with expectation: + assert expected_body == oracle.read_opc_metadata().instance_data + + +class TestCommon_GetDataBehaviour: + """This test class tests behaviour common to iSCSI and non-iSCSI root. + + It defines a fixture, parameterized_oracle_ds, which is used in all the + tests herein to test that the commonly expected behaviour is the same with + iSCSI root and without. + + (As non-iSCSI root behaviour is a superset of iSCSI root behaviour this + class is implicitly also testing all iSCSI root behaviour so there is no + separate class for that case.) + """ + + @pytest.yield_fixture(params=[True, False]) + def parameterized_oracle_ds(self, request, oracle_ds): + """oracle_ds parameterized for iSCSI and non-iSCSI root respectively""" + is_iscsi_root = request.param + with ExitStack() as stack: + stack.enter_context( + mock.patch( + DS_PATH + "._is_iscsi_root", return_value=is_iscsi_root + ) + ) + if not is_iscsi_root: + stack.enter_context( + mock.patch(DS_PATH + ".net.find_fallback_nic") + ) + stack.enter_context( + mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4") + ) + yield oracle_ds + + @mock.patch( + DS_PATH + "._is_platform_viable", mock.Mock(return_value=False) + ) + def test_false_if_platform_not_viable( + self, parameterized_oracle_ds, + ): + assert not parameterized_oracle_ds._get_data() + + @pytest.mark.parametrize( + "keyname,expected_value", + ( + ("availability-zone", "phx-ad-3"), + ("launch-index", 0), + ("local-hostname", "instance-20200320-1400"), + ( + "instance-id", + "ocid1.instance.oc1.phx" + ".anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED", + ), + ("name", "instance-20200320-1400"), + ( + "public_keys", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated", + ), + ), + ) + def test_metadata_keys_set_correctly( + self, keyname, expected_value, parameterized_oracle_ds, + ): + assert parameterized_oracle_ds._get_data() + assert expected_value == parameterized_oracle_ds.metadata[keyname] + + @pytest.mark.parametrize( + "attribute_name,expected_value", + [ + ("_crawled_metadata", json.loads(OPC_V2_METADATA)), + ( + "userdata_raw", + base64.b64decode(b"IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"), + ), + ("system_uuid", "my-test-uuid"), + ], + ) + @mock.patch( + DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid") + ) + def test_attributes_set_correctly( + self, attribute_name, expected_value, parameterized_oracle_ds, + ): + assert parameterized_oracle_ds._get_data() + assert expected_value == getattr( + parameterized_oracle_ds, attribute_name + ) + + @pytest.mark.parametrize( + "ssh_keys,expected_value", + [ + # No SSH keys in metadata => no keys detected + (None, []), + # Empty SSH keys in metadata => no keys detected + ("", []), + # Single SSH key in metadata => single key detected + ("ssh-rsa ... test@test", ["ssh-rsa ... test@test"]), + # Multiple SSH keys in metadata => multiple keys detected + ( + "ssh-rsa ... test@test\nssh-rsa ... test2@test2", + ["ssh-rsa ... test@test", "ssh-rsa ... 
test2@test2"], + ), + ], + ) + def test_public_keys_handled_correctly( + self, ssh_keys, expected_value, parameterized_oracle_ds + ): + instance_data = json.loads(OPC_V1_METADATA) + if ssh_keys is None: + del instance_data["metadata"]["ssh_authorized_keys"] + else: + instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys + metadata = OpcMetadata(None, instance_data, None) + with mock.patch( + DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), + ): + assert parameterized_oracle_ds._get_data() + assert ( + expected_value == parameterized_oracle_ds.get_public_ssh_keys() + ) + + def test_missing_user_data_handled_gracefully( + self, parameterized_oracle_ds + ): + instance_data = json.loads(OPC_V1_METADATA) + del instance_data["metadata"]["user_data"] + metadata = OpcMetadata(None, instance_data, None) + with mock.patch( + DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), + ): + assert parameterized_oracle_ds._get_data() + + assert parameterized_oracle_ds.userdata_raw is None + + def test_missing_metadata_handled_gracefully( + self, parameterized_oracle_ds + ): + instance_data = json.loads(OPC_V1_METADATA) + del instance_data["metadata"] + metadata = OpcMetadata(None, instance_data, None) + with mock.patch( + DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), + ): + assert parameterized_oracle_ds._get_data() + + assert parameterized_oracle_ds.userdata_raw is None + assert [] == parameterized_oracle_ds.get_public_ssh_keys() + + +@mock.patch(DS_PATH + "._is_iscsi_root", lambda: False) +class TestNonIscsiRoot_GetDataBehaviour: + @mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4") + @mock.patch(DS_PATH + ".net.find_fallback_nic") + def test_read_opc_metadata_called_with_ephemeral_dhcp( + self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds + ): + in_context_manager = False + + def enter_context_manager(): + nonlocal in_context_manager + in_context_manager = True + + def exit_context_manager(*args): + nonlocal in_context_manager + in_context_manager = False + + m_EphemeralDHCPv4.return_value.__enter__.side_effect = ( + enter_context_manager + ) + m_EphemeralDHCPv4.return_value.__exit__.side_effect = ( + exit_context_manager + ) + + def assert_in_context_manager(**kwargs): + assert in_context_manager + return mock.MagicMock() + + with mock.patch( + DS_PATH + ".read_opc_metadata", + mock.Mock(side_effect=assert_in_context_manager), + ): + assert oracle_ds._get_data() + + assert [ + mock.call( + iface=m_find_fallback_nic.return_value, + connectivity_url_data={ + 'headers': { + 'Authorization': 'Bearer Oracle' + }, + 'url': 'http://169.254.169.254/opc/v2/instance/' + } + ) + ] == m_EphemeralDHCPv4.call_args_list + + +@mock.patch(DS_PATH + ".get_interfaces_by_mac", lambda: {}) +@mock.patch(DS_PATH + ".cmdline.read_initramfs_config") +class TestNetworkConfig: + def test_network_config_cached(self, m_read_initramfs_config, oracle_ds): + """.network_config should be cached""" + assert 0 == m_read_initramfs_config.call_count + oracle_ds.network_config # pylint: disable=pointless-statement + assert 1 == m_read_initramfs_config.call_count + oracle_ds.network_config # pylint: disable=pointless-statement + assert 1 == m_read_initramfs_config.call_count + + def test_network_cmdline(self, m_read_initramfs_config, oracle_ds): + """network_config should prefer initramfs config over fallback""" + ncfg = {"version": 1, "config": [{"a": "b"}]} + m_read_initramfs_config.return_value = copy.deepcopy(ncfg) + + assert ncfg == oracle_ds.network_config + assert 0 == 
oracle_ds.distro.generate_fallback_config.call_count
+
+    def test_network_fallback(self, m_read_initramfs_config, oracle_ds):
+        """network_config should use the fallback config when no initramfs
+        config is available."""
+        ncfg = {"version": 1, "config": [{"a": "b"}]}
+
+        m_read_initramfs_config.return_value = None
+        oracle_ds.distro.generate_fallback_config.return_value = copy.deepcopy(
+            ncfg
+        )
+
+        assert ncfg == oracle_ds.network_config
+
+    @pytest.mark.parametrize(
+        "configure_secondary_nics,expect_secondary_nics",
+        [(True, True), (False, False), (None, False)],
+    )
+    def test_secondary_nic_addition(
+        self,
+        m_read_initramfs_config,
+        configure_secondary_nics,
+        expect_secondary_nics,
+        oracle_ds,
+    ):
+        """Test that _add_network_config_from_opc_imds is called as expected
+
+        (configure_secondary_nics=None is used to test the default behaviour.)
+        """
+        m_read_initramfs_config.return_value = {"version": 1, "config": []}
+
+        if configure_secondary_nics is not None:
+            oracle_ds.ds_cfg[
+                "configure_secondary_nics"
+            ] = configure_secondary_nics
+
+        def side_effect(self):
+            self._network_config["secondary_added"] = mock.sentinel.needle
+
+        oracle_ds._vnics_data = 'DummyData'
+        with mock.patch.object(
+            oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
+            new=side_effect,
+        ):
+            was_secondary_added = "secondary_added" in oracle_ds.network_config
+        assert expect_secondary_nics == was_secondary_added
+
+    def test_secondary_nic_failure_isnt_blocking(
+        self,
+        m_read_initramfs_config,
+        caplog,
+        oracle_ds,
+    ):
+        oracle_ds.ds_cfg["configure_secondary_nics"] = True
+        oracle_ds._vnics_data = "DummyData"
+
+        with mock.patch.object(
+            oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
+            side_effect=Exception()
+        ):
+            network_config = oracle_ds.network_config
+        assert network_config == m_read_initramfs_config.return_value
+        assert "Failed to parse secondary network configuration" in caplog.text
+
+    def test_ds_network_cfg_preferred_over_initramfs(self, _m):
+        """Ensure that DS net config is preferred over initramfs config"""
+        config_sources = oracle.DataSourceOracle.network_config_sources
+        ds_idx = config_sources.index(NetworkConfigSource.ds)
+        initramfs_idx = config_sources.index(NetworkConfigSource.initramfs)
+        assert ds_idx < initramfs_idx
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_ovf.py b/tests/unittests/sources/test_ovf.py
new file mode 100644
index 00000000..da516731
--- /dev/null
+++ b/tests/unittests/sources/test_ovf.py
@@ -0,0 +1,1046 @@
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import os
+
+from collections import OrderedDict
+from textwrap import dedent
+
+from cloudinit import subp
+from cloudinit import util
+from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
+from cloudinit.helpers import Paths
+from cloudinit.sources import DataSourceOVF as dsovf
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+    CustomScriptNotFound)
+from cloudinit.safeyaml import YAMLError
+
+MPATH = 'cloudinit.sources.DataSourceOVF.'
+
+NOT_FOUND = None
+
+OVF_ENV_CONTENT = """<?xml version="1.0" encoding="UTF-8"?>
+<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
+  oe:id="WebTier">
+  <!-- Information about hypervisor platform -->
+  <oe:PlatformSection>
+    <Kind>ESX Server</Kind>
+    <Version>3.0.1</Version>
+    <Vendor>VMware, Inc.</Vendor>
+    <Locale>en_US</Locale>
+  </oe:PlatformSection>
+  <PropertySection>
+{properties}
+  </PropertySection>
+</Environment>
+"""
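+
+
+# fill_properties() below renders one <Property> element per key into the
+# PropertySection of the template above. Assuming the prop_tmpl element as
+# reconstructed, fill_properties({"instance-id": "inst-001"}) yields a line
+# such as:
+#
+#     <Property oe:key="instance-id" oe:value="inst-001"/>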
+
+
+def fill_properties(props, template=OVF_ENV_CONTENT):
+    lines = []
+    prop_tmpl = '<Property oe:key="{key}" oe:value="{val}"/>'
+    for key, val in props.items():
+        lines.append(prop_tmpl.format(key=key, val=val))
+    indent = "        "
+    properties = ''.join([indent + line + "\n" for line in lines])
+    return template.format(properties=properties)
+
+
+class TestReadOvfEnv(CiTestCase):
+    def test_with_b64_userdata(self):
+        user_data = "#!/bin/sh\necho hello world\n"
+        user_data_b64 = base64.b64encode(user_data.encode()).decode()
+        props = {"user-data": user_data_b64, "password": "passw0rd",
+                 "instance-id": "inst-001"}
+        env = fill_properties(props)
+        md, ud, cfg = dsovf.read_ovf_environment(env)
+        self.assertEqual({"instance-id": "inst-001"}, md)
+        self.assertEqual(user_data.encode(), ud)
+        self.assertEqual({'password': "passw0rd"}, cfg)
+
+    def test_with_non_b64_userdata(self):
+        user_data = "my-user-data"
+        props = {"user-data": user_data, "instance-id": "inst-001"}
+        env = fill_properties(props)
+        md, ud, cfg = dsovf.read_ovf_environment(env)
+        self.assertEqual({"instance-id": "inst-001"}, md)
+        self.assertEqual(user_data.encode(), ud)
+        self.assertEqual({}, cfg)
+
+    def test_with_no_userdata(self):
+        props = {"password": "passw0rd", "instance-id": "inst-001"}
+        env = fill_properties(props)
+        md, ud, cfg = dsovf.read_ovf_environment(env)
+        self.assertEqual({"instance-id": "inst-001"}, md)
+        self.assertEqual({'password': "passw0rd"}, cfg)
+        self.assertIsNone(ud)
+
+    def test_with_b64_network_config_enable_read_network(self):
+        network_config = dedent("""\
+        network:
+           version: 2
+           ethernets:
+              nics:
+                 nameservers:
+                    addresses:
+                       - 127.0.0.53
+                    search:
+                       - eng.vmware.com
+                       - vmware.com
+                 match:
+                    name: eth*
+                 gateway4: 10.10.10.253
+                 dhcp4: false
+                 addresses:
+                    - 10.10.10.1/24
+        """)
+        network_config_b64 = base64.b64encode(network_config.encode()).decode()
+        props = {"network-config": network_config_b64,
+                 "password": "passw0rd",
+                 "instance-id": "inst-001"}
+        env = fill_properties(props)
+        md, ud, cfg = dsovf.read_ovf_environment(env, True)
+        self.assertEqual("inst-001", md["instance-id"])
+        self.assertEqual({'password': "passw0rd"}, cfg)
+        self.assertEqual(
+            {'version': 2, 'ethernets':
+                {'nics':
+                    {'nameservers':
+                        {'addresses': ['127.0.0.53'],
+                         'search': ['eng.vmware.com', 'vmware.com']},
+                     'match': {'name': 'eth*'},
+                     'gateway4': '10.10.10.253',
+                     'dhcp4': False,
+                     'addresses': ['10.10.10.1/24']}}},
+            md["network-config"])
+        self.assertIsNone(ud)
+
+    def test_with_non_b64_network_config_enable_read_network(self):
+        network_config = dedent("""\
+        network:
+           version: 2
+           ethernets:
+              nics:
+                 nameservers:
+                    addresses:
+                       - 127.0.0.53
+                    search:
+                       - eng.vmware.com
+                       - vmware.com
+                 match:
+                    name: eth*
+                 gateway4: 10.10.10.253
+                 dhcp4: false
+                 addresses:
+                    - 10.10.10.1/24
+        """)
+        props = {"network-config": network_config,
+                 "password": "passw0rd",
+                 "instance-id": "inst-001"}
+        env = fill_properties(props)
+        md, ud, cfg = dsovf.read_ovf_environment(env, True)
+        self.assertEqual({"instance-id": "inst-001"}, md)
+        self.assertEqual({'password': "passw0rd"}, cfg)
+        self.assertIsNone(ud)
+
+    def test_with_b64_network_config_disable_read_network(self):
+        network_config = dedent("""\
+        network:
+           version: 2
+           ethernets:
+              nics:
+                 nameservers:
+                    addresses:
+                       - 127.0.0.53
+                    search:
+                       - eng.vmware.com
+                       - vmware.com
+                 match:
+                    name: eth*
+                 gateway4: 10.10.10.253
+                 dhcp4: false
+                 addresses:
+                    - 10.10.10.1/24
+        """)
+        network_config_b64 = base64.b64encode(network_config.encode()).decode()
+        props = 
{"network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertIsNone(ud) + + +class TestMarkerFiles(CiTestCase): + + def setUp(self): + super(TestMarkerFiles, self).setUp() + self.tdir = self.tmp_dir() + + def test_false_when_markerid_none(self): + """Return False when markerid provided is None.""" + self.assertFalse( + dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)) + + def test_markerid_file_exist(self): + """Return False when markerid file path does not exist, + True otherwise.""" + self.assertFalse( + dsovf.check_marker_exists('123', self.tdir)) + + marker_file = self.tmp_path('.markerfile-123.txt', self.tdir) + util.write_file(marker_file, '') + self.assertTrue( + dsovf.check_marker_exists('123', self.tdir) + ) + + def test_marker_file_setup(self): + """Test creation of marker files.""" + markerfilepath = self.tmp_path('.markerfile-hi.txt', self.tdir) + self.assertFalse(os.path.exists(markerfilepath)) + dsovf.setup_marker_files(markerid='hi', marker_dir=self.tdir) + self.assertTrue(os.path.exists(markerfilepath)) + + +class TestDatasourceOVF(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestDatasourceOVF, self).setUp() + self.datasource = dsovf.DataSourceOVF + self.tdir = self.tmp_dir() + + def test_get_data_false_on_none_dmi_data(self): + """When dmi for system-product-name is None, get_data returns False.""" + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': None, + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: No system-product-name found', self.logs.getvalue()) + + def test_get_data_vmware_customization_disabled(self): + """When vmware customization is disabled via sys_cfg and + allow_raw_data is disabled via ds_cfg, log a message. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True, + 'datasource': {'OVF': {'allow_raw_data': False}}}, + distro={}, paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization for VMware platform is disabled.', + self.logs.getvalue()) + + def test_get_data_vmware_customization_sys_cfg_disabled(self): + """When vmware customization is disabled via sys_cfg and + no meta data is found, log a message. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True, + 'datasource': {'OVF': {'allow_raw_data': True}}}, + distro={}, paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization using VMware config is disabled.', + self.logs.getvalue()) + + def test_get_data_allow_raw_data_disabled(self): + """When allow_raw_data is disabled via ds_cfg and + meta data is found, log a message. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False, + 'datasource': {'OVF': {'allow_raw_data': False}}}, + distro={}, paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + util.write_file(metadata_file, "This is meta data") + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', '']}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization using raw data is disabled.', + self.logs.getvalue()) + + def test_get_data_vmware_customization_enabled(self): + """When cloud-init workflow for vmware is enabled via sys_cfg log a + message. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) + with mock.patch(MPATH + 'get_tools_config', return_value='true'): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + customscript = self.tmp_path('test-script', self.tdir) + self.assertIn('Script %s not found!!' % customscript, + str(context.exception)) + + def test_get_data_cust_script_disabled(self): + """If custom script is disabled by VMware tools configuration, + raise a RuntimeError. 
+        """
+        paths = Paths({'cloud_dir': self.tdir})
+        ds = self.datasource(
+            sys_cfg={'disable_vmware_customization': False}, distro={},
+            paths=paths)
+        # Prepare the conf file
+        conf_file = self.tmp_path('test-cust', self.tdir)
+        conf_content = dedent("""\
+            [CUSTOM-SCRIPT]
+            SCRIPT-NAME = test-script
+            [MISC]
+            MARKER-ID = 12345346
+            """)
+        util.write_file(conf_file, conf_content)
+        # Prepare the custom script
+        customscript = self.tmp_path('test-script', self.tdir)
+        util.write_file(customscript, "This is the post cust script")
+
+        with mock.patch(MPATH + 'get_tools_config', return_value='invalid'):
+            with mock.patch(MPATH + 'set_customization_status',
+                            return_value=('msg', b'')):
+                with self.assertRaises(RuntimeError) as context:
+                    wrap_and_call(
+                        'cloudinit.sources.DataSourceOVF',
+                        {'dmi.read_dmi_data': 'vmware',
+                         'util.del_dir': True,
+                         'search_file': self.tdir,
+                         'wait_for_imc_cfg_file': conf_file,
+                         'get_nics_to_enable': ''},
+                        ds.get_data)
+        self.assertIn('Custom script is disabled by VM Administrator',
+                      str(context.exception))
+
+    def test_get_data_cust_script_enabled(self):
+        """If custom script is enabled by VMware tools configuration,
+        execute the script.
+        """
+        paths = Paths({'cloud_dir': self.tdir})
+        ds = self.datasource(
+            sys_cfg={'disable_vmware_customization': False}, distro={},
+            paths=paths)
+        # Prepare the conf file
+        conf_file = self.tmp_path('test-cust', self.tdir)
+        conf_content = dedent("""\
+            [CUSTOM-SCRIPT]
+            SCRIPT-NAME = test-script
+            [MISC]
+            MARKER-ID = 12345346
+            """)
+        util.write_file(conf_file, conf_content)
+
+        # Mock that the custom script is enabled by returning "true" when
+        # get_tools_config is called
+        with mock.patch(MPATH + 'get_tools_config', return_value="true"):
+            with mock.patch(MPATH + 'set_customization_status',
+                            return_value=('msg', b'')):
+                with self.assertRaises(CustomScriptNotFound) as context:
+                    wrap_and_call(
+                        'cloudinit.sources.DataSourceOVF',
+                        {'dmi.read_dmi_data': 'vmware',
+                         'util.del_dir': True,
+                         'search_file': self.tdir,
+                         'wait_for_imc_cfg_file': conf_file,
+                         'get_nics_to_enable': ''},
+                        ds.get_data)
+        # Verify an attempt was made to execute the custom script
+        customscript = self.tmp_path('test-script', self.tdir)
+        self.assertIn('Script %s not found!!' 
% customscript, + str(context.exception)) + + def test_get_data_force_run_post_script_is_yes(self): + """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if + enable-custom-scripts is not defined in VM Tools configuration + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts + # default value is TRUE + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + DEFAULT-RUN-POST-CUST-SCRIPT = yes + """) + util.write_file(conf_file, conf_content) + + # Mock get_tools_config(section, key, defaultVal) to return + # defaultVal + def my_get_tools_config(*args, **kwargs): + return args[2] + + with mock.patch(MPATH + 'get_tools_config', + side_effect=my_get_tools_config): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + # Verify custom script still runs although it is + # disabled by VMware Tools + customscript = self.tmp_path('test-script', self.tdir) + self.assertIn('Script %s not found!!' % customscript, + str(context.exception)) + + def test_get_data_non_vmware_seed_platform_info(self): + """Platform info properly reports when on non-vmware platforms.""" + paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + # Write ovf-env.xml seed file + seed_dir = self.tmp_path('seed', dir=self.tdir) + ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir) + util.write_file(ovf_env, OVF_ENV_CONTENT) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + + self.assertEqual('ovf', ds.cloud_name) + self.assertEqual('ovf', ds.platform_type) + with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='!VMware'): + with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd: + with mock.patch(MPATH + 'transport_iso9660') as m_iso9660: + m_iso9660.return_value = NOT_FOUND + m_guestd.return_value = NOT_FOUND + self.assertTrue(ds.get_data()) + self.assertEqual( + 'ovf (%s/seed/ovf-env.xml)' % self.tdir, + ds.subplatform) + + def test_get_data_vmware_seed_platform_info(self): + """Platform info properly reports when on VMware platform.""" + paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + # Write ovf-env.xml seed file + seed_dir = self.tmp_path('seed', dir=self.tdir) + ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir) + util.write_file(ovf_env, OVF_ENV_CONTENT) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + + self.assertEqual('ovf', ds.cloud_name) + self.assertEqual('ovf', ds.platform_type) + with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='VMWare'): + with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd: + with mock.patch(MPATH + 'transport_iso9660') as m_iso9660: + m_iso9660.return_value = NOT_FOUND + m_guestd.return_value = NOT_FOUND + self.assertTrue(ds.get_data()) + self.assertEqual( + 'vmware (%s/seed/ovf-env.xml)' % self.tdir, + ds.subplatform) + + @mock.patch('cloudinit.subp.subp') + @mock.patch('cloudinit.sources.DataSource.persist_instance_data') + def test_get_data_vmware_guestinfo_with_network_config( + self, 
m_persist, m_subp
+    ):
+        self._test_get_data_with_network_config(guestinfo=True, iso=False)
+
+    @mock.patch('cloudinit.subp.subp')
+    @mock.patch('cloudinit.sources.DataSource.persist_instance_data')
+    def test_get_data_iso9660_with_network_config(self, m_persist, m_subp):
+        self._test_get_data_with_network_config(guestinfo=False, iso=True)
+
+    def _test_get_data_with_network_config(self, guestinfo, iso):
+        network_config = dedent("""\
+        network:
+           version: 2
+           ethernets:
+              nics:
+                 nameservers:
+                    addresses:
+                       - 127.0.0.53
+                    search:
+                       - vmware.com
+                 match:
+                    name: eth*
+                 gateway4: 10.10.10.253
+                 dhcp4: false
+                 addresses:
+                    - 10.10.10.1/24
+        """)
+        network_config_b64 = base64.b64encode(network_config.encode()).decode()
+        props = {"network-config": network_config_b64,
+                 "password": "passw0rd",
+                 "instance-id": "inst-001"}
+        env = fill_properties(props)
+        paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
+        ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+        with mock.patch(MPATH + 'transport_vmware_guestinfo',
+                        return_value=env if guestinfo else NOT_FOUND):
+            with mock.patch(MPATH + 'transport_iso9660',
+                            return_value=env if iso else NOT_FOUND):
+                self.assertTrue(ds.get_data())
+                self.assertEqual('inst-001', ds.metadata['instance-id'])
+                self.assertEqual(
+                    {'version': 2, 'ethernets':
+                        {'nics':
+                            {'nameservers':
+                                {'addresses': ['127.0.0.53'],
+                                 'search': ['vmware.com']},
+                             'match': {'name': 'eth*'},
+                             'gateway4': '10.10.10.253',
+                             'dhcp4': False,
+                             'addresses': ['10.10.10.1/24']}}},
+                    ds.network_config)
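+
+    # The two tests above pin down the transport precedence that
+    # _test_get_data_with_network_config() relies on. A rough sketch of the
+    # behaviour implied by the mocks (an illustration, not the actual
+    # DataSourceOVF code):
+    #
+    #     for transport in (transport_vmware_guestinfo, transport_iso9660):
+    #         contents = transport()
+    #         if contents is not NOT_FOUND:
+    #             return read_ovf_environment(contents)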
+
+    def test_get_data_cloudinit_metadata_json(self):
+        """Test metadata can be loaded to cloud-init metadata and network.
+        The metadata format is json.
+        """
+        paths = Paths({'cloud_dir': self.tdir})
+        ds = self.datasource(
+            sys_cfg={'disable_vmware_customization': True}, distro={},
+            paths=paths)
+        # Prepare the conf file
+        conf_file = self.tmp_path('test-cust', self.tdir)
+        conf_content = dedent("""\
+            [CLOUDINIT]
+            METADATA = test-meta
+            """)
+        util.write_file(conf_file, conf_content)
+        # Prepare the meta data file
+        metadata_file = self.tmp_path('test-meta', self.tdir)
+        metadata_content = dedent("""\
+            {
+                "instance-id": "cloud-vm",
+                "local-hostname": "my-host.domain.com",
+                "network": {
+                    "version": 2,
+                    "ethernets": {
+                        "eths": {
+                            "match": {
+                                "name": "ens*"
+                            },
+                            "dhcp4": true
+                        }
+                    }
+                }
+            }
+            """)
+        util.write_file(metadata_file, metadata_content)
+
+        with mock.patch(MPATH + 'set_customization_status',
+                        return_value=('msg', b'')):
+            result = wrap_and_call(
+                'cloudinit.sources.DataSourceOVF',
+                {'dmi.read_dmi_data': 'vmware',
+                 'util.del_dir': True,
+                 'search_file': self.tdir,
+                 'wait_for_imc_cfg_file': conf_file,
+                 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''],
+                 'get_nics_to_enable': ''},
+                ds._get_data)
+
+        self.assertTrue(result)
+        self.assertEqual("cloud-vm", ds.metadata['instance-id'])
+        self.assertEqual("my-host.domain.com", ds.metadata['local-hostname'])
+        self.assertEqual(2, ds.network_config['version'])
+        self.assertTrue(ds.network_config['ethernets']['eths']['dhcp4'])
+
+    def test_get_data_cloudinit_metadata_yaml(self):
+        """Test metadata can be loaded to cloud-init metadata and network.
+        The metadata format is yaml.
+        """
+        paths = Paths({'cloud_dir': self.tdir})
+        ds = self.datasource(
+            sys_cfg={'disable_vmware_customization': True}, distro={},
+            paths=paths)
+        # Prepare the conf file
+        conf_file = self.tmp_path('test-cust', self.tdir)
+        conf_content = dedent("""\
+            [CLOUDINIT]
+            METADATA = test-meta
+            """)
+        util.write_file(conf_file, conf_content)
+        # Prepare the meta data file
+        metadata_file = self.tmp_path('test-meta', self.tdir)
+        metadata_content = dedent("""\
+            instance-id: cloud-vm
+            local-hostname: my-host.domain.com
+            network:
+               version: 2
+               ethernets:
+                  nics:
+                     match:
+                        name: ens*
+                     dhcp4: yes
+            """)
+        util.write_file(metadata_file, metadata_content)
+
+        with mock.patch(MPATH + 'set_customization_status',
+                        return_value=('msg', b'')):
+            result = wrap_and_call(
+                'cloudinit.sources.DataSourceOVF',
+                {'dmi.read_dmi_data': 'vmware',
+                 'util.del_dir': True,
+                 'search_file': self.tdir,
+                 'wait_for_imc_cfg_file': conf_file,
+                 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''],
+                 'get_nics_to_enable': ''},
+                ds._get_data)
+
+        self.assertTrue(result)
+        self.assertEqual("cloud-vm", ds.metadata['instance-id'])
+        self.assertEqual("my-host.domain.com", ds.metadata['local-hostname'])
+        self.assertEqual(2, ds.network_config['version'])
+        self.assertTrue(ds.network_config['ethernets']['nics']['dhcp4'])
+
+    def test_get_data_cloudinit_metadata_not_valid(self):
+        """Test an error is raised when metadata is neither JSON nor YAML."""
+        paths = Paths({'cloud_dir': self.tdir})
+        ds = self.datasource(
+            sys_cfg={'disable_vmware_customization': True}, distro={},
+            paths=paths)
+
+        # Prepare the conf file
+        conf_file = self.tmp_path('test-cust', self.tdir)
+        conf_content = dedent("""\
+            [CLOUDINIT]
+            METADATA = test-meta
+            """)
+        util.write_file(conf_file, conf_content)
+
+        # Prepare the meta data file
+        metadata_file = self.tmp_path('test-meta', self.tdir)
+        metadata_content = "[This is not json or yaml format]a=b"
+        util.write_file(metadata_file, metadata_content)
+
+        with mock.patch(MPATH + 'set_customization_status',
+                        return_value=('msg', b'')):
+            with self.assertRaises(YAMLError) as context:
+                wrap_and_call(
+                    'cloudinit.sources.DataSourceOVF',
+                    {'dmi.read_dmi_data': 'vmware',
+                     'util.del_dir': True,
+                     'search_file': self.tdir,
+                     'wait_for_imc_cfg_file': conf_file,
+                     'collect_imc_file_paths': [
+                         self.tdir + '/test-meta', '', ''
+                     ],
+                     'get_nics_to_enable': ''},
+                    ds.get_data)
+
+        self.assertIn("expected '<document start>', but found '<scalar>'",
+                      str(context.exception))
+
+    def test_get_data_cloudinit_metadata_not_found(self):
+        """Test the metadata file can't be found."""
+        paths = Paths({'cloud_dir': self.tdir})
+        ds = self.datasource(
+            sys_cfg={'disable_vmware_customization': True}, distro={},
+            paths=paths)
+        # Prepare the conf file
+        conf_file = self.tmp_path('test-cust', self.tdir)
+        conf_content = dedent("""\
+            [CLOUDINIT]
+            METADATA = test-meta
+            """)
+        util.write_file(conf_file, conf_content)
+        # Don't prepare the meta data file
+
+        with mock.patch(MPATH + 'set_customization_status',
+                        return_value=('msg', b'')):
+            with self.assertRaises(FileNotFoundError) as context:
+                wrap_and_call(
+                    'cloudinit.sources.DataSourceOVF',
+                    {'dmi.read_dmi_data': 'vmware',
+                     'util.del_dir': True,
+                     'search_file': self.tdir,
+                     'wait_for_imc_cfg_file': conf_file,
+                     'get_nics_to_enable': ''},
+                    ds.get_data)
+
+        self.assertIn('is not found', str(context.exception))
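+
+    # Taken together, the three metadata tests above are consistent with the
+    # metadata file being parsed as YAML (JSON being a subset of YAML), which
+    # is why malformed content surfaces as a safeyaml.YAMLError rather than a
+    # JSON decoding error.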
+
+    def test_get_data_cloudinit_userdata(self):
+        """Test user data can be loaded to cloud-init user data."""
+        paths = Paths({'cloud_dir': self.tdir})
+        ds = self.datasource(
+            sys_cfg={'disable_vmware_customization': False}, distro={},
+            paths=paths)
+
+        # Prepare the conf file
+        conf_file = self.tmp_path('test-cust', self.tdir)
+        conf_content = dedent("""\
+            [CLOUDINIT]
+            METADATA = test-meta
+            USERDATA = test-user
+            """)
+        util.write_file(conf_file, conf_content)
+
+        # Prepare the meta data file
+        metadata_file = self.tmp_path('test-meta', self.tdir)
+        metadata_content = dedent("""\
+            instance-id: cloud-vm
+            local-hostname: my-host.domain.com
+            network:
+               version: 2
+               ethernets:
+                  nics:
+                     match:
+                        name: ens*
+                     dhcp4: yes
+            """)
+        util.write_file(metadata_file, metadata_content)
+
+        # Prepare the user data file
+        userdata_file = self.tmp_path('test-user', self.tdir)
+        userdata_content = "This is the user data"
+        util.write_file(userdata_file, userdata_content)
+
+        with mock.patch(MPATH + 'set_customization_status',
+                        return_value=('msg', b'')):
+            result = wrap_and_call(
+                'cloudinit.sources.DataSourceOVF',
+                {'dmi.read_dmi_data': 'vmware',
+                 'util.del_dir': True,
+                 'search_file': self.tdir,
+                 'wait_for_imc_cfg_file': conf_file,
+                 'collect_imc_file_paths': [self.tdir + '/test-meta',
+                                            self.tdir + '/test-user', ''],
+                 'get_nics_to_enable': ''},
+                ds._get_data)
+
+        self.assertTrue(result)
+        self.assertEqual("cloud-vm", ds.metadata['instance-id'])
+        self.assertEqual(userdata_content, ds.userdata_raw)
+
+    def test_get_data_cloudinit_userdata_not_found(self):
+        """Test the userdata file can't be found."""
+        paths = Paths({'cloud_dir': self.tdir})
+        ds = self.datasource(
+            sys_cfg={'disable_vmware_customization': True}, distro={},
+            paths=paths)
+
+        # Prepare the conf file
+        conf_file = self.tmp_path('test-cust', self.tdir)
+        conf_content = dedent("""\
+            [CLOUDINIT]
+            METADATA = test-meta
+            USERDATA = test-user
+            """)
+        util.write_file(conf_file, conf_content)
+
+        # Prepare the meta data file
+        metadata_file = self.tmp_path('test-meta', self.tdir)
+        metadata_content = dedent("""\
+            instance-id: cloud-vm
+            local-hostname: my-host.domain.com
+            network:
+               version: 2
+               ethernets:
+                  nics:
+                     match:
+                        name: ens*
+                     dhcp4: yes
+            """)
+        util.write_file(metadata_file, metadata_content)
+
+        # Don't prepare the user data file
+
+        with mock.patch(MPATH + 'set_customization_status',
+                        return_value=('msg', b'')):
+            with self.assertRaises(FileNotFoundError) as context:
+                wrap_and_call(
+                    'cloudinit.sources.DataSourceOVF',
+                    {'dmi.read_dmi_data': 'vmware',
+                     'util.del_dir': True,
+                     'search_file': self.tdir,
+                     'wait_for_imc_cfg_file': conf_file,
+                     'get_nics_to_enable': ''},
+                    ds.get_data)
+
+        self.assertIn('is not found', str(context.exception))
+
+
+class TestTransportIso9660(CiTestCase):
+
+    def setUp(self):
+        super(TestTransportIso9660, self).setUp()
+        self.add_patch('cloudinit.util.find_devs_with',
+                       'm_find_devs_with')
+        self.add_patch('cloudinit.util.mounts', 'm_mounts')
+        self.add_patch('cloudinit.util.mount_cb', 'm_mount_cb')
+        self.add_patch('cloudinit.sources.DataSourceOVF.get_ovf_env',
+                       'm_get_ovf_env')
+        self.m_get_ovf_env.return_value = ('myfile', 'mycontent')
+
+    def test_find_already_mounted(self):
+        """Check we call get_ovf_env on matching mounted devices"""
+        mounts = {
+            '/dev/sr9': {
+                'fstype': 'iso9660',
+                'mountpoint': 'wark/media/sr9',
+                'opts': 'ro',
+            }
+        }
+        self.m_mounts.return_value = mounts
+
+        self.assertEqual("mycontent", dsovf.transport_iso9660())
+
+    def test_find_already_mounted_skips_non_iso9660(self):
+        """Check we call get_ovf_env ignoring non iso9660"""
+        mounts = {
            '/dev/xvdb': {
+                'fstype': 'vfat',
+                'mountpoint': 'wark/foobar',
+                'opts': 'defaults,noatime',
+            },
+            '/dev/xvdc': {
+                'fstype': 'iso9660',
+                'mountpoint': 'wark/media/sr9',
+                'opts': 'ro',
+            }
+        }
+        # We use an OrderedDict here to ensure we check xvdb before xvdc
+        # as we're not mocking the regex matching, however, if we place
+        # an entry in the results then we can be reasonably sure that
+        # we're skipping an entry which fails to match.
+        self.m_mounts.return_value = (
+            OrderedDict(sorted(mounts.items(), key=lambda t: t[0])))
+
+        self.assertEqual("mycontent", dsovf.transport_iso9660())
+
+    def test_find_already_mounted_matches_kname(self):
+        """Check we don't regex match on basename of the device"""
+        mounts = {
+            '/dev/foo/bar/xvdc': {
+                'fstype': 'iso9660',
+                'mountpoint': 'wark/media/sr9',
+                'opts': 'ro',
+            }
+        }
+        # we're skipping an entry which fails to match.
+        self.m_mounts.return_value = mounts
+
+        self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
+
+    def test_mount_cb_called_on_blkdevs_with_iso9660(self):
+        """Check we call mount_cb on blockdevs with iso9660 only"""
+        self.m_mounts.return_value = {}
+        self.m_find_devs_with.return_value = ['/dev/sr0']
+        self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+        self.assertEqual("mycontent", dsovf.transport_iso9660())
+        self.m_mount_cb.assert_called_with(
+            "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660")
+
+    def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self):
+        """Check we call mount_cb on blockdevs with iso9660 and match regex"""
+        self.m_mounts.return_value = {}
+        self.m_find_devs_with.return_value = [
+            '/dev/abc', '/dev/my-cdrom', '/dev/sr0']
+        self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+        self.assertEqual("mycontent", dsovf.transport_iso9660())
+        self.m_mount_cb.assert_called_with(
+            "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660")
+
+    def test_mount_cb_not_called_no_matches(self):
+        """Check we don't call mount_cb if nothing matches"""
+        self.m_mounts.return_value = {}
+        self.m_find_devs_with.return_value = ['/dev/vg/myovf']
+
+        self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
+        self.assertEqual(0, self.m_mount_cb.call_count)
+
+    def test_mount_cb_called_require_iso_false(self):
+        """Check we call mount_cb on blockdevs with require_iso=False"""
+        self.m_mounts.return_value = {}
+        self.m_find_devs_with.return_value = ['/dev/xvdz']
+        self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+        self.assertEqual(
+            "mycontent", dsovf.transport_iso9660(require_iso=False))
+
+        self.m_mount_cb.assert_called_with(
+            "/dev/xvdz", dsovf.get_ovf_env, mtype=None)
+
+    def test_maybe_cdrom_device_none(self):
+        """Test maybe_cdrom_device returns False for none/empty input"""
+        self.assertFalse(dsovf.maybe_cdrom_device(None))
+        self.assertFalse(dsovf.maybe_cdrom_device(''))
+
+    def test_maybe_cdrom_device_non_string_exception(self):
+        """Test maybe_cdrom_device raises ValueError on non-string types"""
+        with self.assertRaises(ValueError):
+            dsovf.maybe_cdrom_device({'a': 'eleven'})
+
+    def test_maybe_cdrom_device_false_on_multi_dir_paths(self):
+        """Test maybe_cdrom_device is false on /dev[/.*]/* paths"""
+        self.assertFalse(dsovf.maybe_cdrom_device('/dev/foo/sr0'))
+        self.assertFalse(dsovf.maybe_cdrom_device('foo/sr0'))
+        self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0'))
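+
+    # Read together, the maybe_cdrom_device() tests above and below imply
+    # that paths are normalized first, then the /dev basename must look like
+    # a CD-ROM-capable device (roughly sr[0-9]+, hd[a-z][0-9]*, or
+    # xvd[a-z]+[0-9]*, with no extra directory components). This summarizes
+    # the assertions, not the implementation.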
+
+    def test_maybe_cdrom_device_true_on_hd_partitions(self):
+        """Test maybe_cdrom_device is true on /dev/hd[a-z][0-9]+ paths"""
+        self.assertTrue(dsovf.maybe_cdrom_device('/dev/hda1'))
+        self.assertTrue(dsovf.maybe_cdrom_device('hdz9'))
+
+    def test_maybe_cdrom_device_true_on_valid_relative_paths(self):
+        """Test maybe_cdrom_device normalizes paths"""
+        self.assertTrue(dsovf.maybe_cdrom_device('/dev/wark/../sr9'))
+        self.assertTrue(dsovf.maybe_cdrom_device('///sr0'))
+        self.assertTrue(dsovf.maybe_cdrom_device('/sr0'))
+        self.assertTrue(dsovf.maybe_cdrom_device('//dev//hda'))
+
+    def test_maybe_cdrom_device_true_on_xvd_partitions(self):
+        """Test maybe_cdrom_device returns true on xvd*"""
+        self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda'))
+        self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda1'))
+        self.assertTrue(dsovf.maybe_cdrom_device('xvdza1'))
+
+
+@mock.patch(MPATH + "subp.which")
+@mock.patch(MPATH + "subp.subp")
+class TestTransportVmwareGuestinfo(CiTestCase):
+    """Test the com.vmware.guestInfo transport implemented in
+       transport_vmware_guestinfo."""
+
+    rpctool = 'vmware-rpctool'
+    with_logs = True
+    rpctool_path = '/not/important/vmware-rpctool'
+
+    def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which):
+        m_which.return_value = None
+        self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+        self.assertEqual(0, m_subp.call_count,
+                         "subp should not be called if no rpctool in path.")
+
+    def test_notfound_on_exit_code_1(self, m_subp, m_which):
+        """If vmware-rpctool exits 1, then must return not found."""
+        m_which.return_value = self.rpctool_path
+        m_subp.side_effect = subp.ProcessExecutionError(
+            stdout="", stderr="No value found", exit_code=1, cmd=["unused"])
+        self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+        self.assertEqual(1, m_subp.call_count)
+        self.assertNotIn("WARNING", self.logs.getvalue(),
+                         "exit code of 1 by rpctool should not cause warning.")
+
+    def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which):
+        """If vmware-rpctool exits 0 with no stdout, treat as not found.
+
+        This isn't actually a case I've seen. Normally on "not found",
+        rpctool would exit 1 with 'No value found' on stderr. But cover
+        the case where it exited 0 and just wrote nothing to stdout.
+ """ + m_which.return_value = self.rpctool_path + m_subp.return_value = ('', '') + self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) + self.assertEqual(1, m_subp.call_count) + + def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which): + """If vmware-rpctool exits non zero or 1, warnings should be logged.""" + m_which.return_value = self.rpctool_path + m_subp.side_effect = subp.ProcessExecutionError( + stdout=None, stderr="No value found", exit_code=2, cmd=["unused"]) + self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) + self.assertEqual(1, m_subp.call_count) + self.assertIn("WARNING", self.logs.getvalue(), + "exit code of 2 by rpctool should log WARNING.") + + def test_found_when_guestinfo_present(self, m_subp, m_which): + """When there is a ovf info, transport should return it.""" + m_which.return_value = self.rpctool_path + content = fill_properties({}) + m_subp.return_value = (content, '') + self.assertEqual(content, dsovf.transport_vmware_guestinfo()) + self.assertEqual(1, m_subp.call_count) + +# +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_rbx.py b/tests/unittests/sources/test_rbx.py new file mode 100644 index 00000000..c1294c92 --- /dev/null +++ b/tests/unittests/sources/test_rbx.py @@ -0,0 +1,238 @@ +import json + +from cloudinit import helpers +from cloudinit import distros +from cloudinit.sources import DataSourceRbxCloud as ds +from tests.unittests.helpers import mock, CiTestCase, populate_dir +from cloudinit import subp + +DS_PATH = "cloudinit.sources.DataSourceRbxCloud" + +CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \ + "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \ + "tToyGP41.s1" + +CLOUD_METADATA = { + "vm": { + "memory": 4, + "cpu": 2, + "name": "vm-image-builder", + "_id": "5beab44f680cffd11f0e60fc" + }, + "additionalMetadata": { + "username": "guru", + "sshKeys": ["ssh-rsa ..."], + "password": { + "sha512": CRYPTO_PASS + } + }, + "disk": [ + {"size": 10, "type": "ssd", + "name": "vm-image-builder-os", + "_id": "5beab450680cffd11f0e60fe"}, + {"size": 2, "type": "ssd", + "name": "ubuntu-1804-bionic", + "_id": "5bef002c680cffd11f107590"} + ], + "netadp": [ + { + "ip": [{"address": "62.181.8.174"}], + "network": { + "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, + "routing": [], + "gateway": "62.181.8.1", + "netmask": "255.255.248.0", + "name": "public", + "type": "public", + "_id": "5784e97be2627505227b578c" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:03", + "_id": "5beab450680cffd11f0e6102" + }, + { + "ip": [{"address": "10.209.78.11"}], + "network": { + "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]}, + "routing": [], + "gateway": "10.209.78.1", + "netmask": "255.255.255.0", + "name": "network-determined-bardeen", + "type": "private", + "_id": "5beaec64680cffd11f0e7c31" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:24", + "_id": "5bec18c6680cffd11f0f0d8b" + } + ], + "dvddrive": [{"iso": {}}] +} + + +class TestRbxDataSource(CiTestCase): + parsed_user = None + allowed_subp = ['bash'] + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def setUp(self): + super(TestRbxDataSource, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp} + ) + + # defaults for few tests + self.ds = ds.DataSourceRbxCloud + self.seed_dir = self.paths.seed_dir + self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}} + + def 
test_seed_read_user_data_callback_empty_file(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, {}) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertIsNone(results) + + def test_seed_read_user_data_callback_valid_disk(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertTrue('metadata' in results) + self.assertTrue('cfg' in results) + + def test_seed_read_user_data_callback_userdata(self): + userdata = "#!/bin/sh\nexit 1" + populate_user_metadata(self.seed_dir, userdata) + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertEqual(results['userdata'], userdata) + + def test_generate_network_config(self): + expected = { + 'version': 1, + 'config': [ + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], + 'netmask': '255.255.248.0', + 'address': '62.181.8.174', + 'type': 'static', 'gateway': '62.181.8.1'} + ], + 'type': 'physical', + 'name': 'eth0', + 'mac_address': '00:15:5d:ff:0f:03' + }, + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['9.9.9.9', '8.8.8.8'], + 'netmask': '255.255.255.0', + 'address': '10.209.78.11', + 'type': 'static', + 'gateway': '10.209.78.1'} + ], + 'type': 'physical', + 'name': 'eth1', + 'mac_address': '00:15:5d:ff:0f:24' + } + ] + } + self.assertTrue( + ds.generate_network_config(CLOUD_METADATA['netadp']), + expected + ) + + @mock.patch(DS_PATH + '.subp.subp') + def test_gratuitous_arp_run_standard_arping(self, m_subp): + """Test handle run arping & parameters.""" + items = [ + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104' + }, + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104', + }, + ] + ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) + self.assertEqual([ + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]), + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]) + ], m_subp.call_args_list + ) + + @mock.patch(DS_PATH + '.subp.subp') + def test_handle_rhel_like_arping(self, m_subp): + """Test handle on RHEL-like distros.""" + items = [ + { + 'source': '172.16.6.104', + 'destination': '172.17.0.2', + } + ] + ds.gratuitous_arp(items, self._fetch_distro('fedora')) + self.assertEqual([ + mock.call( + ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2'] + )], + m_subp.call_args_list + ) + + @mock.patch( + DS_PATH + '.subp.subp', + side_effect=subp.ProcessExecutionError() + ) + def test_continue_on_arping_error(self, m_subp): + """Continue when command error""" + items = [ + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104' + }, + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104', + }, + ] + ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) + self.assertEqual([ + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]), + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]) + ], m_subp.call_args_list + ) + + +def populate_cloud_metadata(path, data): + populate_dir(path, {'cloud.json': json.dumps(data)}) + + +def populate_user_metadata(path, data): + populate_dir(path, {'user.data': data}) diff --git a/tests/unittests/sources/test_scaleway.py 
b/tests/unittests/sources/test_scaleway.py new file mode 100644 index 00000000..33ae26b8 --- /dev/null +++ b/tests/unittests/sources/test_scaleway.py @@ -0,0 +1,473 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import json + +import httpretty +import requests + +from cloudinit import helpers +from cloudinit import settings +from cloudinit import sources +from cloudinit.sources import DataSourceScaleway + +from tests.unittests.helpers import mock, HttprettyTestCase, CiTestCase + + +class DataResponses(object): + """ + Possible responses of the API endpoint + 169.254.42.42/user_data/cloud-init and + 169.254.42.42/vendor_data/cloud-init. + """ + + FAKE_USER_DATA = '#!/bin/bash\necho "user-data"' + + @staticmethod + def rate_limited(method, uri, headers): + return 429, headers, '' + + @staticmethod + def api_error(method, uri, headers): + return 500, headers, '' + + @classmethod + def get_ok(cls, method, uri, headers): + return 200, headers, cls.FAKE_USER_DATA + + @staticmethod + def empty(method, uri, headers): + """ + No user data for this server. + """ + return 404, headers, '' + + +class MetadataResponses(object): + """ + Possible responses of the metadata API. + """ + + FAKE_METADATA = { + 'id': '00000000-0000-0000-0000-000000000000', + 'hostname': 'scaleway.host', + 'tags': [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ], + 'ssh_public_keys': [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + } + + @classmethod + def get_ok(cls, method, uri, headers): + return 200, headers, json.dumps(cls.FAKE_METADATA) + + +class TestOnScaleway(CiTestCase): + + def setUp(self): + super(TestOnScaleway, self).setUp() + self.tmp = self.tmp_dir() + + def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): + mock, faked = fake_dmi + mock.return_value = 'Scaleway' if faked else 'Whatever' + + mock, faked = fake_file_exists + mock.return_value = faked + + mock, faked = fake_cmdline + mock.return_value = \ + 'initrd=initrd showopts scaleway nousb' if faked \ + else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic' + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertFalse(DataSourceScaleway.on_scaleway()) + + # When not on Scaleway, get_data() returns False. + datasource = DataSourceScaleway.DataSourceScaleway( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}) + ) + self.assertFalse(datasource.get_data()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + dmidecode returns "Scaleway". 
+ """ + # dmidecode returns "Scaleway" + self.install_mocks( + fake_dmi=(m_read_dmi_data, True), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + /var/run/scaleway exists. + """ + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, True), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + "scaleway" in /proc/cmdline. + """ + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, True) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + +def get_source_address_adapter(*args, **kwargs): + """ + Scaleway user/vendor data API requires to be called with a privileged port. + + If the unittests are run as non-root, the user doesn't have the permission + to bind on ports below 1024. + + This function removes the bind on a privileged address, since anyway the + HTTP call is mocked by httpretty. + """ + kwargs.pop('source_address') + return requests.adapters.HTTPAdapter(*args, **kwargs) + + +class TestDataSourceScaleway(HttprettyTestCase): + + def setUp(self): + tmp = self.tmp_dir() + self.datasource = DataSourceScaleway.DataSourceScaleway( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp}) + ) + super(TestDataSourceScaleway, self).setUp() + + self.metadata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url'] + self.userdata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url'] + self.vendordata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] + + self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway', + '_m_on_scaleway', return_value=True) + self.add_patch( + 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic', + '_m_find_fallback_nic', return_value='scalewaynic0') + + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4): + """ + get_data() returns metadata, user data and vendor data. 
+ """ + m_get_cmdline.return_value = 'scaleway' + + # Make user data API return a valid response + httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.userdata_url, + body=DataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.get_ok) + self.datasource.get_data() + + self.assertEqual(self.datasource.get_instance_id(), + MetadataResponses.FAKE_METADATA['id']) + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) + self.assertEqual(self.datasource.get_hostname(), + MetadataResponses.FAKE_METADATA['hostname']) + self.assertEqual(self.datasource.get_userdata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertEqual(self.datasource.get_vendordata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertIsNone(self.datasource.availability_zone) + self.assertIsNone(self.datasource.region) + self.assertEqual(sleep.call_count, 0) + + def test_ssh_keys_empty(self): + """ + get_public_ssh_keys() should return empty list if no ssh key are + available + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(self.datasource.get_public_ssh_keys(), []) + + def test_ssh_keys_only_tags(self): + """ + get_public_ssh_keys() should return list of keys available in tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC", + ] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + ].sort()) + + def test_ssh_keys_only_conf(self): + """ + get_public_ssh_keys() should return list of keys available in + ssh_public_keys field + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) + + def test_ssh_keys_both(self): + """ + get_public_ssh_keys() should return a merge of keys available + in ssh_public_keys and tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ] + + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... 
login2 (RSA)' + }] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) + + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4): + """ + get_data() returns metadata, but no user data nor vendor data. + """ + m_get_cmdline.return_value = 'scaleway' + + # Make user and vendor data APIs return HTTP/404, which means there is + # no user / vendor data for the server. + httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.userdata_url, + body=DataResponses.empty) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.empty) + self.datasource.get_data() + self.assertIsNone(self.datasource.get_userdata_raw()) + self.assertIsNone(self.datasource.get_vendordata_raw()) + self.assertEqual(sleep.call_count, 0) + + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4): + """ + get_data() is rate limited two times by the metadata API when fetching + user data. + """ + m_get_cmdline.return_value = 'scaleway' + + httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.empty) + + httpretty.register_uri( + httpretty.GET, self.userdata_url, + responses=[ + httpretty.Response(body=DataResponses.rate_limited), + httpretty.Response(body=DataResponses.rate_limited), + httpretty.Response(body=DataResponses.get_ok), + ] + ) + self.datasource.get_data() + self.assertEqual(self.datasource.get_userdata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertEqual(sleep.call_count, 2) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_ok(self, m_get_cmdline, fallback_nic): + """ + network_config will only generate IPv4 config if no ipv6 data is + available in the metadata + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + + netcfg = self.datasource.network_config + resp = { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}] + } + ] + } + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic): + """ + network_config will only generate IPv4/v6 configs if ipv6 data is + available in the metadata + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = { + 'address': '2000:abc:4444:9876::42:999', + 'gateway': '2000:abc:4444:9876::42:000', + 'netmask': '127', + } + + netcfg = self.datasource.network_config + resp = { + 'version': 1, + 'config': [ + { + 'type': 
'physical', + 'name': 'ens2', + 'subnets': [ + { + 'type': 'dhcp4' + }, + { + 'type': 'static', + 'address': '2000:abc:4444:9876::42:999', + 'gateway': '2000:abc:4444:9876::42:000', + 'netmask': '127', + } + ] + } + ] + } + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_existing(self, m_get_cmdline, fallback_nic): + """ + network_config() should return the same data if a network config + already exists + """ + m_get_cmdline.return_value = 'scaleway' + self.datasource._network_config = '0xdeadbeef' + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, '0xdeadbeef') + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_unset(self, m_get_cmdline, fallback_nic): + """ + _network_config will be set to sources.UNSET after the first boot. + Make sure it behave correctly. + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + self.datasource._network_config = sources.UNSET + + resp = { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}] + } + ] + } + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning') + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_cached_none(self, m_get_cmdline, fallback_nic, + logwarning): + """ + network_config() should return config data if cached data is None + rather than sources.UNSET + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + self.datasource._network_config = None + + resp = { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}] + } + ] + } + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, resp) + logwarning.assert_called_with('Found None as cached _network_config. ' + 'Resetting to %s', sources.UNSET) diff --git a/tests/unittests/sources/test_smartos.py b/tests/unittests/sources/test_smartos.py new file mode 100644 index 00000000..e306eded --- /dev/null +++ b/tests/unittests/sources/test_smartos.py @@ -0,0 +1,1163 @@ +# Copyright (C) 2013 Canonical Ltd. +# Copyright 2019 Joyent, Inc. +# +# Author: Ben Howard +# +# This file is part of cloud-init. See LICENSE file for license information. + +'''This is a testcase for the SmartOS datasource. + +It replicates a serial console and acts like the SmartOS console does in +order to validate return responses. 
+ +''' + +from binascii import crc32 +import json +import multiprocessing +import os +import os.path +import re +import signal +import stat +import unittest +import uuid + +from cloudinit import serial +from cloudinit.sources import DataSourceSmartOS +from cloudinit.sources.DataSourceSmartOS import ( + convert_smartos_network_data as convert_net, + SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, + identify_file) +from cloudinit.event import EventScope, EventType + +from cloudinit import helpers as c_helpers +from cloudinit.util import (b64e, write_file) +from cloudinit.subp import (subp, ProcessExecutionError, which) + +from tests.unittests.helpers import ( + CiTestCase, mock, FilesystemMockingTestCase, skipIf) + + +try: + import serial as _pyserial + assert _pyserial # avoid pyflakes error F401: import unused + HAS_PYSERIAL = True +except ImportError: + HAS_PYSERIAL = False + +DSMOS = 'cloudinit.sources.DataSourceSmartOS' +SDC_NICS = json.loads(""" +[ + { + "nic_tag": "external", + "primary": true, + "mtu": 1500, + "model": "virtio", + "gateway": "8.12.42.1", + "netmask": "255.255.255.0", + "ip": "8.12.42.102", + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "gateways": [ + "8.12.42.1" + ], + "vlan_id": 324, + "mac": "90:b8:d0:f5:e4:f5", + "interface": "net0", + "ips": [ + "8.12.42.102/24" + ] + }, + { + "nic_tag": "sdc_overlay/16187209", + "gateway": "192.168.128.1", + "model": "virtio", + "mac": "90:b8:d0:a5:ff:cd", + "netmask": "255.255.252.0", + "ip": "192.168.128.93", + "network_uuid": "4cad71da-09bc-452b-986d-03562a03a0a9", + "gateways": [ + "192.168.128.1" + ], + "vlan_id": 2, + "mtu": 8500, + "interface": "net1", + "ips": [ + "192.168.128.93/22" + ] + } +] +""") + + +SDC_NICS_ALT = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_DHCP = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "dhcp" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_MIP = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24", + "8.12.42.52/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + 
"mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24", + "10.210.1.151/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_MIP_IPV6 = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64", + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_IPV4_IPV6 = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": ["8.12.42.1", "2001::1", "2001::2"], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": ["2001::10/64", "8.12.42.51/24", "2001::11/64", + "8.12.42.52/32"], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": ["10.210.1.217/24"], + "gateways": ["10.210.1.210"], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_SINGLE_GATEWAY = json.loads(""" +[ + { + "interface":"net0", + "mac":"90:b8:d0:d8:82:b4", + "vlan_id":324, + "nic_tag":"external", + "gateway":"8.12.42.1", + "gateways":["8.12.42.1"], + "netmask":"255.255.255.0", + "ip":"8.12.42.26", + "ips":["8.12.42.26/24"], + "network_uuid":"992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model":"virtio", + "mtu":1500, + "primary":true + }, + { + "interface":"net1", + "mac":"90:b8:d0:0a:51:31", + "vlan_id":600, + "nic_tag":"internal", + "netmask":"255.255.255.0", + "ip":"10.210.1.27", + "ips":["10.210.1.27/24"], + "network_uuid":"98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model":"virtio", + "mtu":1500 + } +] +""") + + +MOCK_RETURNS = { + 'hostname': 'test-host', + 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', + 'disable_iptables_flag': None, + 'enable_motd_sys_info': None, + 'test-var1': 'some data', + 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), + 'sdc:datacenter_name': 'somewhere2', + 'sdc:operator-script': '\n'.join(['bin/true', '']), + 'sdc:uuid': str(uuid.uuid4()), + 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']), + 'user-data': '\n'.join(['something', '']), + 'user-script': '\n'.join(['/bin/true', '']), + 'sdc:nics': json.dumps(SDC_NICS), +} + +DMI_DATA_RETURN = 'smartdc' + +# Useful for calculating the length of a frame body. A SUCCESS body will be +# followed by more characters or be one character less if SUCCESS with no +# payload. See Section 4.3 of https://eng.joyent.com/mdata/protocol.html. 
+SUCCESS_LEN = len('0123abcd SUCCESS ') +NOTFOUND_LEN = len('0123abcd NOTFOUND') + + +class PsuedoJoyentClient(object): + def __init__(self, data=None): + if data is None: + data = MOCK_RETURNS.copy() + self.data = data + self._is_open = False + return + + def get(self, key, default=None, strip=False): + if key in self.data: + r = self.data[key] + if strip: + r = r.strip() + else: + r = default + return r + + def get_json(self, key, default=None): + result = self.get(key, default=default) + if result is None: + return default + return json.loads(result) + + def exists(self): + return True + + def open_transport(self): + assert(not self._is_open) + self._is_open = True + + def close_transport(self): + assert(self._is_open) + self._is_open = False + + +class TestSmartOSDataSource(FilesystemMockingTestCase): + jmc_cfact = None + get_smartos_environ = None + + def setUp(self): + super(TestSmartOSDataSource, self).setUp() + + self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ") + self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact") + self.legacy_user_d = self.tmp_path('legacy_user_tmp') + os.mkdir(self.legacy_user_d) + self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d", + autospec=False, new=self.legacy_user_d) + self.add_patch(DSMOS + ".identify_file", "m_identify_file", + return_value="text/plain") + + def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM, + sys_cfg=None, ds_cfg=None): + self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata) + self.get_smartos_environ.return_value = mode + + tmpd = self.tmp_dir() + dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd), + 'run_dir': self.tmp_path('run_dir')} + for d in dirs.values(): + os.mkdir(d) + paths = c_helpers.Paths(dirs) + + if sys_cfg is None: + sys_cfg = {} + + if ds_cfg is not None: + sys_cfg['datasource'] = sys_cfg.get('datasource', {}) + sys_cfg['datasource']['SmartOS'] = ds_cfg + + return DataSourceSmartOS.DataSourceSmartOS( + sys_cfg, distro=None, paths=paths) + + def test_no_base64(self): + ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} + dsrc = self._get_ds(ds_cfg=ds_cfg) + ret = dsrc.get_data() + self.assertTrue(ret) + + def test_uuid(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['sdc:uuid'], + dsrc.metadata['instance-id']) + + def test_platform_info(self): + """All platform-related attributes are properly set.""" + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + self.assertEqual('joyent', dsrc.cloud_name) + self.assertEqual('joyent', dsrc.platform_type) + self.assertEqual('serial (/dev/ttyS1)', dsrc.subplatform) + + def test_root_keys(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['root_authorized_keys'], + dsrc.metadata['public-keys']) + + def test_hostname_b64(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + + def test_hostname(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + + def test_hostname_if_no_sdc_hostname(self): + my_returns = MOCK_RETURNS.copy() + my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + 
self.assertEqual(my_returns['hostname'],
+                         dsrc.metadata['local-hostname'])
+
+    def test_sdc_hostname_if_no_hostname(self):
+        my_returns = MOCK_RETURNS.copy()
+        my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
+        del my_returns['hostname']
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(my_returns['sdc:hostname'],
+                         dsrc.metadata['local-hostname'])
+
+    def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self):
+        my_returns = MOCK_RETURNS.copy()
+        del my_returns['hostname']
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(my_returns['sdc:uuid'],
+                         dsrc.metadata['local-hostname'])
+
+    def test_userdata(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(MOCK_RETURNS['user-data'],
+                         dsrc.metadata['legacy-user-data'])
+        self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
+                         dsrc.userdata_raw)
+
+    def test_sdc_nics(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']),
+                         dsrc.metadata['network-data'])
+
+    def test_sdc_scripts(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(MOCK_RETURNS['user-script'],
+                         dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        print("legacy_script_f=%s" % legacy_script_f)
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+        self.assertEqual(user_script_perm, '700')
+
+    def test_scripts_shebanged(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(MOCK_RETURNS['user-script'],
+                         dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        shebang = None
+        with open(legacy_script_f, 'r') as f:
+            shebang = f.readlines()[0].strip()
+        self.assertEqual(shebang, "#!/bin/bash")
+        user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+        self.assertEqual(user_script_perm, '700')
+
+    def test_scripts_shebang_not_added(self):
+        """
+        Test that a shebang is not added to a script that already has one.
+        SmartOS requires that plain text scripts be executable; cloud-init
+        only prepends a shebang to plain text scripts that lack file magic.
+        """
+
+        my_returns = MOCK_RETURNS.copy()
+        my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
+                                               'print("hi")', ''])
+
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(my_returns['user-script'],
+                         dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        shebang = None
+        with open(legacy_script_f, 'r') as f:
+            shebang = f.readlines()[0].strip()
+        self.assertEqual(shebang, "#!/usr/bin/perl")
+
+    def test_userdata_removed(self):
+        """
+        User-data in the SmartOS world is supposed to be written to a file
+        each and every boot. This test makes sure that when the legacy
+        user-data key is removed, the existing user-data file is backed up
+        and no /var/db/user-data is left behind.
+ """ + + user_data_f = "%s/mdata-user-data" % self.legacy_user_d + with open(user_data_f, 'w') as f: + f.write("PREVIOUS") + + my_returns = MOCK_RETURNS.copy() + del my_returns['user-data'] + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertFalse(dsrc.metadata.get('legacy-user-data')) + + found_new = False + for root, _dirs, files in os.walk(self.legacy_user_d): + for name in files: + name_f = os.path.join(root, name) + permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:] + if re.match(r'.*\/mdata-user-data$', name_f): + found_new = True + print(name_f) + self.assertEqual(permissions, '400') + + self.assertFalse(found_new) + + def test_vendor_data_not_default(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['sdc:vendor-data'], + dsrc.metadata['vendor-data']) + + def test_default_vendor_data(self): + my_returns = MOCK_RETURNS.copy() + def_op_script = my_returns['sdc:vendor-data'] + del my_returns['sdc:vendor-data'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertNotEqual(def_op_script, dsrc.metadata['vendor-data']) + + # we expect default vendor-data is a boothook + self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook")) + + def test_disable_iptables_flag(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['disable_iptables_flag'], + dsrc.metadata['iptables_disable']) + + def test_motd_sys_info(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'], + dsrc.metadata['motd_sys_info']) + + def test_default_ephemeral(self): + # Test to make sure that the builtin config has the ephemeral + # configuration. 
+        dsrc = self._get_ds()
+        cfg = dsrc.get_config_obj()
+
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+
+        assert 'disk_setup' in cfg
+        assert 'fs_setup' in cfg
+        self.assertIsInstance(cfg['disk_setup'], dict)
+        self.assertIsInstance(cfg['fs_setup'], list)
+
+    def test_override_disk_aliases(self):
+        # Test to make sure that the built-in DS is overridden
+        builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
+
+        mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}}
+
+        # expect that these values are in builtin, or this is pointless
+        for k in mydscfg:
+            self.assertIn(k, builtin)
+
+        dsrc = self._get_ds(ds_cfg=mydscfg)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+
+        self.assertEqual(mydscfg['disk_aliases']['FOO'],
+                         dsrc.ds_cfg['disk_aliases']['FOO'])
+
+        self.assertEqual(dsrc.device_name_to_device('FOO'),
+                         mydscfg['disk_aliases']['FOO'])
+
+    def test_reconfig_network_on_boot(self):
+        # Test to ensure that network is configured from metadata on each boot
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        self.assertSetEqual(
+            {EventType.BOOT_NEW_INSTANCE,
+             EventType.BOOT,
+             EventType.BOOT_LEGACY},
+            dsrc.default_update_events[EventScope.NETWORK]
+        )
+
+
+class TestIdentifyFile(CiTestCase):
+    """Test the 'identify_file' utility."""
+    @skipIf(not which("file"), "command 'file' not available.")
+    def test_file_happy_path(self):
+        """Test that 'file' is available and functional on plain text."""
+        fname = self.tmp_path("myfile")
+        write_file(fname, "plain text content here\n")
+        with self.allow_subp(["file"]):
+            self.assertEqual("text/plain", identify_file(fname))
+
+    @mock.patch(DSMOS + ".subp.subp")
+    def test_returns_none_on_error(self, m_subp):
+        """On 'file' execution error, None should be returned."""
+        m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99)
+        fname = self.tmp_path("myfile")
+        write_file(fname, "plain text content here\n")
+        self.assertIsNone(identify_file(fname))
+        self.assertEqual(
+            [mock.call(["file", "--brief", "--mime-type", fname])],
+            m_subp.call_args_list)
+
+
+class ShortReader(object):
+    """Implements a 'read' interface for the bytes provided,
+    much like io.BytesIO, but 'endbyte' acts as EOF.
+ When it is reached a short will be returned.""" + def __init__(self, initial_bytes, endbyte=b'\0'): + self.data = initial_bytes + self.index = 0 + self.len = len(self.data) + self.endbyte = endbyte + + @property + def emptied(self): + return self.index >= self.len + + def read(self, size=-1): + """Read size bytes but not past a null.""" + if size == 0 or self.index >= self.len: + return b'' + + rsize = size + if size < 0 or size + self.index > self.len: + rsize = self.len - self.index + + next_null = self.data.find(self.endbyte, self.index, rsize) + if next_null >= 0: + rsize = next_null - self.index + 1 + i = self.index + self.index += rsize + ret = self.data[i:i + rsize] + if len(ret) and ret[-1:] == self.endbyte: + ret = ret[:-1] + return ret + + +class TestJoyentMetadataClient(FilesystemMockingTestCase): + + invalid = b'invalid command\n' + failure = b'FAILURE\n' + v2_ok = b'V2_OK\n' + + def setUp(self): + super(TestJoyentMetadataClient, self).setUp() + + self.serial = mock.MagicMock(spec=serial.Serial) + self.request_id = 0xabcdef12 + self.metadata_value = 'value' + self.response_parts = { + 'command': 'SUCCESS', + 'crc': 'b5a9ff00', + 'length': SUCCESS_LEN + len(b64e(self.metadata_value)), + 'payload': b64e(self.metadata_value), + 'request_id': '{0:08x}'.format(self.request_id), + } + + def make_response(): + payloadstr = '' + if 'payload' in self.response_parts: + payloadstr = ' {0}'.format(self.response_parts['payload']) + return ('V2 {length} {crc} {request_id} ' + '{command}{payloadstr}\n'.format( + payloadstr=payloadstr, + **self.response_parts).encode('ascii')) + + self.metasource_data = None + + def read_response(length): + if not self.metasource_data: + self.metasource_data = make_response() + self.metasource_data_len = len(self.metasource_data) + resp = self.metasource_data[:length] + self.metasource_data = self.metasource_data[length:] + return resp + + self.serial.read.side_effect = read_response + self.patched_funcs.enter_context( + mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint', + mock.Mock(return_value=self.request_id))) + + def _get_client(self): + return DataSourceSmartOS.JoyentMetadataClient( + fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM) + + def _get_serial_client(self): + self.serial.timeout = 1 + return DataSourceSmartOS.JoyentMetadataSerialClient(None, + fp=self.serial) + + def assertEndsWith(self, haystack, prefix): + self.assertTrue(haystack.endswith(prefix), + "{0} does not end with '{1}'".format( + repr(haystack), prefix)) + + def assertStartsWith(self, haystack, prefix): + self.assertTrue(haystack.startswith(prefix), + "{0} does not start with '{1}'".format( + repr(haystack), prefix)) + + def assertNoMoreSideEffects(self, obj): + self.assertRaises(StopIteration, obj) + + def test_get_metadata_writes_a_single_line(self): + client = self._get_client() + client.get('some_key') + self.assertEqual(1, self.serial.write.call_count) + written_line = self.serial.write.call_args[0][0] + self.assertEndsWith(written_line.decode('ascii'), + b'\n'.decode('ascii')) + self.assertEqual(1, written_line.count(b'\n')) + + def _get_written_line(self, key='some_key'): + client = self._get_client() + client.get(key) + return self.serial.write.call_args[0][0] + + def test_get_metadata_writes_bytes(self): + self.assertIsInstance(self._get_written_line(), bytes) + + def test_get_metadata_line_starts_with_v2(self): + foo = self._get_written_line() + self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii')) + + def 
test_get_metadata_uses_get_command(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + self.assertEqual('GET', parts[4]) + + def test_get_metadata_base64_encodes_argument(self): + key = 'my_key' + parts = self._get_written_line(key).decode('ascii').strip().split(' ') + self.assertEqual(b64e(key), parts[5]) + + def test_get_metadata_calculates_length_correctly(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + expected_length = len(' '.join(parts[3:])) + self.assertEqual(expected_length, int(parts[1])) + + def test_get_metadata_uses_appropriate_request_id(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + request_id = parts[3] + self.assertEqual(8, len(request_id)) + self.assertEqual(request_id, request_id.lower()) + + def test_get_metadata_uses_random_number_for_request_id(self): + line = self._get_written_line() + request_id = line.decode('ascii').strip().split(' ')[3] + self.assertEqual('{0:08x}'.format(self.request_id), request_id) + + def test_get_metadata_checksums_correctly(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + expected_checksum = '{0:08x}'.format( + crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff) + checksum = parts[2] + self.assertEqual(expected_checksum, checksum) + + def test_get_metadata_reads_a_line(self): + client = self._get_client() + client.get('some_key') + self.assertEqual(self.metasource_data_len, self.serial.read.call_count) + + def test_get_metadata_returns_valid_value(self): + client = self._get_client() + value = client.get('some_key') + self.assertEqual(self.metadata_value, value) + + def test_get_metadata_throws_exception_for_incorrect_length(self): + self.response_parts['length'] = 0 + client = self._get_client() + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get, 'some_key') + + def test_get_metadata_throws_exception_for_incorrect_crc(self): + self.response_parts['crc'] = 'deadbeef' + client = self._get_client() + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get, 'some_key') + + def test_get_metadata_throws_exception_for_request_id_mismatch(self): + self.response_parts['request_id'] = 'deadbeef' + client = self._get_client() + client._checksum = lambda _: self.response_parts['crc'] + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get, 'some_key') + + def test_get_metadata_returns_None_if_value_not_found(self): + self.response_parts['payload'] = '' + self.response_parts['command'] = 'NOTFOUND' + self.response_parts['length'] = NOTFOUND_LEN + client = self._get_client() + client._checksum = lambda _: self.response_parts['crc'] + self.assertIsNone(client.get('some_key')) + + def test_negotiate(self): + client = self._get_client() + reader = ShortReader(self.v2_ok) + client.fp.read.side_effect = reader.read + client._negotiate() + self.assertTrue(reader.emptied) + + def test_negotiate_short_response(self): + client = self._get_client() + # chopped '\n' from v2_ok. 
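+        # i.e. the client reads b'V2_OK\0' rather than b'V2_OK\n';
+        # ShortReader treats the NUL endbyte as EOF, so negotiation never
+        # sees a complete line and should raise the timeout exception below.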
+ reader = ShortReader(self.v2_ok[:-1] + b'\0') + client.fp.read.side_effect = reader.read + self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException, + client._negotiate) + self.assertTrue(reader.emptied) + + def test_negotiate_bad_response(self): + client = self._get_client() + reader = ShortReader(b'garbage\n' + self.v2_ok) + client.fp.read.side_effect = reader.read + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client._negotiate) + self.assertEqual(self.v2_ok, client.fp.read()) + + def test_serial_open_transport(self): + client = self._get_serial_client() + reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_flush_failure(self): + client = self._get_serial_client() + reader = ShortReader(b'garbage' + b'\0' + self.failure + + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_flush_many_timeouts(self): + client = self._get_serial_client() + reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_list_metadata_returns_list(self): + parts = ['foo', 'bar'] + value = b64e('\n'.join(parts)) + self.response_parts['payload'] = value + self.response_parts['crc'] = '40873553' + self.response_parts['length'] = SUCCESS_LEN + len(value) + client = self._get_client() + self.assertEqual(client.list(), parts) + + def test_list_metadata_returns_empty_list_if_no_customer_metadata(self): + del self.response_parts['payload'] + self.response_parts['length'] = SUCCESS_LEN - 1 + self.response_parts['crc'] = '14e563ba' + client = self._get_client() + self.assertEqual(client.list(), []) + + +class TestNetworkConversion(CiTestCase): + def test_convert_simple(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.102/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '192.168.128.93/22'}], + 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]} + found = convert_net(SDC_NICS) + self.assertEqual(expected, found) + + def test_convert_simple_alt(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_ALT) + self.assertEqual(expected, found) + + def test_convert_simple_dhcp(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'dhcp4'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_DHCP) + self.assertEqual(expected, found) + + def test_convert_simple_multi_ip(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 
'address': '8.12.42.51/24'}, + {'type': 'static', + 'address': '8.12.42.52/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}, + {'type': 'static', + 'address': '10.210.1.151/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_MIP) + self.assertEqual(expected, found) + + def test_convert_with_dns(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'dhcp4'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}, + {'type': 'nameserver', + 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]} + found = convert_net( + network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'], + dns_domain="local") + self.assertEqual(expected, found) + + def test_convert_simple_multi_ipv6(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'address': + '2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'}, + {'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_MIP_IPV6) + self.assertEqual(expected, found) + + def test_convert_simple_both_ipv4_ipv6(self): + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '2001::10/64', 'gateway': '2001::1', + 'type': 'static'}, + {'address': '8.12.42.51/24', + 'gateway': '8.12.42.1', + 'type': 'static'}, + {'address': '2001::11/64', 'type': 'static'}, + {'address': '8.12.42.52/32', 'type': 'static'}]}, + {'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.217/24', + 'type': 'static'}]}]} + found = convert_net(SDC_NICS_IPV4_IPV6) + self.assertEqual(expected, found) + + def test_gateways_not_on_all_nics(self): + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '8.12.42.26/24', + 'gateway': '8.12.42.1', 'type': 'static'}]}, + {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.27/24', + 'type': 'static'}]}]} + found = convert_net(SDC_NICS_SINGLE_GATEWAY) + self.assertEqual(expected, found) + + def test_routes_on_all_nics(self): + routes = [ + {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'}, + {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}] + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '8.12.42.26/24', + 'gateway': '8.12.42.1', 'type': 'static', + 'routes': [{'network': '3.0.0.0/8', + 'gateway': '8.12.42.3'}, + {'network': '4.0.0.0/8', + 'gateway': '10.210.1.4'}]}]}, + {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.27/24', 'type': 'static', + 'routes': [{'network': '3.0.0.0/8', + 
'gateway': '8.12.42.3'},
+                                         {'network': '4.0.0.0/8',
+                                          'gateway': '10.210.1.4'}]}]}]}
+        found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes)
+        self.maxDiff = None
+        self.assertEqual(expected, found)
+
+
+@unittest.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
+                     "Only supported on KVM and bhyve guests under SmartOS")
+@unittest.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
+                     "Requires write access to " + SERIAL_DEVICE)
+@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
+class TestSerialConcurrency(CiTestCase):
+    """
+    This class tests locking on an actual serial port, and as such can only
+    be run in a kvm or bhyve guest running on a SmartOS host. A test run on
+    a metadata socket will not be valid because a metadata socket ensures
+    there is only one session over a connection. In contrast, in the
+    absence of proper locking multiple processes opening the same serial
+    port can corrupt each other's exchanges with the metadata server.
+
+    This takes on the order of 2 to 3 minutes to run.
+    """
+    allowed_subp = ['mdata-get']
+
+    def setUp(self):
+        self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop)
+        self.mdata_proc.start()
+        super(TestSerialConcurrency, self).setUp()
+
+    def tearDown(self):
+        # os.kill() rather than mdata_proc.terminate() to avoid console spam.
+        os.kill(self.mdata_proc.pid, signal.SIGKILL)
+        self.mdata_proc.join()
+        super(TestSerialConcurrency, self).tearDown()
+
+    def start_mdata_loop(self):
+        """
+        The mdata-get command is repeatedly run in a separate process so
+        that it may try to race with metadata operations performed in the
+        main test process. Use of mdata-get is better than two processes
+        using the protocol implementation in DataSourceSmartOS because we
+        are testing to be sure that cloud-init and mdata-get respect each
+        other's locks.
+        """
+        rcs = list(range(0, 256))
+        while True:
+            subp(['mdata-get', 'sdc:routes'], rcs=rcs)
+
+    def test_all_keys(self):
+        self.assertIsNotNone(self.mdata_proc.pid)
+        ds = DataSourceSmartOS
+        keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()]
+        keys.extend(ds.SMARTOS_ATTRIB_JSON.values())
+
+        client = ds.jmc_client_factory(smartos_type=SMARTOS_ENV_KVM)
+        self.assertIsNotNone(client)
+
+        # The failure mode being tested for was observed with mdata-get
+        # running 10 times at roughly the same time as cloud-init fetched
+        # each key once. cloud-init would regularly see failures before
+        # making it through all of the keys once.
+        for _ in range(0, 3):
+            for key in keys:
+                # We don't care about the return value, just that it doesn't
+                # throw any exceptions.
+                client.get(key)
+
+        self.assertIsNone(self.mdata_proc.exitcode)
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py
new file mode 100644
index 00000000..1d792066
--- /dev/null
+++ b/tests/unittests/sources/test_upcloud.py
@@ -0,0 +1,314 @@
+# Author: Antti Myyrä
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import helpers
+from cloudinit import settings
+from cloudinit import sources
+from cloudinit.sources.DataSourceUpCloud import DataSourceUpCloud, \
+    DataSourceUpCloudLocal
+
+from tests.unittests.helpers import mock, CiTestCase
+
+UC_METADATA = json.loads("""
+{
+  "cloud_name": "upcloud",
+  "instance_id": "00322b68-0096-4042-9406-faad61922128",
+  "hostname": "test.example.com",
+  "platform": "servers",
+  "subplatform": "metadata (http://169.254.169.254)",
+  "public_keys": [
+    "ssh-rsa AAAAB.... 
test1@example.com", + "ssh-rsa AAAAB.... test2@example.com" + ], + "region": "fi-hel2", + "network": { + "interfaces": [ + { + "index": 1, + "ip_addresses": [ + { + "address": "94.237.105.53", + "dhcp": true, + "dns": [ + "94.237.127.9", + "94.237.40.9" + ], + "family": "IPv4", + "floating": false, + "gateway": "94.237.104.1", + "network": "94.237.104.0/22" + }, + { + "address": "94.237.105.50", + "dhcp": false, + "dns": null, + "family": "IPv4", + "floating": true, + "gateway": "", + "network": "94.237.105.50/32" + } + ], + "mac": "3a:d6:ba:4a:36:e7", + "network_id": "031457f4-0f8c-483c-96f2-eccede02909c", + "type": "public" + }, + { + "index": 2, + "ip_addresses": [ + { + "address": "10.6.3.27", + "dhcp": true, + "dns": null, + "family": "IPv4", + "floating": false, + "gateway": "10.6.0.1", + "network": "10.6.0.0/22" + } + ], + "mac": "3a:d6:ba:4a:84:cc", + "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1", + "type": "utility" + }, + { + "index": 3, + "ip_addresses": [ + { + "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7", + "dhcp": true, + "dns": [ + "2a04:3540:53::1", + "2a04:3544:53::1" + ], + "family": "IPv6", + "floating": false, + "gateway": "2a04:3545:1000:720::1", + "network": "2a04:3545:1000:720::/64" + } + ], + "mac": "3a:d6:ba:4a:63:e7", + "network_id": "03000000-0000-4000-8046-000000000000", + "type": "public" + }, + { + "index": 4, + "ip_addresses": [ + { + "address": "172.30.1.10", + "dhcp": true, + "dns": null, + "family": "IPv4", + "floating": false, + "gateway": "172.30.1.1", + "network": "172.30.1.0/24" + } + ], + "mac": "3a:d6:ba:4a:8a:e1", + "network_id": "035a0a4a-7704-4de5-820d-189fc8132714", + "type": "private" + } + ], + "dns": [ + "94.237.127.9", + "94.237.40.9" + ] + }, + "storage": { + "disks": [ + { + "id": "014efb65-223b-4d44-8f0a-c29535b88dcf", + "serial": "014efb65223b4d448f0a", + "size": 10240, + "type": "disk", + "tier": "maxiops" + } + ] + }, + "tags": [], + "user_data": "", + "vendor_data": "" +} +""") + +UC_METADATA["user_data"] = b"""#cloud-config +runcmd: +- [touch, /root/cloud-init-worked ] +""" + +MD_URL = 'http://169.254.169.254/metadata/v1.json' + + +def _mock_dmi(): + return True, "00322b68-0096-4042-9406-faad61922128" + + +class TestUpCloudMetadata(CiTestCase): + """ + Test reading the meta-data + """ + def setUp(self): + super(TestUpCloudMetadata, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self, get_sysinfo=_mock_dmi): + ds = DataSourceUpCloud( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + if get_sysinfo: + ds._get_sysinfo = get_sysinfo + return ds + + @mock.patch('cloudinit.sources.helpers.upcloud.read_sysinfo') + def test_returns_false_not_on_upcloud(self, m_read_sysinfo): + m_read_sysinfo.return_value = (False, None) + ds = self.get_ds(get_sysinfo=None) + self.assertEqual(False, ds.get_data()) + self.assertTrue(m_read_sysinfo.called) + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + def test_metadata(self, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + ds = self.get_ds() + ds.perform_dhcp_setup = False + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + self.assertEqual(UC_METADATA.get('user_data'), ds.get_userdata_raw()) + self.assertEqual(UC_METADATA.get('vendor_data'), + ds.get_vendordata_raw()) + self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) + self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) + self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) + + 
self.assertEqual(UC_METADATA.get('public_keys'), + ds.get_public_ssh_keys()) + self.assertIsInstance(ds.get_public_ssh_keys(), list) + + +class TestUpCloudNetworkSetup(CiTestCase): + """ + Test reading the meta-data on networked context + """ + + def setUp(self): + super(TestUpCloudNetworkSetup, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self, get_sysinfo=_mock_dmi): + ds = DataSourceUpCloudLocal( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + if get_sysinfo: + ds._get_sysinfo = get_sysinfo + return ds + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + def test_network_configured_metadata(self, m_net, m_dhcp, + m_fallback_nic, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + m_fallback_nic.return_value = 'eth1' + m_dhcp.return_value = [{ + 'interface': 'eth1', 'fixed-address': '10.6.3.27', + 'routers': '10.6.0.1', 'subnet-mask': '22', + 'broadcast-address': '10.6.3.255'} + ] + + ds = self.get_ds() + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(m_dhcp.called) + m_dhcp.assert_called_with('eth1', None) + + m_net.assert_called_once_with( + broadcast='10.6.3.255', interface='eth1', + ip='10.6.3.27', prefix_or_mask='22', + router='10.6.0.1', static_routes=None + ) + + self.assertTrue(mock_readmd.called) + + self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) + self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) + self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_network_configuration(self, m_get_by_mac, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + raw_ifaces = UC_METADATA.get('network').get('interfaces') + self.assertEqual(4, len(raw_ifaces)) + + m_get_by_mac.return_value = { + raw_ifaces[0].get('mac'): 'eth0', + raw_ifaces[1].get('mac'): 'eth1', + raw_ifaces[2].get('mac'): 'eth2', + raw_ifaces[3].get('mac'): 'eth3', + } + + ds = self.get_ds() + ds.perform_dhcp_setup = False + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + netcfg = ds.network_config + + self.assertEqual(1, netcfg.get('version')) + + config = netcfg.get('config') + self.assertIsInstance(config, list) + self.assertEqual(5, len(config)) + self.assertEqual('physical', config[3].get('type')) + + self.assertEqual(raw_ifaces[2].get('mac'), config[2] + .get('mac_address')) + self.assertEqual(1, len(config[2].get('subnets'))) + self.assertEqual('ipv6_dhcpv6-stateless', config[2].get('subnets')[0] + .get('type')) + + self.assertEqual(2, len(config[0].get('subnets'))) + self.assertEqual('static', config[0].get('subnets')[1].get('type')) + + dns = config[4] + self.assertEqual('nameserver', dns.get('type')) + self.assertEqual(2, len(dns.get('address'))) + self.assertEqual( + UC_METADATA.get('network').get('dns')[1], + dns.get('address')[1] + ) + + +class TestUpCloudDatasourceLoading(CiTestCase): + def test_get_datasource_list_returns_in_local(self): + deps = (sources.DEP_FILESYSTEM, ) + ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) + self.assertEqual(ds_list, + [DataSourceUpCloudLocal]) + + def test_get_datasource_list_returns_in_normal(self): + deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK) + ds_list = 
sources.DataSourceUpCloud.get_datasource_list(deps) + self.assertEqual(ds_list, + [DataSourceUpCloud]) + + def test_list_sources_finds_ds(self): + found = sources.list_sources( + ['UpCloud'], (sources.DEP_FILESYSTEM, sources.DEP_NETWORK), + ['cloudinit.sources']) + self.assertEqual([DataSourceUpCloud], + found) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py new file mode 100644 index 00000000..d34d7782 --- /dev/null +++ b/tests/unittests/sources/test_vmware.py @@ -0,0 +1,391 @@ +# Copyright (c) 2021 VMware, Inc. All Rights Reserved. +# +# Authors: Andrew Kutz +# +# This file is part of cloud-init. See LICENSE file for license information. + +import base64 +import gzip +import os + +import pytest + +from cloudinit import dmi, helpers, safeyaml +from cloudinit import settings +from cloudinit.sources import DataSourceVMware +from tests.unittests.helpers import ( + mock, + CiTestCase, + FilesystemMockingTestCase, + populate_dir, +) + + +PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name" +PRODUCT_NAME = "VMware7,1" +PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB" +REROOT_FILES = { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, +} + +VMW_MULTIPLE_KEYS = [ + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com", + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com", +] +VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com" + +VMW_METADATA_YAML = """instance-id: cloud-vm +local-hostname: cloud-vm +network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes +""" + +VMW_USERDATA_YAML = """## template: jinja +#cloud-config +users: +- default +""" + +VMW_VENDORDATA_YAML = """## template: jinja +#cloud-config +runcmd: +- echo "Hello, world." +""" + + +@pytest.yield_fixture(autouse=True) +def common_patches(): + with mock.patch('cloudinit.util.platform.platform', return_value='Linux'): + with mock.patch.multiple( + 'cloudinit.dmi', + is_container=mock.Mock(return_value=False), + is_FreeBSD=mock.Mock(return_value=False) + ): + yield + + +class TestDataSourceVMware(CiTestCase): + """ + Test common functionality that is not transport specific. + """ + + def setUp(self): + super(TestDataSourceVMware, self).setUp() + self.tmp = self.tmp_dir() + + def test_no_data_access_method(self): + ds = get_ds(self.tmp) + ds.vmware_rpctool = None + ret = ds.get_data() + self.assertFalse(ret) + + def test_get_host_info(self): + host_info = DataSourceVMware.get_host_info() + self.assertTrue(host_info) + self.assertTrue(host_info["hostname"]) + self.assertTrue(host_info["local-hostname"]) + self.assertTrue(host_info["local_hostname"]) + self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4]) + + +class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase): + """ + Test the envvar transport. 
+ """ + + def setUp(self): + super(TestDataSourceVMwareEnvVars, self).setUp() + self.tmp = self.tmp_dir() + os.environ[DataSourceVMware.VMX_GUESTINFO] = "1" + self.create_system_files() + + def tearDown(self): + del os.environ[DataSourceVMware.VMX_GUESTINFO] + return super(TestDataSourceVMwareEnvVars, self).tearDown() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + def assert_get_data_ok(self, m_fn, m_fn_call_count=6): + ds = get_ds(self.tmp) + ds.vmware_rpctool = None + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(m_fn_call_count, m_fn.call_count) + self.assertEqual( + ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR + ) + return ds + + def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): + ds = self.assert_get_data_ok(m_fn, m_fn_call_count) + assert_metadata(self, ds, metadata) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_subplatform(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) + self.assertEqual( + ds.subplatform, + "%s (%s)" + % ( + DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR, + DataSourceVMware.get_guestinfo_envvar_key_name("metadata"), + ), + ) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_only(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_userdata_only(self, m_fn): + m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_vendordata_only(self, m_fn): + m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_base64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_b64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_gzip_base64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gzip+base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_gz_b64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gz+b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_metadata_single_ssh_key(self, m_fn): + metadata = 
DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_SINGLE_KEY + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_metadata_multiple_ssh_keys(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_MULTIPLE_KEYS + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + +class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase): + """ + Test the guestinfo transport on a VMware platform. + """ + + def setUp(self): + super(TestDataSourceVMwareGuestInfo, self).setUp() + self.tmp = self.tmp_dir() + self.create_system_files() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + def assert_get_data_ok(self, m_fn, m_fn_call_count=6): + ds = get_ds(self.tmp) + ds.vmware_rpctool = "vmware-rpctool" + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(m_fn_call_count, m_fn.call_count) + self.assertEqual( + ds.data_access_method, + DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, + ) + return ds + + def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): + ds = self.assert_get_data_ok(m_fn, m_fn_call_count) + assert_metadata(self, ds, metadata) + + def test_ds_valid_on_vmware_platform(self): + system_type = dmi.read_dmi_data("system-product-name") + self.assertEqual(system_type, PRODUCT_NAME) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_subplatform(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) + self.assertEqual( + ds.subplatform, + "%s (%s)" + % ( + DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, + DataSourceVMware.get_guestinfo_key_name("metadata"), + ), + ) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_userdata_only(self, m_fn): + m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_vendordata_only(self, m_fn): + m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_metadata_single_ssh_key(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_SINGLE_KEY + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_metadata_multiple_ssh_keys(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_MULTIPLE_KEYS + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_base64(self, m_fn): + data = 
base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_b64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_gzip_base64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gzip+base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_gz_b64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gz+b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + +class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase): + """ + Test the guestinfo transport on a non-VMware platform. + """ + + def setUp(self): + super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp() + self.tmp = self.tmp_dir() + self.create_system_files() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_ds_invalid_on_non_vmware_platform(self, m_fn): + system_type = dmi.read_dmi_data("system-product-name") + self.assertEqual(system_type, None) + + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = get_ds(self.tmp) + ds.vmware_rpctool = "vmware-rpctool" + ret = ds.get_data() + self.assertFalse(ret) + + +def assert_metadata(test_obj, ds, metadata): + test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id()) + test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname()) + + expected_public_keys = metadata.get("public_keys") + if not isinstance(expected_public_keys, list): + expected_public_keys = [expected_public_keys] + + test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys()) + test_obj.assertIsInstance(ds.get_public_ssh_keys(), list) + + +def get_ds(temp_dir): + ds = DataSourceVMware.DataSourceVMware( + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir}) + ) + ds.vmware_rpctool = "vmware-rpctool" + return ds + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py new file mode 100644 index 00000000..40594b95 --- /dev/null +++ b/tests/unittests/sources/test_vultr.py @@ -0,0 +1,337 @@ +# Author: Eric Benner +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +# Vultr Metadata API: +# https://www.vultr.com/metadata/ + +import json + +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import DataSourceVultr +from cloudinit.sources.helpers import vultr + +from tests.unittests.helpers import mock, CiTestCase + +# Vultr metadata test data +VULTR_V1_1 = { + 'bgp': { + 'ipv4': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + }, + 'ipv6': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + } + }, + 'hostname': 'CLOUDINIT_1', + 'instanceid': '42506325', + 'interfaces': [ + { + 'ipv4': { + 'additional': [ + ], + 'address': '108.61.89.242', + 'gateway': '108.61.89.1', + 'netmask': '255.255.255.0' + }, + 'ipv6': { + 'additional': [ + ], + 'address': '2001:19f0:5:56c2:5400:03ff:fe15:c465', + 'network': '2001:19f0:5:56c2::', + 'prefix': '64' + }, + 'mac': '56:00:03:15:c4:65', + 'network-type': 'public' + } + ], + 'public-keys': [ + 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' + ], + 'region': { + 'regioncode': 'EWR' + }, + 'user-defined': [ + ], + 'startup-script': 'echo No configured startup script', + 'raid1-script': '', + 'user-data': [ + ], + 'vendor-data': [ + { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } + } + ] +} + +VULTR_V1_2 = { + 'bgp': { + 'ipv4': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + }, + 'ipv6': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + } + }, + 'hostname': 'CLOUDINIT_2', + 'instance-v2-id': '29bea708-2e6e-480a-90ad-0e6b5d5ad62f', + 'instanceid': '42872224', + 'interfaces': [ + { + 'ipv4': { + 'additional': [ + ], + 'address':'45.76.7.171', + 'gateway':'45.76.6.1', + 'netmask':'255.255.254.0' + }, + 'ipv6':{ + 'additional': [ + ], + 'address':'2001:19f0:5:28a7:5400:03ff:fe1b:4eca', + 'network':'2001:19f0:5:28a7::', + 'prefix':'64' + }, + 'mac':'56:00:03:1b:4e:ca', + 'network-type':'public' + }, + { + 'ipv4': { + 'additional': [ + ], + 'address':'10.1.112.3', + 'gateway':'', + 'netmask':'255.255.240.0' + }, + 'ipv6':{ + 'additional': [ + ], + 'network':'', + 'prefix':'' + }, + 'mac':'5a:00:03:1b:4e:ca', + 'network-type':'private', + 'network-v2-id':'fbbe2b5b-b986-4396-87f5-7246660ccb64', + 'networkid':'net5e7155329d730' + } + ], + 'public-keys': [ + 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' + ], + 'region': { + 'regioncode': 'EWR' + }, + 'user-defined': [ + ], + 'startup-script': 'echo No configured startup script', + 'user-data': [ + ], + + 'vendor-data': [ + { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } + } + ] +} + +SSH_KEYS_1 = [ + "ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key" +] + +# Expected generated objects + +# Expected config +EXPECTED_VULTR_CONFIG = { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } +} + +# Expected network config object from generator +EXPECTED_VULTR_NETWORK_1 = { + 'version': 1, + 'config': [ + { + 'type': 'nameserver', + 'address': ['108.61.10.10'] + }, + { + 'name': 
'eth0', + 'type': 'physical', + 'mac_address': '56:00:03:15:c4:65', + 'accept-ra': 1, + 'subnets': [ + {'type': 'dhcp', 'control': 'auto'}, + {'type': 'ipv6_slaac', 'control': 'auto'} + ], + } + ] +} + +EXPECTED_VULTR_NETWORK_2 = { + 'version': 1, + 'config': [ + { + 'type': 'nameserver', + 'address': ['108.61.10.10'] + }, + { + 'name': 'eth0', + 'type': 'physical', + 'mac_address': '56:00:03:1b:4e:ca', + 'accept-ra': 1, + 'subnets': [ + {'type': 'dhcp', 'control': 'auto'}, + {'type': 'ipv6_slaac', 'control': 'auto'} + ], + }, + { + 'name': 'eth1', + 'type': 'physical', + 'mac_address': '5a:00:03:1b:4e:ca', + 'subnets': [ + { + "type": "static", + "control": "auto", + "address": "10.1.112.3", + "netmask": "255.255.240.0" + } + ], + } + ] +} + + +INTERFACE_MAP = { + '56:00:03:15:c4:65': 'eth0', + '56:00:03:1b:4e:ca': 'eth0', + '5a:00:03:1b:4e:ca': 'eth1' +} + + +class TestDataSourceVultr(CiTestCase): + def setUp(self): + super(TestDataSourceVultr, self).setUp() + + # Stored as a dict to make it easier to maintain + raw1 = json.dumps(VULTR_V1_1['vendor-data'][0]) + raw2 = json.dumps(VULTR_V1_2['vendor-data'][0]) + + # Make expected format + VULTR_V1_1['vendor-data'] = [raw1] + VULTR_V1_2['vendor-data'] = [raw2] + + self.tmp = self.tmp_dir() + + # Test the datasource itself + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.sources.helpers.vultr.is_vultr') + @mock.patch('cloudinit.sources.helpers.vultr.get_metadata') + def test_datasource(self, + mock_getmeta, + mock_isvultr, + mock_netmap): + mock_getmeta.return_value = VULTR_V1_2 + mock_isvultr.return_value = True + mock_netmap.return_value = INTERFACE_MAP + + source = DataSourceVultr.DataSourceVultr( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + + # Test for failure + self.assertEqual(True, source._get_data()) + + # Test instance id + self.assertEqual("42872224", source.metadata['instanceid']) + + # Test hostname + self.assertEqual("CLOUDINIT_2", source.metadata['local-hostname']) + + # Test ssh keys + self.assertEqual(SSH_KEYS_1, source.metadata['public-keys']) + + # Test vendor data generation + orig_val = self.maxDiff + self.maxDiff = None + + vendordata = source.vendordata_raw + + # Test vendor config + self.assertEqual( + EXPECTED_VULTR_CONFIG, + json.loads(vendordata[0].replace("#cloud-config", ""))) + + self.maxDiff = orig_val + + # Test network config generation + self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config) + + # Test network config generation + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_network_config(self, mock_netmap): + mock_netmap.return_value = INTERFACE_MAP + interf = VULTR_V1_1['interfaces'] + + self.assertEqual(EXPECTED_VULTR_NETWORK_1, + vultr.generate_network_config(interf)) + + # Test Private Networking config generation + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_private_network_config(self, mock_netmap): + mock_netmap.return_value = INTERFACE_MAP + interf = VULTR_V1_2['interfaces'] + + self.assertEqual(EXPECTED_VULTR_NETWORK_2, + vultr.generate_network_config(interf)) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/vmware/__init__.py b/tests/unittests/sources/vmware/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/sources/vmware/test_custom_script.py b/tests/unittests/sources/vmware/test_custom_script.py new file mode 100644 index 00000000..fcbb9cd5 --- /dev/null +++ b/tests/unittests/sources/vmware/test_custom_script.py @@ -0,0 +1,109 @@ +# Copyright (C) 
2015 Canonical Ltd.
+# Copyright (C) 2017-2019 VMware INC.
+#
+# Author: Maitreyee Saikia
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import stat
+from cloudinit import util
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+    CustomScriptConstant,
+    CustomScriptNotFound,
+    PreCustomScript,
+    PostCustomScript,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+
+class TestVmwareCustomScript(CiTestCase):
+    def setUp(self):
+        self.tmpDir = self.tmp_dir()
+        # Mock the tmpDir as the root dir in the VM.
+        self.execDir = os.path.join(self.tmpDir, ".customization")
+        self.execScript = os.path.join(self.execDir,
+                                       ".customize.sh")
+
+    def test_prepare_custom_script(self):
+        """
+        This test is designed to verify the behavior based on the presence
+        of a custom script. It is mainly needed for the scenario where a
+        custom script is expected but was not properly copied; a
+        "CustomScriptNotFound" exception is raised in such cases.
+        """
+        # Custom script does not exist.
+        preCust = PreCustomScript("random-vmw-test", self.tmpDir)
+        self.assertEqual("random-vmw-test", preCust.scriptname)
+        self.assertEqual(self.tmpDir, preCust.directory)
+        self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir),
+                         preCust.scriptpath)
+        with self.assertRaises(CustomScriptNotFound):
+            preCust.prepare_script()
+
+        # Custom script exists.
+        custScript = self.tmp_path("test-cust", self.tmpDir)
+        util.write_file(custScript, "test-CR-strip\r\r")
+        with mock.patch.object(CustomScriptConstant,
+                               "CUSTOM_TMP_DIR",
+                               self.execDir):
+            with mock.patch.object(CustomScriptConstant,
+                                   "CUSTOM_SCRIPT",
+                                   self.execScript):
+                postCust = PostCustomScript("test-cust",
+                                            self.tmpDir,
+                                            self.tmpDir)
+                self.assertEqual("test-cust", postCust.scriptname)
+                self.assertEqual(self.tmpDir, postCust.directory)
+                self.assertEqual(custScript, postCust.scriptpath)
+                postCust.prepare_script()
+
+                # Custom script is copied with exec privilege.
+                self.assertTrue(os.path.exists(self.execScript))
+                st = os.stat(self.execScript)
+                self.assertTrue(st.st_mode & stat.S_IEXEC)
+                with open(self.execScript, "r") as f:
+                    content = f.read()
+                self.assertEqual(content, "test-CR-strip")
+                # Check that all carriage returns are stripped from the
+                # script.
+                self.assertFalse("\r" in content)
+
+    def test_execute_post_cust(self):
+        """
+        This test is designed to verify the behavior after executing post
+        customization.
+        """
+        # Prepare the customize package
+        postCustRun = self.tmp_path("post-customize-guest.sh", self.tmpDir)
+        util.write_file(postCustRun, "This is the script to run post cust")
+        userScript = self.tmp_path("test-cust", self.tmpDir)
+        util.write_file(userScript, "This is the post cust script")
+
+        # Mock the cc_scripts_per_instance dir and marker file.
+        # Create another tmp dir for cc_scripts_per_instance.
+        ccScriptDir = self.tmp_dir()
+        ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh")
+        markerFile = os.path.join(self.tmpDir, ".markerFile")
+        with mock.patch.object(CustomScriptConstant,
+                               "CUSTOM_TMP_DIR",
+                               self.execDir):
+            with mock.patch.object(CustomScriptConstant,
+                                   "CUSTOM_SCRIPT",
+                                   self.execScript):
+                with mock.patch.object(CustomScriptConstant,
+                                       "POST_CUSTOM_PENDING_MARKER",
+                                       markerFile):
+                    postCust = PostCustomScript("test-cust",
+                                                self.tmpDir,
+                                                ccScriptDir)
+                    postCust.execute()
+                    # Check that cc_scripts_per_instance and the marker file
+                    # are created.
+                    self.assertTrue(os.path.exists(ccScript))
+                    with open(ccScript, "r") as f:
+                        content = f.read()
+                    self.assertEqual(content,
+                                     "This is the script to run post cust")
+                    self.assertTrue(os.path.exists(markerFile))
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_guestcust_util.py b/tests/unittests/sources/vmware/test_guestcust_util.py
new file mode 100644
index 00000000..9114f0b9
--- /dev/null
+++ b/tests/unittests/sources/vmware/test_guestcust_util.py
@@ -0,0 +1,98 @@
+# Copyright (C) 2019 Canonical Ltd.
+# Copyright (C) 2019 VMware INC.
+#
+# Author: Xiaofeng Wang
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import subp
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
+from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
+    get_tools_config,
+    set_gc_status,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+
+class TestGuestCustUtil(CiTestCase):
+    def test_get_tools_config_not_installed(self):
+        """
+        This test is designed to verify the behavior if vmware-toolbox-cmd
+        is not installed.
+        """
+        with mock.patch.object(subp, 'which', return_value=None):
+            self.assertEqual(
+                get_tools_config('section', 'key', 'defaultVal'), 'defaultVal')
+
+    def test_get_tools_config_internal_exception(self):
+        """
+        This test is designed to verify the behavior if an internal
+        exception is raised.
+        """
+        with mock.patch.object(subp, 'which', return_value='/dummy/path'):
+            with mock.patch.object(subp, 'subp',
+                                   return_value=('key=value', b''),
+                                   side_effect=subp.ProcessExecutionError(
+                                       "subp failed", exit_code=99)):
+                # verify the return value is 'defaultVal', not 'value'.
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'defaultVal')
+
+    def test_get_tools_config_normal(self):
+        """
+        This test is designed to verify that the value can be parsed from
+        a 'key = value' line in the given [section].
+        """
+        with mock.patch.object(subp, 'which', return_value='/dummy/path'):
+            # value is not blank
+            with mock.patch.object(subp, 'subp',
+                                   return_value=('key = value ', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'value')
+            # value is blank
+            with mock.patch.object(subp, 'subp',
+                                   return_value=('key = ', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    '')
+            # value contains =
+            with mock.patch.object(subp, 'subp',
+                                   return_value=('key=Bar=Wark', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'Bar=Wark')
+
+            # value contains specific characters
+            with mock.patch.object(subp, 'subp',
+                                   return_value=('[a] b.c_d=e-f', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'e-f')
+
+    def test_set_gc_status(self):
+        """
+        This test is designed to verify the behavior of set_gc_status.
+        """
+        # config is None, return None
+        self.assertEqual(set_gc_status(None, 'Successful'), None)
+
+        # post gc status is NO, return None
+        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+        conf = Config(cf)
+        self.assertEqual(set_gc_status(conf, 'Successful'), None)
+
+        # post gc status is YES, subp is called to execute command
+        cf._insertKey("MISC|POST-GC-STATUS", "YES")
+        conf = Config(cf)
+        with mock.patch.object(subp, 'subp',
+                               return_value=('ok', b'')) as mockobj:
+            self.assertEqual(
+                set_gc_status(conf, 'Successful'), ('ok', b''))
+            mockobj.assert_called_once_with(
+                ['vmware-rpctool', 'info-set 
guestinfo.gc.status Successful'], + rcs=[0]) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py new file mode 100644 index 00000000..54de113e --- /dev/null +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -0,0 +1,545 @@ +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2016 VMware INC. +# +# Author: Sankar Tanguturi +# Pengpeng Sun +# +# This file is part of cloud-init. See LICENSE file for license information. + +import logging +import os +import sys +import tempfile +import textwrap + +from cloudinit.sources.DataSourceOVF import get_network_config_from_conf +from cloudinit.sources.DataSourceOVF import read_vmware_imc +from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum +from cloudinit.sources.helpers.vmware.imc.config import Config +from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile +from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet +from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator +from tests.unittests.helpers import CiTestCase + +logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +class TestVmwareConfigFile(CiTestCase): + + def test_utility_methods(self): + """Tests basic utility methods of ConfigFile class""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + cf.clear() + + self.assertEqual(0, len(cf), "clear size") + + cf._insertKey(" PASSWORD|-PASS ", " foo ") + cf._insertKey("BAR", " ") + + self.assertEqual(2, len(cf), "insert size") + self.assertEqual('foo', cf["PASSWORD|-PASS"], "password") + self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword") + self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"), + "keepPassword") + self.assertFalse(cf.should_remove_current_value("PASSWORD|-PASS"), + "removePassword") + self.assertFalse("FOO" in cf, "hasFoo") + self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo") + self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo") + self.assertTrue("BAR" in cf, "hasBar") + self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar") + self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar") + + def test_datasource_instance_id(self): + """Tests instance id for the DatasourceOVF""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + instance_id_prefix = 'iid-vmware-' + + conf = Config(cf) + + (md1, _, _) = read_vmware_imc(conf) + self.assertIn(instance_id_prefix, md1["instance-id"]) + self.assertEqual(md1["instance-id"], 'iid-vmware-imc') + + (md2, _, _) = read_vmware_imc(conf) + self.assertIn(instance_id_prefix, md2["instance-id"]) + self.assertEqual(md2["instance-id"], 'iid-vmware-imc') + + self.assertEqual(md2["instance-id"], md1["instance-id"]) + + def test_configfile_static_2nics(self): + """Tests Config class for a configuration with two static NICs.""" + cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") + + conf = Config(cf) + + self.assertEqual('myhost1', conf.host_name, "hostName") + self.assertEqual('Africa/Abidjan', conf.timezone, "tz") + self.assertTrue(conf.utc, "utc") + + self.assertEqual(['10.20.145.1', '10.20.145.2'], + conf.name_servers, + "dns") + self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'], + conf.dns_suffixes, + "suffixes") + + nics = conf.nics + ipv40 = nics[0].staticIpv4 + + self.assertEqual(2, len(nics), "nics") + self.assertEqual('NIC1', nics[0].name, "nic0") + 
self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0") + self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0") + self.assertEqual('10.20.87.154', ipv40[0].ip, "ipv4Addr0") + self.assertEqual('255.255.252.0', ipv40[0].netmask, "ipv4Mask0") + self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0") + self.assertEqual('10.20.87.253', ipv40[0].gateways[0], "ipv4Gw0_0") + self.assertEqual('10.20.87.105', ipv40[0].gateways[1], "ipv4Gw0_1") + + self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0") + self.assertEqual('fc00:10:20:87::154', + nics[0].staticIpv6[0].ip, + "ipv6Addr0") + + self.assertEqual('NIC2', nics[1].name, "nic1") + self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp") + + def test_config_file_dhcp_2nics(self): + """Tests Config class for a configuration with two DHCP NICs.""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + conf = Config(cf) + nics = conf.nics + self.assertEqual(2, len(nics), "nics") + self.assertEqual('NIC1', nics[0].name, "nic0") + self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0") + self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0") + + def test_config_password(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + cf._insertKey("PASSWORD|-PASS", "test-password") + cf._insertKey("PASSWORD|RESET", "no") + + conf = Config(cf) + self.assertEqual('test-password', conf.admin_password, "password") + self.assertFalse(conf.reset_password, "do not reset password") + + def test_config_reset_passwd(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + cf._insertKey("PASSWORD|-PASS", "test-password") + cf._insertKey("PASSWORD|RESET", "random") + + conf = Config(cf) + with self.assertRaises(ValueError): + pw = conf.reset_password + self.assertIsNone(pw) + + cf.clear() + cf._insertKey("PASSWORD|RESET", "yes") + self.assertEqual(1, len(cf), "insert size") + + conf = Config(cf) + self.assertTrue(conf.reset_password, "reset password") + + def test_get_config_nameservers(self): + """Tests DNS and nameserver settings in a configuration.""" + cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") + + config = Config(cf) + + network_config = get_network_config_from_conf(config, False) + + self.assertEqual(1, network_config.get('version')) + + config_types = network_config.get('config') + name_servers = None + dns_suffixes = None + + for type in config_types: + if type.get('type') == 'nameserver': + name_servers = type.get('address') + dns_suffixes = type.get('search') + break + + self.assertEqual(['10.20.145.1', '10.20.145.2'], + name_servers, + "dns") + self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'], + dns_suffixes, + "suffixes") + + def test_gen_subnet(self): + """Tests if gen_subnet properly calculates network subnet from + IPv4 address and netmask""" + ip_subnet_list = [['10.20.87.253', '255.255.252.0', '10.20.84.0'], + ['10.20.92.105', '255.255.252.0', '10.20.92.0'], + ['192.168.0.10', '255.255.0.0', '192.168.0.0']] + for entry in ip_subnet_list: + self.assertEqual(entry[2], gen_subnet(entry[0], entry[1]), + "Subnet for a specified ip and netmask") + + def test_get_config_dns_suffixes(self): + """Tests if get_network_config_from_conf properly + generates nameservers and dns settings from a + specified configuration""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + config = Config(cf) + + network_config = get_network_config_from_conf(config, False) + + self.assertEqual(1, network_config.get('version')) + + config_types = network_config.get('config') + 
name_servers = None + dns_suffixes = None + + for type in config_types: + if type.get('type') == 'nameserver': + name_servers = type.get('address') + dns_suffixes = type.get('search') + break + + self.assertEqual([], + name_servers, + "dns") + self.assertEqual(['eng.vmware.com'], + dns_suffixes, + "suffixes") + + def test_get_nics_list_dhcp(self): + """Tests if NicConfigurator properly calculates network subnets + for a configuration with a list of DHCP NICs""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + config = Config(cf) + + nicConfigurator = NicConfigurator(config.nics, False) + nics_cfg_list = nicConfigurator.generate() + + self.assertEqual(2, len(nics_cfg_list), "number of config elements") + + nic1 = {'name': 'NIC1'} + nic2 = {'name': 'NIC2'} + for cfg in nics_cfg_list: + if cfg.get('name') == nic1.get('name'): + nic1.update(cfg) + elif cfg.get('name') == nic2.get('name'): + nic2.update(cfg) + + self.assertEqual('physical', nic1.get('type'), 'type of NIC1') + self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1') + self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'), + 'mac address of NIC1') + subnets = nic1.get('subnets') + self.assertEqual(1, len(subnets), 'number of subnets for NIC1') + subnet = subnets[0] + self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC1') + self.assertEqual('auto', subnet.get('control'), 'NIC1 Control type') + + self.assertEqual('physical', nic2.get('type'), 'type of NIC2') + self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2') + self.assertEqual('00:50:56:a6:5a:de', nic2.get('mac_address'), + 'mac address of NIC2') + subnets = nic2.get('subnets') + self.assertEqual(1, len(subnets), 'number of subnets for NIC2') + subnet = subnets[0] + self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC2') + self.assertEqual('auto', subnet.get('control'), 'NIC2 Control type') + + def test_get_nics_list_static(self): + """Tests if NicConfigurator properly calculates network subnets + for a configuration with 2 static NICs""" + cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") + + config = Config(cf) + + nicConfigurator = NicConfigurator(config.nics, False) + nics_cfg_list = nicConfigurator.generate() + + self.assertEqual(2, len(nics_cfg_list), "number of elements") + + nic1 = {'name': 'NIC1'} + nic2 = {'name': 'NIC2'} + route_list = [] + for cfg in nics_cfg_list: + cfg_type = cfg.get('type') + if cfg_type == 'physical': + if cfg.get('name') == nic1.get('name'): + nic1.update(cfg) + elif cfg.get('name') == nic2.get('name'): + nic2.update(cfg) + + self.assertEqual('physical', nic1.get('type'), 'type of NIC1') + self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1') + self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'), + 'mac address of NIC1') + + subnets = nic1.get('subnets') + self.assertEqual(2, len(subnets), 'Number of subnets') + + static_subnet = [] + static6_subnet = [] + + for subnet in subnets: + subnet_type = subnet.get('type') + if subnet_type == 'static': + static_subnet.append(subnet) + elif subnet_type == 'static6': + static6_subnet.append(subnet) + else: + self.assertEqual(True, False, 'Unknown type') + if 'route' in subnet: + for route in subnet.get('routes'): + route_list.append(route) + + self.assertEqual(1, len(static_subnet), 'Number of static subnet') + self.assertEqual(1, len(static6_subnet), 'Number of static6 subnet') + + subnet = static_subnet[0] + self.assertEqual('10.20.87.154', subnet.get('address'), + 'IPv4 address of static subnet') + 
self.assertEqual('255.255.252.0', subnet.get('netmask'), + 'NetMask of static subnet') + self.assertEqual('auto', subnet.get('control'), + 'control for static subnet') + + subnet = static6_subnet[0] + self.assertEqual('fc00:10:20:87::154', subnet.get('address'), + 'IPv6 address of static subnet') + self.assertEqual('64', subnet.get('netmask'), + 'NetMask of static6 subnet') + + route_set = set(['10.20.87.253', '10.20.87.105', '192.168.0.10']) + for route in route_list: + self.assertEqual(10000, route.get('metric'), 'metric of route') + gateway = route.get('gateway') + if gateway in route_set: + route_set.discard(gateway) + else: + self.assertEqual(True, False, 'invalid gateway %s' % (gateway)) + + self.assertEqual('physical', nic2.get('type'), 'type of NIC2') + self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2') + self.assertEqual('00:50:56:a6:ef:7d', nic2.get('mac_address'), + 'mac address of NIC2') + + subnets = nic2.get('subnets') + self.assertEqual(1, len(subnets), 'Number of subnets for NIC2') + + subnet = subnets[0] + self.assertEqual('static', subnet.get('type'), 'Subnet type') + self.assertEqual('192.168.6.102', subnet.get('address'), + 'Subnet address') + self.assertEqual('255.255.0.0', subnet.get('netmask'), + 'Subnet netmask') + + def test_custom_script(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.custom_script_name) + cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script") + conf = Config(cf) + self.assertEqual("test-script", conf.custom_script_name) + + def test_post_gc_status(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertFalse(conf.post_gc_status) + cf._insertKey("MISC|POST-GC-STATUS", "YES") + conf = Config(cf) + self.assertTrue(conf.post_gc_status) + + def test_no_default_run_post_script(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertFalse(conf.default_run_post_script) + cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO") + conf = Config(cf) + self.assertFalse(conf.default_run_post_script) + + def test_yes_default_run_post_script(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes") + conf = Config(cf) + self.assertTrue(conf.default_run_post_script) + + +class TestVmwareNetConfig(CiTestCase): + """Test conversion of vmware config to cloud-init config.""" + + maxDiff = None + + def _get_NicConfigurator(self, text): + fp = None + try: + with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(), + delete=False) as fp: + fp.write(text) + fp.close() + cfg = Config(ConfigFile(fp.name)) + return NicConfigurator(cfg.nics, use_system_devices=False) + finally: + if fp: + os.unlink(fp.name) + + def test_non_primary_nic_without_gateway(self): + """A non primary nic set is not required to have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}], + nc.generate()) + + def 
test_non_primary_nic_with_gateway(self): + """A non primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + GATEWAY = 10.20.87.253 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0', + 'routes': + [{'type': 'route', 'destination': '10.20.84.0/22', + 'gateway': '10.20.87.253', 'metric': 10000}]}]}], + nc.generate()) + + def test_cust_non_primary_nic_with_gateway_(self): + """A customer non primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = static-debug-vm + DOMAINNAME = cluster.local + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:ac:d1:8a + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 100.115.223.75 + NETMASK = 255.255.255.0 + GATEWAY = 100.115.223.254 + + + [DNS] + DNSFROMDHCP=no + + NAMESERVER|1 = 8.8.8.8 + + [DATETIME] + UTC = yes + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:ac:d1:8a', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '100.115.223.75', 'netmask': '255.255.255.0', + 'routes': + [{'type': 'route', 'destination': '100.115.223.0/24', + 'gateway': '100.115.223.254', 'metric': 10000}]}]}], + nc.generate()) + + def test_a_primary_nic_with_gateway(self): + """A primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + PRIMARY = true + GATEWAY = 10.20.87.253 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0', + 'gateway': '10.20.87.253'}]}], + nc.generate()) + + def test_meta_data(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.meta_data_name) + cf._insertKey("CLOUDINIT|METADATA", "test-metadata") + conf = Config(cf) + self.assertEqual("test-metadata", conf.meta_data_name) + + def test_user_data(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.user_data_name) + cf._insertKey("CLOUDINIT|USERDATA", "test-userdata") + conf = Config(cf) + self.assertEqual("test-userdata", conf.user_data_name) + + +# vi: ts=4 expandtab -- cgit v1.2.3 From 2969ceaa6447774f921fe7dd06a39c175e87e45c Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Tue, 7 Dec 2021 11:14:13 -0700 Subject: mock sleep() in azure test (#1137) --- tests/unittests/sources/test_azure.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tests/unittests/sources') diff --git a/tests/unittests/sources/test_azure.py 
b/tests/unittests/sources/test_azure.py
index b221a0d7..9728a1e7 100644
--- a/tests/unittests/sources/test_azure.py
+++ b/tests/unittests/sources/test_azure.py
@@ -2981,7 +2981,8 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
 
         m_is_up.side_effect = is_up_mock
 
-        dsa.wait_for_link_up("eth0")
+        with mock.patch('cloudinit.sources.DataSourceAzure.sleep'):
+            dsa.wait_for_link_up("eth0")
 
         self.assertEqual(2, m_try_set_link_up.call_count)
         self.assertEqual(2, m_is_up.call_count)
-- 
cgit v1.2.3


From 65c2cfd7f21758746444c8c79444994a4638d563 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Wed, 8 Dec 2021 14:27:37 -0700
Subject: factor out function for getting top level directory of cloudinit (#1136)

Add a test helper to get the top-level directory

Many tests need the location of files and directories within the
cloud-init project directory. Tests implement this in various ways,
and often those ways depend on the current working directory of the
pytest invocation. Create helper functions (and tests) that get the
path of the top directory, or of any subdirectory under it. These
functions do not depend on the environment. (An illustrative usage
sketch appears after the first test_cc_chef.py hunk below.)
---
 tests/unittests/config/test_cc_chef.py             | 17 +++++++----
 tests/unittests/config/test_cc_resolv_conf.py      |  2 +-
 tests/unittests/config/test_cc_update_etc_hosts.py |  5 +++-
 tests/unittests/config/test_schema.py              | 16 +++++++----
 tests/unittests/helpers.py                         | 22 ++++++++++++++-
 .../sources/vmware/test_vmware_config_file.py      | 10 +++++--
 tests/unittests/test_ds_identify.py                | 10 +++++--
 tests/unittests/test_helpers.py                    | 33 ++++++++++++++++++++++
 tests/unittests/test_render_cloudcfg.py            |  6 ++--
 tests/unittests/test_subp.py                       | 20 +++++++------
 10 files changed, 111 insertions(+), 30 deletions(-)

(limited to 'tests/unittests/sources')

diff --git a/tests/unittests/config/test_cc_chef.py b/tests/unittests/config/test_cc_chef.py
index 060293c8..1c90a4fc 100644
--- a/tests/unittests/config/test_cc_chef.py
+++ b/tests/unittests/config/test_cc_chef.py
@@ -9,13 +9,18 @@ from cloudinit.config import cc_chef
 from cloudinit import util
 
 from tests.unittests.helpers import (
-    HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf)
+    HttprettyTestCase,
+    FilesystemMockingTestCase,
+    mock,
+    skipIf,
+    cloud_init_project_dir,
+)
 from tests.unittests.util import get_cloud
 
 LOG = logging.getLogger(__name__)
 
-CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"])
+CLIENT_TEMPL = cloud_init_project_dir("templates/chef_client.rb.tmpl")
 
 # This is adjusted to use http because using https causes issues in
 # some openssl/httpretty combinations.
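As a reading aid before the remaining hunks: a minimal sketch of how the two
helpers introduced by this patch are meant to be used. The test function below
is illustrative only and is not part of the diff; it assumes the helpers behave
as defined in the tests/unittests/helpers.py hunk later in this patch.

    # Illustrative sketch only -- not part of the patch.
    from pathlib import Path

    from tests.unittests.helpers import cloud_init_project_dir, get_top_level_dir


    def test_chef_template_is_in_tree():
        # cloud_init_project_dir() returns a str anchored at the project root,
        # independent of the pytest working directory (mirrors CLIENT_TEMPL above).
        tmpl = cloud_init_project_dir("templates/chef_client.rb.tmpl")
        # get_top_level_dir() returns the same root as a pathlib.Path.
        assert Path(tmpl) == get_top_level_dir() / "templates/chef_client.rb.tmpl"
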
@@ -138,7 +143,7 @@ class TestChef(FilesystemMockingTestCase): Chef::Log::Formatter.show_time = true encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret" """ - tpl_file = util.load_file('templates/chef_client.rb.tmpl') + tpl_file = util.load_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) @@ -200,7 +205,7 @@ class TestChef(FilesystemMockingTestCase): @skipIf(not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available") def test_template_deletes(self): - tpl_file = util.load_file('templates/chef_client.rb.tmpl') + tpl_file = util.load_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) @@ -222,7 +227,7 @@ class TestChef(FilesystemMockingTestCase): CLIENT_TEMPL + " is not available") def test_validation_cert_and_validation_key(self): # test validation_cert content is written to validation_key path - tpl_file = util.load_file('templates/chef_client.rb.tmpl') + tpl_file = util.load_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) @@ -245,7 +250,7 @@ class TestChef(FilesystemMockingTestCase): def test_validation_cert_with_system(self): # test validation_cert content is not written over system file - tpl_file = util.load_file('templates/chef_client.rb.tmpl') + tpl_file = util.load_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) diff --git a/tests/unittests/config/test_cc_resolv_conf.py b/tests/unittests/config/test_cc_resolv_conf.py index 0aa90a23..ab2de17a 100644 --- a/tests/unittests/config/test_cc_resolv_conf.py +++ b/tests/unittests/config/test_cc_resolv_conf.py @@ -114,7 +114,7 @@ class TestResolvConf(t_help.FilesystemMockingTestCase): class TestGenerateResolvConf: dist = MockDistro() - tmpl_fn = "templates/resolv.conf.tmpl" + tmpl_fn = t_help.cloud_init_project_dir("templates/resolv.conf.tmpl") @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") def test_dist_resolv_conf_fn(self, m_render_to_file): diff --git a/tests/unittests/config/test_cc_update_etc_hosts.py b/tests/unittests/config/test_cc_update_etc_hosts.py index 77a7f78f..35ad6413 100644 --- a/tests/unittests/config/test_cc_update_etc_hosts.py +++ b/tests/unittests/config/test_cc_update_etc_hosts.py @@ -55,7 +55,10 @@ class TestHostsFile(t_help.FilesystemMockingTestCase): 'manage_etc_hosts': 'template', 'hostname': 'cloud-init.test.us' } - shutil.copytree('templates', '%s/etc/cloud/templates' % self.tmp) + shutil.copytree( + t_help.cloud_init_project_dir('templates'), + '%s/etc/cloud/templates' % self.tmp, + ) distro = self._fetch_distro('sles') paths = helpers.Paths({}) paths.template_tpl = '%s' % self.tmp + '/etc/cloud/templates/%s.tmpl' diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py index f90e0f62..ed7ab527 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -12,7 +12,6 @@ from pathlib import Path from textwrap import dedent from yaml import safe_load -import cloudinit from cloudinit.config.schema import ( CLOUD_CONFIG_HEADER, SchemaValidationError, @@ -27,7 +26,12 @@ from cloudinit.config.schema import ( MetaSchema, ) from cloudinit.util import write_file -from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema +from tests.unittests.helpers import ( + CiTestCase, + mock, + skipUnlessJsonSchema, + cloud_init_project_dir, +) def get_schemas() -> dict: @@ -50,7 +54,10 @@ def get_module_variable(var_name) -> dict: """Inspect modules and get variable from module matching var_name""" schemas = {} - files = 
list(Path("../../cloudinit/config/").glob("cc_*.py")) + files = list( + Path(cloud_init_project_dir("../../cloudinit/config/")).glob("cc_*.py") + ) + modules = [mod.stem for mod in files] for module in modules: @@ -616,8 +623,7 @@ class TestMain: def _get_meta_doc_examples(): - examples_dir = Path( - cloudinit.__file__).parent.parent / 'doc' / 'examples' + examples_dir = Path(cloud_init_project_dir('doc/examples')) assert examples_dir.is_dir() all_text_files = (f for f in examples_dir.glob('cloud-config*.txt') diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index ccd56793..e9afbd36 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -12,10 +12,12 @@ import sys import tempfile import time import unittest +from pathlib import Path from contextlib import ExitStack, contextmanager from unittest import mock from unittest.util import strclass +import cloudinit from cloudinit.config.schema import ( SchemaValidationError, validate_cloudconfig_schema) from cloudinit import cloud @@ -462,7 +464,7 @@ def wrap_and_call(prefix, mocks, func, *args, **kwargs): def resourceLocation(subname=None): - path = os.path.join('tests', 'data') + path = cloud_init_project_dir('tests/data') if not subname: return path return os.path.join(path, subname) @@ -504,4 +506,22 @@ if not hasattr(mock.Mock, 'assert_not_called'): raise AssertionError(msg) mock.Mock.assert_not_called = __mock_assert_not_called + +def get_top_level_dir() -> Path: + """Return the absolute path to the top cloudinit project directory + + @return Path('') + """ + return Path(cloudinit.__file__).parent.parent.resolve() + + +def cloud_init_project_dir(sub_path: str) -> str: + """Get a path within the cloudinit project directory + + @return str of the combined path + + Example: cloud_init_project_dir("my/path") -> "/path/to/cloud-init/my/path" + """ + return str(get_top_level_dir() / sub_path) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py index 54de113e..1d66ab4a 100644 --- a/tests/unittests/sources/vmware/test_vmware_config_file.py +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -16,15 +16,21 @@ from cloudinit.sources.DataSourceOVF import get_network_config_from_conf from cloudinit.sources.DataSourceOVF import read_vmware_imc from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum from cloudinit.sources.helpers.vmware.imc.config import Config -from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile +from cloudinit.sources.helpers.vmware.imc.config_file import ( + ConfigFile as WrappedConfigFile, +) from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator -from tests.unittests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase, cloud_init_project_dir logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) logger = logging.getLogger(__name__) +def ConfigFile(path: str): + return WrappedConfigFile(cloud_init_project_dir(path)) + + class TestVmwareConfigFile(CiTestCase): def test_utility_methods(self): diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 62c3e403..eb8992d9 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -9,8 +9,12 @@ from cloudinit import safeyaml from cloudinit import subp from cloudinit import util from tests.unittests.helpers import 
( - CiTestCase, dir2dict, populate_dir, populate_dir_with_ts) - + CiTestCase, + dir2dict, + populate_dir, + populate_dir_with_ts, + cloud_init_project_dir, +) from cloudinit.sources import DataSourceIBMCloud as ds_ibm from cloudinit.sources import DataSourceSmartOS as ds_smartos from cloudinit.sources import DataSourceOracle as ds_oracle @@ -92,7 +96,7 @@ CallReturn = namedtuple('CallReturn', class DsIdentifyBase(CiTestCase): - dsid_path = os.path.realpath('tools/ds-identify') + dsid_path = cloud_init_project_dir('tools/ds-identify') allowed_subp = ['sh'] def call(self, rootd=None, mocks=None, func="main", args=None, files=None, diff --git a/tests/unittests/test_helpers.py b/tests/unittests/test_helpers.py index c6f9b94a..f491f8cd 100644 --- a/tests/unittests/test_helpers.py +++ b/tests/unittests/test_helpers.py @@ -3,6 +3,7 @@ """Tests of the built-in user data handlers.""" import os +from pathlib import Path from tests.unittests import helpers as test_helpers @@ -34,4 +35,36 @@ class TestPaths(test_helpers.ResourceUsingTestCase): self.assertIsNone(mypaths.get_ipath()) + +class Testcloud_init_project_dir: + top_dir = test_helpers.get_top_level_dir() + + @staticmethod + def _get_top_level_dir_alt_implementation(): + """Alternative implementation for comparing against. + + Note: Recursively searching for .git/ fails during build tests due to + .git not existing. This implementation assumes that ../../../ is the + relative path to the cloud-init project directory form this file. + """ + out = Path(__file__).parent.parent.parent.resolve() + return out + + def test_top_level_dir(self): + """Assert the location of the top project directory is correct""" + assert (self.top_dir == + self._get_top_level_dir_alt_implementation()) + + def test_cloud_init_project_dir(self): + """Assert cloud_init_project_dir produces an expected location + + Compare the returned value to an alternate (naive) implementation + """ + assert ( + str(Path(self.top_dir, "test")) + == test_helpers.cloud_init_project_dir("test") + == str(Path(self._get_top_level_dir_alt_implementation(), "test")) + ) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py index 00d50e66..b2222747 100644 --- a/tests/unittests/test_render_cloudcfg.py +++ b/tests/unittests/test_render_cloudcfg.py @@ -1,12 +1,12 @@ """Tests for tools/render-cloudcfg""" -import os import sys import pytest from cloudinit import subp from cloudinit import util +from tests.unittests.helpers import cloud_init_project_dir # TODO(Look to align with tools.render-cloudcfg or cloudinit.distos.OSFAMILIES) DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "eurolinux", "fedora", @@ -17,8 +17,8 @@ DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "eurolinux", "fedora", @pytest.mark.allow_subp_for(sys.executable) class TestRenderCloudCfg: - cmd = [sys.executable, os.path.realpath('tools/render-cloudcfg')] - tmpl_path = os.path.realpath('config/cloud.cfg.tmpl') + cmd = [sys.executable, cloud_init_project_dir('tools/render-cloudcfg')] + tmpl_path = cloud_init_project_dir('config/cloud.cfg.tmpl') @pytest.mark.parametrize('variant', (DISTRO_VARIANTS)) def test_variant_sets_distro_in_cloud_cfg(self, variant, tmpdir): diff --git a/tests/unittests/test_subp.py b/tests/unittests/test_subp.py index ec513d01..572510d7 100644 --- a/tests/unittests/test_subp.py +++ b/tests/unittests/test_subp.py @@ -10,7 +10,7 @@ import stat from unittest import mock from cloudinit import subp, util -from tests.unittests.helpers 
import CiTestCase +from tests.unittests.helpers import CiTestCase, get_top_level_dir BASH = subp.which('bash') @@ -232,13 +232,17 @@ class TestSubp(CiTestCase): the default encoding will be set to ascii. In such an environment Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError. """ - python_prog = '\n'.join([ - 'import json, sys', - 'from cloudinit.subp import subp', - 'data = sys.stdin.read()', - 'cmd = json.loads(data)', - 'subp(cmd, capture=False)', - '']) + python_prog = '\n'.join( + [ + 'import json, sys', + 'sys.path.insert(0, "{}")'.format(get_top_level_dir()), + 'from cloudinit.subp import subp', + 'data = sys.stdin.read()', + 'cmd = json.loads(data)', + 'subp(cmd, capture=False)', + '', + ] + ) cmd = [BASH, '-c', 'echo -n "$@"', '--', self.utf8_valid.decode("utf-8")] python_subp = [sys.executable, '-c', python_prog] -- cgit v1.2.3 From e9634266ea52bf184727fb0782d5dc35f9ed1468 Mon Sep 17 00:00:00 2001 From: Chris Patterson Date: Fri, 10 Dec 2021 12:16:16 -0500 Subject: sources/azure: remove unnecessary hostname bounce (#1143) Thanks to [1], the hostname is set prior to network bring-up. The Azure data source has been bouncing the hostname during setup(), which occurs after the hostname has already been properly configured. Note that this doesn't prevent leaking the image's hostname during Azure's _get_data() when it brings up ephemeral DHCP. However, as we are not guaranteed to have the hostname metadata available from a truly "local" source, this behavior is to be expected unless we disable `send host-name` from dhclient config. [1]: https://github.com/canonical/cloud-init/commit/133ad2cb327ad17b7b81319fac8f9f14577c04df Signed-off-by: Chris Patterson --- cloudinit/sources/DataSourceAzure.py | 126 -------------- doc/examples/cloud-config-datasources.txt | 6 - doc/rtd/topics/datasources/azure.rst | 20 --- tests/unittests/sources/test_azure.py | 263 ------------------------ 4 files changed, 415 deletions(-) (limited to 'tests/unittests/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 6c1bc085..eee98fa8 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -6,7 +6,6 @@ import base64 from collections import namedtuple -import contextlib import crypt from functools import partial import os @@ -52,20 +51,10 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} -BOUNCE_COMMAND_IFUP = [ - 'sh', '-xc', - "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" -] -BOUNCE_COMMAND_FREEBSD = [ - 'sh', '-xc', - ("i=$interface; x=0; ifconfig down $i || x=$?; " - "ifconfig up $i || x=$?; exit $x") -] # azure systems will always have a resource disk, and 66-azure-ephemeral.rules # ensures that it gets linked to this path. 
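As a hedged aside on the removal above: the "builtin" bounce deleted in this hunk ran a short shell script via subp, with $interface exported in its environment, to force a dhclient-driven DDNS update after setting the hostname. Roughly, the Linux variant (taken from the BOUNCE_COMMAND_IFUP definition just removed) amounted to:

    sh -xc 'i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x'

Because the hostname is now set before network bring-up, this ifdown/ifup cycle no longer serves a purpose.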
RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' -DEFAULT_PRIMARY_NIC = 'eth0' LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' DEFAULT_FS = 'ext4' # DMI chassis-asset-tag is set static for all azure instances @@ -247,7 +236,6 @@ def get_resource_disk_on_freebsd(port_id): # update the FreeBSD specific information if util.is_FreeBSD(): - DEFAULT_PRIMARY_NIC = 'hn0' LEASE_FILE = '/var/db/dhclient.leases.hn0' DEFAULT_FS = 'freebsd-ufs' res_disk = get_resource_disk_on_freebsd(1) @@ -261,13 +249,6 @@ if util.is_FreeBSD(): BUILTIN_DS_CONFIG = { 'data_dir': AGENT_SEED_DIR, - 'set_hostname': True, - 'hostname_bounce': { - 'interface': DEFAULT_PRIMARY_NIC, - 'policy': True, - 'command': 'builtin', - 'hostname_command': 'hostname', - }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, 'dhclient_lease_file': LEASE_FILE, 'apply_network_config': True, # Use IMDS published network configuration @@ -293,46 +274,6 @@ DEF_EPHEMERAL_LABEL = 'Temporary Storage' DEF_PASSWD_REDACTION = 'REDACTED' -def get_hostname(hostname_command='hostname'): - if not isinstance(hostname_command, (list, tuple)): - hostname_command = (hostname_command,) - return subp.subp(hostname_command, capture=True)[0].strip() - - -def set_hostname(hostname, hostname_command='hostname'): - subp.subp([hostname_command, hostname]) - - -@azure_ds_telemetry_reporter -@contextlib.contextmanager -def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): - """ - Set a temporary hostname, restoring the previous hostname on exit. - - Will have the value of the previous hostname when used as a context - manager, or None if the hostname was not changed. - """ - policy = cfg['hostname_bounce']['policy'] - previous_hostname = get_hostname(hostname_command) - if (not util.is_true(cfg.get('set_hostname')) or - util.is_false(policy) or - (previous_hostname == temp_hostname and policy != 'force')): - yield None - return - try: - set_hostname(temp_hostname, hostname_command) - except Exception as e: - report_diagnostic_event( - 'Failed setting temporary hostname: %s' % e, - logger_func=LOG.warning) - yield None - return - try: - yield previous_hostname - finally: - set_hostname(previous_hostname, hostname_command) - - class DataSourceAzure(sources.DataSource): dsname = 'Azure' @@ -369,34 +310,6 @@ class DataSourceAzure(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) - @azure_ds_telemetry_reporter - def bounce_network_with_azure_hostname(self): - # When using cloud-init to provision, we have to set the hostname from - # the metadata and "bounce" the network to force DDNS to update via - # dhclient - azure_hostname = self.metadata.get('local-hostname') - LOG.debug("Hostname in metadata is %s", azure_hostname) - hostname_command = self.ds_cfg['hostname_bounce']['hostname_command'] - - with temporary_hostname(azure_hostname, self.ds_cfg, - hostname_command=hostname_command) \ - as previous_hn: - if (previous_hn is not None and - util.is_true(self.ds_cfg.get('set_hostname'))): - cfg = self.ds_cfg['hostname_bounce'] - - # "Bouncing" the network - try: - return perform_hostname_bounce(hostname=azure_hostname, - cfg=cfg, - prev_hostname=previous_hn) - except Exception as e: - report_diagnostic_event( - "Failed publishing hostname: %s" % e, - logger_func=LOG.warning) - util.logexc(LOG, "handling set_hostname failed") - return False - def _get_subplatform(self): """Return the subplatform metadata source details.""" if self.seed.startswith('/dev'): @@ -1502,9 +1415,6 @@ class 
DataSourceAzure(sources.DataSource): On success, returns a dictionary including 'public_keys'. On failure, returns False. """ - - self.bounce_network_with_azure_hostname() - pubkey_info = None ssh_keys_and_source = self._get_public_ssh_keys_and_source() @@ -1763,42 +1673,6 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, return -@azure_ds_telemetry_reporter -def perform_hostname_bounce(hostname, cfg, prev_hostname): - # set the hostname to 'hostname' if it is not already set to that. - # then, if policy is not off, bounce the interface using command - # Returns True if the network was bounced, False otherwise. - command = cfg['command'] - interface = cfg['interface'] - policy = cfg['policy'] - - msg = ("hostname=%s policy=%s interface=%s" % - (hostname, policy, interface)) - env = os.environ.copy() - env['interface'] = interface - env['hostname'] = hostname - env['old_hostname'] = prev_hostname - - if command == "builtin": - if util.is_FreeBSD(): - command = BOUNCE_COMMAND_FREEBSD - elif subp.which('ifup'): - command = BOUNCE_COMMAND_IFUP - else: - LOG.debug( - "Skipping network bounce: ifupdown utils aren't present.") - # Don't bounce as networkd handles hostname DDNS updates - return False - LOG.debug("pubhname: publishing hostname [%s]", msg) - shell = not isinstance(command, (list, tuple)) - # capture=False, see comments in bug 1202758 and bug 1206164. - util.log_time(logfunc=LOG.debug, msg="publishing hostname", - get_uptime=True, func=subp.subp, - kwargs={'args': command, 'shell': shell, 'capture': False, - 'env': env}) - return True - - @azure_ds_telemetry_reporter def write_files(datadir, files, dirmode=None): diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index d1a4d79e..7a8c4284 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -45,12 +45,6 @@ datasource: instance-id: i-87018aed local-hostname: myhost.internal - Azure: - set_hostname: True - hostname_bounce: - interface: eth0 - policy: on # [can be 'on', 'off' or 'force'] - SmartOS: # For KVM guests: # Smart OS datasource works over a serial console interacting with diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst index ad9f2236..bc672486 100644 --- a/doc/rtd/topics/datasources/azure.rst +++ b/doc/rtd/topics/datasources/azure.rst @@ -60,20 +60,6 @@ The settings that may be configured are: custom DHCP option 245 from Azure fabric. * **disk_aliases**: A dictionary defining which device paths should be interpreted as ephemeral images. See cc_disk_setup module for more info. - * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to - metadata changes. The '``hostname_bounce: command``' entry can be either - the literal string 'builtin' or a command to execute. The command will be - invoked after the hostname is set, and will have the 'interface' in its - environment. If ``set_hostname`` is not true, then ``hostname_bounce`` - will be ignored. An example might be: - - ``command: ["sh", "-c", "killall dhclient; dhclient $interface"]`` - - * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to - metadata changes. Azure will throttle ifup/down in some cases after metadata - has been updated to inform dhcp server about updated hostnames. - * **set_hostname**: Boolean set to True when we want Azure to set the hostname - based on metadata. 
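As the commit message notes, the image's hostname can still be sent to the DHCP server during the ephemeral lease taken in _get_data(). A minimal sketch of the mitigation it alludes to, assuming an ISC dhclient and a Debian-style configuration path (neither is guaranteed, and cloud-init does not make this change itself):

    # /etc/dhcp/dhclient.conf -- illustrative only; location varies by distro.
    # Comment out the request below to stop dhclient from sending the
    # current hostname with its DHCP requests:
    #send host-name = gethostname();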
Configuration for the datasource can also be read from a ``dscfg`` entry in the ``LinuxProvisioningConfigurationSet``. Content in @@ -91,12 +77,6 @@ An example configuration with the default values is provided below: dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases disk_aliases: ephemeral0: /dev/disk/cloud/azure_resource - hostname_bounce: - interface: eth0 - command: builtin - policy: true - hostname_command: hostname - set_hostname: true Userdata diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index 9728a1e7..ad8be04b 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -696,9 +696,6 @@ scbus-1 on xpt0 bus 0 self.apply_patches([ (dsaz, 'list_possible_azure_ds', self.m_list_possible_azure_ds), - (dsaz, 'perform_hostname_bounce', mock.MagicMock()), - (dsaz, 'get_hostname', mock.MagicMock()), - (dsaz, 'set_hostname', mock.MagicMock()), (dsaz, '_is_platform_viable', self.m_is_platform_viable), (dsaz, 'get_metadata_from_fabric', @@ -1794,21 +1791,6 @@ scbus-1 on xpt0 bus 0 m_net_get_interfaces.assert_called_with( blacklist_drivers=dsaz.BLACKLIST_DRIVERS) - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_no_args(self, m_subp): - dsaz.get_hostname() - m_subp.assert_called_once_with(("hostname",), capture=True) - - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_string_arg(self, m_subp): - dsaz.get_hostname(hostname_command="hostname") - m_subp.assert_called_once_with(("hostname",), capture=True) - - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_iterable_arg(self, m_subp): - dsaz.get_hostname(hostname_command=("hostname",)) - m_subp.assert_called_once_with(("hostname",), capture=True) - @mock.patch( 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') def test_get_public_ssh_keys_with_imds(self, m_parse_certificates): @@ -2023,251 +2005,6 @@ scbus-1 on xpt0 bus 0 self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8')) -class TestAzureBounce(CiTestCase): - - with_logs = True - - def mock_out_azure_moving_parts(self): - - def _load_possible_azure_ds(seed_dir, cache_dir): - yield seed_dir - yield dsaz.DEFAULT_PROVISIONING_ISO_DEV - if cache_dir: - yield cache_dir - - self.patches.enter_context( - mock.patch.object(dsaz.util, 'wait_for_files')) - self.patches.enter_context( - mock.patch.object( - dsaz, 'list_possible_azure_ds', - mock.MagicMock(side_effect=_load_possible_azure_ds))) - self.patches.enter_context( - mock.patch.object(dsaz, 'get_metadata_from_fabric', - mock.MagicMock(return_value={}))) - self.patches.enter_context( - mock.patch.object(dsaz, 'get_metadata_from_imds', - mock.MagicMock(return_value={}))) - self.patches.enter_context( - mock.patch.object(dsaz.subp, 'which', lambda x: True)) - self.patches.enter_context(mock.patch.object( - dsaz, '_get_random_seed', return_value='wild')) - - def _dmi_mocks(key): - if key == 'system-uuid': - return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' - elif key == 'chassis-asset-tag': - return '7783-7084-3265-9085-8269-3286-77' - raise RuntimeError('should not get here') - - self.patches.enter_context( - mock.patch.object(dsaz.dmi, 'read_dmi_data', - mock.MagicMock(side_effect=_dmi_mocks))) - - def setUp(self): - super(TestAzureBounce, self).setUp() - self.tmp = self.tmp_dir() - self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - 
dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - self.patches = ExitStack() - self.mock_out_azure_moving_parts() - self.get_hostname = self.patches.enter_context( - mock.patch.object(dsaz, 'get_hostname')) - self.set_hostname = self.patches.enter_context( - mock.patch.object(dsaz, 'set_hostname')) - self.subp = self.patches.enter_context( - mock.patch(MOCKPATH + 'subp.subp')) - self.find_fallback_nic = self.patches.enter_context( - mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9')) - - def tearDown(self): - self.patches.close() - super(TestAzureBounce, self).tearDown() - - def _get_ds(self, ovfcontent=None): - if ovfcontent is not None: - populate_dir(os.path.join(self.paths.seed_dir, "azure"), - {'ovf-env.xml': ovfcontent}) - dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - return dsrc - - def _get_and_setup(self, dsrc): - ret = dsrc.get_data() - if ret: - dsrc.setup(True) - return ret - - def get_ovf_env_with_dscfg(self, hostname, cfg): - odata = { - 'HostName': hostname, - 'dscfg': { - 'text': b64e(yaml.dump(cfg)), - 'encoding': 'base64' - } - } - return construct_valid_ovf_env(data=odata) - - def test_disabled_bounce_does_not_change_hostname(self): - cfg = {'hostname_bounce': {'policy': 'off'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) - ds.get_data() - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_disabled_bounce_does_not_perform_bounce( - self, perform_hostname_bounce): - cfg = {'hostname_bounce': {'policy': 'off'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) - ds.get_data() - self.assertEqual(0, perform_hostname_bounce.call_count) - - def test_same_hostname_does_not_change_hostname(self): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'yes'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ds.get_data() - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_unchanged_hostname_does_not_perform_bounce( - self, perform_hostname_bounce): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'yes'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ds.get_data() - self.assertEqual(0, perform_hostname_bounce.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_force_performs_bounce_regardless(self, perform_hostname_bounce): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, perform_hostname_bounce.call_count) - - def test_bounce_skipped_on_ifupdown_absent(self): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - patch_path = MOCKPATH + 'subp.which' - with mock.patch(patch_path) as m_which: - m_which.return_value = None - ret = self._get_and_setup(dsrc) - self.assertEqual([mock.call('ifup')], m_which.call_args_list) - self.assertTrue(ret) - self.assertIn( - "Skipping network bounce: ifupdown utils aren't present.", - self.logs.getvalue()) - - def test_different_hostnames_sets_hostname(self): - 
expected_hostname = 'azure-expected-host-name' - self.get_hostname.return_value = 'default-host-name' - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(expected_hostname, - self.set_hostname.call_args_list[0][0][0]) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_different_hostnames_performs_bounce( - self, perform_hostname_bounce): - expected_hostname = 'azure-expected-host-name' - self.get_hostname.return_value = 'default-host-name' - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, perform_hostname_bounce.call_count) - - def test_different_hostnames_sets_hostname_back(self): - initial_host_name = 'default-host-name' - self.get_hostname.return_value = initial_host_name - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(initial_host_name, - self.set_hostname.call_args_list[-1][0][0]) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_failure_in_bounce_still_resets_host_name( - self, perform_hostname_bounce): - perform_hostname_bounce.side_effect = Exception - initial_host_name = 'default-host-name' - self.get_hostname.return_value = initial_host_name - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(initial_host_name, - self.set_hostname.call_args_list[-1][0][0]) - - @mock.patch.object(dsaz, 'get_boot_telemetry') - def test_environment_correct_for_bounce_command( - self, mock_get_boot_telemetry): - interface = 'int0' - hostname = 'my-new-host' - old_hostname = 'my-old-host' - self.get_hostname.return_value = old_hostname - cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} - data = self.get_ovf_env_with_dscfg(hostname, cfg) - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, self.subp.call_count) - bounce_env = self.subp.call_args[1]['env'] - self.assertEqual(interface, bounce_env['interface']) - self.assertEqual(hostname, bounce_env['hostname']) - self.assertEqual(old_hostname, bounce_env['old_hostname']) - - @mock.patch.object(dsaz, 'get_boot_telemetry') - def test_default_bounce_command_ifup_used_by_default( - self, mock_get_boot_telemetry): - cfg = {'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, self.subp.call_count) - bounce_args = self.subp.call_args[1]['args'] - self.assertEqual( - dsaz.BOUNCE_COMMAND_IFUP, bounce_args) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_set_hostname_option_can_disable_bounce( - self, perform_hostname_bounce): - cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, perform_hostname_bounce.call_count) - - def test_set_hostname_option_can_disable_hostname_set(self): - cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def 
test_set_hostname_failed_disable_bounce( - self, perform_hostname_bounce): - cfg = {'set_hostname': True, 'hostname_bounce': {'policy': 'force'}} - self.get_hostname.return_value = "old-hostname" - self.set_hostname.side_effect = Exception - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, perform_hostname_bounce.call_count) - - class TestLoadAzureDsDir(CiTestCase): """Tests for load_azure_ds_dir.""" -- cgit v1.2.3 From bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 15 Dec 2021 20:16:38 -0600 Subject: Adopt Black and isort (SC-700) (#1157) Applied Black and isort, fixed any linting issues, updated tox.ini and CI. --- .travis.yml | 4 + CONTRIBUTING.rst | 5 + cloudinit/analyze/__main__.py | 269 +- cloudinit/analyze/dump.py | 71 +- cloudinit/analyze/show.py | 192 +- cloudinit/apport.py | 153 +- cloudinit/atomic_helper.py | 25 +- cloudinit/cloud.py | 14 +- cloudinit/cmd/clean.py | 59 +- cloudinit/cmd/cloud_id.py | 68 +- cloudinit/cmd/devel/__init__.py | 3 +- cloudinit/cmd/devel/hotplug_hook.py | 138 +- cloudinit/cmd/devel/logs.py | 120 +- cloudinit/cmd/devel/make_mime.py | 76 +- cloudinit/cmd/devel/net_convert.py | 145 +- cloudinit/cmd/devel/parser.py | 48 +- cloudinit/cmd/devel/render.py | 54 +- cloudinit/cmd/main.py | 595 ++- cloudinit/cmd/query.py | 170 +- cloudinit/cmd/status.py | 101 +- cloudinit/config/__init__.py | 20 +- cloudinit/config/cc_apk_configure.py | 195 +- cloudinit/config/cc_apt_configure.py | 618 +-- cloudinit/config/cc_apt_pipelining.py | 13 +- cloudinit/config/cc_bootcmd.py | 65 +- cloudinit/config/cc_byobu.py | 27 +- cloudinit/config/cc_ca_certs.py | 84 +- cloudinit/config/cc_chef.py | 659 +-- cloudinit/config/cc_debug.py | 21 +- cloudinit/config/cc_disable_ec2_metadata.py | 25 +- cloudinit/config/cc_disk_setup.py | 334 +- cloudinit/config/cc_emit_upstart.py | 24 +- cloudinit/config/cc_fan.py | 34 +- cloudinit/config/cc_final_message.py | 24 +- cloudinit/config/cc_foo.py | 1 + cloudinit/config/cc_growpart.py | 134 +- cloudinit/config/cc_grub_dpkg.py | 54 +- cloudinit/config/cc_install_hotplug.py | 48 +- cloudinit/config/cc_keys_to_console.py | 36 +- cloudinit/config/cc_landscape.py | 24 +- cloudinit/config/cc_locale.py | 51 +- cloudinit/config/cc_lxd.py | 186 +- cloudinit/config/cc_mcollective.py | 50 +- cloudinit/config/cc_migrator.py | 25 +- cloudinit/config/cc_mounts.py | 172 +- cloudinit/config/cc_ntp.py | 546 ++- .../config/cc_package_update_upgrade_install.py | 30 +- cloudinit/config/cc_phone_home.py | 98 +- cloudinit/config/cc_power_state_change.py | 58 +- cloudinit/config/cc_puppet.py | 194 +- cloudinit/config/cc_refresh_rmc_and_interface.py | 51 +- cloudinit/config/cc_reset_rmc.py | 43 +- cloudinit/config/cc_resizefs.py | 183 +- cloudinit/config/cc_resolv_conf.py | 41 +- cloudinit/config/cc_rh_subscription.py | 240 +- cloudinit/config/cc_rightscale_userdata.py | 31 +- cloudinit/config/cc_rsyslog.py | 86 +- cloudinit/config/cc_runcmd.py | 70 +- cloudinit/config/cc_salt_minion.py | 69 +- cloudinit/config/cc_scripts_per_boot.py | 14 +- cloudinit/config/cc_scripts_per_instance.py | 14 +- cloudinit/config/cc_scripts_per_once.py | 14 +- cloudinit/config/cc_scripts_user.py | 12 +- cloudinit/config/cc_scripts_vendor.py | 22 +- cloudinit/config/cc_seed_random.py | 41 +- cloudinit/config/cc_set_hostname.py | 30 +- cloudinit/config/cc_set_passwords.py | 65 +- cloudinit/config/cc_snap.py | 168 +- cloudinit/config/cc_spacewalk.py | 67 +- cloudinit/config/cc_ssh.py 
| 106 +- cloudinit/config/cc_ssh_authkey_fingerprints.py | 73 +- cloudinit/config/cc_ssh_import_id.py | 23 +- cloudinit/config/cc_timezone.py | 2 +- cloudinit/config/cc_ubuntu_advantage.py | 154 +- cloudinit/config/cc_ubuntu_drivers.py | 133 +- cloudinit/config/cc_update_etc_hosts.py | 42 +- cloudinit/config/cc_update_hostname.py | 25 +- cloudinit/config/cc_users_groups.py | 39 +- cloudinit/config/cc_write_files.py | 242 +- cloudinit/config/cc_write_files_deferred.py | 22 +- cloudinit/config/cc_yum_add_repo.py | 65 +- cloudinit/config/cc_zypper_add_repo.py | 159 +- cloudinit/config/schema.py | 239 +- cloudinit/cs_utils.py | 20 +- cloudinit/dhclient_hook.py | 21 +- cloudinit/distros/__init__.py | 420 +- cloudinit/distros/almalinux.py | 1 + cloudinit/distros/alpine.py | 45 +- cloudinit/distros/amazon.py | 1 - cloudinit/distros/arch.py | 147 +- cloudinit/distros/bsd.py | 66 +- cloudinit/distros/bsd_utils.py | 18 +- cloudinit/distros/centos.py | 1 + cloudinit/distros/cloudlinux.py | 1 + cloudinit/distros/debian.py | 168 +- cloudinit/distros/dragonflybsd.py | 2 +- cloudinit/distros/eurolinux.py | 1 + cloudinit/distros/fedora.py | 1 + cloudinit/distros/freebsd.py | 93 +- cloudinit/distros/gentoo.py | 140 +- cloudinit/distros/miraclelinux.py | 2 + cloudinit/distros/net_util.py | 68 +- cloudinit/distros/netbsd.py | 85 +- cloudinit/distros/networking.py | 13 +- cloudinit/distros/openEuler.py | 1 + cloudinit/distros/openbsd.py | 20 +- cloudinit/distros/opensuse.py | 119 +- cloudinit/distros/parsers/__init__.py | 3 +- cloudinit/distros/parsers/hostname.py | 24 +- cloudinit/distros/parsers/hosts.py | 24 +- cloudinit/distros/parsers/networkmanager_conf.py | 6 +- cloudinit/distros/parsers/resolv_conf.py | 73 +- cloudinit/distros/parsers/sys_conf.py | 38 +- cloudinit/distros/photon.py | 86 +- cloudinit/distros/rhel.py | 76 +- cloudinit/distros/rhel_util.py | 4 +- cloudinit/distros/rocky.py | 1 + cloudinit/distros/sles.py | 1 + cloudinit/distros/ubuntu.py | 33 +- cloudinit/distros/ug_util.py | 106 +- cloudinit/distros/virtuozzo.py | 1 + cloudinit/dmi.py | 68 +- cloudinit/ec2_utils.py | 165 +- cloudinit/event.py | 8 +- cloudinit/filters/launch_index.py | 12 +- cloudinit/gpg.py | 48 +- cloudinit/handlers/__init__.py | 152 +- cloudinit/handlers/boot_hook.py | 21 +- cloudinit/handlers/cloud_config.py | 29 +- cloudinit/handlers/jinja_template.py | 87 +- cloudinit/handlers/shell_script.py | 15 +- cloudinit/handlers/upstart_job.py | 22 +- cloudinit/helpers.py | 111 +- cloudinit/importer.py | 3 +- cloudinit/log.py | 21 +- cloudinit/mergers/__init__.py | 43 +- cloudinit/mergers/m_dict.py | 34 +- cloudinit/mergers/m_list.py | 37 +- cloudinit/mergers/m_str.py | 5 +- cloudinit/net/__init__.py | 579 ++- cloudinit/net/activators.py | 87 +- cloudinit/net/bsd.py | 112 +- cloudinit/net/cmdline.py | 97 +- cloudinit/net/dhcp.py | 194 +- cloudinit/net/eni.py | 454 +- cloudinit/net/freebsd.py | 44 +- cloudinit/net/netbsd.py | 27 +- cloudinit/net/netplan.py | 313 +- cloudinit/net/network_state.py | 734 +-- cloudinit/net/networkd.py | 208 +- cloudinit/net/openbsd.py | 33 +- cloudinit/net/renderer.py | 31 +- cloudinit/net/renderers.py | 40 +- cloudinit/net/sysconfig.py | 886 ++-- cloudinit/net/udev.py | 23 +- cloudinit/netinfo.py | 403 +- cloudinit/patcher.py | 9 +- cloudinit/registry.py | 4 +- cloudinit/reporting/__init__.py | 9 +- cloudinit/reporting/events.py | 97 +- cloudinit/reporting/handlers.py | 128 +- cloudinit/safeyaml.py | 25 +- cloudinit/serial.py | 25 +- cloudinit/settings.py | 82 +- cloudinit/signal_handler.py | 12 
+- cloudinit/simpletable.py | 26 +- cloudinit/sources/DataSourceAliYun.py | 18 +- cloudinit/sources/DataSourceAltCloud.py | 113 +- cloudinit/sources/DataSourceAzure.py | 1350 +++--- cloudinit/sources/DataSourceBigstep.py | 9 +- cloudinit/sources/DataSourceCloudSigma.py | 39 +- cloudinit/sources/DataSourceCloudStack.py | 135 +- cloudinit/sources/DataSourceConfigDrive.py | 117 +- cloudinit/sources/DataSourceDigitalOcean.py | 65 +- cloudinit/sources/DataSourceEc2.py | 461 +- cloudinit/sources/DataSourceExoscale.py | 171 +- cloudinit/sources/DataSourceGCE.py | 221 +- cloudinit/sources/DataSourceHetzner.py | 74 +- cloudinit/sources/DataSourceIBMCloud.py | 128 +- cloudinit/sources/DataSourceLXD.py | 61 +- cloudinit/sources/DataSourceMAAS.py | 180 +- cloudinit/sources/DataSourceNoCloud.py | 154 +- cloudinit/sources/DataSourceNone.py | 15 +- cloudinit/sources/DataSourceOVF.py | 311 +- cloudinit/sources/DataSourceOpenNebula.py | 190 +- cloudinit/sources/DataSourceOpenStack.py | 129 +- cloudinit/sources/DataSourceOracle.py | 125 +- cloudinit/sources/DataSourceRbxCloud.py | 194 +- cloudinit/sources/DataSourceScaleway.py | 131 +- cloudinit/sources/DataSourceSmartOS.py | 555 ++- cloudinit/sources/DataSourceUpCloud.py | 7 +- cloudinit/sources/DataSourceVMware.py | 13 +- cloudinit/sources/DataSourceVultr.py | 86 +- cloudinit/sources/__init__.py | 385 +- cloudinit/sources/helpers/azure.py | 693 +-- cloudinit/sources/helpers/digitalocean.py | 195 +- cloudinit/sources/helpers/hetzner.py | 15 +- cloudinit/sources/helpers/netlink.py | 187 +- cloudinit/sources/helpers/openstack.py | 438 +- cloudinit/sources/helpers/upcloud.py | 12 +- cloudinit/sources/helpers/vmware/imc/boot_proto.py | 5 +- cloudinit/sources/helpers/vmware/imc/config.py | 59 +- .../helpers/vmware/imc/config_custom_script.py | 45 +- .../sources/helpers/vmware/imc/config_file.py | 7 +- .../sources/helpers/vmware/imc/config_namespace.py | 1 + cloudinit/sources/helpers/vmware/imc/config_nic.py | 84 +- .../sources/helpers/vmware/imc/config_passwd.py | 38 +- .../sources/helpers/vmware/imc/config_source.py | 1 + .../sources/helpers/vmware/imc/guestcust_error.py | 1 + .../sources/helpers/vmware/imc/guestcust_event.py | 1 + .../sources/helpers/vmware/imc/guestcust_state.py | 1 + .../sources/helpers/vmware/imc/guestcust_util.py | 46 +- cloudinit/sources/helpers/vmware/imc/ipv4_mode.py | 11 +- cloudinit/sources/helpers/vmware/imc/nic.py | 33 +- cloudinit/sources/helpers/vmware/imc/nic_base.py | 29 +- cloudinit/sources/helpers/vultr.py | 172 +- cloudinit/ssh_util.py | 172 +- cloudinit/stages.py | 649 +-- cloudinit/subp.py | 165 +- cloudinit/temp_utils.py | 20 +- cloudinit/templater.py | 96 +- cloudinit/type_utils.py | 4 +- cloudinit/url_helper.py | 273 +- cloudinit/user_data.py | 121 +- cloudinit/util.py | 873 ++-- cloudinit/version.py | 9 +- cloudinit/warnings.py | 21 +- conftest.py | 5 +- doc/rtd/conf.py | 30 +- pyproject.toml | 8 + setup.py | 263 +- tests/integration_tests/__init__.py | 8 +- tests/integration_tests/bugs/test_gh570.py | 13 +- tests/integration_tests/bugs/test_gh626.py | 25 +- tests/integration_tests/bugs/test_gh632.py | 20 +- tests/integration_tests/bugs/test_gh668.py | 15 +- tests/integration_tests/bugs/test_gh671.py | 35 +- tests/integration_tests/bugs/test_gh868.py | 3 +- tests/integration_tests/bugs/test_lp1813396.py | 3 +- tests/integration_tests/bugs/test_lp1835584.py | 19 +- tests/integration_tests/bugs/test_lp1886531.py | 2 - tests/integration_tests/bugs/test_lp1897099.py | 13 +- tests/integration_tests/bugs/test_lp1898997.py | 
14 +- tests/integration_tests/bugs/test_lp1900837.py | 2 +- tests/integration_tests/bugs/test_lp1901011.py | 49 +- tests/integration_tests/bugs/test_lp1910835.py | 1 - tests/integration_tests/bugs/test_lp1912844.py | 4 +- tests/integration_tests/clouds.py | 163 +- tests/integration_tests/conftest.py | 130 +- .../datasources/test_lxd_discovery.py | 43 +- .../datasources/test_network_dependency.py | 17 +- tests/integration_tests/instances.py | 83 +- tests/integration_tests/integration_settings.py | 13 +- tests/integration_tests/modules/test_apt.py | 88 +- tests/integration_tests/modules/test_ca_certs.py | 1 - tests/integration_tests/modules/test_cli.py | 9 +- tests/integration_tests/modules/test_combined.py | 155 +- .../modules/test_command_output.py | 5 +- tests/integration_tests/modules/test_disk_setup.py | 76 +- tests/integration_tests/modules/test_growpart.py | 38 +- tests/integration_tests/modules/test_hotplug.py | 55 +- .../modules/test_jinja_templating.py | 11 +- .../modules/test_keys_to_console.py | 9 +- tests/integration_tests/modules/test_lxd_bridge.py | 2 - .../integration_tests/modules/test_ntp_servers.py | 30 +- .../modules/test_package_update_upgrade_install.py | 18 +- .../integration_tests/modules/test_persistence.py | 26 +- .../modules/test_power_state_change.py | 48 +- tests/integration_tests/modules/test_puppet.py | 6 +- .../integration_tests/modules/test_set_hostname.py | 10 +- .../integration_tests/modules/test_set_password.py | 15 +- .../modules/test_ssh_auth_key_fingerprints.py | 13 +- .../integration_tests/modules/test_ssh_generate.py | 16 +- .../modules/test_ssh_keys_provided.py | 58 +- .../integration_tests/modules/test_ssh_keysfile.py | 159 +- .../integration_tests/modules/test_user_events.py | 50 +- .../integration_tests/modules/test_users_groups.py | 21 +- .../modules/test_version_change.py | 45 +- .../integration_tests/modules/test_write_files.py | 32 +- tests/integration_tests/test_upgrade.py | 120 +- tests/integration_tests/util.py | 39 +- tests/unittests/__init__.py | 1 + tests/unittests/analyze/test_boot.py | 135 +- tests/unittests/analyze/test_dump.py | 213 +- tests/unittests/cmd/devel/test_hotplug_hook.py | 162 +- tests/unittests/cmd/devel/test_logs.py | 232 +- tests/unittests/cmd/devel/test_render.py | 152 +- tests/unittests/cmd/test_clean.py | 179 +- tests/unittests/cmd/test_cloud_id.py | 99 +- tests/unittests/cmd/test_main.py | 223 +- tests/unittests/cmd/test_query.py | 403 +- tests/unittests/cmd/test_status.py | 561 ++- tests/unittests/config/test_apt_conf_v1.py | 68 +- .../config/test_apt_configure_sources_list_v1.py | 131 +- .../config/test_apt_configure_sources_list_v3.py | 158 +- tests/unittests/config/test_apt_key.py | 117 +- tests/unittests/config/test_apt_source_v1.py | 765 ++-- tests/unittests/config/test_apt_source_v3.py | 1220 +++-- tests/unittests/config/test_cc_apk_configure.py | 148 +- tests/unittests/config/test_cc_apt_pipelining.py | 12 +- tests/unittests/config/test_cc_bootcmd.py | 100 +- tests/unittests/config/test_cc_ca_certs.py | 220 +- tests/unittests/config/test_cc_chef.py | 202 +- tests/unittests/config/test_cc_debug.py | 39 +- .../config/test_cc_disable_ec2_metadata.py | 44 +- tests/unittests/config/test_cc_disk_setup.py | 270 +- tests/unittests/config/test_cc_growpart.py | 232 +- tests/unittests/config/test_cc_grub_dpkg.py | 121 +- tests/unittests/config/test_cc_install_hotplug.py | 58 +- tests/unittests/config/test_cc_keys_to_console.py | 18 +- tests/unittests/config/test_cc_landscape.py | 178 +- 
tests/unittests/config/test_cc_locale.py | 99 +- tests/unittests/config/test_cc_lxd.py | 250 +- tests/unittests/config/test_cc_mcollective.py | 104 +- tests/unittests/config/test_cc_mounts.py | 449 +- tests/unittests/config/test_cc_ntp.py | 682 +-- .../unittests/config/test_cc_power_state_change.py | 74 +- tests/unittests/config/test_cc_puppet.py | 432 +- .../config/test_cc_refresh_rmc_and_interface.py | 162 +- tests/unittests/config/test_cc_resizefs.py | 436 +- tests/unittests/config/test_cc_resolv_conf.py | 76 +- tests/unittests/config/test_cc_rh_subscription.py | 366 +- tests/unittests/config/test_cc_rsyslog.py | 112 +- tests/unittests/config/test_cc_runcmd.py | 74 +- tests/unittests/config/test_cc_seed_random.py | 158 +- tests/unittests/config/test_cc_set_hostname.py | 185 +- tests/unittests/config/test_cc_set_passwords.py | 111 +- tests/unittests/config/test_cc_snap.py | 445 +- tests/unittests/config/test_cc_spacewalk.py | 36 +- tests/unittests/config/test_cc_ssh.py | 356 +- tests/unittests/config/test_cc_timezone.py | 31 +- tests/unittests/config/test_cc_ubuntu_advantage.py | 311 +- tests/unittests/config/test_cc_ubuntu_drivers.py | 213 +- tests/unittests/config/test_cc_update_etc_hosts.py | 63 +- tests/unittests/config/test_cc_users_groups.py | 264 +- tests/unittests/config/test_cc_write_files.py | 148 +- .../config/test_cc_write_files_deferred.py | 62 +- tests/unittests/config/test_cc_yum_add_repo.py | 105 +- tests/unittests/config/test_cc_zypper_add_repo.py | 166 +- tests/unittests/config/test_schema.py | 301 +- tests/unittests/distros/__init__.py | 10 +- tests/unittests/distros/test_arch.py | 50 +- tests/unittests/distros/test_bsd_utils.py | 49 +- tests/unittests/distros/test_create_users.py | 252 +- tests/unittests/distros/test_debian.py | 155 +- tests/unittests/distros/test_freebsd.py | 28 +- tests/unittests/distros/test_generic.py | 300 +- tests/unittests/distros/test_gentoo.py | 11 +- tests/unittests/distros/test_hostname.py | 16 +- tests/unittests/distros/test_hosts.py | 36 +- tests/unittests/distros/test_init.py | 273 +- tests/unittests/distros/test_manage_service.py | 33 +- tests/unittests/distros/test_netbsd.py | 11 +- tests/unittests/distros/test_netconfig.py | 605 ++- tests/unittests/distros/test_networking.py | 30 +- tests/unittests/distros/test_opensuse.py | 3 +- tests/unittests/distros/test_photon.py | 42 +- tests/unittests/distros/test_resolv.py | 55 +- tests/unittests/distros/test_sles.py | 3 +- tests/unittests/distros/test_sysconfig.py | 62 +- .../unittests/distros/test_user_data_normalize.py | 383 +- tests/unittests/filters/test_launch_index.py | 23 +- tests/unittests/helpers.py | 191 +- tests/unittests/net/test_dhcp.py | 678 +-- tests/unittests/net/test_init.py | 1368 +++--- tests/unittests/net/test_network_state.py | 82 +- tests/unittests/net/test_networkd.py | 2 +- tests/unittests/runs/test_merge_run.py | 49 +- tests/unittests/runs/test_simple_run.py | 132 +- tests/unittests/sources/helpers/test_netlink.py | 357 +- tests/unittests/sources/helpers/test_openstack.py | 51 +- tests/unittests/sources/test_aliyun.py | 217 +- tests/unittests/sources/test_altcloud.py | 311 +- tests/unittests/sources/test_azure.py | 3174 +++++++------ tests/unittests/sources/test_azure_helper.py | 1138 +++-- tests/unittests/sources/test_cloudsigma.py | 72 +- tests/unittests/sources/test_cloudstack.py | 121 +- tests/unittests/sources/test_common.py | 86 +- tests/unittests/sources/test_configdrive.py | 1100 +++-- tests/unittests/sources/test_digitalocean.py | 283 +- 
tests/unittests/sources/test_ec2.py | 851 ++-- tests/unittests/sources/test_exoscale.py | 248 +- tests/unittests/sources/test_gce.py | 304 +- tests/unittests/sources/test_hetzner.py | 85 +- tests/unittests/sources/test_ibmcloud.py | 299 +- tests/unittests/sources/test_init.py | 879 ++-- tests/unittests/sources/test_lxd.py | 134 +- tests/unittests/sources/test_maas.py | 147 +- tests/unittests/sources/test_nocloud.py | 320 +- tests/unittests/sources/test_opennebula.py | 888 ++-- tests/unittests/sources/test_openstack.py | 652 +-- tests/unittests/sources/test_oracle.py | 412 +- tests/unittests/sources/test_ovf.py | 1053 +++-- tests/unittests/sources/test_rbx.py | 215 +- tests/unittests/sources/test_scaleway.py | 481 +- tests/unittests/sources/test_smartos.py | 956 ++-- tests/unittests/sources/test_upcloud.py | 161 +- tests/unittests/sources/test_vmware.py | 12 +- tests/unittests/sources/test_vultr.py | 375 +- .../unittests/sources/vmware/test_custom_script.py | 61 +- .../sources/vmware/test_guestcust_util.py | 79 +- .../sources/vmware/test_vmware_config_file.py | 430 +- tests/unittests/test__init__.py | 193 +- tests/unittests/test_atomic_helper.py | 4 +- tests/unittests/test_builtin_handlers.py | 405 +- tests/unittests/test_cli.py | 214 +- tests/unittests/test_conftest.py | 10 +- tests/unittests/test_cs_util.py | 39 +- tests/unittests/test_data.py | 526 ++- tests/unittests/test_dhclient_hook.py | 89 +- tests/unittests/test_dmi.py | 90 +- tests/unittests/test_ds_identify.py | 1609 ++++--- tests/unittests/test_ec2_util.py | 376 +- tests/unittests/test_event.py | 16 +- tests/unittests/test_features.py | 36 +- tests/unittests/test_gpg.py | 103 +- tests/unittests/test_helpers.py | 11 +- tests/unittests/test_log.py | 12 +- tests/unittests/test_merging.py | 123 +- tests/unittests/test_net.py | 4833 ++++++++++++-------- tests/unittests/test_net_activators.py | 154 +- tests/unittests/test_net_freebsd.py | 45 +- tests/unittests/test_netinfo.py | 193 +- tests/unittests/test_pathprefix2dict.py | 28 +- tests/unittests/test_registry.py | 21 +- tests/unittests/test_render_cloudcfg.py | 71 +- tests/unittests/test_reporting.py | 379 +- tests/unittests/test_reporting_hyperv.py | 193 +- tests/unittests/test_simpletable.py | 47 +- tests/unittests/test_sshutil.py | 817 ++-- tests/unittests/test_stages.py | 444 +- tests/unittests/test_subp.py | 289 +- tests/unittests/test_temp_utils.py | 118 +- tests/unittests/test_templating.py | 103 +- tests/unittests/test_url_helper.py | 134 +- tests/unittests/test_util.py | 934 ++-- tests/unittests/test_version.py | 11 +- tests/unittests/util.py | 14 +- tools/mock-meta.py | 301 +- tools/validate-yaml.py | 4 +- tox.ini | 28 +- 441 files changed, 43425 insertions(+), 31496 deletions(-) create mode 100644 pyproject.toml (limited to 'tests/unittests/sources') diff --git a/.travis.yml b/.travis.yml index 9470cc31..c458db48 100644 --- a/.travis.yml +++ b/.travis.yml @@ -133,6 +133,10 @@ matrix: env: TOXENV=flake8 - python: 3.6 env: TOXENV=pylint + - python: 3.6 + env: TOXENV=black + - python: 3.6 + env: TOXENV=isort - python: 3.7 env: TOXENV=doc # Test all supported Python versions (but at the end, so we schedule diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 06b31497..aa09c61e 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -19,6 +19,7 @@ Before any pull request can be accepted, you must do the following: `tools/.github-cla-signers`_ * Add or update any `unit tests`_ accordingly * Add or update any `integration tests`_ (if applicable) +* Format code (using black and 
isort) with `tox -e format` * Ensure unit tests and linting pass using `tox`_ * Submit a PR against the `main` branch of the `cloud-init` repository @@ -133,6 +134,10 @@ Do these things for each feature or bug git commit +* Apply black and isort formatting rules with `tox`_:: + + tox -e format + * Run unit tests and lint/formatting checks with `tox`_:: tox diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index 99e5c203..36a5be78 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -5,62 +5,111 @@ import argparse import re import sys +from datetime import datetime from cloudinit.util import json_dumps -from datetime import datetime -from . import dump -from . import show + +from . import dump, show def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( - prog='cloudinit-analyze', - description='Devel tool: Analyze cloud-init logs and data') - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + prog="cloudinit-analyze", + description="Devel tool: Analyze cloud-init logs and data", + ) + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True parser_blame = subparsers.add_parser( - 'blame', help='Print list of executed stages ordered by time to init') + "blame", help="Print list of executed stages ordered by time to init" + ) parser_blame.add_argument( - '-i', '--infile', action='store', dest='infile', - default='/var/log/cloud-init.log', - help='specify where to read input.') + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input.", + ) parser_blame.add_argument( - '-o', '--outfile', action='store', dest='outfile', default='-', - help='specify where to write output. ') - parser_blame.set_defaults(action=('blame', analyze_blame)) + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output. ", + ) + parser_blame.set_defaults(action=("blame", analyze_blame)) parser_show = subparsers.add_parser( - 'show', help='Print list of in-order events during execution') - parser_show.add_argument('-f', '--format', action='store', - dest='print_format', default='%I%D @%Es +%ds', - help='specify formatting of output.') - parser_show.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input.') - parser_show.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output.') - parser_show.set_defaults(action=('show', analyze_show)) + "show", help="Print list of in-order events during execution" + ) + parser_show.add_argument( + "-f", + "--format", + action="store", + dest="print_format", + default="%I%D @%Es +%ds", + help="specify formatting of output.", + ) + parser_show.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input.", + ) + parser_show.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output.", + ) + parser_show.set_defaults(action=("show", analyze_show)) parser_dump = subparsers.add_parser( - 'dump', help='Dump cloud-init events in JSON format') - parser_dump.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input. 
') - parser_dump.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output. ') - parser_dump.set_defaults(action=('dump', analyze_dump)) + "dump", help="Dump cloud-init events in JSON format" + ) + parser_dump.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input. ", + ) + parser_dump.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output. ", + ) + parser_dump.set_defaults(action=("dump", analyze_dump)) parser_boot = subparsers.add_parser( - 'boot', help='Print list of boot times for kernel and cloud-init') - parser_boot.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input. ') - parser_boot.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output.') - parser_boot.set_defaults(action=('boot', analyze_boot)) + "boot", help="Print list of boot times for kernel and cloud-init" + ) + parser_boot.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input. ", + ) + parser_boot.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output.", + ) + parser_boot.set_defaults(action=("boot", analyze_boot)) return parser @@ -78,61 +127,68 @@ def analyze_boot(name, args): """ infh, outfh = configure_io(args) kernel_info = show.dist_check_timestamp() - status_code, kernel_start, kernel_end, ci_sysd_start = \ - kernel_info + status_code, kernel_start, kernel_end, ci_sysd_start = kernel_info kernel_start_timestamp = datetime.utcfromtimestamp(kernel_start) kernel_end_timestamp = datetime.utcfromtimestamp(kernel_end) ci_sysd_start_timestamp = datetime.utcfromtimestamp(ci_sysd_start) try: - last_init_local = \ - [e for e in _get_events(infh) if e['name'] == 'init-local' and - 'starting search' in e['description']][-1] - ci_start = datetime.utcfromtimestamp(last_init_local['timestamp']) + last_init_local = [ + e + for e in _get_events(infh) + if e["name"] == "init-local" + and "starting search" in e["description"] + ][-1] + ci_start = datetime.utcfromtimestamp(last_init_local["timestamp"]) except IndexError: - ci_start = 'Could not find init-local log-line in cloud-init.log' + ci_start = "Could not find init-local log-line in cloud-init.log" status_code = show.FAIL_CODE - FAILURE_MSG = 'Your Linux distro or container does not support this ' \ - 'functionality.\n' \ - 'You must be running a Kernel Telemetry supported ' \ - 'distro.\nPlease check ' \ - 'https://cloudinit.readthedocs.io/en/latest' \ - '/topics/analyze.html for more ' \ - 'information on supported distros.\n' - - SUCCESS_MSG = '-- Most Recent Boot Record --\n' \ - ' Kernel Started at: {k_s_t}\n' \ - ' Kernel ended boot at: {k_e_t}\n' \ - ' Kernel time to boot (seconds): {k_r}\n' \ - ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ - ' Time between Kernel end boot and Cloud-init ' \ - 'activation (seconds): {bt_r}\n' \ - ' Cloud-init start: {ci_start}\n' - - CONTAINER_MSG = '-- Most Recent Container Boot Record --\n' \ - ' Container started at: {k_s_t}\n' \ - ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ - ' Cloud-init start: {ci_start}\n' \ - + FAILURE_MSG = ( + "Your Linux distro or container does not support this " + 
"functionality.\n" + "You must be running a Kernel Telemetry supported " + "distro.\nPlease check " + "https://cloudinit.readthedocs.io/en/latest" + "/topics/analyze.html for more " + "information on supported distros.\n" + ) + + SUCCESS_MSG = ( + "-- Most Recent Boot Record --\n" + " Kernel Started at: {k_s_t}\n" + " Kernel ended boot at: {k_e_t}\n" + " Kernel time to boot (seconds): {k_r}\n" + " Cloud-init activated by systemd at: {ci_sysd_t}\n" + " Time between Kernel end boot and Cloud-init " + "activation (seconds): {bt_r}\n" + " Cloud-init start: {ci_start}\n" + ) + + CONTAINER_MSG = ( + "-- Most Recent Container Boot Record --\n" + " Container started at: {k_s_t}\n" + " Cloud-init activated by systemd at: {ci_sysd_t}\n" + " Cloud-init start: {ci_start}\n" + ) status_map = { show.FAIL_CODE: FAILURE_MSG, show.CONTAINER_CODE: CONTAINER_MSG, - show.SUCCESS_CODE: SUCCESS_MSG + show.SUCCESS_CODE: SUCCESS_MSG, } kernel_runtime = kernel_end - kernel_start between_process_runtime = ci_sysd_start - kernel_end kwargs = { - 'k_s_t': kernel_start_timestamp, - 'k_e_t': kernel_end_timestamp, - 'k_r': kernel_runtime, - 'bt_r': between_process_runtime, - 'k_e': kernel_end, - 'k_s': kernel_start, - 'ci_sysd': ci_sysd_start, - 'ci_sysd_t': ci_sysd_start_timestamp, - 'ci_start': ci_start + "k_s_t": kernel_start_timestamp, + "k_e_t": kernel_end_timestamp, + "k_r": kernel_runtime, + "bt_r": between_process_runtime, + "k_e": kernel_end, + "k_s": kernel_start, + "ci_sysd": ci_sysd_start, + "ci_sysd_t": ci_sysd_start_timestamp, + "ci_start": ci_start, } outfh.write(status_map[status_code].format(**kwargs)) @@ -152,15 +208,16 @@ def analyze_blame(name, args): and sorting by record data ('delta') """ (infh, outfh) = configure_io(args) - blame_format = ' %ds (%n)' - r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE) - for idx, record in enumerate(show.show_events(_get_events(infh), - blame_format)): + blame_format = " %ds (%n)" + r = re.compile(r"(^\s+\d+\.\d+)", re.MULTILINE) + for idx, record in enumerate( + show.show_events(_get_events(infh), blame_format) + ): srecs = sorted(filter(r.match, record), reverse=True) - outfh.write('-- Boot Record %02d --\n' % (idx + 1)) - outfh.write('\n'.join(srecs) + '\n') - outfh.write('\n') - outfh.write('%d boot records analyzed\n' % (idx + 1)) + outfh.write("-- Boot Record %02d --\n" % (idx + 1)) + outfh.write("\n".join(srecs) + "\n") + outfh.write("\n") + outfh.write("%d boot records analyzed\n" % (idx + 1)) def analyze_show(name, args): @@ -184,21 +241,25 @@ def analyze_show(name, args): Finished stage: (modules-final) 0.NNN seconds """ (infh, outfh) = configure_io(args) - for idx, record in enumerate(show.show_events(_get_events(infh), - args.print_format)): - outfh.write('-- Boot Record %02d --\n' % (idx + 1)) - outfh.write('The total time elapsed since completing an event is' - ' printed after the "@" character.\n') - outfh.write('The time the event takes is printed after the "+" ' - 'character.\n\n') - outfh.write('\n'.join(record) + '\n') - outfh.write('%d boot records analyzed\n' % (idx + 1)) + for idx, record in enumerate( + show.show_events(_get_events(infh), args.print_format) + ): + outfh.write("-- Boot Record %02d --\n" % (idx + 1)) + outfh.write( + "The total time elapsed since completing an event is" + ' printed after the "@" character.\n' + ) + outfh.write( + 'The time the event takes is printed after the "+" character.\n\n' + ) + outfh.write("\n".join(record) + "\n") + outfh.write("%d boot records analyzed\n" % (idx + 1)) def analyze_dump(name, args): 
"""Dump cloud-init events in json format""" (infh, outfh) = configure_io(args) - outfh.write(json_dumps(_get_events(infh)) + '\n') + outfh.write(json_dumps(_get_events(infh)) + "\n") def _get_events(infile): @@ -211,28 +272,28 @@ def _get_events(infile): def configure_io(args): """Common parsing and setup of input/output files""" - if args.infile == '-': + if args.infile == "-": infh = sys.stdin else: try: - infh = open(args.infile, 'r') + infh = open(args.infile, "r") except OSError: - sys.stderr.write('Cannot open file %s\n' % args.infile) + sys.stderr.write("Cannot open file %s\n" % args.infile) sys.exit(1) - if args.outfile == '-': + if args.outfile == "-": outfh = sys.stdout else: try: - outfh = open(args.outfile, 'w') + outfh = open(args.outfile, "w") except OSError: - sys.stderr.write('Cannot open file %s\n' % args.outfile) + sys.stderr.write("Cannot open file %s\n" % args.outfile) sys.exit(1) return (infh, outfh) -if __name__ == '__main__': +if __name__ == "__main__": parser = get_parser() args = parser.parse_args() (name, action_functor) = args.action diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index 62ad51fe..8e6e3c6a 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -1,21 +1,20 @@ # This file is part of cloud-init. See LICENSE file for license information. import calendar -from datetime import datetime import sys +from datetime import datetime -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util stage_to_description = { - 'finished': 'finished running cloud-init', - 'init-local': 'starting search for local datasources', - 'init-network': 'searching for network datasources', - 'init': 'searching for network datasources', - 'modules-config': 'running config modules', - 'modules-final': 'finalizing modules', - 'modules': 'running modules for', - 'single': 'running single module ', + "finished": "finished running cloud-init", + "init-local": "starting search for local datasources", + "init-network": "searching for network datasources", + "init": "searching for network datasources", + "modules-config": "running config modules", + "modules-final": "finalizing modules", + "modules": "running modules for", + "single": "running single module ", } # logger's asctime format @@ -34,11 +33,11 @@ def parse_timestamp(timestampstr): if timestampstr.split()[0] in months: # Aug 29 22:55:26 FMT = DEFAULT_FMT - if '.' in timestampstr: + if "." 
in timestampstr: FMT = CLOUD_INIT_JOURNALCTL_FMT - dt = datetime.strptime(timestampstr + " " + - str(datetime.now().year), - FMT) + dt = datetime.strptime( + timestampstr + " " + str(datetime.now().year), FMT + ) timestamp = dt.strftime("%s.%f") elif "," in timestampstr: # 2016-09-12 14:39:20,839 @@ -52,7 +51,7 @@ def parse_timestamp(timestampstr): def parse_timestamp_from_date(timestampstr): - out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr]) + out, _ = subp.subp(["date", "+%s.%3N", "-d", timestampstr]) timestamp = out.strip() return float(timestamp) @@ -79,8 +78,8 @@ def parse_ci_logline(line): # Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \ # init-local/check-cache: attempting to read from cache [check] - amazon_linux_2_sep = ' cloud-init[' - separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep] + amazon_linux_2_sep = " cloud-init[" + separators = [" - ", " [CLOUDINIT] ", amazon_linux_2_sep] found = False for sep in separators: if sep in line: @@ -99,7 +98,7 @@ def parse_ci_logline(line): if "," in timehost: timestampstr, extra = timehost.split(",") timestampstr += ",%s" % extra.split()[0] - if ' ' in extra: + if " " in extra: hostname = extra.split()[-1] else: hostname = timehost.split()[-1] @@ -111,11 +110,11 @@ def parse_ci_logline(line): eventstr = eventstr.split(maxsplit=1)[1] else: timestampstr = timehost.split(hostname)[0].strip() - if 'Cloud-init v.' in eventstr: - event_type = 'start' - if 'running' in eventstr: - stage_and_timestamp = eventstr.split('running')[1].lstrip() - event_name, _ = stage_and_timestamp.split(' at ') + if "Cloud-init v." in eventstr: + event_type = "start" + if "running" in eventstr: + stage_and_timestamp = eventstr.split("running")[1].lstrip() + event_name, _ = stage_and_timestamp.split(" at ") event_name = event_name.replace("'", "").replace(":", "-") if event_name == "init": event_name = "init-network" @@ -128,17 +127,17 @@ def parse_ci_logline(line): event_description = eventstr.split(event_name)[1].strip() event = { - 'name': event_name.rstrip(":"), - 'description': event_description, - 'timestamp': parse_timestamp(timestampstr), - 'origin': 'cloudinit', - 'event_type': event_type.rstrip(":"), + "name": event_name.rstrip(":"), + "description": event_description, + "timestamp": parse_timestamp(timestampstr), + "origin": "cloudinit", + "event_type": event_type.rstrip(":"), } - if event['event_type'] == "finish": + if event["event_type"] == "finish": result = event_description.split(":")[0] - desc = event_description.split(result)[1].lstrip(':').strip() - event['result'] = result - event['description'] = desc.strip() + desc = event_description.split(result)[1].lstrip(":").strip() + event["result"] = result + event["description"] = desc.strip() return event @@ -146,10 +145,10 @@ def parse_ci_logline(line): def dump_events(cisource=None, rawdata=None): events = [] event = None - CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.'] + CI_EVENT_MATCHES = ["start:", "finish:", "Cloud-init v."] if not any([cisource, rawdata]): - raise ValueError('Either cisource or rawdata parameters are required') + raise ValueError("Either cisource or rawdata parameters are required") if rawdata: data = rawdata.splitlines() @@ -162,7 +161,7 @@ def dump_events(cisource=None, rawdata=None): try: event = parse_ci_logline(line) except ValueError: - sys.stderr.write('Skipping invalid entry\n') + sys.stderr.write("Skipping invalid entry\n") if event: events.append(event) diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py index 
01a4d3e5..5fd9cdfd 100644 --- a/cloudinit/analyze/show.py +++ b/cloudinit/analyze/show.py @@ -8,11 +8,10 @@ import base64 import datetime import json import os -import time import sys +import time -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.distros import uses_systemd # Example events: @@ -35,24 +34,25 @@ from cloudinit.distros import uses_systemd # } format_key = { - '%d': 'delta', - '%D': 'description', - '%E': 'elapsed', - '%e': 'event_type', - '%I': 'indent', - '%l': 'level', - '%n': 'name', - '%o': 'origin', - '%r': 'result', - '%t': 'timestamp', - '%T': 'total_time', + "%d": "delta", + "%D": "description", + "%E": "elapsed", + "%e": "event_type", + "%I": "indent", + "%l": "level", + "%n": "name", + "%o": "origin", + "%r": "result", + "%t": "timestamp", + "%T": "total_time", } -formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v) - for k, v in format_key.items()]) -SUCCESS_CODE = 'successful' -FAIL_CODE = 'failure' -CONTAINER_CODE = 'container' +formatting_help = " ".join( + ["{0}: {1}".format(k.replace("%", "%%"), v) for k, v in format_key.items()] +) +SUCCESS_CODE = "successful" +FAIL_CODE = "failure" +CONTAINER_CODE = "container" TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1) @@ -60,7 +60,7 @@ def format_record(msg, event): for i, j in format_key.items(): if i in msg: # ensure consistent formatting of time values - if j in ['delta', 'elapsed', 'timestamp']: + if j in ["delta", "elapsed", "timestamp"]: msg = msg.replace(i, "{%s:08.5f}" % j) else: msg = msg.replace(i, "{%s}" % j) @@ -68,13 +68,13 @@ def format_record(msg, event): def dump_event_files(event): - content = dict((k, v) for k, v in event.items() if k not in ['content']) - files = content['files'] + content = dict((k, v) for k, v in event.items() if k not in ["content"]) + files = content["files"] saved = [] for f in files: - fname = f['path'] + fname = f["path"] fn_local = os.path.basename(fname) - fcontent = base64.b64decode(f['content']).decode('ascii') + fcontent = base64.b64decode(f["content"]).decode("ascii") util.write_file(fn_local, fcontent) saved.append(fn_local) @@ -83,13 +83,13 @@ def dump_event_files(event): def event_name(event): if event: - return event.get('name') + return event.get("name") return None def event_type(event): if event: - return event.get('event_type') + return event.get("event_type") return None @@ -100,7 +100,7 @@ def event_parent(event): def event_timestamp(event): - return float(event.get('timestamp')) + return float(event.get("timestamp")) def event_datetime(event): @@ -117,41 +117,44 @@ def event_duration(start, finish): def event_record(start_time, start, finish): record = finish.copy() - record.update({ - 'delta': event_duration(start, finish), - 'elapsed': delta_seconds(start_time, event_datetime(start)), - 'indent': '|' + ' ' * (event_name(start).count('/') - 1) + '`->', - }) + record.update( + { + "delta": event_duration(start, finish), + "elapsed": delta_seconds(start_time, event_datetime(start)), + "indent": "|" + " " * (event_name(start).count("/") - 1) + "`->", + } + ) return record def total_time_record(total_time): - return 'Total Time: %3.5f seconds\n' % total_time + return "Total Time: %3.5f seconds\n" % total_time class SystemctlReader(object): - ''' + """ Class for dealing with all systemctl subp calls in a consistent manner. 
- ''' + """ + def __init__(self, property, parameter=None): self.epoch = None - self.args = ['/bin/systemctl', 'show'] + self.args = ["/bin/systemctl", "show"] if parameter: self.args.append(parameter) - self.args.extend(['-p', property]) + self.args.extend(["-p", property]) # Don't want the init of our object to break. Instead of throwing # an exception, set an error code that gets checked when data is # requested from the object self.failure = self.subp() def subp(self): - ''' + """ Make a subp call based on set args and handle errors by setting failure code :return: whether the subp call failed or not - ''' + """ try: value, err = subp.subp(self.args, capture=True) if err: @@ -162,41 +165,41 @@ class SystemctlReader(object): return systemctl_fail def parse_epoch_as_float(self): - ''' + """ If subp call succeeded, return the timestamp from subp as a float. :return: timestamp as a float - ''' + """ # subp has 2 ways to fail: it either fails and throws an exception, # or returns an error code. Raise an exception here in order to make # sure both scenarios throw exceptions if self.failure: - raise RuntimeError('Subprocess call to systemctl has failed, ' - 'returning error code ({})' - .format(self.failure)) + raise RuntimeError( + "Subprocess call to systemctl has failed, " + "returning error code ({})".format(self.failure) + ) # Output from systemctl show has the format Property=Value. # For example, UserspaceMonotonic=1929304 - timestamp = self.epoch.split('=')[1] + timestamp = self.epoch.split("=")[1] # Timestamps reported by systemctl are in microseconds, converting return float(timestamp) / 1000000 def dist_check_timestamp(): - ''' + """ Determine which init system a particular linux distro is using. Each init system (systemd, upstart, etc) has a different way of providing timestamps. :return: timestamps of kernelboot, kernelendboot, and cloud-initstart or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved. - ''' + """ if uses_systemd(): return gather_timestamps_using_systemd() # Use dmesg to get timestamps if the distro does not have systemd - if util.is_FreeBSD() or 'gentoo' in \ - util.system_info()['system'].lower(): + if util.is_FreeBSD() or "gentoo" in util.system_info()["system"].lower(): return gather_timestamps_using_dmesg() # this distro doesn't fit anything that is supported by cloud-init. just @@ -205,20 +208,20 @@ def dist_check_timestamp(): def gather_timestamps_using_dmesg(): - ''' + """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization using dmesg as opposed to systemctl :return: the two timestamps plus a dummy timestamp to keep consistency with gather_timestamps_using_systemd - ''' + """ try: - data, _ = subp.subp(['dmesg'], capture=True) + data, _ = subp.subp(["dmesg"], capture=True) split_entries = data[0].splitlines() for i in split_entries: - if i.decode('UTF-8').find('user') != -1: - splitup = i.decode('UTF-8').split() - stripped = splitup[1].strip(']') + if i.decode("UTF-8").find("user") != -1: + splitup = i.decode("UTF-8").split() + stripped = splitup[1].strip("]") # kernel timestamp from dmesg is equal to 0, # with the userspace timestamp relative to it. 
@@ -228,8 +231,7 @@ def gather_timestamps_using_dmesg(): # systemd wont start cloud-init in this case, # so we cannot get that timestamp - return SUCCESS_CODE, kernel_start, kernel_end, \ - kernel_end + return SUCCESS_CODE, kernel_start, kernel_end, kernel_end except Exception: pass @@ -237,18 +239,20 @@ def gather_timestamps_using_dmesg(): def gather_timestamps_using_systemd(): - ''' + """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization. and cloud-init systemd unit activation :return: the three timestamps - ''' + """ kernel_start = float(time.time()) - float(util.uptime()) try: - delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\ - .parse_epoch_as_float() - delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic', - 'cloud-init-local').parse_epoch_as_float() + delta_k_end = SystemctlReader( + "UserspaceTimestampMonotonic" + ).parse_epoch_as_float() + delta_ci_s = SystemctlReader( + "InactiveExitTimestampMonotonic", "cloud-init-local" + ).parse_epoch_as_float() base_time = kernel_start status = SUCCESS_CODE # lxc based containers do not set their monotonic zero point to be when @@ -262,12 +266,13 @@ def gather_timestamps_using_systemd(): # in containers when https://github.com/lxc/lxcfs/issues/292 # is fixed, util.uptime() should be used instead of stat on try: - file_stat = os.stat('/proc/1/cmdline') + file_stat = os.stat("/proc/1/cmdline") kernel_start = file_stat.st_atime except OSError as err: - raise RuntimeError('Could not determine container boot ' - 'time from /proc/1/cmdline. ({})' - .format(err)) from err + raise RuntimeError( + "Could not determine container boot " + "time from /proc/1/cmdline. ({})".format(err) + ) from err status = CONTAINER_CODE else: status = FAIL_CODE @@ -283,10 +288,14 @@ def gather_timestamps_using_systemd(): return status, kernel_start, kernel_end, cloudinit_sysd -def generate_records(events, blame_sort=False, - print_format="(%n) %d seconds in %I%D", - dump_files=False, log_datafiles=False): - ''' +def generate_records( + events, + blame_sort=False, + print_format="(%n) %d seconds in %I%D", + dump_files=False, + log_datafiles=False, +): + """ Take in raw events and create parent-child dependencies between events in order to order events in chronological order. 
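The `print_format` argument threaded through `generate_records` is consumed by `format_record` (reformatted earlier in this file), which rewrites `%`-placeholders into `str.format` fields and gives time values a fixed-width float format. A trimmed, runnable rendition using only two of the real placeholders and an invented event:

    # Subset of the real format_key table; the event below is invented.
    format_key = {"%d": "delta", "%n": "name"}

    def format_record(msg, event):
        for i, j in format_key.items():
            if i in msg:
                # ensure consistent formatting of time values
                if j in ("delta", "elapsed", "timestamp"):
                    msg = msg.replace(i, "{%s:08.5f}" % j)
                else:
                    msg = msg.replace(i, "{%s}" % j)
        return msg.format(**event)

    print(format_record("(%n) %d seconds", {"name": "init-network", "delta": 1.5}))
    # -> (init-network) 01.50000 seconds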
@@ -298,9 +307,9 @@ def generate_records(events, blame_sort=False, :param log_datafiles: whether or not to log events generated :return: boot records ordered chronologically - ''' + """ - sorted_events = sorted(events, key=lambda x: x['timestamp']) + sorted_events = sorted(events, key=lambda x: x["timestamp"]) records = [] start_time = None total_time = 0.0 @@ -316,8 +325,8 @@ def generate_records(events, blame_sort=False, except IndexError: next_evt = None - if event_type(event) == 'start': - if event.get('name') in stages_seen: + if event_type(event) == "start": + if event.get("name") in stages_seen: records.append(total_time_record(total_time)) boot_records.append(records) records = [] @@ -331,25 +340,28 @@ def generate_records(events, blame_sort=False, # see if we have a pair if event_name(event) == event_name(next_evt): - if event_type(next_evt) == 'finish': - records.append(format_record(print_format, - event_record(start_time, - event, - next_evt))) + if event_type(next_evt) == "finish": + records.append( + format_record( + print_format, + event_record(start_time, event, next_evt), + ) + ) else: # This is a parent event - records.append("Starting stage: %s" % event.get('name')) + records.append("Starting stage: %s" % event.get("name")) unprocessed.append(event) - stages_seen.append(event.get('name')) + stages_seen.append(event.get("name")) continue else: prev_evt = unprocessed.pop() if event_name(event) == event_name(prev_evt): record = event_record(start_time, prev_evt, event) - records.append(format_record("Finished stage: " - "(%n) %d seconds", - record) + "\n") - total_time += record.get('delta') + records.append( + format_record("Finished stage: (%n) %d seconds", record) + + "\n" + ) + total_time += record.get("delta") else: # not a match, put it back unprocessed.append(prev_evt) @@ -360,7 +372,7 @@ def generate_records(events, blame_sort=False, def show_events(events, print_format): - ''' + """ A passthrough method that makes it easier to call generate_records() :param events: JSONs from dump that represents events taken from logs @@ -368,18 +380,18 @@ def show_events(events, print_format): and time taken by the event in one line :return: boot records ordered chronologically - ''' + """ return generate_records(events, print_format=print_format) def load_events_infile(infile): - ''' + """ Takes in a log file, read it, and convert to json. :param infile: The Log file to be read :return: json version of logfile, raw file - ''' + """ data = infile.read() try: return json.loads(data), data diff --git a/cloudinit/apport.py b/cloudinit/apport.py index aadc638f..92068aa9 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -2,127 +2,143 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-'''Cloud-init apport interface''' +"""Cloud-init apport interface""" try: from apport.hookutils import ( - attach_file, attach_root_command_outputs, root_command_output) + attach_file, + attach_root_command_outputs, + root_command_output, + ) + has_apport = True except ImportError: has_apport = False KNOWN_CLOUD_NAMES = [ - 'AliYun', - 'AltCloud', - 'Amazon - Ec2', - 'Azure', - 'Bigstep', - 'Brightbox', - 'CloudSigma', - 'CloudStack', - 'DigitalOcean', - 'E24Cloud', - 'GCE - Google Compute Engine', - 'Exoscale', - 'Hetzner Cloud', - 'IBM - (aka SoftLayer or BlueMix)', - 'LXD', - 'MAAS', - 'NoCloud', - 'OpenNebula', - 'OpenStack', - 'Oracle', - 'OVF', - 'RbxCloud - (HyperOne, Rootbox, Rubikon)', - 'OpenTelekomCloud', - 'SAP Converged Cloud', - 'Scaleway', - 'SmartOS', - 'UpCloud', - 'VMware', - 'Vultr', - 'ZStack', - 'Other' + "AliYun", + "AltCloud", + "Amazon - Ec2", + "Azure", + "Bigstep", + "Brightbox", + "CloudSigma", + "CloudStack", + "DigitalOcean", + "E24Cloud", + "GCE - Google Compute Engine", + "Exoscale", + "Hetzner Cloud", + "IBM - (aka SoftLayer or BlueMix)", + "LXD", + "MAAS", + "NoCloud", + "OpenNebula", + "OpenStack", + "Oracle", + "OVF", + "RbxCloud - (HyperOne, Rootbox, Rubikon)", + "OpenTelekomCloud", + "SAP Converged Cloud", + "Scaleway", + "SmartOS", + "UpCloud", + "VMware", + "Vultr", + "ZStack", + "Other", ] # Potentially clear text collected logs -CLOUDINIT_LOG = '/var/log/cloud-init.log' -CLOUDINIT_OUTPUT_LOG = '/var/log/cloud-init-output.log' -USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional +CLOUDINIT_LOG = "/var/log/cloud-init.log" +CLOUDINIT_OUTPUT_LOG = "/var/log/cloud-init-output.log" +USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional def attach_cloud_init_logs(report, ui=None): - '''Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.''' - attach_root_command_outputs(report, { - 'cloud-init-log-warnings': - 'egrep -i "warn|error" /var/log/cloud-init.log', - 'cloud-init-output.log.txt': 'cat /var/log/cloud-init-output.log'}) + """Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.""" + attach_root_command_outputs( + report, + { + "cloud-init-log-warnings": ( + 'egrep -i "warn|error" /var/log/cloud-init.log' + ), + "cloud-init-output.log.txt": "cat /var/log/cloud-init-output.log", + }, + ) root_command_output( - ['cloud-init', 'collect-logs', '-t', '/tmp/cloud-init-logs.tgz']) - attach_file(report, '/tmp/cloud-init-logs.tgz', 'logs.tgz') + ["cloud-init", "collect-logs", "-t", "/tmp/cloud-init-logs.tgz"] + ) + attach_file(report, "/tmp/cloud-init-logs.tgz", "logs.tgz") def attach_hwinfo(report, ui=None): - '''Optionally attach hardware info from lshw.''' + """Optionally attach hardware info from lshw.""" prompt = ( - 'Your device details (lshw) may be useful to developers when' - ' addressing this bug, but gathering it requires admin privileges.' - ' Would you like to include this info?') + "Your device details (lshw) may be useful to developers when" + " addressing this bug, but gathering it requires admin privileges." + " Would you like to include this info?" + ) if ui and ui.yesno(prompt): - attach_root_command_outputs(report, {'lshw.txt': 'lshw'}) + attach_root_command_outputs(report, {"lshw.txt": "lshw"}) def attach_cloud_info(report, ui=None): - '''Prompt for cloud details if available.''' + """Prompt for cloud details if available.""" if ui: - prompt = 'Is this machine running in a cloud environment?' + prompt = "Is this machine running in a cloud environment?" 
response = ui.yesno(prompt) if response is None: raise StopIteration # User cancelled if response: - prompt = ('Please select the cloud vendor or environment in which' - ' this instance is running') + prompt = ( + "Please select the cloud vendor or environment in which" + " this instance is running" + ) response = ui.choice(prompt, KNOWN_CLOUD_NAMES) if response: - report['CloudName'] = KNOWN_CLOUD_NAMES[response[0]] + report["CloudName"] = KNOWN_CLOUD_NAMES[response[0]] else: - report['CloudName'] = 'None' + report["CloudName"] = "None" def attach_user_data(report, ui=None): - '''Optionally provide user-data if desired.''' + """Optionally provide user-data if desired.""" if ui: prompt = ( - 'Your user-data or cloud-config file can optionally be provided' - ' from {0} and could be useful to developers when addressing this' - ' bug. Do you wish to attach user-data to this bug?'.format( - USER_DATA_FILE)) + "Your user-data or cloud-config file can optionally be provided" + " from {0} and could be useful to developers when addressing this" + " bug. Do you wish to attach user-data to this bug?".format( + USER_DATA_FILE + ) + ) response = ui.yesno(prompt) if response is None: raise StopIteration # User cancelled if response: - attach_file(report, USER_DATA_FILE, 'user_data.txt') + attach_file(report, USER_DATA_FILE, "user_data.txt") def add_bug_tags(report): - '''Add any appropriate tags to the bug.''' - if 'JournalErrors' in report.keys(): - errors = report['JournalErrors'] - if 'Breaking ordering cycle' in errors: - report['Tags'] = 'systemd-ordering' + """Add any appropriate tags to the bug.""" + if "JournalErrors" in report.keys(): + errors = report["JournalErrors"] + if "Breaking ordering cycle" in errors: + report["Tags"] = "systemd-ordering" def add_info(report, ui): - '''This is an entry point to run cloud-init's apport functionality. + """This is an entry point to run cloud-init's apport functionality. Distros which want apport support will have a cloud-init package-hook at /usr/share/apport/package-hooks/cloud-init.py which defines an add_info function and returns the result of cloudinit.apport.add_info(report, ui). - ''' + """ if not has_apport: raise RuntimeError( - 'No apport imports discovered. Apport functionality disabled') + "No apport imports discovered. 
Apport functionality disabled" + ) attach_cloud_init_logs(report, ui) attach_hwinfo(report, ui) attach_cloud_info(report, ui) @@ -130,4 +146,5 @@ def add_info(report, ui): add_bug_tags(report) return True + # vi: ts=4 expandtab diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py index 485ff92f..ae117fad 100644 --- a/cloudinit/atomic_helper.py +++ b/cloudinit/atomic_helper.py @@ -10,8 +10,9 @@ _DEF_PERMS = 0o644 LOG = logging.getLogger(__name__) -def write_file(filename, content, mode=_DEF_PERMS, - omode="wb", preserve_mode=False): +def write_file( + filename, content, mode=_DEF_PERMS, omode="wb", preserve_mode=False +): # open filename in mode 'omode', write content, set permissions to 'mode' if preserve_mode: @@ -23,12 +24,18 @@ def write_file(filename, content, mode=_DEF_PERMS, tf = None try: - tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename), - delete=False, mode=omode) + tf = tempfile.NamedTemporaryFile( + dir=os.path.dirname(filename), delete=False, mode=omode + ) LOG.debug( "Atomically writing to file %s (via temporary file %s) - %s: [%o]" " %d bytes/chars", - filename, tf.name, omode, mode, len(content)) + filename, + tf.name, + omode, + mode, + len(content), + ) tf.write(content) tf.close() os.chmod(tf.name, mode) @@ -42,7 +49,11 @@ def write_file(filename, content, mode=_DEF_PERMS, def write_json(filename, data, mode=_DEF_PERMS): # dump json representation of data to file filename. return write_file( - filename, json.dumps(data, indent=1, sort_keys=True) + "\n", - omode="w", mode=mode) + filename, + json.dumps(data, indent=1, sort_keys=True) + "\n", + omode="w", + mode=mode, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index 7ae98e1c..91e48103 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -35,7 +35,8 @@ class Cloud(object): reporter = events.ReportEventStack( name="unnamed-cloud-reporter", description="unnamed-cloud-reporter", - reporting_enabled=False) + reporting_enabled=False, + ) self.reporter = reporter # If a 'user' manipulates logging or logging services @@ -56,8 +57,11 @@ class Cloud(object): def get_template_filename(self, name): fn = self.paths.template_tpl % (name) if not os.path.isfile(fn): - LOG.warning("No template found in %s for template named %s", - os.path.dirname(fn), name) + LOG.warning( + "No template found in %s for template named %s", + os.path.dirname(fn), + name, + ) return None return fn @@ -80,7 +84,8 @@ class Cloud(object): def get_hostname(self, fqdn=False, metadata_only=False): return self.datasource.get_hostname( - fqdn=fqdn, metadata_only=metadata_only) + fqdn=fqdn, metadata_only=metadata_only + ) def device_name_to_device(self, name): return self.datasource.device_name_to_device(name) @@ -94,4 +99,5 @@ class Cloud(object): def get_ipath(self, name=None): return self.paths.get_ipath(name) + # vi: ts=4 expandtab diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index 3502dd56..0e1db118 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -10,9 +10,13 @@ import os import sys from cloudinit.stages import Init -from cloudinit.subp import (ProcessExecutionError, subp) +from cloudinit.subp import ProcessExecutionError, subp from cloudinit.util import ( - del_dir, del_file, get_config_logfiles, is_link, error + del_dir, + del_file, + error, + get_config_logfiles, + is_link, ) @@ -27,18 +31,35 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='clean', - description=('Remove logs and artifacts so cloud-init 
re-runs on ' - 'a clean system')) + prog="clean", + description=( + "Remove logs and artifacts so cloud-init re-runs on " + "a clean system" + ), + ) parser.add_argument( - '-l', '--logs', action='store_true', default=False, dest='remove_logs', - help='Remove cloud-init logs.') + "-l", + "--logs", + action="store_true", + default=False, + dest="remove_logs", + help="Remove cloud-init logs.", + ) parser.add_argument( - '-r', '--reboot', action='store_true', default=False, - help='Reboot system after logs are cleaned so cloud-init re-runs.') + "-r", + "--reboot", + action="store_true", + default=False, + help="Reboot system after logs are cleaned so cloud-init re-runs.", + ) parser.add_argument( - '-s', '--seed', action='store_true', default=False, dest='remove_seed', - help='Remove cloud-init seed directory /var/lib/cloud/seed.') + "-s", + "--seed", + action="store_true", + default=False, + dest="remove_seed", + help="Remove cloud-init seed directory /var/lib/cloud/seed.", + ) return parser @@ -59,8 +80,8 @@ def remove_artifacts(remove_logs, remove_seed=False): if not os.path.isdir(init.paths.cloud_dir): return 0 # Artifacts dir already cleaned - seed_path = os.path.join(init.paths.cloud_dir, 'seed') - for path in glob.glob('%s/*' % init.paths.cloud_dir): + seed_path = os.path.join(init.paths.cloud_dir, "seed") + for path in glob.glob("%s/*" % init.paths.cloud_dir): if path == seed_path and not remove_seed: continue try: @@ -69,7 +90,7 @@ def remove_artifacts(remove_logs, remove_seed=False): else: del_file(path) except OSError as e: - error('Could not remove {0}: {1}'.format(path, str(e))) + error("Could not remove {0}: {1}".format(path, str(e))) return 1 return 0 @@ -78,13 +99,15 @@ def handle_clean_args(name, args): """Handle calls to 'cloud-init clean' as a subcommand.""" exit_code = remove_artifacts(args.remove_logs, args.remove_seed) if exit_code == 0 and args.reboot: - cmd = ['shutdown', '-r', 'now'] + cmd = ["shutdown", "-r", "now"] try: subp(cmd, capture=False) except ProcessExecutionError as e: error( 'Could not reboot this system using "{0}": {1}'.format( - cmd, str(e))) + cmd, str(e) + ) + ) exit_code = 1 return exit_code @@ -92,10 +115,10 @@ def handle_clean_args(name, args): def main(): """Tool to collect and tar all cloud-init related logs.""" parser = get_parser() - sys.exit(handle_clean_args('clean', parser.parse_args())) + sys.exit(handle_clean_args("clean", parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py index 0cdc9675..b92b03a8 100755 --- a/cloudinit/cmd/cloud_id.py +++ b/cloudinit/cmd/cloud_id.py @@ -6,13 +6,16 @@ import argparse import json import sys -from cloudinit.util import error from cloudinit.sources import ( - INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id) + INSTANCE_JSON_FILE, + METADATA_UNKNOWN, + canonical_cloud_id, +) +from cloudinit.util import error -DEFAULT_INSTANCE_JSON = '/run/cloud-init/%s' % INSTANCE_JSON_FILE +DEFAULT_INSTANCE_JSON = "/run/cloud-init/%s" % INSTANCE_JSON_FILE -NAME = 'cloud-id' +NAME = "cloud-id" def get_parser(parser=None): @@ -27,17 +30,30 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( prog=NAME, - description='Report the canonical cloud-id for this instance') + description="Report the canonical cloud-id for this instance", + ) parser.add_argument( - '-j', '--json', action='store_true', default=False, - help='Report all standardized cloud-id information as json.') + 
"-j", + "--json", + action="store_true", + default=False, + help="Report all standardized cloud-id information as json.", + ) parser.add_argument( - '-l', '--long', action='store_true', default=False, - help='Report extended cloud-id information as tab-delimited string.') + "-l", + "--long", + action="store_true", + default=False, + help="Report extended cloud-id information as tab-delimited string.", + ) parser.add_argument( - '-i', '--instance-data', type=str, default=DEFAULT_INSTANCE_JSON, - help=('Path to instance-data.json file. Default is %s' % - DEFAULT_INSTANCE_JSON)) + "-i", + "--instance-data", + type=str, + default=DEFAULT_INSTANCE_JSON, + help="Path to instance-data.json file. Default is %s" + % DEFAULT_INSTANCE_JSON, + ) return parser @@ -53,24 +69,28 @@ def handle_args(name, args): except IOError: return error( "File not found '%s'. Provide a path to instance data json file" - ' using --instance-data' % args.instance_data) + " using --instance-data" % args.instance_data + ) except ValueError as e: return error( - "File '%s' is not valid json. %s" % (args.instance_data, e)) - v1 = instance_data.get('v1', {}) + "File '%s' is not valid json. %s" % (args.instance_data, e) + ) + v1 = instance_data.get("v1", {}) cloud_id = canonical_cloud_id( - v1.get('cloud_name', METADATA_UNKNOWN), - v1.get('region', METADATA_UNKNOWN), - v1.get('platform', METADATA_UNKNOWN)) + v1.get("cloud_name", METADATA_UNKNOWN), + v1.get("region", METADATA_UNKNOWN), + v1.get("platform", METADATA_UNKNOWN), + ) if args.json: - v1['cloud_id'] = cloud_id - response = json.dumps( # Pretty, sorted json - v1, indent=1, sort_keys=True, separators=(',', ': ')) + v1["cloud_id"] = cloud_id + response = json.dumps( # Pretty, sorted json + v1, indent=1, sort_keys=True, separators=(",", ": ") + ) elif args.long: - response = '%s\t%s' % (cloud_id, v1.get('region', METADATA_UNKNOWN)) + response = "%s\t%s" % (cloud_id, v1.get("region", METADATA_UNKNOWN)) else: response = cloud_id - sys.stdout.write('%s\n' % response) + sys.stdout.write("%s\n" % response) return 0 @@ -80,7 +100,7 @@ def main(): sys.exit(handle_args(NAME, parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py index 3ae28b69..ead5f7a9 100644 --- a/cloudinit/cmd/devel/__init__.py +++ b/cloudinit/cmd/devel/__init__.py @@ -11,7 +11,7 @@ from cloudinit.stages import Init def addLogHandlerCLI(logger, log_level): """Add a commandline logging handler to emit messages to stderr.""" - formatter = logging.Formatter('%(levelname)s: %(message)s') + formatter = logging.Formatter("%(levelname)s: %(message)s") log.setupBasicLogging(log_level, formatter=formatter) return logger @@ -22,4 +22,5 @@ def read_cfg_paths(): init.read_cfg() return init.paths + # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py index f6f36a00..a9be0379 100644 --- a/cloudinit/cmd/devel/hotplug_hook.py +++ b/cloudinit/cmd/devel/hotplug_hook.py @@ -6,20 +6,17 @@ import os import sys import time -from cloudinit import log -from cloudinit import reporting -from cloudinit import stages +from cloudinit import log, reporting, stages from cloudinit.event import EventScope, EventType from cloudinit.net import activators, read_sys_net_safe from cloudinit.net.network_state import parse_net_config_data from cloudinit.reporting import events -from cloudinit.stages import Init from cloudinit.sources import DataSource # noqa: F401 from 
cloudinit.sources import DataSourceNotFoundException - +from cloudinit.stages import Init LOG = log.getLogger(__name__) -NAME = 'hotplug-hook' +NAME = "hotplug-hook" def get_parser(parser=None): @@ -35,33 +32,38 @@ def get_parser(parser=None): parser.description = __doc__ parser.add_argument( - "-s", "--subsystem", required=True, + "-s", + "--subsystem", + required=True, help="subsystem to act on", - choices=['net'] + choices=["net"], ) subparsers = parser.add_subparsers( - title='Hotplug Action', - dest='hotplug_action' + title="Hotplug Action", dest="hotplug_action" ) subparsers.required = True subparsers.add_parser( - 'query', - help='query if hotplug is enabled for given subsystem' + "query", help="query if hotplug is enabled for given subsystem" ) parser_handle = subparsers.add_parser( - 'handle', help='handle the hotplug event') + "handle", help="handle the hotplug event" + ) parser_handle.add_argument( - "-d", "--devpath", required=True, + "-d", + "--devpath", + required=True, metavar="PATH", - help="sysfs path to hotplugged device" + help="sysfs path to hotplugged device", ) parser_handle.add_argument( - "-u", "--udevaction", required=True, + "-u", + "--udevaction", + required=True, help="action to take", - choices=['add', 'remove'] + choices=["add", "remove"], ) return parser @@ -90,27 +92,29 @@ class UeventHandler(abc.ABC): def detect_hotplugged_device(self): detect_presence = None - if self.action == 'add': + if self.action == "add": detect_presence = True - elif self.action == 'remove': + elif self.action == "remove": detect_presence = False else: - raise ValueError('Unknown action: %s' % self.action) + raise ValueError("Unknown action: %s" % self.action) if detect_presence != self.device_detected(): raise RuntimeError( - 'Failed to detect %s in updated metadata' % self.id) + "Failed to detect %s in updated metadata" % self.id + ) def success(self): return self.success_fn() def update_metadata(self): - result = self.datasource.update_metadata_if_supported([ - EventType.HOTPLUG]) + result = self.datasource.update_metadata_if_supported( + [EventType.HOTPLUG] + ) if not result: raise RuntimeError( - 'Datasource %s not updated for ' - 'event %s' % (self.datasource, EventType.HOTPLUG) + "Datasource %s not updated for event %s" + % (self.datasource, EventType.HOTPLUG) ) return result @@ -118,7 +122,7 @@ class UeventHandler(abc.ABC): class NetHandler(UeventHandler): def __init__(self, datasource, devpath, action, success_fn): # convert devpath to mac address - id = read_sys_net_safe(os.path.basename(devpath), 'address') + id = read_sys_net_safe(os.path.basename(devpath), "address") super().__init__(id, datasource, devpath, action, success_fn) def apply(self): @@ -128,14 +132,16 @@ class NetHandler(UeventHandler): ) interface_name = os.path.basename(self.devpath) activator = activators.select_activator() - if self.action == 'add': + if self.action == "add": if not activator.bring_up_interface(interface_name): raise RuntimeError( - 'Failed to bring up device: {}'.format(self.devpath)) - elif self.action == 'remove': + "Failed to bring up device: {}".format(self.devpath) + ) + elif self.action == "remove": if not activator.bring_down_interface(interface_name): raise RuntimeError( - 'Failed to bring down device: {}'.format(self.devpath)) + "Failed to bring down device: {}".format(self.devpath) + ) @property def config(self): @@ -144,15 +150,16 @@ class NetHandler(UeventHandler): def device_detected(self) -> bool: netstate = parse_net_config_data(self.config) found = [ - iface for iface in 
netstate.iter_interfaces() - if iface.get('mac_address') == self.id + iface + for iface in netstate.iter_interfaces() + if iface.get("mac_address") == self.id ] - LOG.debug('Ifaces with ID=%s : %s', self.id, found) + LOG.debug("Ifaces with ID=%s : %s", self.id, found) return len(found) > 0 SUBSYSTEM_PROPERTES_MAP = { - 'net': (NetHandler, EventScope.NETWORK), + "net": (NetHandler, EventScope.NETWORK), } @@ -161,66 +168,65 @@ def is_enabled(hotplug_init, subsystem): scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1] except KeyError as e: raise Exception( - 'hotplug-hook: cannot handle events for subsystem: {}'.format( - subsystem) + "hotplug-hook: cannot handle events for subsystem: {}".format( + subsystem + ) ) from e return stages.update_event_enabled( datasource=hotplug_init.datasource, cfg=hotplug_init.cfg, event_source_type=EventType.HOTPLUG, - scope=scope + scope=scope, ) def initialize_datasource(hotplug_init, subsystem): - LOG.debug('Fetching datasource') + LOG.debug("Fetching datasource") datasource = hotplug_init.fetch(existing="trust") if not datasource.get_supported_events([EventType.HOTPLUG]): - LOG.debug('hotplug not supported for event of type %s', subsystem) + LOG.debug("hotplug not supported for event of type %s", subsystem) return if not is_enabled(hotplug_init, subsystem): - LOG.debug('hotplug not enabled for event of type %s', subsystem) + LOG.debug("hotplug not enabled for event of type %s", subsystem) return return datasource -def handle_hotplug( - hotplug_init: Init, devpath, subsystem, udevaction -): +def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction): datasource = initialize_datasource(hotplug_init, subsystem) if not datasource: return handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0] - LOG.debug('Creating %s event handler', subsystem) + LOG.debug("Creating %s event handler", subsystem) event_handler = handler_cls( datasource=datasource, devpath=devpath, action=udevaction, - success_fn=hotplug_init._write_to_cache + success_fn=hotplug_init._write_to_cache, ) # type: UeventHandler wait_times = [1, 3, 5, 10, 30] for attempt, wait in enumerate(wait_times): LOG.debug( - 'subsystem=%s update attempt %s/%s', + "subsystem=%s update attempt %s/%s", subsystem, attempt, - len(wait_times) + len(wait_times), ) try: - LOG.debug('Refreshing metadata') + LOG.debug("Refreshing metadata") event_handler.update_metadata() - LOG.debug('Detecting device in updated metadata') + LOG.debug("Detecting device in updated metadata") event_handler.detect_hotplugged_device() - LOG.debug('Applying config change') + LOG.debug("Applying config change") event_handler.apply() - LOG.debug('Updating cache') + LOG.debug("Updating cache") event_handler.success() break except Exception as e: - LOG.debug('Exception while processing hotplug event. %s', e) + LOG.debug("Exception while processing hotplug event. 
%s", e) time.sleep(wait) last_exception = e else: @@ -238,31 +244,33 @@ def handle_args(name, args): hotplug_init.read_cfg() log.setupLogging(hotplug_init.cfg) - if 'reporting' in hotplug_init.cfg: - reporting.update_configuration(hotplug_init.cfg.get('reporting')) + if "reporting" in hotplug_init.cfg: + reporting.update_configuration(hotplug_init.cfg.get("reporting")) # Logging isn't going to be setup until now LOG.debug( - '%s called with the following arguments: {' - 'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}', + "%s called with the following arguments: {" + "hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}", name, args.hotplug_action, args.subsystem, - args.udevaction if 'udevaction' in args else None, - args.devpath if 'devpath' in args else None, + args.udevaction if "udevaction" in args else None, + args.devpath if "devpath" in args else None, ) with hotplug_reporter: try: - if args.hotplug_action == 'query': + if args.hotplug_action == "query": try: datasource = initialize_datasource( - hotplug_init, args.subsystem) + hotplug_init, args.subsystem + ) except DataSourceNotFoundException: print( "Unable to determine hotplug state. No datasource " - "detected") + "detected" + ) sys.exit(1) - print('enabled' if datasource else 'disabled') + print("enabled" if datasource else "disabled") else: handle_hotplug( hotplug_init=hotplug_init, @@ -271,13 +279,13 @@ def handle_args(name, args): udevaction=args.udevaction, ) except Exception: - LOG.exception('Received fatal exception handling hotplug!') + LOG.exception("Received fatal exception handling hotplug!") raise - LOG.debug('Exiting hotplug handler') + LOG.debug("Exiting hotplug handler") reporting.flush_events() -if __name__ == '__main__': +if __name__ == "__main__": args = get_parser().parse_args() handle_args(NAME, args) diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index 31ade73d..d54b809a 100644 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -5,20 +5,19 @@ """Define 'collect-logs' utility and handler to include in cloud-init cmd.""" import argparse -from datetime import datetime import os import shutil import sys +from datetime import datetime from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE +from cloudinit.subp import ProcessExecutionError, subp from cloudinit.temp_utils import tempdir -from cloudinit.subp import (ProcessExecutionError, subp) -from cloudinit.util import (chdir, copy, ensure_dir, write_file) +from cloudinit.util import chdir, copy, ensure_dir, write_file - -CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] -CLOUDINIT_RUN_DIR = '/run/cloud-init' -USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional +CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"] +CLOUDINIT_RUN_DIR = "/run/cloud-init" +USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional def get_parser(parser=None): @@ -32,26 +31,44 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='collect-logs', - description='Collect and tar all cloud-init debug info') - parser.add_argument('--verbose', '-v', action='count', default=0, - dest='verbosity', help="Be more verbose.") + prog="collect-logs", + description="Collect and tar all cloud-init debug info", + ) + parser.add_argument( + "--verbose", + "-v", + action="count", + default=0, + dest="verbosity", + help="Be more verbose.", + ) parser.add_argument( - "--tarfile", '-t', default='cloud-init.tar.gz', 
- help=('The tarfile to create containing all collected logs.' - ' Default: cloud-init.tar.gz')) + "--tarfile", + "-t", + default="cloud-init.tar.gz", + help=( + "The tarfile to create containing all collected logs." + " Default: cloud-init.tar.gz" + ), + ) parser.add_argument( - "--include-userdata", '-u', default=False, action='store_true', - dest='userdata', help=( - 'Optionally include user-data from {0} which could contain' - ' sensitive information.'.format(USER_DATA_FILE))) + "--include-userdata", + "-u", + default=False, + action="store_true", + dest="userdata", + help=( + "Optionally include user-data from {0} which could contain" + " sensitive information.".format(USER_DATA_FILE) + ), + ) return parser def _copytree_rundir_ignore_files(curdir, files): """Return a list of files to ignore for /run/cloud-init directory""" ignored_files = [ - 'hook-hotplug-cmd', # named pipe for hotplug + "hook-hotplug-cmd", # named pipe for hotplug ] if os.getuid() != 0: # Ignore root-permissioned files @@ -94,52 +111,67 @@ def collect_logs(tarfile, include_userdata, verbosity=0): if include_userdata and os.getuid() != 0: sys.stderr.write( "To include userdata, root user is required." - " Try sudo cloud-init collect-logs\n") + " Try sudo cloud-init collect-logs\n" + ) return 1 tarfile = os.path.abspath(tarfile) - date = datetime.utcnow().date().strftime('%Y-%m-%d') - log_dir = 'cloud-init-logs-{0}'.format(date) - with tempdir(dir='/tmp') as tmp_dir: + date = datetime.utcnow().date().strftime("%Y-%m-%d") + log_dir = "cloud-init-logs-{0}".format(date) + with tempdir(dir="/tmp") as tmp_dir: log_dir = os.path.join(tmp_dir, log_dir) version = _write_command_output_to_file( - ['cloud-init', '--version'], - os.path.join(log_dir, 'version'), - "cloud-init --version", verbosity) + ["cloud-init", "--version"], + os.path.join(log_dir, "version"), + "cloud-init --version", + verbosity, + ) dpkg_ver = _write_command_output_to_file( - ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'], - os.path.join(log_dir, 'dpkg-version'), - "dpkg version", verbosity) + ["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"], + os.path.join(log_dir, "dpkg-version"), + "dpkg version", + verbosity, + ) if not version: version = dpkg_ver if dpkg_ver else "not-available" _debug("collected cloud-init version: %s\n" % version, 1, verbosity) _write_command_output_to_file( - ['dmesg'], os.path.join(log_dir, 'dmesg.txt'), - "dmesg output", verbosity) + ["dmesg"], + os.path.join(log_dir, "dmesg.txt"), + "dmesg output", + verbosity, + ) _write_command_output_to_file( - ['journalctl', '--boot=0', '-o', 'short-precise'], - os.path.join(log_dir, 'journal.txt'), - "systemd journal of current boot", verbosity) + ["journalctl", "--boot=0", "-o", "short-precise"], + os.path.join(log_dir, "journal.txt"), + "systemd journal of current boot", + verbosity, + ) for log in CLOUDINIT_LOGS: _collect_file(log, log_dir, verbosity) if include_userdata: _collect_file(USER_DATA_FILE, log_dir, verbosity) - run_dir = os.path.join(log_dir, 'run') + run_dir = os.path.join(log_dir, "run") ensure_dir(run_dir) if os.path.exists(CLOUDINIT_RUN_DIR): try: - shutil.copytree(CLOUDINIT_RUN_DIR, - os.path.join(run_dir, 'cloud-init'), - ignore=_copytree_rundir_ignore_files) + shutil.copytree( + CLOUDINIT_RUN_DIR, + os.path.join(run_dir, "cloud-init"), + ignore=_copytree_rundir_ignore_files, + ) except shutil.Error as e: sys.stderr.write("Failed collecting file(s) due to error:\n") - sys.stderr.write(str(e) + '\n') + sys.stderr.write(str(e) + "\n") 
_debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity) else: - _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1, - verbosity) + _debug( + "directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, + 1, + verbosity, + ) with chdir(tmp_dir): - subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) + subp(["tar", "czvf", tarfile, log_dir.replace(tmp_dir + "/", "")]) sys.stderr.write("Wrote %s\n" % tarfile) return 0 @@ -152,10 +184,10 @@ def handle_collect_logs_args(name, args): def main(): """Tool to collect and tar all cloud-init related logs.""" parser = get_parser() - return handle_collect_logs_args('collect-logs', parser.parse_args()) + return handle_collect_logs_args("collect-logs", parser.parse_args()) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py index 4e6a5778..a7493c74 100755 --- a/cloudinit/cmd/devel/make_mime.py +++ b/cloudinit/cmd/devel/make_mime.py @@ -9,19 +9,22 @@ from email.mime.text import MIMEText from cloudinit import log from cloudinit.handlers import INCLUSION_TYPES_MAP + from . import addLogHandlerCLI -NAME = 'make-mime' +NAME = "make-mime" LOG = log.getLogger(NAME) -EPILOG = ("Example: make-mime -a config.yaml:cloud-config " - "-a script.sh:x-shellscript > user-data") +EPILOG = ( + "Example: make-mime -a config.yaml:cloud-config " + "-a script.sh:x-shellscript > user-data" +) def file_content_type(text): - """ Return file content type by reading the first line of the input. """ + """Return file content type by reading the first line of the input.""" try: filename, content_type = text.split(":", 1) - return (open(filename, 'r'), filename, content_type.strip()) + return (open(filename, "r"), filename, content_type.strip()) except ValueError as e: raise argparse.ArgumentError( text, "Invalid value for %r" % (text) @@ -41,26 +44,43 @@ def get_parser(parser=None): # update the parser's doc and add an epilog to show an example parser.description = __doc__ parser.epilog = EPILOG - parser.add_argument("-a", "--attach", dest="files", type=file_content_type, - action='append', default=[], - metavar=":", - help=("attach the given file as the specified " - "content-type")) - parser.add_argument('-l', '--list-types', action='store_true', - default=False, - help='List support cloud-init content types.') - parser.add_argument('-f', '--force', action='store_true', - default=False, - help='Ignore unknown content-type warnings') + parser.add_argument( + "-a", + "--attach", + dest="files", + type=file_content_type, + action="append", + default=[], + metavar=":", + help="attach the given file as the specified content-type", + ) + parser.add_argument( + "-l", + "--list-types", + action="store_true", + default=False, + help="List support cloud-init content types.", + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + help="Ignore unknown content-type warnings", + ) return parser def get_content_types(strip_prefix=False): - """ Return a list of cloud-init supported content types. Optionally - strip out the leading 'text/' of the type if strip_prefix=True. + """Return a list of cloud-init supported content types. Optionally + strip out the leading 'text/' of the type if strip_prefix=True. 
""" - return sorted([ctype.replace("text/", "") if strip_prefix else ctype - for ctype in INCLUSION_TYPES_MAP.values()]) + return sorted( + [ + ctype.replace("text/", "") if strip_prefix else ctype + for ctype in INCLUSION_TYPES_MAP.values() + ] + ) def handle_args(name, args): @@ -82,14 +102,16 @@ def handle_args(name, args): for i, (fh, filename, format_type) in enumerate(args.files): contents = fh.read() sub_message = MIMEText(contents, format_type, sys.getdefaultencoding()) - sub_message.add_header('Content-Disposition', - 'attachment; filename="%s"' % (filename)) + sub_message.add_header( + "Content-Disposition", 'attachment; filename="%s"' % (filename) + ) content_type = sub_message.get_content_type().lower() if content_type not in get_content_types(): level = "WARNING" if args.force else "ERROR" - msg = (level + ": content type %r for attachment %s " - "may be incorrect!") % (content_type, i + 1) - sys.stderr.write(msg + '\n') + msg = ( + level + ": content type %r for attachment %s may be incorrect!" + ) % (content_type, i + 1) + sys.stderr.write(msg + "\n") errors.append(msg) sub_messages.append(sub_message) if len(errors) and not args.force: @@ -104,10 +126,10 @@ def handle_args(name, args): def main(): args = get_parser().parse_args() - return(handle_args(NAME, args)) + return handle_args(NAME, args) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index f4a98e5e..18b1e7ff 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -6,15 +6,13 @@ import json import os import sys -from cloudinit.sources.helpers import openstack +from cloudinit import distros, log, safeyaml +from cloudinit.net import eni, netplan, network_state, networkd, sysconfig from cloudinit.sources import DataSourceAzure as azure from cloudinit.sources import DataSourceOVF as ovf +from cloudinit.sources.helpers import openstack -from cloudinit import distros, safeyaml -from cloudinit.net import eni, netplan, networkd, network_state, sysconfig -from cloudinit import log - -NAME = 'net-convert' +NAME = "net-convert" def get_parser(parser=None): @@ -27,33 +25,59 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) - parser.add_argument("-p", "--network-data", type=open, - metavar="PATH", required=True, - help="The network configuration to read") - parser.add_argument("-k", "--kind", - choices=['eni', 'network_data.json', 'yaml', - 'azure-imds', 'vmware-imc'], - required=True, - help="The format of the given network config") - parser.add_argument("-d", "--directory", - metavar="PATH", - help="directory to place output in", - required=True) - parser.add_argument("-D", "--distro", - choices=[item for sublist in - distros.OSFAMILIES.values() - for item in sublist], - required=True) - parser.add_argument("-m", "--mac", - metavar="name,mac", - action='append', - help="interface name to mac mapping") - parser.add_argument("--debug", action='store_true', - help='enable debug logging to stderr.') - parser.add_argument("-O", "--output-kind", - choices=['eni', 'netplan', 'networkd', 'sysconfig'], - required=True, - help="The network config format to emit") + parser.add_argument( + "-p", + "--network-data", + type=open, + metavar="PATH", + required=True, + help="The network configuration to read", + ) + parser.add_argument( + "-k", + "--kind", + choices=[ + "eni", + "network_data.json", + "yaml", + "azure-imds", + 
"vmware-imc", + ], + required=True, + help="The format of the given network config", + ) + parser.add_argument( + "-d", + "--directory", + metavar="PATH", + help="directory to place output in", + required=True, + ) + parser.add_argument( + "-D", + "--distro", + choices=[ + item for sublist in distros.OSFAMILIES.values() for item in sublist + ], + required=True, + ) + parser.add_argument( + "-m", + "--mac", + metavar="name,mac", + action="append", + help="interface name to mac mapping", + ) + parser.add_argument( + "--debug", action="store_true", help="enable debug logging to stderr." + ) + parser.add_argument( + "-O", + "--output-kind", + choices=["eni", "netplan", "networkd", "sysconfig"], + required=True, + help="The network config format to emit", + ) return parser @@ -81,59 +105,68 @@ def handle_args(name, args): pre_ns = eni.convert_eni_data(net_data) elif args.kind == "yaml": pre_ns = safeyaml.load(net_data) - if 'network' in pre_ns: - pre_ns = pre_ns.get('network') + if "network" in pre_ns: + pre_ns = pre_ns.get("network") if args.debug: - sys.stderr.write('\n'.join( - ["Input YAML", safeyaml.dumps(pre_ns), ""])) - elif args.kind == 'network_data.json': + sys.stderr.write( + "\n".join(["Input YAML", safeyaml.dumps(pre_ns), ""]) + ) + elif args.kind == "network_data.json": pre_ns = openstack.convert_net_json( - json.loads(net_data), known_macs=known_macs) - elif args.kind == 'azure-imds': + json.loads(net_data), known_macs=known_macs + ) + elif args.kind == "azure-imds": pre_ns = azure.parse_network_config(json.loads(net_data)) - elif args.kind == 'vmware-imc': + elif args.kind == "vmware-imc": config = ovf.Config(ovf.ConfigFile(args.network_data.name)) pre_ns = ovf.get_network_config_from_conf(config, False) ns = network_state.parse_net_config_data(pre_ns) if args.debug: - sys.stderr.write('\n'.join( - ["", "Internal State", safeyaml.dumps(ns), ""])) + sys.stderr.write( + "\n".join(["", "Internal State", safeyaml.dumps(ns), ""]) + ) distro_cls = distros.fetch(args.distro) distro = distro_cls(args.distro, {}, None) config = {} if args.output_kind == "eni": r_cls = eni.Renderer - config = distro.renderer_configs.get('eni') + config = distro.renderer_configs.get("eni") elif args.output_kind == "netplan": r_cls = netplan.Renderer - config = distro.renderer_configs.get('netplan') + config = distro.renderer_configs.get("netplan") # don't run netplan generate/apply - config['postcmds'] = False + config["postcmds"] = False # trim leading slash - config['netplan_path'] = config['netplan_path'][1:] + config["netplan_path"] = config["netplan_path"][1:] # enable some netplan features - config['features'] = ['dhcp-use-domains', 'ipv6-mtu'] + config["features"] = ["dhcp-use-domains", "ipv6-mtu"] elif args.output_kind == "networkd": r_cls = networkd.Renderer - config = distro.renderer_configs.get('networkd') + config = distro.renderer_configs.get("networkd") elif args.output_kind == "sysconfig": r_cls = sysconfig.Renderer - config = distro.renderer_configs.get('sysconfig') + config = distro.renderer_configs.get("sysconfig") else: raise RuntimeError("Invalid output_kind") r = r_cls(config=config) - sys.stderr.write(''.join([ - "Read input format '%s' from '%s'.\n" % ( - args.kind, args.network_data.name), - "Wrote output format '%s' to '%s'\n" % ( - args.output_kind, args.directory)]) + "\n") + sys.stderr.write( + "".join( + [ + "Read input format '%s' from '%s'.\n" + % (args.kind, args.network_data.name), + "Wrote output format '%s' to '%s'\n" + % (args.output_kind, args.directory), + ] + ) + + 
"\n" + ) r.render_network_state(network_state=ns, target=args.directory) -if __name__ == '__main__': +if __name__ == "__main__": args = get_parser().parse_args() handle_args(NAME, args) diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py index be304630..76b16c2e 100644 --- a/cloudinit/cmd/devel/parser.py +++ b/cloudinit/cmd/devel/parser.py @@ -5,33 +5,47 @@ """Define 'devel' subcommand argument parsers to include in cloud-init cmd.""" import argparse + from cloudinit.config import schema -from . import hotplug_hook -from . import net_convert -from . import render -from . import make_mime +from . import hotplug_hook, make_mime, net_convert, render def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( - prog='cloudinit-devel', - description='Run development cloud-init tools') - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + prog="cloudinit-devel", + description="Run development cloud-init tools", + ) + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True subcmds = [ - (hotplug_hook.NAME, hotplug_hook.__doc__, - hotplug_hook.get_parser, hotplug_hook.handle_args), - ('schema', 'Validate cloud-config files for document schema', - schema.get_parser, schema.handle_schema_args), - (net_convert.NAME, net_convert.__doc__, - net_convert.get_parser, net_convert.handle_args), - (render.NAME, render.__doc__, - render.get_parser, render.handle_args), - (make_mime.NAME, make_mime.__doc__, - make_mime.get_parser, make_mime.handle_args), + ( + hotplug_hook.NAME, + hotplug_hook.__doc__, + hotplug_hook.get_parser, + hotplug_hook.handle_args, + ), + ( + "schema", + "Validate cloud-config files for document schema", + schema.get_parser, + schema.handle_schema_args, + ), + ( + net_convert.NAME, + net_convert.__doc__, + net_convert.get_parser, + net_convert.handle_args, + ), + (render.NAME, render.__doc__, render.get_parser, render.handle_args), + ( + make_mime.NAME, + make_mime.__doc__, + make_mime.get_parser, + make_mime.handle_args, + ), ] for (subcmd, helpmsg, get_parser, handler) in subcmds: parser = subparsers.add_parser(subcmd, help=helpmsg) diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py index 1090aa16..2f9a22a8 100755 --- a/cloudinit/cmd/devel/render.py +++ b/cloudinit/cmd/devel/render.py @@ -6,12 +6,13 @@ import argparse import os import sys -from cloudinit.handlers.jinja_template import render_jinja_payload_from_file from cloudinit import log +from cloudinit.handlers.jinja_template import render_jinja_payload_from_file from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE + from . import addLogHandlerCLI, read_cfg_paths -NAME = 'render' +NAME = "render" LOG = log.getLogger(NAME) @@ -27,13 +28,24 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument( - 'user_data', type=str, help='Path to the user-data file to render') + "user_data", type=str, help="Path to the user-data file to render" + ) + parser.add_argument( + "-i", + "--instance-data", + type=str, + help=( + "Optional path to instance-data.json file. Defaults to" + " /run/cloud-init/instance-data.json" + ), + ) parser.add_argument( - '-i', '--instance-data', type=str, - help=('Optional path to instance-data.json file. 
Defaults to' - ' /run/cloud-init/instance-data.json')) - parser.add_argument('-d', '--debug', action='store_true', default=False, - help='Add verbose messages during template render') + "-d", + "--debug", + action="store_true", + default=False, + help="Add verbose messages during template render", + ) return parser @@ -54,34 +66,38 @@ def handle_args(name, args): redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: instance_data_fn = os.path.join( - paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE + ) if not os.path.exists(instance_data_fn): LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - instance_data_fn, redacted_data_fn + "Missing root-readable %s. Using redacted %s instead.", + instance_data_fn, + redacted_data_fn, ) instance_data_fn = redacted_data_fn else: instance_data_fn = redacted_data_fn if not os.path.exists(instance_data_fn): - LOG.error('Missing instance-data.json file: %s', instance_data_fn) + LOG.error("Missing instance-data.json file: %s", instance_data_fn) return 1 try: with open(args.user_data) as stream: user_data = stream.read() except IOError: - LOG.error('Missing user-data file: %s', args.user_data) + LOG.error("Missing user-data file: %s", args.user_data) return 1 try: rendered_payload = render_jinja_payload_from_file( - payload=user_data, payload_fn=args.user_data, + payload=user_data, + payload_fn=args.user_data, instance_data_file=instance_data_fn, - debug=True if args.debug else False) + debug=True if args.debug else False, + ) except RuntimeError as e: - LOG.error('Cannot render from instance data: %s', str(e)) + LOG.error("Cannot render from instance data: %s", str(e)) return 1 if not rendered_payload: - LOG.error('Unable to render user-data file: %s', args.user_data) + LOG.error("Unable to render user-data file: %s", args.user_data) return 1 sys.stdout.write(rendered_payload) return 0 @@ -89,10 +105,10 @@ def handle_args(name, args): def main(): args = get_parser().parse_args() - return(handle_args(NAME, args)) + return handle_args(NAME, args) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 63186d34..e67edbc3 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -19,6 +19,7 @@ import time import traceback from cloudinit import patcher + patcher.patch_logging() from cloudinit import log as logging @@ -34,8 +35,7 @@ from cloudinit import warnings from cloudinit import reporting from cloudinit.reporting import events -from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, - CLOUD_CONFIG) +from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG from cloudinit import atomic_helper @@ -44,8 +44,10 @@ from cloudinit import dhclient_hook # Welcome message template -WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at " - "{timestamp}. Up {uptime} seconds.") +WELCOME_MSG_TPL = ( + "Cloud-init v. {version} running '{action}' at " + "{timestamp}. Up {uptime} seconds." +) # Module section template MOD_SECTION_TPL = "cloud_%s_modules" @@ -53,9 +55,9 @@ MOD_SECTION_TPL = "cloud_%s_modules" # Frequency shortname to full name # (so users don't have to remember the full name...) 
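
[The FREQ_SHORT_NAMES table that follows backs the --frequency option of "cloud-init single": users type the short spelling and main_single() resolves it to the PER_* constant. A minimal sketch of that lookup, assuming the parsed argparse namespace is called "args" (the actual resolution happens inside main_single(), which this hunk does not show):

    from cloudinit.settings import PER_ALWAYS, PER_INSTANCE, PER_ONCE

    FREQ_SHORT_NAMES = {
        "instance": PER_INSTANCE,
        "always": PER_ALWAYS,
        "once": PER_ONCE,
    }

    # Returns None when --frequency was omitted, which lets the module's
    # own default frequency apply.
    mod_freq = FREQ_SHORT_NAMES.get(getattr(args, "frequency", None))
]
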
FREQ_SHORT_NAMES = { - 'instance': PER_INSTANCE, - 'always': PER_ALWAYS, - 'once': PER_ONCE, + "instance": PER_INSTANCE, + "always": PER_ALWAYS, + "once": PER_ONCE, } LOG = logging.getLogger() @@ -63,21 +65,20 @@ LOG = logging.getLogger() # Used for when a logger may not be active # and we still want to print exceptions... -def print_exc(msg=''): +def print_exc(msg=""): if msg: sys.stderr.write("%s\n" % (msg)) - sys.stderr.write('-' * 60) + sys.stderr.write("-" * 60) sys.stderr.write("\n") traceback.print_exc(file=sys.stderr) - sys.stderr.write('-' * 60) + sys.stderr.write("-" * 60) sys.stderr.write("\n") def welcome(action, msg=None): if not msg: msg = welcome_format(action) - util.multi_log("%s\n" % (msg), - console=False, stderr=True, log=LOG) + util.multi_log("%s\n" % (msg), console=False, stderr=True, log=LOG) return msg @@ -86,7 +87,8 @@ def welcome_format(action): version=version.version_string(), uptime=util.uptime(), timestamp=util.time_rfc2822(), - action=action) + action=action, + ) def extract_fns(args): @@ -107,29 +109,31 @@ def run_module_section(mods, action_name, section): (which_ran, failures) = mods.run_section(full_section_name) total_attempted = len(which_ran) + len(failures) if total_attempted == 0: - msg = ("No '%s' modules to run" - " under section '%s'") % (action_name, full_section_name) + msg = "No '%s' modules to run under section '%s'" % ( + action_name, + full_section_name, + ) sys.stderr.write("%s\n" % (msg)) LOG.debug(msg) return [] else: - LOG.debug("Ran %s modules with %s failures", - len(which_ran), len(failures)) + LOG.debug( + "Ran %s modules with %s failures", len(which_ran), len(failures) + ) return failures def apply_reporting_cfg(cfg): - if cfg.get('reporting'): - reporting.update_configuration(cfg.get('reporting')) + if cfg.get("reporting"): + reporting.update_configuration(cfg.get("reporting")) -def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')): +def parse_cmdline_url(cmdline, names=("cloud-config-url", "url")): data = util.keyval_str_to_dict(cmdline) for key in names: if key in data: return key, data[key] - raise KeyError("No keys (%s) found in string '%s'" % - (cmdline, names)) + raise KeyError("No keys (%s) found in string '%s'" % (cmdline, names)) def attempt_cmdline_url(path, network=True, cmdline=None): @@ -163,51 +167,60 @@ def attempt_cmdline_url(path, network=True, cmdline=None): if path_is_local and os.path.exists(path): if network: - m = ("file '%s' existed, possibly from local stage download" - " of command line url '%s'. Not re-writing." % (path, url)) + m = ( + "file '%s' existed, possibly from local stage download" + " of command line url '%s'. Not re-writing." % (path, url) + ) level = logging.INFO if path_is_local: level = logging.DEBUG else: - m = ("file '%s' existed, possibly from previous boot download" - " of command line url '%s'. Not re-writing." % (path, url)) + m = ( + "file '%s' existed, possibly from previous boot download" + " of command line url '%s'. Not re-writing." 
% (path, url) + ) level = logging.WARN return (level, m) - kwargs = {'url': url, 'timeout': 10, 'retries': 2} + kwargs = {"url": url, "timeout": 10, "retries": 2} if network or path_is_local: level = logging.WARN - kwargs['sec_between'] = 1 + kwargs["sec_between"] = 1 else: level = logging.DEBUG - kwargs['sec_between'] = .1 + kwargs["sec_between"] = 0.1 data = None - header = b'#cloud-config' + header = b"#cloud-config" try: resp = url_helper.read_file_or_url(**kwargs) if resp.ok(): data = resp.contents if not resp.contents.startswith(header): - if cmdline_name == 'cloud-config-url': + if cmdline_name == "cloud-config-url": level = logging.WARN else: level = logging.INFO return ( level, - "contents of '%s' did not start with %s" % (url, header)) + "contents of '%s' did not start with %s" % (url, header), + ) else: - return (level, - "url '%s' returned code %s. Ignoring." % (url, resp.code)) + return ( + level, + "url '%s' returned code %s. Ignoring." % (url, resp.code), + ) except url_helper.UrlError as e: return (level, "retrieving url '%s' failed: %s" % (url, e)) util.write_file(path, data, mode=0o600) - return (logging.INFO, - "wrote cloud-config data from %s='%s' to %s" % - (cmdline_name, url, path)) + return ( + logging.INFO, + "wrote cloud-config data from %s='%s' to %s" + % (cmdline_name, url, path), + ) def purge_cache_on_python_version_change(init): @@ -216,31 +229,32 @@ def purge_cache_on_python_version_change(init): There could be changes not represented in our cache (obj.pkl) after we upgrade to a new version of python, so at that point clear the cache """ - current_python_version = '%d.%d' % ( - sys.version_info.major, sys.version_info.minor + current_python_version = "%d.%d" % ( + sys.version_info.major, + sys.version_info.minor, ) python_version_path = os.path.join( - init.paths.get_cpath('data'), 'python-version' + init.paths.get_cpath("data"), "python-version" ) if os.path.exists(python_version_path): cached_python_version = open(python_version_path).read() # The Python version has changed out from under us, anything that was # pickled previously is likely useless due to API changes. if cached_python_version != current_python_version: - LOG.debug('Python version change detected. Purging cache') + LOG.debug("Python version change detected. Purging cache") init.purge_cache(True) util.write_file(python_version_path, current_python_version) else: - if os.path.exists(init.paths.get_ipath_cur('obj_pkl')): + if os.path.exists(init.paths.get_ipath_cur("obj_pkl")): LOG.info( - 'Writing python-version file. ' - 'Cache compatibility status is currently unknown.' + "Writing python-version file. " + "Cache compatibility status is currently unknown." ) util.write_file(python_version_path, current_python_version) def _should_bring_up_interfaces(init, args): - if util.get_cfg_option_bool(init.cfg, 'disable_network_activation'): + if util.get_cfg_option_bool(init.cfg, "disable_network_activation"): return False return not args.local @@ -250,10 +264,14 @@ def main_init(name, args): if args.local: deps = [sources.DEP_FILESYSTEM] - early_logs = [attempt_cmdline_url( - path=os.path.join("%s.d" % CLOUD_CONFIG, - "91_kernel_cmdline_url.cfg"), - network=not args.local)] + early_logs = [ + attempt_cmdline_url( + path=os.path.join( + "%s.d" % CLOUD_CONFIG, "91_kernel_cmdline_url.cfg" + ), + network=not args.local, + ) + ] # Cloud-init 'init' stage is broken up into the following sub-stages # 1. 
Ensure that the init object fetches its config without errors @@ -289,8 +307,9 @@ def main_init(name, args): early_logs.append((logging.WARN, msg)) if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(init.cfg) apply_reporting_cfg(init.cfg) @@ -317,9 +336,11 @@ def main_init(name, args): if mode == sources.DSMODE_NETWORK: existing = "trust" sys.stderr.write("%s\n" % (netinfo.debug_info())) - LOG.debug(("Checking to see if files that we need already" - " exist from a previous run that would allow us" - " to stop early.")) + LOG.debug( + "Checking to see if files that we need already" + " exist from a previous run that would allow us" + " to stop early." + ) # no-net is written by upstart cloud-init-nonet when network failed # to come up stop_files = [ @@ -331,15 +352,18 @@ def main_init(name, args): existing_files.append(fn) if existing_files: - LOG.debug("[%s] Exiting. stop file %s existed", - mode, existing_files) + LOG.debug( + "[%s] Exiting. stop file %s existed", mode, existing_files + ) return (None, []) else: - LOG.debug("Execution continuing, no previous run detected that" - " would allow us to stop early.") + LOG.debug( + "Execution continuing, no previous run detected that" + " would allow us to stop early." + ) else: existing = "check" - mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False) + mcfg = util.get_cfg_option_bool(init.cfg, "manual_cache_clean", False) if mcfg: LOG.debug("manual cache clean set from config") existing = "trust" @@ -360,8 +384,11 @@ def main_init(name, args): # if in network mode, and the datasource is local # then work was done at that stage. if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode: - LOG.debug("[%s] Exiting. datasource %s in local mode", - mode, init.datasource) + LOG.debug( + "[%s] Exiting. datasource %s in local mode", + mode, + init.datasource, + ) return (None, []) except sources.DataSourceNotFoundException: # In the case of 'cloud-init init' without '--local' it is a bit @@ -371,8 +398,9 @@ def main_init(name, args): if mode == sources.DSMODE_LOCAL: LOG.debug("No local datasource found") else: - util.logexc(LOG, ("No instance datasource found!" - " Likely bad things to come!")) + util.logexc( + LOG, "No instance datasource found! Likely bad things to come!" + ) if not args.force: init.apply_network_config(bring_up=bring_up_interfaces) LOG.debug("[%s] Exiting without datasource", mode) @@ -381,46 +409,60 @@ def main_init(name, args): else: return (None, ["No instance datasource found."]) else: - LOG.debug("[%s] barreling on in force mode without datasource", - mode) + LOG.debug( + "[%s] barreling on in force mode without datasource", mode + ) _maybe_persist_instance_data(init) # Stage 6 iid = init.instancify() - LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", - mode, name, iid, init.is_new_instance()) + LOG.debug( + "[%s] %s will now be targeting instance id: %s. new=%s", + mode, + name, + iid, + init.is_new_instance(), + ) if mode == sources.DSMODE_LOCAL: # Before network comes up, set any configured hostname to allow # dhcp clients to advertize this hostname to any DDNS services # LP: #1746455. 
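
[The local-stage call just below pairs with a second attempt later in main_init() (stage="init-net", retry_stage="modules:config"). The retry contract is simply that _maybe_set_hostname() treats SetHostnameError as non-fatal; a sketch of that behavior, restating the helper defined further down in this patch rather than adding new code:

    try:
        cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None)
    except cc_set_hostname.SetHostnameError as e:
        # Log and carry on; the named retry stage attempts it again once
        # more metadata (or the network) is available.
        LOG.debug(
            "Failed setting hostname in %s stage. Will"
            " retry in %s stage. Error: %s.",
            stage,
            retry_stage,
            str(e),
        )
]
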
- _maybe_set_hostname(init, stage='local', retry_stage='network') + _maybe_set_hostname(init, stage="local", retry_stage="network") init.apply_network_config(bring_up=bring_up_interfaces) if mode == sources.DSMODE_LOCAL: if init.datasource.dsmode != mode: - LOG.debug("[%s] Exiting. datasource %s not in local mode.", - mode, init.datasource) + LOG.debug( + "[%s] Exiting. datasource %s not in local mode.", + mode, + init.datasource, + ) return (init.datasource, []) else: - LOG.debug("[%s] %s is in local mode, will apply init modules now.", - mode, init.datasource) + LOG.debug( + "[%s] %s is in local mode, will apply init modules now.", + mode, + init.datasource, + ) # Give the datasource a chance to use network resources. # This is used on Azure to communicate with the fabric over network. init.setup_datasource() # update fully realizes user-data (pulling in #include if necessary) init.update() - _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config') + _maybe_set_hostname(init, stage="init-net", retry_stage="modules:config") # Stage 7 try: # Attempt to consume the data per instance. # This may run user-data handlers and/or perform # url downloads and such as needed. - (ran, _results) = init.cloudify().run('consume_data', - init.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + (ran, _results) = init.cloudify().run( + "consume_data", + init.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) if not ran: # Just consume anything that is set to run per-always # if nothing ran in the per-instance code @@ -442,8 +484,7 @@ def main_init(name, args): errfmt_orig = errfmt (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name) if outfmt_orig != outfmt or errfmt_orig != errfmt: - LOG.warning("Stdout, stderr changing to (%s, %s)", - outfmt, errfmt) + LOG.warning("Stdout, stderr changing to (%s, %s)", outfmt, errfmt) (outfmt, errfmt) = util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to re-adjust output redirection!") @@ -459,11 +500,11 @@ def main_init(name, args): def di_report_warn(datasource, cfg): - if 'di_report' not in cfg: + if "di_report" not in cfg: LOG.debug("no di_report found in config.") return - dicfg = cfg['di_report'] + dicfg = cfg["di_report"] if dicfg is None: # ds-identify may write 'di_report:\n #comment\n' # which reads as {'di_report': None} @@ -474,7 +515,7 @@ def di_report_warn(datasource, cfg): LOG.warning("di_report config not a dictionary: %s", dicfg) return - dslist = dicfg.get('datasource_list') + dslist = dicfg.get("datasource_list") if dslist is None: LOG.warning("no 'datasource_list' found in di_report.") return @@ -486,18 +527,26 @@ def di_report_warn(datasource, cfg): # where Name is the thing that shows up in datasource_list. 
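
[To make the name matching below concrete: datasource modules live at cloudinit.sources.DataSource<Name>, and stripping sources.DS_PREFIX ("DataSource") recovers the <Name> that users put in datasource_list. A self-contained sketch of that derivation:

    DS_PREFIX = "DataSource"  # mirrors cloudinit.sources.DS_PREFIX

    def short_ds_name(module_path: str) -> str:
        # "cloudinit.sources.DataSourceEc2" -> "DataSourceEc2" -> "Ec2"
        modname = module_path.rpartition(".")[2]
        if modname.startswith(DS_PREFIX):
            modname = modname[len(DS_PREFIX):]
        return modname

    assert short_ds_name("cloudinit.sources.DataSourceEc2") == "Ec2"
]
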
modname = datasource.__module__.rpartition(".")[2] if modname.startswith(sources.DS_PREFIX): - modname = modname[len(sources.DS_PREFIX):] + modname = modname[len(sources.DS_PREFIX) :] else: - LOG.warning("Datasource '%s' came from unexpected module '%s'.", - datasource, modname) + LOG.warning( + "Datasource '%s' came from unexpected module '%s'.", + datasource, + modname, + ) if modname in dslist: - LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s", - datasource, modname, dslist) + LOG.debug( + "used datasource '%s' from '%s' was in di_report's list: %s", + datasource, + modname, + dslist, + ) return - warnings.show_warning('dsid_missing_source', cfg, - source=modname, dslist=str(dslist)) + warnings.show_warning( + "dsid_missing_source", cfg, source=modname, dslist=str(dslist) + ) def main_modules(action_name, args): @@ -521,8 +570,10 @@ def main_modules(action_name, args): init.fetch(existing="trust") except sources.DataSourceNotFoundException: # There was no datasource found, theres nothing to do - msg = ('Can not apply stage %s, no datasource found! Likely bad ' - 'things to come!' % name) + msg = ( + "Can not apply stage %s, no datasource found! Likely bad " + "things to come!" % name + ) util.logexc(LOG, msg) print_exc(msg) if not args.force: @@ -539,8 +590,9 @@ def main_modules(action_name, args): util.logexc(LOG, "Failed to setup output redirection!") if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(mods.cfg) apply_reporting_cfg(init.cfg) @@ -573,10 +625,12 @@ def main_single(name, args): # There was no datasource found, # that might be bad (or ok) depending on # the module being ran (so continue on) - util.logexc(LOG, ("Failed to fetch your datasource," - " likely bad things to come!")) - print_exc(("Failed to fetch your datasource," - " likely bad things to come!")) + util.logexc( + LOG, "Failed to fetch your datasource, likely bad things to come!" + ) + print_exc( + "Failed to fetch your datasource, likely bad things to come!" 
+ ) if not args.force: return 1 _maybe_persist_instance_data(init) @@ -598,8 +652,9 @@ def main_single(name, args): util.logexc(LOG, "Failed to setup output redirection!") if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(mods.cfg) apply_reporting_cfg(init.cfg) @@ -608,9 +663,7 @@ def main_single(name, args): welcome(name, msg=w_msg) # Stage 5 - (which_ran, failures) = mods.run_single(mod_name, - mod_args, - mod_freq) + (which_ran, failures) = mods.run_single(mod_name, mod_args, mod_freq) if failures: LOG.warning("Ran %s but it failed!", mod_name) return 1 @@ -633,7 +686,12 @@ def status_wrapper(name, args, data_d=None, link_d=None): result_path = os.path.join(data_d, "result.json") result_link = os.path.join(link_d, "result.json") - util.ensure_dirs((data_d, link_d,)) + util.ensure_dirs( + ( + data_d, + link_d, + ) + ) (_name, functor) = args.action @@ -647,14 +705,20 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: raise ValueError("unknown name: %s" % name) - modes = ('init', 'init-local', 'modules-init', 'modules-config', - 'modules-final') + modes = ( + "init", + "init-local", + "modules-init", + "modules-config", + "modules-final", + ) if mode not in modes: raise ValueError( - "Invalid cloud init mode specified '{0}'".format(mode)) + "Invalid cloud init mode specified '{0}'".format(mode) + ) status = None - if mode == 'init-local': + if mode == "init-local": for f in (status_link, result_link, status_path, result_path): util.del_file(f) else: @@ -664,45 +728,46 @@ def status_wrapper(name, args, data_d=None, link_d=None): pass nullstatus = { - 'errors': [], - 'start': None, - 'finished': None, + "errors": [], + "start": None, + "finished": None, } if status is None: - status = {'v1': {}} - status['v1']['datasource'] = None + status = {"v1": {}} + status["v1"]["datasource"] = None for m in modes: - if m not in status['v1']: - status['v1'][m] = nullstatus.copy() + if m not in status["v1"]: + status["v1"][m] = nullstatus.copy() - v1 = status['v1'] - v1['stage'] = mode - v1[mode]['start'] = time.time() + v1 = status["v1"] + v1["stage"] = mode + v1[mode]["start"] = time.time() atomic_helper.write_json(status_path, status) - util.sym_link(os.path.relpath(status_path, link_d), status_link, - force=True) + util.sym_link( + os.path.relpath(status_path, link_d), status_link, force=True + ) try: ret = functor(name, args) - if mode in ('init', 'init-local'): + if mode in ("init", "init-local"): (datasource, errors) = ret if datasource is not None: - v1['datasource'] = str(datasource) + v1["datasource"] = str(datasource) else: errors = ret - v1[mode]['errors'] = [str(e) for e in errors] + v1[mode]["errors"] = [str(e) for e in errors] except Exception as e: util.logexc(LOG, "failed stage %s", mode) print_exc("failed run of stage %s" % mode) - v1[mode]['errors'] = [str(e)] + v1[mode]["errors"] = [str(e)] - v1[mode]['finished'] = time.time() - v1['stage'] = None + v1[mode]["finished"] = time.time() + v1["stage"] = None atomic_helper.write_json(status_path, status) @@ -710,23 +775,26 @@ def status_wrapper(name, args, data_d=None, link_d=None): # write the 'finished' file errors = [] for m in modes: - if v1[m]['errors']: - errors.extend(v1[m].get('errors', [])) + if v1[m]["errors"]: + errors.extend(v1[m].get("errors", [])) atomic_helper.write_json( - 
result_path, {'v1': {'datasource': v1['datasource'], - 'errors': errors}}) - util.sym_link(os.path.relpath(result_path, link_d), result_link, - force=True) + result_path, + {"v1": {"datasource": v1["datasource"], "errors": errors}}, + ) + util.sym_link( + os.path.relpath(result_path, link_d), result_link, force=True + ) - return len(v1[mode]['errors']) + return len(v1[mode]["errors"]) def _maybe_persist_instance_data(init): """Write instance-data.json file if absent and datasource is restored.""" if init.ds_restored: instance_data_file = os.path.join( - init.paths.run_dir, sources.INSTANCE_JSON_FILE) + init.paths.run_dir, sources.INSTANCE_JSON_FILE + ) if not os.path.exists(instance_data_file): init.datasource.persist_instance_data() @@ -739,18 +807,23 @@ def _maybe_set_hostname(init, stage, retry_stage): """ cloud = init.cloudify() (hostname, _fqdn) = util.get_hostname_fqdn( - init.cfg, cloud, metadata_only=True) + init.cfg, cloud, metadata_only=True + ) if hostname: # meta-data or user-data hostname content try: - cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None) + cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None) except cc_set_hostname.SetHostnameError as e: LOG.debug( - 'Failed setting hostname in %s stage. Will' - ' retry in %s stage. Error: %s.', stage, retry_stage, str(e)) + "Failed setting hostname in %s stage. Will" + " retry in %s stage. Error: %s.", + stage, + retry_stage, + str(e), + ) def main_features(name, args): - sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n') + sys.stdout.write("\n".join(sorted(version.FEATURES)) + "\n") def main(sysv_args=None): @@ -760,129 +833,182 @@ def main(sysv_args=None): sysv_args = sysv_args[1:] # Top level args - parser.add_argument('--version', '-v', action='version', - version='%(prog)s ' + (version.version_string())) - parser.add_argument('--file', '-f', action='append', - dest='files', - help=('additional yaml configuration' - ' files to use'), - type=argparse.FileType('rb')) - parser.add_argument('--debug', '-d', action='store_true', - help=('show additional pre-action' - ' logging (default: %(default)s)'), - default=False) - parser.add_argument('--force', action='store_true', - help=('force running even if no datasource is' - ' found (use at your own risk)'), - dest='force', - default=False) + parser.add_argument( + "--version", + "-v", + action="version", + version="%(prog)s " + (version.version_string()), + ) + parser.add_argument( + "--file", + "-f", + action="append", + dest="files", + help="additional yaml configuration files to use", + type=argparse.FileType("rb"), + ) + parser.add_argument( + "--debug", + "-d", + action="store_true", + help="show additional pre-action logging (default: %(default)s)", + default=False, + ) + parser.add_argument( + "--force", + action="store_true", + help=( + "force running even if no datasource is" + " found (use at your own risk)" + ), + dest="force", + default=False, + ) parser.set_defaults(reporter=None) - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True # Each action and its sub-options (if any) - parser_init = subparsers.add_parser('init', - help=('initializes cloud-init and' - ' performs initial modules')) - parser_init.add_argument("--local", '-l', action='store_true', - help="start in local mode (default: %(default)s)", - default=False) + parser_init = subparsers.add_parser( + "init", help="initializes cloud-init and 
performs initial modules" + ) + parser_init.add_argument( + "--local", + "-l", + action="store_true", + help="start in local mode (default: %(default)s)", + default=False, + ) # This is used so that we can know which action is selected + # the functor to use to run this subcommand - parser_init.set_defaults(action=('init', main_init)) + parser_init.set_defaults(action=("init", main_init)) # These settings are used for the 'config' and 'final' stages - parser_mod = subparsers.add_parser('modules', - help=('activates modules using ' - 'a given configuration key')) - parser_mod.add_argument("--mode", '-m', action='store', - help=("module configuration name " - "to use (default: %(default)s)"), - default='config', - choices=('init', 'config', 'final')) - parser_mod.set_defaults(action=('modules', main_modules)) + parser_mod = subparsers.add_parser( + "modules", help="activates modules using a given configuration key" + ) + parser_mod.add_argument( + "--mode", + "-m", + action="store", + help="module configuration name to use (default: %(default)s)", + default="config", + choices=("init", "config", "final"), + ) + parser_mod.set_defaults(action=("modules", main_modules)) # This subcommand allows you to run a single module - parser_single = subparsers.add_parser('single', - help=('run a single module ')) - parser_single.add_argument("--name", '-n', action="store", - help="module name to run", - required=True) - parser_single.add_argument("--frequency", action="store", - help=("frequency of the module"), - required=False, - choices=list(FREQ_SHORT_NAMES.keys())) - parser_single.add_argument("--report", action="store_true", - help="enable reporting", - required=False) - parser_single.add_argument("module_args", nargs="*", - metavar='argument', - help=('any additional arguments to' - ' pass to this module')) - parser_single.set_defaults(action=('single', main_single)) + parser_single = subparsers.add_parser( + "single", help="run a single module " + ) + parser_single.add_argument( + "--name", + "-n", + action="store", + help="module name to run", + required=True, + ) + parser_single.add_argument( + "--frequency", + action="store", + help="frequency of the module", + required=False, + choices=list(FREQ_SHORT_NAMES.keys()), + ) + parser_single.add_argument( + "--report", + action="store_true", + help="enable reporting", + required=False, + ) + parser_single.add_argument( + "module_args", + nargs="*", + metavar="argument", + help="any additional arguments to pass to this module", + ) + parser_single.set_defaults(action=("single", main_single)) parser_query = subparsers.add_parser( - 'query', - help='Query standardized instance metadata from the command line.') + "query", + help="Query standardized instance metadata from the command line.", + ) parser_dhclient = subparsers.add_parser( - dhclient_hook.NAME, help=dhclient_hook.__doc__) + dhclient_hook.NAME, help=dhclient_hook.__doc__ + ) dhclient_hook.get_parser(parser_dhclient) - parser_features = subparsers.add_parser('features', - help=('list defined features')) - parser_features.set_defaults(action=('features', main_features)) + parser_features = subparsers.add_parser( + "features", help="list defined features" + ) + parser_features.set_defaults(action=("features", main_features)) parser_analyze = subparsers.add_parser( - 'analyze', help='Devel tool: Analyze cloud-init logs and data') + "analyze", help="Devel tool: Analyze cloud-init logs and data" + ) - parser_devel = subparsers.add_parser( - 'devel', help='Run development tools') + parser_devel = 
subparsers.add_parser("devel", help="Run development tools") parser_collect_logs = subparsers.add_parser( - 'collect-logs', help='Collect and tar all cloud-init debug info') + "collect-logs", help="Collect and tar all cloud-init debug info" + ) parser_clean = subparsers.add_parser( - 'clean', help='Remove logs and artifacts so cloud-init can re-run.') + "clean", help="Remove logs and artifacts so cloud-init can re-run." + ) parser_status = subparsers.add_parser( - 'status', help='Report cloud-init status or wait on completion.') + "status", help="Report cloud-init status or wait on completion." + ) if sysv_args: # Only load subparsers if subcommand is specified to avoid load cost - if sysv_args[0] == 'analyze': + if sysv_args[0] == "analyze": from cloudinit.analyze.__main__ import get_parser as analyze_parser + # Construct analyze subcommand parser analyze_parser(parser_analyze) - elif sysv_args[0] == 'devel': + elif sysv_args[0] == "devel": from cloudinit.cmd.devel.parser import get_parser as devel_parser + # Construct devel subcommand parser devel_parser(parser_devel) - elif sysv_args[0] == 'collect-logs': + elif sysv_args[0] == "collect-logs": from cloudinit.cmd.devel.logs import ( - get_parser as logs_parser, handle_collect_logs_args) + get_parser as logs_parser, + handle_collect_logs_args, + ) + logs_parser(parser_collect_logs) parser_collect_logs.set_defaults( - action=('collect-logs', handle_collect_logs_args)) - elif sysv_args[0] == 'clean': + action=("collect-logs", handle_collect_logs_args) + ) + elif sysv_args[0] == "clean": from cloudinit.cmd.clean import ( - get_parser as clean_parser, handle_clean_args) + get_parser as clean_parser, + handle_clean_args, + ) + clean_parser(parser_clean) - parser_clean.set_defaults( - action=('clean', handle_clean_args)) - elif sysv_args[0] == 'query': + parser_clean.set_defaults(action=("clean", handle_clean_args)) + elif sysv_args[0] == "query": from cloudinit.cmd.query import ( - get_parser as query_parser, handle_args as handle_query_args) + get_parser as query_parser, + handle_args as handle_query_args, + ) + query_parser(parser_query) - parser_query.set_defaults( - action=('render', handle_query_args)) - elif sysv_args[0] == 'status': + parser_query.set_defaults(action=("render", handle_query_args)) + elif sysv_args[0] == "status": from cloudinit.cmd.status import ( - get_parser as status_parser, handle_status_args) + get_parser as status_parser, + handle_status_args, + ) + status_parser(parser_status) - parser_status.set_defaults( - action=('status', handle_status_args)) + parser_status.set_defaults(action=("status", handle_status_args)) args = parser.parse_args(args=sysv_args) @@ -906,14 +1032,20 @@ def main(sysv_args=None): if args.local: rname, rdesc = ("init-local", "searching for local datasources") else: - rname, rdesc = ("init-network", - "searching for network datasources") + rname, rdesc = ( + "init-network", + "searching for network datasources", + ) elif name == "modules": - rname, rdesc = ("modules-%s" % args.mode, - "running modules for %s" % args.mode) + rname, rdesc = ( + "modules-%s" % args.mode, + "running modules for %s" % args.mode, + ) elif name == "single": - rname, rdesc = ("single/%s" % args.name, - "running single module %s" % args.name) + rname, rdesc = ( + "single/%s" % args.name, + "running single module %s" % args.name, + ) report_on = args.report else: rname = name @@ -921,19 +1053,24 @@ def main(sysv_args=None): report_on = False args.reporter = events.ReportEventStack( - rname, rdesc, 
reporting_enabled=report_on) + rname, rdesc, reporting_enabled=report_on + ) with args.reporter: retval = util.log_time( - logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, - get_uptime=True, func=functor, args=(name, args)) + logfunc=LOG.debug, + msg="cloud-init mode '%s'" % name, + get_uptime=True, + func=functor, + args=(name, args), + ) reporting.flush_events() return retval -if __name__ == '__main__': - if 'TZ' not in os.environ: - os.environ['TZ'] = ":/etc/localtime" +if __name__ == "__main__": + if "TZ" not in os.environ: + os.environ["TZ"] = ":/etc/localtime" return_value = main(sys.argv) if return_value: sys.exit(return_value) diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index e53cd855..46f17699 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -14,22 +14,24 @@ output; if this fails, they are treated as binary. """ import argparse -from errno import EACCES import os import sys +from errno import EACCES +from cloudinit import log, util +from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths from cloudinit.handlers.jinja_template import ( convert_jinja_instance_data, get_jinja_variable_alias, - render_jinja_payload + render_jinja_payload, ) -from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths -from cloudinit import log from cloudinit.sources import ( - INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE) -from cloudinit import util + INSTANCE_JSON_FILE, + INSTANCE_JSON_SENSITIVE_FILE, + REDACT_SENSITIVE_VALUE, +) -NAME = 'query' +NAME = "query" LOG = log.getLogger(NAME) @@ -43,41 +45,79 @@ def get_parser(parser=None): @returns: ArgumentParser with proper argument configuration. """ if not parser: - parser = argparse.ArgumentParser( - prog=NAME, description=__doc__) + parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument( - '-d', '--debug', action='store_true', default=False, - help='Add verbose messages during template render') + "-d", + "--debug", + action="store_true", + default=False, + help="Add verbose messages during template render", + ) parser.add_argument( - '-i', '--instance-data', type=str, - help=('Path to instance-data.json file. Default is /run/cloud-init/%s' - % INSTANCE_JSON_FILE)) + "-i", + "--instance-data", + type=str, + help="Path to instance-data.json file. Default is /run/cloud-init/%s" + % INSTANCE_JSON_FILE, + ) parser.add_argument( - '-l', '--list-keys', action='store_true', default=False, - help=('List query keys available at the provided instance-data' - ' .')) + "-l", + "--list-keys", + action="store_true", + default=False, + help=( + "List query keys available at the provided instance-data" + " ." + ), + ) parser.add_argument( - '-u', '--user-data', type=str, - help=('Path to user-data file. Default is' - ' /var/lib/cloud/instance/user-data.txt')) + "-u", + "--user-data", + type=str, + help=( + "Path to user-data file. Default is" + " /var/lib/cloud/instance/user-data.txt" + ), + ) parser.add_argument( - '-v', '--vendor-data', type=str, - help=('Path to vendor-data file. Default is' - ' /var/lib/cloud/instance/vendor-data.txt')) + "-v", + "--vendor-data", + type=str, + help=( + "Path to vendor-data file. Default is" + " /var/lib/cloud/instance/vendor-data.txt" + ), + ) parser.add_argument( - 'varname', type=str, nargs='?', - help=('A dot-delimited specific variable to query from' - ' instance-data. For example: v1.local_hostname. If the' - ' value is not JSON serializable, it will be base64-encoded and' - ' will contain the prefix "ci-b64:". 
')) + "varname", + type=str, + nargs="?", + help=( + "A dot-delimited specific variable to query from" + " instance-data. For example: v1.local_hostname. If the" + " value is not JSON serializable, it will be base64-encoded and" + ' will contain the prefix "ci-b64:". ' + ), + ) parser.add_argument( - '-a', '--all', action='store_true', default=False, dest='dump_all', - help='Dump all available instance-data') + "-a", + "--all", + action="store_true", + default=False, + dest="dump_all", + help="Dump all available instance-data", + ) parser.add_argument( - '-f', '--format', type=str, dest='format', - help=('Optionally specify a custom output format string. Any' - ' instance-data variable can be specified between double-curly' - ' braces. For example -f "{{ v2.cloud_name }}"')) + "-f", + "--format", + type=str, + dest="format", + help=( + "Optionally specify a custom output format string. Any" + " instance-data variable can be specified between double-curly" + ' braces. For example -f "{{ v2.cloud_name }}"' + ), + ) return parser @@ -91,7 +131,7 @@ def load_userdata(ud_file_path): """ bdata = util.load_file(ud_file_path, decode=False) try: - return bdata.decode('utf-8') + return bdata.decode("utf-8") except UnicodeDecodeError: return util.decomp_gzip(bdata, quiet=False, decode=True) @@ -118,13 +158,15 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: sensitive_data_fn = os.path.join( - paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE + ) if os.path.exists(sensitive_data_fn): instance_data_fn = sensitive_data_fn else: LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - sensitive_data_fn, redacted_data_fn + "Missing root-readable %s. Using redacted %s instead.", + sensitive_data_fn, + redacted_data_fn, ) instance_data_fn = redacted_data_fn else: @@ -132,11 +174,11 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: if user_data: user_data_fn = user_data else: - user_data_fn = os.path.join(paths.instance_link, 'user-data.txt') + user_data_fn = os.path.join(paths.instance_link, "user-data.txt") if vendor_data: vendor_data_fn = vendor_data else: - vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt') + vendor_data_fn = os.path.join(paths.instance_link, "vendor-data.txt") try: instance_json = util.load_file(instance_data_fn) @@ -144,24 +186,30 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: if e.errno == EACCES: LOG.error("No read permission on '%s'. 
Try sudo", instance_data_fn) else: - LOG.error('Missing instance-data file: %s', instance_data_fn) + LOG.error("Missing instance-data file: %s", instance_data_fn) raise instance_data = util.load_json(instance_json) if uid != 0: - instance_data['userdata'] = ( - '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn)) - instance_data['vendordata'] = ( - '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn)) + instance_data["userdata"] = "<%s> file:%s" % ( + REDACT_SENSITIVE_VALUE, + user_data_fn, + ) + instance_data["vendordata"] = "<%s> file:%s" % ( + REDACT_SENSITIVE_VALUE, + vendor_data_fn, + ) else: - instance_data['userdata'] = load_userdata(user_data_fn) - instance_data['vendordata'] = load_userdata(vendor_data_fn) + instance_data["userdata"] = load_userdata(user_data_fn) + instance_data["vendordata"] = load_userdata(vendor_data_fn) return instance_data def _find_instance_data_leaf_by_varname_path( - jinja_vars_without_aliases: dict, jinja_vars_with_aliases: dict, - varname: str, list_keys: bool + jinja_vars_without_aliases: dict, + jinja_vars_with_aliases: dict, + varname: str, + list_keys: bool, ): """Return the value of the dot-delimited varname path in instance-data @@ -174,7 +222,7 @@ def _find_instance_data_leaf_by_varname_path( """ walked_key_path = "" response = jinja_vars_without_aliases - for key_path_part in varname.split('.'): + for key_path_part in varname.split("."): try: # Walk key path using complete aliases dict, yet response # should only contain jinja_without_aliases @@ -205,8 +253,9 @@ def handle_args(name, args): addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) if not any([args.list_keys, args.varname, args.format, args.dump_all]): LOG.error( - 'Expected one of the options: --all, --format,' - ' --list-keys or varname') + "Expected one of the options: --all, --format," + " --list-keys or varname" + ) get_parser().print_help() return 1 try: @@ -216,11 +265,13 @@ def handle_args(name, args): except (IOError, OSError): return 1 if args.format: - payload = '## template: jinja\n{fmt}'.format(fmt=args.format) + payload = "## template: jinja\n{fmt}".format(fmt=args.format) rendered_payload = render_jinja_payload( - payload=payload, payload_fn='query commandline', + payload=payload, + payload_fn="query commandline", instance_data=instance_data, - debug=True if args.debug else False) + debug=True if args.debug else False, + ) if rendered_payload: print(rendered_payload) return 0 @@ -240,7 +291,7 @@ def handle_args(name, args): jinja_vars_without_aliases=response, jinja_vars_with_aliases=jinja_vars_with_aliases, varname=args.varname, - list_keys=args.list_keys + list_keys=args.list_keys, ) except (KeyError, ValueError) as e: LOG.error(e) @@ -248,11 +299,10 @@ def handle_args(name, args): if args.list_keys: if not isinstance(response, dict): LOG.error( - "--list-keys provided but '%s' is not a dict", - args.varname + "--list-keys provided but '%s' is not a dict", args.varname ) return 1 - response = '\n'.join(sorted(response.keys())) + response = "\n".join(sorted(response.keys())) if not isinstance(response, str): response = util.json_dumps(response) print(response) @@ -265,7 +315,7 @@ def main(): sys.exit(handle_args(NAME, parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py index ea79a85b..cff16c34 100644 --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py @@ -7,20 +7,20 @@ import argparse import os import sys -from time import 
gmtime, strftime, sleep +from time import gmtime, sleep, strftime from cloudinit.distros import uses_systemd from cloudinit.stages import Init from cloudinit.util import get_cmdline, load_file, load_json -CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled' +CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled" # customer visible status messages -STATUS_ENABLED_NOT_RUN = 'not run' -STATUS_RUNNING = 'running' -STATUS_DONE = 'done' -STATUS_ERROR = 'error' -STATUS_DISABLED = 'disabled' +STATUS_ENABLED_NOT_RUN = "not run" +STATUS_RUNNING = "running" +STATUS_DONE = "done" +STATUS_ERROR = "error" +STATUS_DISABLED = "disabled" def get_parser(parser=None): @@ -34,15 +34,25 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='status', - description='Report run status of cloud init') + prog="status", description="Report run status of cloud init" + ) parser.add_argument( - '-l', '--long', action='store_true', default=False, - help=('Report long format of statuses including run stage name and' - ' error messages')) + "-l", + "--long", + action="store_true", + default=False, + help=( + "Report long format of statuses including run stage name and" + " error messages" + ), + ) parser.add_argument( - '-w', '--wait', action='store_true', default=False, - help='Block waiting on cloud-init to complete') + "-w", + "--wait", + action="store_true", + default=False, + help="Block waiting on cloud-init to complete", + ) return parser @@ -55,18 +65,18 @@ def handle_status_args(name, args): status, status_detail, time = _get_status_details(init.paths) if args.wait: while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING): - sys.stdout.write('.') + sys.stdout.write(".") sys.stdout.flush() status, status_detail, time = _get_status_details(init.paths) sleep(0.25) - sys.stdout.write('\n') + sys.stdout.write("\n") if args.long: - print('status: {0}'.format(status)) + print("status: {0}".format(status)) if time: - print('time: {0}'.format(time)) - print('detail:\n{0}'.format(status_detail)) + print("time: {0}".format(time)) + print("detail:\n{0}".format(status_detail)) else: - print('status: {0}'.format(status)) + print("status: {0}".format(status)) return 1 if status == STATUS_ERROR else 0 @@ -81,20 +91,20 @@ def _is_cloudinit_disabled(disable_file, paths): is_disabled = False cmdline_parts = get_cmdline().split() if not uses_systemd(): - reason = 'Cloud-init enabled on sysvinit' - elif 'cloud-init=enabled' in cmdline_parts: - reason = 'Cloud-init enabled by kernel command line cloud-init=enabled' + reason = "Cloud-init enabled on sysvinit" + elif "cloud-init=enabled" in cmdline_parts: + reason = "Cloud-init enabled by kernel command line cloud-init=enabled" elif os.path.exists(disable_file): is_disabled = True - reason = 'Cloud-init disabled by {0}'.format(disable_file) - elif 'cloud-init=disabled' in cmdline_parts: + reason = "Cloud-init disabled by {0}".format(disable_file) + elif "cloud-init=disabled" in cmdline_parts: is_disabled = True - reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled' - elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')): + reason = "Cloud-init disabled by kernel parameter cloud-init=disabled" + elif not os.path.exists(os.path.join(paths.run_dir, "enabled")): is_disabled = True - reason = 'Cloud-init disabled by cloud-init-generator' + reason = "Cloud-init disabled by cloud-init-generator" else: - reason = 'Cloud-init enabled by systemd cloud-init-generator' + reason = "Cloud-init enabled by systemd 
cloud-init-generator" return (is_disabled, reason) @@ -106,34 +116,35 @@ def _get_status_details(paths): Values are obtained from parsing paths.run_dir/status.json. """ status = STATUS_ENABLED_NOT_RUN - status_detail = '' + status_detail = "" status_v1 = {} - status_file = os.path.join(paths.run_dir, 'status.json') - result_file = os.path.join(paths.run_dir, 'result.json') + status_file = os.path.join(paths.run_dir, "status.json") + result_file = os.path.join(paths.run_dir, "result.json") (is_disabled, reason) = _is_cloudinit_disabled( - CLOUDINIT_DISABLED_FILE, paths) + CLOUDINIT_DISABLED_FILE, paths + ) if is_disabled: status = STATUS_DISABLED status_detail = reason if os.path.exists(status_file): if not os.path.exists(result_file): status = STATUS_RUNNING - status_v1 = load_json(load_file(status_file)).get('v1', {}) + status_v1 = load_json(load_file(status_file)).get("v1", {}) errors = [] latest_event = 0 for key, value in sorted(status_v1.items()): - if key == 'stage': + if key == "stage": if value: status = STATUS_RUNNING - status_detail = 'Running in stage: {0}'.format(value) - elif key == 'datasource': + status_detail = "Running in stage: {0}".format(value) + elif key == "datasource": status_detail = value elif isinstance(value, dict): - errors.extend(value.get('errors', [])) - start = value.get('start') or 0 - finished = value.get('finished') or 0 + errors.extend(value.get("errors", [])) + start = value.get("start") or 0 + finished = value.get("finished") or 0 if finished == 0 and start != 0: status = STATUS_RUNNING event_time = max(start, finished) @@ -141,23 +152,23 @@ def _get_status_details(paths): latest_event = event_time if errors: status = STATUS_ERROR - status_detail = '\n'.join(errors) + status_detail = "\n".join(errors) elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0: status = STATUS_DONE if latest_event: - time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event)) + time = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event)) else: - time = '' + time = "" return status, status_detail, time def main(): """Tool to report status of cloud-init.""" parser = get_parser() - sys.exit(handle_status_args('status', parser.parse_args())) + sys.exit(handle_status_args("status", parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py index 0ef9a748..ed124180 100644 --- a/cloudinit/config/__init__.py +++ b/cloudinit/config/__init__.py @@ -6,9 +6,8 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
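
[For reference on the cmd/status.py hunk above: _get_status_details() walks the "v1" tree that status_wrapper() (see the main.py hunk earlier) writes to status.json in paths.run_dir. A sketch of that shape with illustrative timestamps, showing how an unfinished stage is classified:

    status_v1 = {
        "stage": None,                      # set while a stage is active
        "datasource": "DataSourceNoCloud",  # example value
        "init": {
            "start": 1638568000.0, "finished": 1638568004.9, "errors": []
        },
        "modules-final": {
            "start": 1638568010.0, "finished": 0, "errors": []
        },
    }
    # finished == 0 while start != 0 marks modules-final as still running,
    # so the overall status is reported as "running"; any collected errors
    # would flip it to "error" instead.
]
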
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES) - from cloudinit import log as logging +from cloudinit.settings import FREQUENCIES, PER_INSTANCE LOG = logging.getLogger(__name__) @@ -22,26 +21,27 @@ MOD_PREFIX = "cc_" def form_module_name(name): canon_name = name.replace("-", "_") if canon_name.lower().endswith(".py"): - canon_name = canon_name[0:(len(canon_name) - 3)] + canon_name = canon_name[0 : (len(canon_name) - 3)] canon_name = canon_name.strip() if not canon_name: return None if not canon_name.startswith(MOD_PREFIX): - canon_name = '%s%s' % (MOD_PREFIX, canon_name) + canon_name = "%s%s" % (MOD_PREFIX, canon_name) return canon_name def fixup_module(mod, def_freq=PER_INSTANCE): - if not hasattr(mod, 'frequency'): - setattr(mod, 'frequency', def_freq) + if not hasattr(mod, "frequency"): + setattr(mod, "frequency", def_freq) else: freq = mod.frequency if freq and freq not in FREQUENCIES: LOG.warning("Module %s has an unknown frequency %s", mod, freq) - if not hasattr(mod, 'distros'): - setattr(mod, 'distros', []) - if not hasattr(mod, 'osfamilies'): - setattr(mod, 'osfamilies', []) + if not hasattr(mod, "distros"): + setattr(mod, "distros", []) + if not hasattr(mod, "osfamilies"): + setattr(mod, "osfamilies", []) return mod + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py index d227a58d..a615c814 100644 --- a/cloudinit/config/cc_apk_configure.py +++ b/cloudinit/config/cc_apk_configure.py @@ -9,9 +9,7 @@ from textwrap import dedent from cloudinit import log as logging -from cloudinit import temp_utils -from cloudinit import templater -from cloudinit import util +from cloudinit import temp_utils, templater, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE @@ -54,34 +52,41 @@ REPOSITORIES_TEMPLATE = """\ frequency = PER_INSTANCE -distros = ['alpine'] +distros = ["alpine"] meta = { - 'id': 'cc_apk_configure', - 'name': 'APK Configure', - 'title': 'Configure apk repositories file', - 'description': dedent("""\ + "id": "cc_apk_configure", + "name": "APK Configure", + "title": "Configure apk repositories file", + "description": dedent( + """\ This module handles configuration of the /etc/apk/repositories file. .. note:: To ensure that apk configuration is valid yaml, any strings containing special characters, especially ``:`` should be quoted. - """), - 'distros': distros, - 'examples': [ - dedent("""\ + """ + ), + "distros": distros, + "examples": [ + dedent( + """\ # Keep the existing /etc/apk/repositories file unaltered. apk_repos: preserve_repositories: true - """), - dedent("""\ + """ + ), + dedent( + """\ # Create repositories file for Alpine v3.12 main and community # using default mirror site. apk_repos: alpine_repo: community_enabled: true version: 'v3.12' - """), - dedent("""\ + """ + ), + dedent( + """\ # Create repositories file for Alpine Edge main, community, and # testing using a specified mirror site and also a local repo. 
apk_repos: @@ -91,21 +96,23 @@ meta = { testing_enabled: true version: 'edge' local_repo_base_url: 'https://my-local-server/local-alpine' - """), + """ + ), ], - 'frequency': frequency, + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'apk_repos': { - 'type': 'object', - 'properties': { - 'preserve_repositories': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "type": "object", + "properties": { + "apk_repos": { + "type": "object", + "properties": { + "preserve_repositories": { + "type": "boolean", + "default": False, + "description": dedent( + """\ By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos @@ -116,33 +123,41 @@ schema = { The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``. - """) + """ + ), }, - 'alpine_repo': { - 'type': ['object', 'null'], - 'properties': { - 'base_url': { - 'type': 'string', - 'default': DEFAULT_MIRROR, - 'description': dedent("""\ + "alpine_repo": { + "type": ["object", "null"], + "properties": { + "base_url": { + "type": "string", + "default": DEFAULT_MIRROR, + "description": dedent( + """\ The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``{}`` - """.format(DEFAULT_MIRROR)) + """.format( + DEFAULT_MIRROR + ) + ), }, - 'community_enabled': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "community_enabled": { + "type": "boolean", + "default": False, + "description": dedent( + """\ Whether to add the Community repo to the repositories file. By default the Community repo is not included. - """) + """ + ), }, - 'testing_enabled': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "testing_enabled": { + "type": "boolean", + "default": False, + "description": dedent( + """\ Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended @@ -151,32 +166,37 @@ schema = { installed from Testing may have dependancies that conflict with those in non-Edge Main or Community repos." - """) + """ + ), }, - 'version': { - 'type': 'string', - 'description': dedent("""\ + "version": { + "type": "string", + "description": dedent( + """\ The Alpine version to use (e.g. ``v3.12`` or ``edge``) - """) + """ + ), }, }, - 'required': ['version'], - 'minProperties': 1, - 'additionalProperties': False, + "required": ["version"], + "minProperties": 1, + "additionalProperties": False, }, - 'local_repo_base_url': { - 'type': 'string', - 'description': dedent("""\ + "local_repo_base_url": { + "type": "string", + "description": dedent( + """\ The base URL of an Alpine repository containing unofficial packages - """) - } + """ + ), + }, }, - 'minProperties': 1, # Either preserve_repositories or alpine_repo - 'additionalProperties': False, + "minProperties": 1, # Either preserve_repositories or alpine_repo + "additionalProperties": False, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -195,38 +215,44 @@ def handle(name, cfg, cloud, log, _args): # If there is no "apk_repos" section in the configuration # then do nothing. 
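
[The early returns that follow give cc_apk_configure a simple precedence: no apk_repos section means do nothing, preserve_repositories wins over every other key, and alpine_repo.version is the minimum needed to render anything. When rendering does happen, _write_repositories_file() (later in this hunk) feeds the template a params dict along these lines; the values are illustrative and the mirror URL is a placeholder, not the real DEFAULT_MIRROR:

    params = {
        "alpine_baseurl": "https://some-mirror.example/alpine",
        "alpine_version": "v3.12",
        "community_enabled": True,
        "testing_enabled": False,
        "local_repo": "",  # empty string: no local repo entry is emitted
    }
]
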
- apk_section = cfg.get('apk_repos') + apk_section = cfg.get("apk_repos") if not apk_section: - LOG.debug(("Skipping module named %s," - " no 'apk_repos' section found"), name) + LOG.debug( + "Skipping module named %s, no 'apk_repos' section found", name + ) return validate_cloudconfig_schema(cfg, schema) # If "preserve_repositories" is explicitly set to True in # the configuration do nothing. - if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False): - LOG.debug(("Skipping module named %s," - " 'preserve_repositories' is set"), name) + if util.get_cfg_option_bool(apk_section, "preserve_repositories", False): + LOG.debug( + "Skipping module named %s, 'preserve_repositories' is set", name + ) return # If there is no "alpine_repo" subsection of "apk_repos" present in the # configuration then do nothing, as at least "version" is required to # create valid repositories entries. - alpine_repo = apk_section.get('alpine_repo') + alpine_repo = apk_section.get("alpine_repo") if not alpine_repo: - LOG.debug(("Skipping module named %s," - " no 'alpine_repo' configuration found"), name) + LOG.debug( + "Skipping module named %s, no 'alpine_repo' configuration found", + name, + ) return # If there is no "version" value present in configuration then do nothing. - alpine_version = alpine_repo.get('version') + alpine_version = alpine_repo.get("version") if not alpine_version: - LOG.debug(("Skipping module named %s," - " 'version' not specified in alpine_repo"), name) + LOG.debug( + "Skipping module named %s, 'version' not specified in alpine_repo", + name, + ) return - local_repo = apk_section.get('local_repo_base_url', '') + local_repo = apk_section.get("local_repo_base_url", "") _write_repositories_file(alpine_repo, alpine_version, local_repo) @@ -240,22 +266,23 @@ def _write_repositories_file(alpine_repo, alpine_version, local_repo): @param local_repo: A string containing the base URL of a local repo. 
""" - repo_file = '/etc/apk/repositories' + repo_file = "/etc/apk/repositories" - alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR) + alpine_baseurl = alpine_repo.get("base_url", DEFAULT_MIRROR) - params = {'alpine_baseurl': alpine_baseurl, - 'alpine_version': alpine_version, - 'community_enabled': alpine_repo.get('community_enabled'), - 'testing_enabled': alpine_repo.get('testing_enabled'), - 'local_repo': local_repo} + params = { + "alpine_baseurl": alpine_baseurl, + "alpine_version": alpine_version, + "community_enabled": alpine_repo.get("community_enabled"), + "testing_enabled": alpine_repo.get("testing_enabled"), + "local_repo": local_repo, + } - tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl") template_fn = tfile[1] # Filepath is second item in tuple util.write_file(template_fn, content=REPOSITORIES_TEMPLATE) - LOG.debug('Generating Alpine repository configuration file: %s', - repo_file) + LOG.debug("Generating Alpine repository configuration file: %s", repo_file) templater.render_to_file(template_fn, repo_file, params) # Clean up temporary template util.del_file(template_fn) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 2e844c2c..b0728517 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -10,16 +10,14 @@ import glob import os -import re import pathlib +import re from textwrap import dedent -from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import gpg from cloudinit import log as logging -from cloudinit import subp -from cloudinit import templater -from cloudinit import util +from cloudinit import subp, templater, util +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -27,59 +25,46 @@ LOG = logging.getLogger(__name__) # this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar') ADD_APT_REPO_MATCH = r"^[\w-]+:\w" -APT_LOCAL_KEYS = '/etc/apt/trusted.gpg' -APT_TRUSTED_GPG_DIR = '/etc/apt/trusted.gpg.d/' -CLOUD_INIT_GPG_DIR = '/etc/apt/cloud-init.gpg.d/' +APT_LOCAL_KEYS = "/etc/apt/trusted.gpg" +APT_TRUSTED_GPG_DIR = "/etc/apt/trusted.gpg.d/" +CLOUD_INIT_GPG_DIR = "/etc/apt/cloud-init.gpg.d/" frequency = PER_INSTANCE distros = ["ubuntu", "debian"] mirror_property = { - 'type': 'array', - 'items': { - 'type': 'object', - 'additionalProperties': False, - 'required': ['arches'], - 'properties': { - 'arches': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'minItems': 1 - }, - 'uri': { - 'type': 'string', - 'format': 'uri' - }, - 'search': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'uri' - }, - 'minItems': 1 - }, - 'search_dns': { - 'type': 'boolean', + "type": "array", + "items": { + "type": "object", + "additionalProperties": False, + "required": ["arches"], + "properties": { + "arches": { + "type": "array", + "items": {"type": "string"}, + "minItems": 1, }, - 'keyid': { - 'type': 'string' + "uri": {"type": "string", "format": "uri"}, + "search": { + "type": "array", + "items": {"type": "string", "format": "uri"}, + "minItems": 1, }, - 'key': { - 'type': 'string' + "search_dns": { + "type": "boolean", }, - 'keyserver': { - 'type': 'string' - } - } - } + "keyid": {"type": "string"}, + "key": {"type": "string"}, + "keyserver": {"type": "string"}, + }, + }, } meta = { - 'id': 'cc_apt_configure', - 'name': 'Apt 
Configure', - 'title': 'Configure apt for the user', - 'description': dedent("""\ + "id": "cc_apt_configure", + "name": "Apt Configure", + "title": "Configure apt for the user", + "description": dedent( + """\ This module handles both configuration of apt options and adding source lists. There are configuration options such as ``apt_get_wrapper`` and ``apt_get_command`` that control how @@ -94,9 +79,12 @@ meta = { .. note:: For more information about apt configuration, see the - ``Additional apt configuration`` example."""), - 'distros': distros, - 'examples': [dedent("""\ + ``Additional apt configuration`` example.""" + ), + "distros": distros, + "examples": [ + dedent( + """\ apt: preserve_sources_list: false disable_suites: @@ -153,21 +141,24 @@ meta = { key: | ------BEGIN PGP PUBLIC KEY BLOCK------- - ------END PGP PUBLIC KEY BLOCK-------""")], - 'frequency': frequency, + ------END PGP PUBLIC KEY BLOCK-------""" + ) + ], + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'apt': { - 'type': 'object', - 'additionalProperties': False, - 'properties': { - 'preserve_sources_list': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "type": "object", + "properties": { + "apt": { + "type": "object", + "additionalProperties": False, + "properties": { + "preserve_sources_list": { + "type": "boolean", + "default": False, + "description": dedent( + """\ By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this @@ -179,15 +170,15 @@ schema = { all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added - to ``sources.list.d``.""") + to ``sources.list.d``.""" + ), }, - 'disable_suites': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "disable_suites": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": True, + "description": dedent( + """\ Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is @@ -206,11 +197,13 @@ schema = { When a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it - is just commented out.""") + is just commented out.""" + ), }, - 'primary': { + "primary": { **mirror_property, - 'description': dedent("""\ + "description": dedent( + """\ The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the @@ -264,27 +257,35 @@ schema = { ``http://archive.ubuntu.com/ubuntu``. - ``security`` => \ ``http://security.ubuntu.com/ubuntu`` - """) + """ + ), }, - 'security': { + "security": { **mirror_property, - 'description': dedent("""\ - Please refer to the primary config documentation""") + "description": dedent( + """\ + Please refer to the primary config documentation""" + ), }, - 'add_apt_repo_match': { - 'type': 'string', - 'default': ADD_APT_REPO_MATCH, - 'description': dedent("""\ + "add_apt_repo_match": { + "type": "string", + "default": ADD_APT_REPO_MATCH, + "description": dedent( + """\ All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. 
If ``add_apt_repo_match`` is not specified, it - defaults to ``{}``""".format(ADD_APT_REPO_MATCH)) + defaults to ``{}``""".format( + ADD_APT_REPO_MATCH + ) + ), }, - 'debconf_selections': { - 'type': 'object', - 'items': {'type': 'string'}, - 'description': dedent("""\ + "debconf_selections": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a @@ -308,11 +309,13 @@ schema = { For example: \ ``ippackage ippackage/ip string 127.0.01`` - """) + """ + ), }, - 'sources_list': { - 'type': 'string', - 'description': dedent("""\ + "sources_list": { + "type": "string", + "description": dedent( + """\ Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. Within @@ -323,45 +326,55 @@ schema = { - ``$RELEASE`` - ``$PRIMARY`` - ``$SECURITY`` - - ``$KEY_FILE``""") + - ``$KEY_FILE``""" + ), }, - 'conf': { - 'type': 'string', - 'description': dedent("""\ + "conf": { + "type": "string", + "description": dedent( + """\ Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline apt configuration, make sure - to follow yaml syntax.""") + to follow yaml syntax.""" + ), }, - 'https_proxy': { - 'type': 'string', - 'description': dedent("""\ + "https_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify https apt proxy. https proxy url is specified in the format - ``https://[[user][:pass]@]host[:port]/``.""") + ``https://[[user][:pass]@]host[:port]/``.""" + ), }, - 'http_proxy': { - 'type': 'string', - 'description': dedent("""\ + "http_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify http apt proxy. http proxy url is specified in the format - ``http://[[user][:pass]@]host[:port]/``.""") + ``http://[[user][:pass]@]host[:port]/``.""" + ), }, - 'proxy': { - 'type': 'string', - 'description': 'Alias for defining a http apt proxy.' + "proxy": { + "type": "string", + "description": "Alias for defining a http apt proxy.", }, - 'ftp_proxy': { - 'type': 'string', - 'description': dedent("""\ + "ftp_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify ftp apt proxy. 
ftp proxy url is specified in the format - ``ftp://[[user][:pass]@]host[:port]/``.""") + ``ftp://[[user][:pass]@]host[:port]/``.""" + ), }, - 'sources': { - 'type': 'object', - 'items': {'type': 'string'}, - 'description': dedent("""\ + "sources": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source @@ -394,11 +407,12 @@ schema = { - ``$PRIMARY`` - ``$SECURITY`` - ``$RELEASE`` - - ``$KEY_FILE``""") - } - } + - ``$KEY_FILE``""" + ), + }, + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -415,18 +429,22 @@ APT_PROXY_FN = "/etc/apt/apt.conf.d/90cloud-init-aptproxy" DEFAULT_KEYSERVER = "keyserver.ubuntu.com" # Default archive mirrors -PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/", - "SECURITY": "http://security.ubuntu.com/ubuntu/"} -PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", - "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"} -PRIMARY_ARCHES = ['amd64', 'i386'] -PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el', 'riscv64'] +PRIMARY_ARCH_MIRRORS = { + "PRIMARY": "http://archive.ubuntu.com/ubuntu/", + "SECURITY": "http://security.ubuntu.com/ubuntu/", +} +PORTS_MIRRORS = { + "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", + "SECURITY": "http://ports.ubuntu.com/ubuntu-ports", +} +PRIMARY_ARCHES = ["amd64", "i386"] +PORTS_ARCHES = ["s390x", "arm64", "armhf", "powerpc", "ppc64el", "riscv64"] def get_default_mirrors(arch=None, target=None): """returns the default mirrors for the target. These depend on the - architecture, for more see: - https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" + architecture, for more see: + https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" if arch is None: arch = util.get_dpkg_architecture(target) if arch in PRIMARY_ARCHES: @@ -438,8 +456,8 @@ def get_default_mirrors(arch=None, target=None): def handle(name, ocfg, cloud, log, _): """process the config for apt_config. This can be called from - curthooks if a global apt config was provided or via the "apt" - standalone command.""" + curthooks if a global apt config was provided or via the "apt" + standalone command.""" # keeping code close to curtin codebase via entry handler target = None if log is not None: @@ -447,12 +465,14 @@ def handle(name, ocfg, cloud, log, _): LOG = log # feed back converted config, but only work on the subset under 'apt' ocfg = convert_to_v3_apt_format(ocfg) - cfg = ocfg.get('apt', {}) + cfg = ocfg.get("apt", {}) if not isinstance(cfg, dict): raise ValueError( "Expected dictionary for 'apt' config, found {config_type}".format( - config_type=type(cfg))) + config_type=type(cfg) + ) + ) validate_cloudconfig_schema(cfg, schema) apply_debconf_selections(cfg, target) @@ -463,7 +483,7 @@ def _should_configure_on_empty_apt(): # if no config was provided, should apt configuration be done? if util.system_is_snappy(): return False, "system is snappy." - if not (subp.which('apt-get') or subp.which('apt')): + if not (subp.which("apt-get") or subp.which("apt")): return False, "no apt commands." return True, "Apt is available." 
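An illustrative aside, not part of the patch: the mirror defaults reformatted above are keyed on dpkg architecture. A minimal sketch of the expected behaviour, assuming the import path cloudinit.config.cc_apt_configure from the diff header:

    # Sketch only: amd64/i386 resolve to the main Ubuntu archive,
    # every other listed architecture to the ports archive.
    from cloudinit.config.cc_apt_configure import get_default_mirrors

    assert get_default_mirrors("amd64") == {
        "PRIMARY": "http://archive.ubuntu.com/ubuntu/",
        "SECURITY": "http://security.ubuntu.com/ubuntu/",
    }
    assert get_default_mirrors("arm64") == {
        "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
        "SECURITY": "http://ports.ubuntu.com/ubuntu-ports",
    }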
@@ -478,12 +498,12 @@ def apply_apt(cfg, cloud, target): LOG.debug("handling apt config: %s", cfg) - release = util.lsb_release(target=target)['codename'] + release = util.lsb_release(target=target)["codename"] arch = util.get_dpkg_architecture(target) mirrors = find_apt_mirror_info(cfg, cloud, arch=arch) LOG.debug("Apt Mirror info: %s", mirrors) - if util.is_false(cfg.get('preserve_sources_list', False)): + if util.is_false(cfg.get("preserve_sources_list", False)): add_mirror_keys(cfg, target) generate_sources_list(cfg, release, mirrors, cloud) rename_apt_lists(mirrors, target, arch) @@ -494,25 +514,34 @@ def apply_apt(cfg, cloud, target): LOG.exception("Failed to apply proxy or apt config info:") # Process 'apt_source -> sources {dict}' - if 'sources' in cfg: + if "sources" in cfg: params = mirrors - params['RELEASE'] = release - params['MIRROR'] = mirrors["MIRROR"] + params["RELEASE"] = release + params["MIRROR"] = mirrors["MIRROR"] matcher = None - matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH) + matchcfg = cfg.get("add_apt_repo_match", ADD_APT_REPO_MATCH) if matchcfg: matcher = re.compile(matchcfg).search - add_apt_sources(cfg['sources'], cloud, target=target, - template_params=params, aa_repo_match=matcher) + add_apt_sources( + cfg["sources"], + cloud, + target=target, + template_params=params, + aa_repo_match=matcher, + ) def debconf_set_selections(selections, target=None): - if not selections.endswith(b'\n'): - selections += b'\n' - subp.subp(['debconf-set-selections'], data=selections, target=target, - capture=True) + if not selections.endswith(b"\n"): + selections += b"\n" + subp.subp( + ["debconf-set-selections"], + data=selections, + target=target, + capture=True, + ) def dpkg_reconfigure(packages, target=None): @@ -532,12 +561,20 @@ def dpkg_reconfigure(packages, target=None): unhandled.append(pkg) if len(unhandled): - LOG.warning("The following packages were installed and preseeded, " - "but cannot be unconfigured: %s", unhandled) + LOG.warning( + "The following packages were installed and preseeded, " + "but cannot be unconfigured: %s", + unhandled, + ) if len(to_config): - subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] + - list(to_config), data=None, target=target, capture=True) + subp.subp( + ["dpkg-reconfigure", "--frontend=noninteractive"] + + list(to_config), + data=None, + target=target, + capture=True, + ) def apply_debconf_selections(cfg, target=None): @@ -546,13 +583,12 @@ def apply_debconf_selections(cfg, target=None): # set1: | # cloud-init cloud-init/datasources multiselect MAAS # set2: pkg pkg/value string bar - selsets = cfg.get('debconf_selections') + selsets = cfg.get("debconf_selections") if not selsets: LOG.debug("debconf_selections was not set in config") return - selections = '\n'.join( - [selsets[key] for key in sorted(selsets.keys())]) + selections = "\n".join([selsets[key] for key in sorted(selsets.keys())]) debconf_set_selections(selections.encode(), target=target) # get a complete list of packages listed in input @@ -579,7 +615,8 @@ def apply_debconf_selections(cfg, target=None): def clean_cloud_init(target): """clean out any local cloud-init config""" flist = glob.glob( - subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) + subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*") + ) LOG.debug("cleaning cloud-init config from: %s", flist) for dpkg_cfg in flist: @@ -588,18 +625,18 @@ def clean_cloud_init(target): def mirrorurl_to_apt_fileprefix(mirror): """mirrorurl_to_apt_fileprefix - Convert a mirror url to the 
file prefix used by apt on disk to
-       store cache information for that mirror.
-       To do so do:
-       - take off ???://
-       - drop trailing /
-       - convert in string / to _"""
+    Convert a mirror url to the file prefix used by apt on disk to
+    store cache information for that mirror.
+    To do so do:
+    - take off ???://
+    - drop trailing /
+    - convert in string / to _"""
     string = mirror
     if string.endswith("/"):
         string = string[0:-1]
     pos = string.find("://")
     if pos >= 0:
-        string = string[pos + 3:]
+        string = string[pos + 3 :]
     string = string.replace("/", "_")
     return string
@@ -631,8 +668,8 @@ def rename_apt_lists(new_mirrors, target, arch):

 def mirror_to_placeholder(tmpl, mirror, placeholder):
     """mirror_to_placeholder
-       replace the specified mirror in a template with a placeholder string
-       Checks for existence of the expected mirror and warns if not found"""
+    replace the specified mirror in a template with a placeholder string
+    Checks for existence of the expected mirror and warns if not found"""
     if mirror not in tmpl:
         LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl)
     return tmpl.replace(mirror, placeholder)
@@ -640,13 +677,15 @@ def mirror_to_placeholder(tmpl, mirror, placeholder):

 def map_known_suites(suite):
     """there are a few default names which will be auto-extended.
-       This comes at the inability to use those names literally as suites,
-       but on the other hand increases readability of the cfg quite a lot"""
-    mapping = {'updates': '$RELEASE-updates',
-               'backports': '$RELEASE-backports',
-               'security': '$RELEASE-security',
-               'proposed': '$RELEASE-proposed',
-               'release': '$RELEASE'}
+    This comes at the inability to use those names literally as suites,
+    but on the other hand increases readability of the cfg quite a lot"""
+    mapping = {
+        "updates": "$RELEASE-updates",
+        "backports": "$RELEASE-backports",
+        "security": "$RELEASE-security",
+        "proposed": "$RELEASE-proposed",
+        "release": "$RELEASE",
+    }
     try:
         retsuite = mapping[suite]
     except KeyError:
@@ -656,14 +695,14 @@ def map_known_suites(suite):

 def disable_suites(disabled, src, release):
     """reads the config for suites to be disabled and removes those
-       from the template"""
+    from the template"""
     if not disabled:
         return src

     retsrc = src
     for suite in disabled:

         suite = map_known_suites(suite)
-        releasesuite = templater.render_string(suite, {'RELEASE': release})
+        releasesuite = templater.render_string(suite, {"RELEASE": release})
         LOG.debug("Disabling suite %s as %s", suite, releasesuite)

         newsrc = ""
@@ -685,7 +724,7 @@ def disable_suites(disabled, src, release):
                     break

             if cols[pcol] == releasesuite:
-                line = '# suite disabled by cloud-init: %s' % line
+                line = "# suite disabled by cloud-init: %s" % line
             newsrc += line
         retsrc = newsrc
@@ -694,36 +733,38 @@ def disable_suites(disabled, src, release):

 def add_mirror_keys(cfg, target):
     """Adds any keys included in the primary/security mirror clauses"""
-    for key in ('primary', 'security'):
+    for key in ("primary", "security"):
         for mirror in cfg.get(key, []):
             add_apt_key(mirror, target, file_name=key)

 def generate_sources_list(cfg, release, mirrors, cloud):
     """generate_sources_list
-       create a source.list file based on a custom or default template
-       by replacing mirrors and release in the template"""
+    create a source.list file based on a custom or default template
+    by replacing mirrors and release in the template"""
     aptsrc = "/etc/apt/sources.list"
-    params = {'RELEASE': release, 'codename': release}
+    params = {"RELEASE": release, "codename": release}
     for k in mirrors:
         params[k] =
mirrors[k] params[k.lower()] = mirrors[k] - tmpl = cfg.get('sources_list', None) + tmpl = cfg.get("sources_list", None) if tmpl is None: LOG.info("No custom template provided, fall back to builtin") - template_fn = cloud.get_template_filename('sources.list.%s' % - (cloud.distro.name)) + template_fn = cloud.get_template_filename( + "sources.list.%s" % (cloud.distro.name) + ) if not template_fn: - template_fn = cloud.get_template_filename('sources.list') + template_fn = cloud.get_template_filename("sources.list") if not template_fn: - LOG.warning("No template found, " - "not rendering /etc/apt/sources.list") + LOG.warning( + "No template found, not rendering /etc/apt/sources.list" + ) return tmpl = util.load_file(template_fn) rendered = templater.render_string(tmpl, params) - disabled = disable_suites(cfg.get('disable_suites'), rendered, release) + disabled = disable_suites(cfg.get("disable_suites"), rendered, release) util.write_file(aptsrc, disabled, mode=0o644) @@ -735,7 +776,7 @@ def add_apt_key_raw(key, file_name, hardened=False, target=None): LOG.debug("Adding key:\n'%s'", key) try: name = pathlib.Path(file_name).stem - return apt_key('add', output_file=name, data=key, hardened=hardened) + return apt_key("add", output_file=name, data=key, hardened=hardened) except subp.ProcessExecutionError: LOG.exception("failed to add apt GPG Key to apt keyring") raise @@ -747,26 +788,26 @@ def add_apt_key(ent, target=None, hardened=False, file_name=None): Supports raw keys or keyid's The latter will as a first step fetched to get the raw key """ - if 'keyid' in ent and 'key' not in ent: + if "keyid" in ent and "key" not in ent: keyserver = DEFAULT_KEYSERVER - if 'keyserver' in ent: - keyserver = ent['keyserver'] + if "keyserver" in ent: + keyserver = ent["keyserver"] - ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver) + ent["key"] = gpg.getkeybyid(ent["keyid"], keyserver) - if 'key' in ent: + if "key" in ent: return add_apt_key_raw( - ent['key'], - file_name or ent['filename'], - hardened=hardened) + ent["key"], file_name or ent["filename"], hardened=hardened + ) def update_packages(cloud): cloud.distro.update_package_sources() -def add_apt_sources(srcdict, cloud, target=None, template_params=None, - aa_repo_match=None): +def add_apt_sources( + srcdict, cloud, target=None, template_params=None, aa_repo_match=None +): """ install keys and repo source .list files defined in 'sources' @@ -795,33 +836,34 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, template_params = {} if aa_repo_match is None: - raise ValueError('did not get a valid repo matcher') + raise ValueError("did not get a valid repo matcher") if not isinstance(srcdict, dict): - raise TypeError('unknown apt format: %s' % (srcdict)) + raise TypeError("unknown apt format: %s" % (srcdict)) for filename in srcdict: ent = srcdict[filename] LOG.debug("adding source/key '%s'", ent) - if 'filename' not in ent: - ent['filename'] = filename + if "filename" not in ent: + ent["filename"] = filename - if 'source' in ent and '$KEY_FILE' in ent['source']: + if "source" in ent and "$KEY_FILE" in ent["source"]: key_file = add_apt_key(ent, target, hardened=True) - template_params['KEY_FILE'] = key_file + template_params["KEY_FILE"] = key_file else: key_file = add_apt_key(ent, target) - if 'source' not in ent: + if "source" not in ent: continue - source = ent['source'] + source = ent["source"] source = templater.render_string(source, template_params) - if not ent['filename'].startswith("/"): - ent['filename'] = 
os.path.join("/etc/apt/sources.list.d/", - ent['filename']) - if not ent['filename'].endswith(".list"): - ent['filename'] += ".list" + if not ent["filename"].startswith("/"): + ent["filename"] = os.path.join( + "/etc/apt/sources.list.d/", ent["filename"] + ) + if not ent["filename"].endswith(".list"): + ent["filename"] += ".list" if aa_repo_match(source): try: @@ -831,7 +873,7 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, raise continue - sourcefn = subp.target_path(target, ent['filename']) + sourcefn = subp.target_path(target, ent["filename"]) try: contents = "%s\n" % (source) util.write_file(sourcefn, contents, omode="a") @@ -850,14 +892,14 @@ def convert_v1_to_v2_apt_format(srclist): if isinstance(srclist, list): LOG.debug("apt config: convert V1 to V2 format (source list to dict)") for srcent in srclist: - if 'filename' not in srcent: + if "filename" not in srcent: # file collides for multiple !filename cases for compatibility # yet we need them all processed, so not same dictionary key - srcent['filename'] = "cloud_config_sources.list" + srcent["filename"] = "cloud_config_sources.list" key = util.rand_dict_key(srcdict, "cloud_config_sources.list") else: # all with filename use that as key (matching new format) - key = srcent['filename'] + key = srcent["filename"] srcdict[key] = srcent elif isinstance(srclist, dict): srcdict = srclist @@ -869,7 +911,7 @@ def convert_v1_to_v2_apt_format(srclist): def convert_key(oldcfg, aptcfg, oldkey, newkey): """convert an old key to the new one if the old one exists - returns true if a key was found and converted""" + returns true if a key was found and converted""" if oldcfg.get(oldkey, None) is not None: aptcfg[newkey] = oldcfg.get(oldkey) del oldcfg[oldkey] @@ -879,33 +921,37 @@ def convert_key(oldcfg, aptcfg, oldkey, newkey): def convert_mirror(oldcfg, aptcfg): """convert old apt_mirror keys into the new more advanced mirror spec""" - keymap = [('apt_mirror', 'uri'), - ('apt_mirror_search', 'search'), - ('apt_mirror_search_dns', 'search_dns')] + keymap = [ + ("apt_mirror", "uri"), + ("apt_mirror_search", "search"), + ("apt_mirror_search_dns", "search_dns"), + ] converted = False - newmcfg = {'arches': ['default']} + newmcfg = {"arches": ["default"]} for oldkey, newkey in keymap: if convert_key(oldcfg, newmcfg, oldkey, newkey): converted = True # only insert new style config if anything was converted if converted: - aptcfg['primary'] = [newmcfg] + aptcfg["primary"] = [newmcfg] def convert_v2_to_v3_apt_format(oldcfg): """convert old to new keys and adapt restructured mirror spec""" - mapoldkeys = {'apt_sources': 'sources', - 'apt_mirror': None, - 'apt_mirror_search': None, - 'apt_mirror_search_dns': None, - 'apt_proxy': 'proxy', - 'apt_http_proxy': 'http_proxy', - 'apt_ftp_proxy': 'https_proxy', - 'apt_https_proxy': 'ftp_proxy', - 'apt_preserve_sources_list': 'preserve_sources_list', - 'apt_custom_sources_list': 'sources_list', - 'add_apt_repo_match': 'add_apt_repo_match'} + mapoldkeys = { + "apt_sources": "sources", + "apt_mirror": None, + "apt_mirror_search": None, + "apt_mirror_search_dns": None, + "apt_proxy": "proxy", + "apt_http_proxy": "http_proxy", + "apt_ftp_proxy": "https_proxy", + "apt_https_proxy": "ftp_proxy", + "apt_preserve_sources_list": "preserve_sources_list", + "apt_custom_sources_list": "sources_list", + "add_apt_repo_match": "add_apt_repo_match", + } needtoconvert = [] for oldkey in mapoldkeys: if oldkey in oldcfg: @@ -917,11 +963,13 @@ def convert_v2_to_v3_apt_format(oldcfg): # no old config, so no 
new one to be created if not needtoconvert: return oldcfg - LOG.debug("apt config: convert V2 to V3 format for keys '%s'", - ", ".join(needtoconvert)) + LOG.debug( + "apt config: convert V2 to V3 format for keys '%s'", + ", ".join(needtoconvert), + ) # if old AND new config are provided, prefer the new one (LP #1616831) - newaptcfg = oldcfg.get('apt', None) + newaptcfg = oldcfg.get("apt", None) if newaptcfg is not None: LOG.debug("apt config: V1/2 and V3 format specified, preferring V3") for oldkey in needtoconvert: @@ -932,10 +980,11 @@ def convert_v2_to_v3_apt_format(oldcfg): # no simple mapping or no collision on this particular key continue if verify != newaptcfg[newkey]: - raise ValueError("Old and New apt format defined with unequal " - "values %s vs %s @ %s" % (verify, - newaptcfg[newkey], - oldkey)) + raise ValueError( + "Old and New apt format defined with unequal " + "values %s vs %s @ %s" + % (verify, newaptcfg[newkey], oldkey) + ) # return conf after clearing conflicting V1/2 keys return oldcfg @@ -955,17 +1004,17 @@ def convert_v2_to_v3_apt_format(oldcfg): raise ValueError("old apt key '%s' left after conversion" % oldkey) # insert new format into config and return full cfg with only v3 content - oldcfg['apt'] = aptcfg + oldcfg["apt"] = aptcfg return oldcfg def convert_to_v3_apt_format(cfg): """convert the old list based format to the new dict based one. After that - convert the old dict keys/format to v3 a.k.a 'new apt config'""" + convert the old dict keys/format to v3 a.k.a 'new apt config'""" # V1 -> V2, the apt_sources entry from list to dict - apt_sources = cfg.get('apt_sources', None) + apt_sources = cfg.get("apt_sources", None) if apt_sources is not None: - cfg['apt_sources'] = convert_v1_to_v2_apt_format(apt_sources) + cfg["apt_sources"] = convert_v1_to_v2_apt_format(apt_sources) # V2 -> V3, move all former globals under the "apt" key # Restructure into new key names and mirror hierarchy @@ -997,7 +1046,12 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud): if mydom: doms.append(".%s" % mydom) - doms.extend((".localdomain", "",)) + doms.extend( + ( + ".localdomain", + "", + ) + ) mirror_list = [] distro = cloud.distro.name @@ -1012,12 +1066,11 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud): def update_mirror_info(pmirror, smirror, arch, cloud): """sets security mirror to primary if not defined. - returns defaults if no mirrors are defined""" + returns defaults if no mirrors are defined""" if pmirror is not None: if smirror is None: smirror = pmirror - return {'PRIMARY': pmirror, - 'SECURITY': smirror} + return {"PRIMARY": pmirror, "SECURITY": smirror} # None specified at all, get default mirrors from cloud mirror_info = cloud.datasource.get_package_mirror_info() @@ -1026,8 +1079,8 @@ def update_mirror_info(pmirror, smirror, arch, cloud): # arbitrary key/value pairs including 'primary' and 'security' keys. # caller expects dict with PRIMARY and SECURITY. 
    m = mirror_info.copy()
-    m['PRIMARY'] = m['primary']
-    m['SECURITY'] = m['security']
+    m["PRIMARY"] = m["primary"]
+    m["SECURITY"] = m["security"]
     return m
@@ -1037,7 +1090,7 @@ def update_mirror_info(pmirror, smirror, arch, cloud):

 def get_arch_mirrorconfig(cfg, mirrortype, arch):
     """out of a list of potential mirror configurations select
-       and return the one matching the architecture (or default)"""
+    and return the one matching the architecture (or default)"""
     # select the mirror specification (if-any)
     mirror_cfg_list = cfg.get(mirrortype, None)
     if mirror_cfg_list is None:
@@ -1056,8 +1109,8 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch):

 def get_mirror(cfg, mirrortype, arch, cloud):
     """pass the three potential stages of mirror specification
-       returns None if neither of them found anything, otherwise the first
-       hit is returned"""
+    returns None if neither of them found anything, otherwise the first
+    hit is returned"""
     mcfg = get_arch_mirrorconfig(cfg, mirrortype, arch)
     if mcfg is None:
         return None
@@ -1073,18 +1126,19 @@ def get_mirror(cfg, mirrortype, arch, cloud):
     # fallback to search_dns if specified
     if mirror is None:
         # list of mirrors to try to resolve
-        mirror = search_for_mirror_dns(mcfg.get("search_dns", None),
-                                       mirrortype, cfg, cloud)
+        mirror = search_for_mirror_dns(
+            mcfg.get("search_dns", None), mirrortype, cfg, cloud
+        )
     return mirror

 def find_apt_mirror_info(cfg, cloud, arch=None):
     """find_apt_mirror_info
-       find an apt_mirror given the cfg provided.
-       It can check for separate config of primary and security mirrors
-       If only primary is given security is assumed to be equal to primary
-       If the generic apt_mirror is given that is defining for both
+    find an apt_mirror given the cfg provided.
+    It can check for separate config of primary and security mirrors
+    If only primary is given security is assumed to be equal to primary
+    If the generic apt_mirror is given that is defining for both
     """
     if arch is None:
@@ -1105,32 +1159,35 @@ def find_apt_mirror_info(cfg, cloud, arch=None):

 def apply_apt_config(cfg, proxy_fname, config_fname):
     """apply_apt_config
-       Applies any apt*proxy config if specified
+    Applies any apt*proxy config if specified
     """
     # Set up any apt proxy
-    cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
-            ('http_proxy', 'Acquire::http::Proxy "%s";'),
-            ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
-            ('https_proxy', 'Acquire::https::Proxy "%s";'))
+    cfgs = (
+        ("proxy", 'Acquire::http::Proxy "%s";'),
+        ("http_proxy", 'Acquire::http::Proxy "%s";'),
+        ("ftp_proxy", 'Acquire::ftp::Proxy "%s";'),
+        ("https_proxy", 'Acquire::https::Proxy "%s";'),
+    )
     proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
     if len(proxies):
         LOG.debug("write apt proxy info to %s", proxy_fname)
-        util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
+        util.write_file(proxy_fname, "\n".join(proxies) + "\n")
     elif os.path.isfile(proxy_fname):
         util.del_file(proxy_fname)
         LOG.debug("no apt proxy configured, removed %s", proxy_fname)

-    if cfg.get('conf', None):
+    if cfg.get("conf", None):
         LOG.debug("write apt config info to %s", config_fname)
-        util.write_file(config_fname, cfg.get('conf'))
+        util.write_file(config_fname, cfg.get("conf"))
     elif os.path.isfile(config_fname):
         util.del_file(config_fname)
         LOG.debug("no apt config configured, removed %s", config_fname)

-def apt_key(command, output_file=None, data=None, hardened=False,
-            human_output=True):
+def apt_key(
+    command, output_file=None, data=None, hardened=False, human_output=True
+):
     """apt-key replacement
commands implemented: 'add', 'list', 'finger' @@ -1153,32 +1210,36 @@ def apt_key(command, output_file=None, data=None, hardened=False, key_files = [APT_LOCAL_KEYS] if os.path.isfile(APT_LOCAL_KEYS) else [] for file in os.listdir(APT_TRUSTED_GPG_DIR): - if file.endswith('.gpg') or file.endswith('.asc'): + if file.endswith(".gpg") or file.endswith(".asc"): key_files.append(APT_TRUSTED_GPG_DIR + file) - return key_files if key_files else '' + return key_files if key_files else "" def apt_key_add(): """apt-key add returns filepath to new keyring, or '/dev/null' when an error occurs """ - file_name = '/dev/null' + file_name = "/dev/null" if not output_file: util.logexc( - LOG, 'Unknown filename, failed to add key: "{}"'.format(data)) + LOG, 'Unknown filename, failed to add key: "{}"'.format(data) + ) else: try: - key_dir = \ + key_dir = ( CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR + ) stdout = gpg.dearmor(data) - file_name = '{}{}.gpg'.format(key_dir, output_file) + file_name = "{}{}.gpg".format(key_dir, output_file) util.write_file(file_name, stdout) except subp.ProcessExecutionError: - util.logexc(LOG, 'Gpg error, failed to add key: {}'.format( - data)) + util.logexc( + LOG, "Gpg error, failed to add key: {}".format(data) + ) except UnicodeDecodeError: - util.logexc(LOG, 'Decode error, failed to add key: {}'.format( - data)) + util.logexc( + LOG, "Decode error, failed to add key: {}".format(data) + ) return file_name def apt_key_list(): @@ -1193,19 +1254,20 @@ def apt_key(command, output_file=None, data=None, hardened=False, key_list.append(gpg.list(key_file, human_output=human_output)) except subp.ProcessExecutionError as error: LOG.warning('Failed to list key "%s": %s', key_file, error) - return '\n'.join(key_list) + return "\n".join(key_list) - if command == 'add': + if command == "add": return apt_key_add() - elif command == 'finger' or command == 'list': + elif command == "finger" or command == "list": return apt_key_list() else: raise ValueError( - 'apt_key() commands add, list, and finger are currently supported') + "apt_key() commands add, list, and finger are currently supported" + ) CONFIG_CLEANERS = { - 'cloud-init': clean_cloud_init, + "cloud-init": clean_cloud_init, } # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index aa186ce2..569849d1 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -29,17 +29,19 @@ not recommended. apt_pipelining: """ -from cloudinit.settings import PER_INSTANCE from cloudinit import util +from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining" -APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" - 'Acquire::http::Pipeline-Depth "%s";\n') +APT_PIPE_TPL = ( + "//Written by cloud-init per 'apt_pipelining'\n" + 'Acquire::http::Pipeline-Depth "%s";\n' +) # Acquire::http::Pipeline-Depth can be a value # from 0 to 5 indicating how many outstanding requests APT should send. 
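For reference, a minimal sketch, not part of the patch, of the snippet this template renders into /etc/apt/apt.conf.d/90cloud-init-pipelining (DEFAULT_FILE above) when a numeric depth is configured:

    # APT_PIPE_TPL copied from the hunk above; rendering with a depth of 3.
    APT_PIPE_TPL = (
        "//Written by cloud-init per 'apt_pipelining'\n"
        'Acquire::http::Pipeline-Depth "%s";\n'
    )
    print(APT_PIPE_TPL % 3, end="")
    # Output:
    # //Written by cloud-init per 'apt_pipelining'
    # Acquire::http::Pipeline-Depth "3";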
@@ -49,7 +51,7 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" def handle(_name, cfg, _cloud, log, _args): - apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os') + apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", "os") apt_pipe_value_s = str(apt_pipe_value).lower().strip() if apt_pipe_value_s == "false": @@ -69,4 +71,5 @@ def write_apt_snippet(setting, log, f_name): util.write_file(f_name, file_contents) log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 06f7a26e..bff11a24 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -12,11 +12,9 @@ import os from textwrap import dedent +from cloudinit import subp, temp_utils, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS -from cloudinit import temp_utils -from cloudinit import subp -from cloudinit import util frequency = PER_ALWAYS @@ -26,13 +24,14 @@ frequency = PER_ALWAYS # configuration options before actually attempting to deploy with said # configuration. -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_bootcmd', - 'name': 'Bootcmd', - 'title': 'Run arbitrary commands early in the boot process', - 'description': dedent("""\ + "id": "cc_bootcmd", + "name": "Bootcmd", + "title": "Run arbitrary commands early in the boot process", + "description": dedent( + """\ This module runs arbitrary commands very early in the boot process, only slightly after a boothook would run. This is very similar to a boothook, but more user friendly. The environment variable @@ -48,31 +47,37 @@ meta = { when writing files, do not use /tmp dir as it races with systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead. 
- """), - 'distros': distros, - 'examples': [dedent("""\ + """ + ), + "distros": distros, + "examples": [ + dedent( + """\ bootcmd: - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] - """)], - 'frequency': PER_ALWAYS, + """ + ) + ], + "frequency": PER_ALWAYS, } schema = { - 'type': 'object', - 'properties': { - 'bootcmd': { - 'type': 'array', - 'items': { - 'oneOf': [ - {'type': 'array', 'items': {'type': 'string'}}, - {'type': 'string'}] + "type": "object", + "properties": { + "bootcmd": { + "type": "array", + "items": { + "oneOf": [ + {"type": "array", "items": {"type": "string"}}, + {"type": "string"}, + ] }, - 'additionalItems': False, # Reject items of non-string non-list - 'additionalProperties': False, - 'minItems': 1, + "additionalItems": False, # Reject items of non-string non-list + "additionalProperties": False, + "minItems": 1, } - } + }, } __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -81,8 +86,9 @@ __doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, _args): if "bootcmd" not in cfg: - log.debug(("Skipping module named %s," - " no 'bootcmd' key in configuration"), name) + log.debug( + "Skipping module named %s, no 'bootcmd' key in configuration", name + ) return validate_cloudconfig_schema(cfg, schema) @@ -99,11 +105,12 @@ def handle(name, cfg, cloud, log, _args): env = os.environ.copy() iid = cloud.get_instance_id() if iid: - env['INSTANCE_ID'] = str(iid) - cmd = ['/bin/sh', tmpf.name] + env["INSTANCE_ID"] = str(iid) + cmd = ["/bin/sh", tmpf.name] subp.subp(cmd, env=env, capture=False) except Exception: util.logexc(log, "Failed to run bootcmd module %s", name) raise + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index 9fdaeba1..53b6d0c8 100755 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -38,11 +38,10 @@ Valid configuration options for this module are: byobu_by_default: """ +from cloudinit import subp, util from cloudinit.distros import ug_util -from cloudinit import subp -from cloudinit import util -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] def handle(name, cfg, cloud, log, args): @@ -58,8 +57,14 @@ def handle(name, cfg, cloud, log, args): if value == "user" or value == "system": value = "enable-%s" % value - valid = ("enable-user", "enable-system", "enable", - "disable-user", "disable-system", "disable") + valid = ( + "enable-user", + "enable-system", + "enable", + "disable-user", + "disable-system", + "disable", + ) if value not in valid: log.warning("Unknown value %s for byobu_by_default", value) @@ -81,13 +86,16 @@ def handle(name, cfg, cloud, log, args): (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) if not user: - log.warning(("No default byobu user provided, " - "can not launch %s for the default user"), bl_inst) + log.warning( + "No default byobu user provided, " + "can not launch %s for the default user", + bl_inst, + ) else: - shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst) + shcmd += ' sudo -Hu "%s" byobu-launcher-%s' % (user, bl_inst) shcmd += " || X=$(($X+1)); " if mod_sys: - shcmd += "echo \"%s\" | debconf-set-selections" % dc_val + shcmd += 'echo "%s" | debconf-set-selections' % dc_val shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive" shcmd += " || X=$(($X+1)); " @@ -96,4 +104,5 @@ def handle(name, cfg, cloud, log, args): 
log.debug("Setting byobu to %s", value) subp.subp(cmd, capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index bd7bead9..9de065ab 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -41,28 +41,27 @@ can be removed from the system with the configuration option import os -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util DEFAULT_CONFIG = { - 'ca_cert_path': '/usr/share/ca-certificates/', - 'ca_cert_filename': 'cloud-init-ca-certs.crt', - 'ca_cert_config': '/etc/ca-certificates.conf', - 'ca_cert_system_path': '/etc/ssl/certs/', - 'ca_cert_update_cmd': ['update-ca-certificates'] + "ca_cert_path": "/usr/share/ca-certificates/", + "ca_cert_filename": "cloud-init-ca-certs.crt", + "ca_cert_config": "/etc/ca-certificates.conf", + "ca_cert_system_path": "/etc/ssl/certs/", + "ca_cert_update_cmd": ["update-ca-certificates"], } DISTRO_OVERRIDES = { - 'rhel': { - 'ca_cert_path': '/usr/share/pki/ca-trust-source/', - 'ca_cert_filename': 'anchors/cloud-init-ca-certs.crt', - 'ca_cert_config': None, - 'ca_cert_system_path': '/etc/pki/ca-trust/', - 'ca_cert_update_cmd': ['update-ca-trust'] + "rhel": { + "ca_cert_path": "/usr/share/pki/ca-trust-source/", + "ca_cert_filename": "anchors/cloud-init-ca-certs.crt", + "ca_cert_config": None, + "ca_cert_system_path": "/etc/pki/ca-trust/", + "ca_cert_update_cmd": ["update-ca-trust"], } } -distros = ['alpine', 'debian', 'ubuntu', 'rhel'] +distros = ["alpine", "debian", "ubuntu", "rhel"] def _distro_ca_certs_configs(distro_name): @@ -72,8 +71,9 @@ def _distro_ca_certs_configs(distro_name): @returns: Dict of distro configurations for ca-cert. """ cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG) - cfg['ca_cert_full_path'] = os.path.join(cfg['ca_cert_path'], - cfg['ca_cert_filename']) + cfg["ca_cert_full_path"] = os.path.join( + cfg["ca_cert_path"], cfg["ca_cert_filename"] + ) return cfg @@ -83,7 +83,7 @@ def update_ca_certs(distro_cfg): @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - subp.subp(distro_cfg['ca_cert_update_cmd'], capture=False) + subp.subp(distro_cfg["ca_cert_update_cmd"], capture=False) def add_ca_certs(distro_cfg, certs): @@ -98,9 +98,9 @@ def add_ca_certs(distro_cfg, certs): return # First ensure they are strings... cert_file_contents = "\n".join([str(c) for c in certs]) - util.write_file(distro_cfg['ca_cert_full_path'], - cert_file_contents, - mode=0o644) + util.write_file( + distro_cfg["ca_cert_full_path"], cert_file_contents, mode=0o644 + ) update_cert_config(distro_cfg) @@ -110,23 +110,27 @@ def update_cert_config(distro_cfg): @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - if distro_cfg['ca_cert_config'] is None: + if distro_cfg["ca_cert_config"] is None: return - if os.stat(distro_cfg['ca_cert_config']).st_size == 0: + if os.stat(distro_cfg["ca_cert_config"]).st_size == 0: # If the CA_CERT_CONFIG file is empty (i.e. all existing # CA certs have been deleted) then simply output a single # line with the cloud-init cert filename. - out = "%s\n" % distro_cfg['ca_cert_filename'] + out = "%s\n" % distro_cfg["ca_cert_filename"] else: # Append cert filename to CA_CERT_CONFIG file. # We have to strip the content because blank lines in the file # causes subsequent entries to be ignored. 
(LP: #1077020) - orig = util.load_file(distro_cfg['ca_cert_config']) - cr_cont = '\n'.join([line for line in orig.splitlines() - if line != distro_cfg['ca_cert_filename']]) - out = "%s\n%s\n" % (cr_cont.rstrip(), - distro_cfg['ca_cert_filename']) - util.write_file(distro_cfg['ca_cert_config'], out, omode="wb") + orig = util.load_file(distro_cfg["ca_cert_config"]) + cr_cont = "\n".join( + [ + line + for line in orig.splitlines() + if line != distro_cfg["ca_cert_filename"] + ] + ) + out = "%s\n%s\n" % (cr_cont.rstrip(), distro_cfg["ca_cert_filename"]) + util.write_file(distro_cfg["ca_cert_config"], out, omode="wb") def remove_default_ca_certs(distro_name, distro_cfg): @@ -137,14 +141,15 @@ def remove_default_ca_certs(distro_name, distro_cfg): @param distro_name: String providing the distro class name. @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - util.delete_dir_contents(distro_cfg['ca_cert_path']) - util.delete_dir_contents(distro_cfg['ca_cert_system_path']) - util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644) + util.delete_dir_contents(distro_cfg["ca_cert_path"]) + util.delete_dir_contents(distro_cfg["ca_cert_system_path"]) + util.write_file(distro_cfg["ca_cert_config"], "", mode=0o644) - if distro_name in ['debian', 'ubuntu']: + if distro_name in ["debian", "ubuntu"]: debconf_sel = ( - "ca-certificates ca-certificates/trust_new_crts " + "select no") - subp.subp(('debconf-set-selections', '-'), debconf_sel) + "ca-certificates ca-certificates/trust_new_crts " + "select no" + ) + subp.subp(("debconf-set-selections", "-"), debconf_sel) def handle(name, cfg, cloud, log, _args): @@ -159,11 +164,13 @@ def handle(name, cfg, cloud, log, _args): """ # If there isn't a ca-certs section in the configuration don't do anything if "ca-certs" not in cfg: - log.debug(("Skipping module named %s," - " no 'ca-certs' key in configuration"), name) + log.debug( + "Skipping module named %s, no 'ca-certs' key in configuration", + name, + ) return - ca_cert_cfg = cfg['ca-certs'] + ca_cert_cfg = cfg["ca-certs"] distro_cfg = _distro_ca_certs_configs(cloud.distro.name) # If there is a remove-defaults option set to true, remove the system @@ -183,4 +190,5 @@ def handle(name, cfg, cloud, log, _args): log.debug("Updating certificates") update_ca_certs(distro_cfg) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index ed734d1c..67889683 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -13,87 +13,91 @@ import json import os from textwrap import dedent -from cloudinit import subp +from cloudinit import subp, temp_utils, templater, url_helper, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema -from cloudinit import templater -from cloudinit import temp_utils -from cloudinit import url_helper -from cloudinit import util from cloudinit.settings import PER_ALWAYS - RUBY_VERSION_DEFAULT = "1.8" -CHEF_DIRS = tuple([ - '/etc/chef', - '/var/log/chef', - '/var/lib/chef', - '/var/cache/chef', - '/var/backups/chef', - '/var/run/chef', -]) -REQUIRED_CHEF_DIRS = tuple([ - '/etc/chef', -]) +CHEF_DIRS = tuple( + [ + "/etc/chef", + "/var/log/chef", + "/var/lib/chef", + "/var/cache/chef", + "/var/backups/chef", + "/var/run/chef", + ] +) +REQUIRED_CHEF_DIRS = tuple( + [ + "/etc/chef", + ] +) # Used if fetching chef from a omnibus style package OMNIBUS_URL = "https://www.chef.io/chef/install.sh" OMNIBUS_URL_RETRIES = 5 -CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem' 
-CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret' -CHEF_ENVIRONMENT = '_default' -CHEF_FB_PATH = '/etc/chef/firstboot.json' +CHEF_VALIDATION_PEM_PATH = "/etc/chef/validation.pem" +CHEF_ENCRYPTED_DATA_BAG_PATH = "/etc/chef/encrypted_data_bag_secret" +CHEF_ENVIRONMENT = "_default" +CHEF_FB_PATH = "/etc/chef/firstboot.json" CHEF_RB_TPL_DEFAULTS = { # These are ruby symbols... - 'ssl_verify_mode': ':verify_none', - 'log_level': ':info', + "ssl_verify_mode": ":verify_none", + "log_level": ":info", # These are not symbols... - 'log_location': '/var/log/chef/client.log', - 'validation_key': CHEF_VALIDATION_PEM_PATH, - 'validation_cert': None, - 'client_key': '/etc/chef/client.pem', - 'json_attribs': CHEF_FB_PATH, - 'file_cache_path': '/var/cache/chef', - 'file_backup_path': '/var/backups/chef', - 'pid_file': '/var/run/chef/client.pid', - 'show_time': True, - 'encrypted_data_bag_secret': None, + "log_location": "/var/log/chef/client.log", + "validation_key": CHEF_VALIDATION_PEM_PATH, + "validation_cert": None, + "client_key": "/etc/chef/client.pem", + "json_attribs": CHEF_FB_PATH, + "file_cache_path": "/var/cache/chef", + "file_backup_path": "/var/backups/chef", + "pid_file": "/var/run/chef/client.pid", + "show_time": True, + "encrypted_data_bag_secret": None, } -CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) -CHEF_RB_TPL_PATH_KEYS = frozenset([ - 'log_location', - 'validation_key', - 'client_key', - 'file_cache_path', - 'json_attribs', - 'pid_file', - 'encrypted_data_bag_secret', -]) +CHEF_RB_TPL_BOOL_KEYS = frozenset(["show_time"]) +CHEF_RB_TPL_PATH_KEYS = frozenset( + [ + "log_location", + "validation_key", + "client_key", + "file_cache_path", + "json_attribs", + "pid_file", + "encrypted_data_bag_secret", + ] +) CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS) -CHEF_RB_TPL_KEYS.extend([ - 'server_url', - 'node_name', - 'environment', - 'validation_name', - 'chef_license', -]) +CHEF_RB_TPL_KEYS.extend( + [ + "server_url", + "node_name", + "environment", + "validation_name", + "chef_license", + ] +) CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS) -CHEF_RB_PATH = '/etc/chef/client.rb' -CHEF_EXEC_PATH = '/usr/bin/chef-client' -CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20']) +CHEF_RB_PATH = "/etc/chef/client.rb" +CHEF_EXEC_PATH = "/usr/bin/chef-client" +CHEF_EXEC_DEF_ARGS = tuple(["-d", "-i", "1800", "-s", "20"]) frequency = PER_ALWAYS distros = ["all"] meta = { - 'id': 'cc_chef', - 'name': 'Chef', - 'title': 'module that configures, starts and installs chef', - 'description': dedent("""\ + "id": "cc_chef", + "name": "Chef", + "title": "module that configures, starts and installs chef", + "description": dedent( + """\ This module enables chef to be installed (from packages, gems, or from omnibus). Before this occurs, chef configuration is written to disk (validation.pem, client.pem, firstboot.json, @@ -101,9 +105,12 @@ meta = { /var/log/chef and so-on). If configured, chef will be installed and started in either daemon or non-daemon mode. 
If run in non-daemon mode, post run actions are executed to do - finishing activities such as removing validation.pem."""), - 'distros': distros, - 'examples': [dedent(""" + finishing activities such as removing validation.pem.""" + ), + "distros": distros, + "examples": [ + dedent( + """ chef: directories: - /etc/chef @@ -124,180 +131,237 @@ meta = { omnibus_url_retries: 2 server_url: https://chef.yourorg.com:4000 ssl_verify_mode: :verify_peer - validation_name: yourorg-validator""")], - 'frequency': frequency, + validation_name: yourorg-validator""" + ) + ], + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'chef': { - 'type': 'object', - 'additionalProperties': False, - 'properties': { - 'directories': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "type": "object", + "properties": { + "chef": { + "type": "object", + "additionalProperties": False, + "properties": { + "directories": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": True, + "description": dedent( + """\ Create the necessary directories for chef to run. By default, it creates the following directories: - {chef_dirs}""").format( + {chef_dirs}""" + ).format( chef_dirs="\n".join( [" - ``{}``".format(d) for d in CHEF_DIRS] ) - ) + ), }, - 'validation_cert': { - 'type': 'string', - 'description': dedent("""\ + "validation_cert": { + "type": "string", + "description": dedent( + """\ Optional string to be written to file validation_key. Special value ``system`` means set use existing file. - """) + """ + ), }, - 'validation_key': { - 'type': 'string', - 'default': CHEF_VALIDATION_PEM_PATH, - 'description': dedent("""\ + "validation_key": { + "type": "string", + "default": CHEF_VALIDATION_PEM_PATH, + "description": dedent( + """\ Optional path for validation_cert. default to - ``{}``.""".format(CHEF_VALIDATION_PEM_PATH)) + ``{}``.""".format( + CHEF_VALIDATION_PEM_PATH + ) + ), }, - 'firstboot_path': { - 'type': 'string', - 'default': CHEF_FB_PATH, - 'description': dedent("""\ + "firstboot_path": { + "type": "string", + "default": CHEF_FB_PATH, + "description": dedent( + """\ Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults - to ``{}``.""".format(CHEF_FB_PATH)) + to ``{}``.""".format( + CHEF_FB_PATH + ) + ), }, - 'exec': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "exec": { + "type": "boolean", + "default": False, + "description": dedent( + """\ define if we should run or not run chef (defaults to false, unless a gem installed is requested where this - will then default to true).""") + will then default to true).""" + ), }, - 'client_key': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['client_key'], - 'description': dedent("""\ + "client_key": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["client_key"], + "description": dedent( + """\ Optional path for client_cert. default to - ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key'])) + ``{}``.""".format( + CHEF_RB_TPL_DEFAULTS["client_key"] + ) + ), }, - 'encrypted_data_bag_secret': { - 'type': 'string', - 'default': None, - 'description': dedent("""\ + "encrypted_data_bag_secret": { + "type": "string", + "default": None, + "description": dedent( + """\ Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to None, meaning that chef will have to look at the path ``{}`` for it. 
-                        """.format(CHEF_ENCRYPTED_DATA_BAG_PATH))
+                        """.format(
+                            CHEF_ENCRYPTED_DATA_BAG_PATH
+                        )
+                    ),
                 },
-                'environment': {
-                    'type': 'string',
-                    'default': CHEF_ENVIRONMENT,
-                    'description': dedent("""\
+                "environment": {
+                    "type": "string",
+                    "default": CHEF_ENVIRONMENT,
+                    "description": dedent(
+                        """\
                         Specifies which environment chef will use. By default,
                         it will use the ``{}`` configuration.
-                        """.format(CHEF_ENVIRONMENT))
+                        """.format(
+                            CHEF_ENVIRONMENT
+                        )
+                    ),
                 },
-                'file_backup_path': {
-                    'type': 'string',
-                    'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'],
-                    'description': dedent("""\
+                "file_backup_path": {
+                    "type": "string",
+                    "default": CHEF_RB_TPL_DEFAULTS["file_backup_path"],
+                    "description": dedent(
+                        """\
                         Specifies the location in which backup files are
                         stored. By default, it uses the ``{}`` location.""".format(
-                            CHEF_RB_TPL_DEFAULTS['file_backup_path']))
+                            CHEF_RB_TPL_DEFAULTS["file_backup_path"]
+                        )
+                    ),
                 },
-                'file_cache_path': {
-                    'type': 'string',
-                    'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'],
-                    'description': dedent("""\
+                "file_cache_path": {
+                    "type": "string",
+                    "default": CHEF_RB_TPL_DEFAULTS["file_cache_path"],
+                    "description": dedent(
+                        """\
                         Specifies the location in which chef cache files will
                         be saved. By default, it uses the ``{}`` location.""".format(
-                            CHEF_RB_TPL_DEFAULTS['file_cache_path']))
+                            CHEF_RB_TPL_DEFAULTS["file_cache_path"]
+                        )
+                    ),
                 },
-                'json_attribs': {
-                    'type': 'string',
-                    'default': CHEF_FB_PATH,
-                    'description': dedent("""\
+                "json_attribs": {
+                    "type": "string",
+                    "default": CHEF_FB_PATH,
+                    "description": dedent(
+                        """\
                         Specifies the location in which some chef json data is
                         stored. By default, it uses the
-                        ``{}`` location.""".format(CHEF_FB_PATH))
+                        ``{}`` location.""".format(
+                            CHEF_FB_PATH
+                        )
+                    ),
                 },
-                'log_level': {
-                    'type': 'string',
-                    'default': CHEF_RB_TPL_DEFAULTS['log_level'],
-                    'description': dedent("""\
+                "log_level": {
+                    "type": "string",
+                    "default": CHEF_RB_TPL_DEFAULTS["log_level"],
+                    "description": dedent(
+                        """\
                         Defines the level of logging to be stored in the log
                         file. By default this value is set to ``{}``.
-                        """.format(CHEF_RB_TPL_DEFAULTS['log_level']))
+                        """.format(
+                            CHEF_RB_TPL_DEFAULTS["log_level"]
+                        )
+                    ),
                 },
-                'log_location': {
-                    'type': 'string',
-                    'default': CHEF_RB_TPL_DEFAULTS['log_location'],
-                    'description': dedent("""\
+                "log_location": {
+                    "type": "string",
+                    "default": CHEF_RB_TPL_DEFAULTS["log_location"],
+                    "description": dedent(
+                        """\
                         Specifies the location of the chef log file. By
                         default, the location is specified at
                         ``{}``.""".format(
-                            CHEF_RB_TPL_DEFAULTS['log_location']))
+                            CHEF_RB_TPL_DEFAULTS["log_location"]
+                        )
+                    ),
                 },
-                'node_name': {
-                    'type': 'string',
-                    'description': dedent("""\
+                "node_name": {
+                    "type": "string",
+                    "description": dedent(
+                        """\
                         The name of the node to run. By default, we will
-                        use the instance id as the node name.""")
+                        use the instance id as the node name."""
+                    ),
                 },
-                'omnibus_url': {
-                    'type': 'string',
-                    'default': OMNIBUS_URL,
-                    'description': dedent("""\
+                "omnibus_url": {
+                    "type": "string",
+                    "default": OMNIBUS_URL,
+                    "description": dedent(
+                        """\
                         Omnibus URL if chef should be installed through Omnibus.
By default, it uses the - ``{}``.""".format(OMNIBUS_URL)) + ``{}``.""".format( + OMNIBUS_URL + ) + ), }, - 'omnibus_url_retries': { - 'type': 'integer', - 'default': OMNIBUS_URL_RETRIES, - 'description': dedent("""\ + "omnibus_url_retries": { + "type": "integer", + "default": OMNIBUS_URL_RETRIES, + "description": dedent( + """\ The number of retries that will be attempted to reach - the Omnibus URL""") + the Omnibus URL""" + ), }, - 'omnibus_version': { - 'type': 'string', - 'description': dedent("""\ + "omnibus_version": { + "type": "string", + "description": dedent( + """\ Optional version string to require for omnibus - install.""") + install.""" + ), }, - 'pid_file': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['pid_file'], - 'description': dedent("""\ + "pid_file": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["pid_file"], + "description": dedent( + """\ The location in which a process identification number (pid) is saved. By default, it saves in the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['pid_file'])) + CHEF_RB_TPL_DEFAULTS["pid_file"] + ) + ), }, - 'server_url': { - 'type': 'string', - 'description': 'The URL for the chef server' + "server_url": { + "type": "string", + "description": "The URL for the chef server", }, - 'show_time': { - 'type': 'boolean', - 'default': True, - 'description': 'Show time in chef logs' + "show_time": { + "type": "boolean", + "default": True, + "description": "Show time in chef logs", }, - 'ssl_verify_mode': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'], - 'description': dedent("""\ + "ssl_verify_mode": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"], + "description": dedent( + """\ Set the verify mode for HTTPS requests. We can have two possible values for this parameter: @@ -306,67 +370,76 @@ schema = { - ``:verify_peer``: Validate all SSL certificates. By default, the parameter is set as ``{}``. - """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'])) + """.format( + CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"] + ) + ), }, - 'validation_name': { - 'type': 'string', - 'description': dedent("""\ + "validation_name": { + "type": "string", + "description": dedent( + """\ The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during - the initial Chef Infra Client run.""") + the initial Chef Infra Client run.""" + ), }, - 'force_install': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "force_install": { + "type": "boolean", + "default": False, + "description": dedent( + """\ If set to ``True``, forces chef installation, even - if it is already installed.""") + if it is already installed.""" + ), }, - 'initial_attributes': { - 'type': 'object', - 'items': { - 'type': 'string' - }, - 'description': dedent("""\ + "initial_attributes": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Specify a list of initial attributes used by the - cookbooks.""") + cookbooks.""" + ), }, - 'install_type': { - 'type': 'string', - 'default': 'packages', - 'description': dedent("""\ + "install_type": { + "type": "string", + "default": "packages", + "description": dedent( + """\ The type of installation for chef. It can be one of the following values: - ``packages`` - ``gems`` - - ``omnibus``""") + - ``omnibus``""" + ), }, - 'run_list': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'description': 'A run list for a first boot json.' 
+ "run_list": { + "type": "array", + "items": {"type": "string"}, + "description": "A run list for a first boot json.", }, "chef_license": { - 'type': 'string', - 'description': dedent("""\ + "type": "string", + "description": dedent( + """\ string that indicates if user accepts or not license - related to some of chef products""") - } - } + related to some of chef products""" + ), + }, + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) def post_run_chef(chef_cfg, log): - delete_pem = util.get_cfg_option_bool(chef_cfg, - 'delete_validation_post_exec', - default=False) + delete_pem = util.get_cfg_option_bool( + chef_cfg, "delete_validation_post_exec", default=False + ) if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH): os.unlink(CHEF_VALIDATION_PEM_PATH) @@ -389,16 +462,20 @@ def get_template_params(iid, chef_cfg, log): else: params[k] = util.get_cfg_option_str(chef_cfg, k) # These ones are overwritten to be exact values... - params.update({ - 'generated_by': util.make_header(), - 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', - default=iid), - 'environment': util.get_cfg_option_str(chef_cfg, 'environment', - default='_default'), - # These two are mandatory... - 'server_url': chef_cfg['server_url'], - 'validation_name': chef_cfg['validation_name'], - }) + params.update( + { + "generated_by": util.make_header(), + "node_name": util.get_cfg_option_str( + chef_cfg, "node_name", default=iid + ), + "environment": util.get_cfg_option_str( + chef_cfg, "environment", default="_default" + ), + # These two are mandatory... + "server_url": chef_cfg["server_url"], + "validation_name": chef_cfg["validation_name"], + } + ) return params @@ -406,35 +483,38 @@ def handle(name, cfg, cloud, log, _args): """Handler method activated by cloud-init.""" # If there isn't a chef key in the configuration don't do anything - if 'chef' not in cfg: - log.debug(("Skipping module named %s," - " no 'chef' key in configuration"), name) + if "chef" not in cfg: + log.debug( + "Skipping module named %s, no 'chef' key in configuration", name + ) return validate_cloudconfig_schema(cfg, schema) - chef_cfg = cfg['chef'] + chef_cfg = cfg["chef"] # Ensure the chef directories we use exist - chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories') + chef_dirs = util.get_cfg_option_list(chef_cfg, "directories") if not chef_dirs: chef_dirs = list(CHEF_DIRS) for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS): util.ensure_dir(d) - vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH) - vcert = chef_cfg.get('validation_cert') + vkey_path = chef_cfg.get("validation_key", CHEF_VALIDATION_PEM_PATH) + vcert = chef_cfg.get("validation_cert") # special value 'system' means do not overwrite the file # but still render the template to contain 'validation_key' if vcert: if vcert != "system": util.write_file(vkey_path, vcert) elif not os.path.isfile(vkey_path): - log.warning("chef validation_cert provided as 'system', but " - "validation_key path '%s' does not exist.", - vkey_path) + log.warning( + "chef validation_cert provided as 'system', but " + "validation_key path '%s' does not exist.", + vkey_path, + ) # Create the chef config from template - template_fn = cloud.get_template_filename('chef_client.rb') + template_fn = cloud.get_template_filename("chef_client.rb") if template_fn: iid = str(cloud.datasource.get_instance_id()) params = get_template_params(iid, chef_cfg, log) @@ -448,32 +528,33 @@ def handle(name, cfg, cloud, log, _args): util.ensure_dirs(param_paths) 
templater.render_to_file(template_fn, CHEF_RB_PATH, params) else: - log.warning("No template found, not rendering to %s", - CHEF_RB_PATH) + log.warning("No template found, not rendering to %s", CHEF_RB_PATH) # Set the firstboot json - fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path', - default=CHEF_FB_PATH) + fb_filename = util.get_cfg_option_str( + chef_cfg, "firstboot_path", default=CHEF_FB_PATH + ) if not fb_filename: log.info("First boot path empty, not writing first boot json file") else: initial_json = {} - if 'run_list' in chef_cfg: - initial_json['run_list'] = chef_cfg['run_list'] - if 'initial_attributes' in chef_cfg: - initial_attributes = chef_cfg['initial_attributes'] + if "run_list" in chef_cfg: + initial_json["run_list"] = chef_cfg["run_list"] + if "initial_attributes" in chef_cfg: + initial_attributes = chef_cfg["initial_attributes"] for k in list(initial_attributes.keys()): initial_json[k] = initial_attributes[k] util.write_file(fb_filename, json.dumps(initial_json)) # Try to install chef, if its not already installed... - force_install = util.get_cfg_option_bool(chef_cfg, - 'force_install', default=False) + force_install = util.get_cfg_option_bool( + chef_cfg, "force_install", default=False + ) installed = subp.is_exe(CHEF_EXEC_PATH) if not installed or force_install: run = install_chef(cloud, chef_cfg, log) elif installed: - run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + run = util.get_cfg_option_bool(chef_cfg, "exec", default=False) else: run = False if run: @@ -482,18 +563,21 @@ def handle(name, cfg, cloud, log, _args): def run_chef(chef_cfg, log): - log.debug('Running chef-client') + log.debug("Running chef-client") cmd = [CHEF_EXEC_PATH] - if 'exec_arguments' in chef_cfg: - cmd_args = chef_cfg['exec_arguments'] + if "exec_arguments" in chef_cfg: + cmd_args = chef_cfg["exec_arguments"] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) elif isinstance(cmd_args, str): cmd.append(cmd_args) else: - log.warning("Unknown type %s provided for chef" - " 'exec_arguments' expected list, tuple," - " or string", type(cmd_args)) + log.warning( + "Unknown type %s provided for chef" + " 'exec_arguments' expected list, tuple," + " or string", + type(cmd_args), + ) cmd.extend(CHEF_EXEC_DEF_ARGS) else: cmd.extend(CHEF_EXEC_DEF_ARGS) @@ -507,16 +591,16 @@ def subp_blob_in_tempfile(blob, *args, **kwargs): The 'args' argument to subp will be updated with the full path to the filename as the first argument. 
""" - basename = kwargs.pop('basename', "subp_blob") + basename = kwargs.pop("basename", "subp_blob") - if len(args) == 0 and 'args' not in kwargs: + if len(args) == 0 and "args" not in kwargs: args = [tuple()] # Use tmpdir over tmpfile to avoid 'text file busy' on execute with temp_utils.tempdir(needs_exe=True) as tmpd: tmpf = os.path.join(tmpd, basename) - if 'args' in kwargs: - kwargs['args'] = [tmpf] + list(kwargs['args']) + if "args" in kwargs: + kwargs["args"] = [tmpf] + list(kwargs["args"]) else: args = list(args) args[0] = [tmpf] + args[0] @@ -543,36 +627,39 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None): if omnibus_version is None: args = [] else: - args = ['-v', omnibus_version] + args = ["-v", omnibus_version] content = url_helper.readurl(url=url, retries=retries).contents return subp_blob_in_tempfile( - blob=content, args=args, - basename='chef-omnibus-install', capture=False) + blob=content, args=args, basename="chef-omnibus-install", capture=False + ) def install_chef(cloud, chef_cfg, log): # If chef is not installed, we install chef based on 'install_type' - install_type = util.get_cfg_option_str(chef_cfg, 'install_type', - 'packages') - run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + install_type = util.get_cfg_option_str( + chef_cfg, "install_type", "packages" + ) + run = util.get_cfg_option_bool(chef_cfg, "exec", default=False) if install_type == "gems": # This will install and run the chef-client from gems - chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) - ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', - RUBY_VERSION_DEFAULT) + chef_version = util.get_cfg_option_str(chef_cfg, "version", None) + ruby_version = util.get_cfg_option_str( + chef_cfg, "ruby_version", RUBY_VERSION_DEFAULT + ) install_chef_from_gems(ruby_version, chef_version, cloud.distro) # Retain backwards compat, by preferring True instead of False # when not provided/overriden... 
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True) - elif install_type == 'packages': + run = util.get_cfg_option_bool(chef_cfg, "exec", default=True) + elif install_type == "packages": # This will install and run the chef-client from packages - cloud.distro.install_packages(('chef',)) - elif install_type == 'omnibus': + cloud.distro.install_packages(("chef",)) + elif install_type == "omnibus": omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version") install_chef_from_omnibus( url=util.get_cfg_option_str(chef_cfg, "omnibus_url"), retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"), - omnibus_version=omnibus_version) + omnibus_version=omnibus_version, + ) else: log.warning("Unknown chef install type '%s'", install_type) run = False @@ -581,25 +668,47 @@ def install_chef(cloud, chef_cfg, log): def get_ruby_packages(version): # return a list of packages needed to install ruby at version - pkgs = ['ruby%s' % version, 'ruby%s-dev' % version] + pkgs = ["ruby%s" % version, "ruby%s-dev" % version] if version == "1.8": - pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8')) + pkgs.extend(("libopenssl-ruby1.8", "rubygems1.8")) return pkgs def install_chef_from_gems(ruby_version, chef_version, distro): distro.install_packages(get_ruby_packages(ruby_version)) - if not os.path.exists('/usr/bin/gem'): - util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem') - if not os.path.exists('/usr/bin/ruby'): - util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') + if not os.path.exists("/usr/bin/gem"): + util.sym_link("/usr/bin/gem%s" % ruby_version, "/usr/bin/gem") + if not os.path.exists("/usr/bin/ruby"): + util.sym_link("/usr/bin/ruby%s" % ruby_version, "/usr/bin/ruby") if chef_version: - subp.subp(['/usr/bin/gem', 'install', 'chef', - '-v %s' % chef_version, '--no-ri', - '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) + subp.subp( + [ + "/usr/bin/gem", + "install", + "chef", + "-v %s" % chef_version, + "--no-ri", + "--no-rdoc", + "--bindir", + "/usr/bin", + "-q", + ], + capture=False, + ) else: - subp.subp(['/usr/bin/gem', 'install', 'chef', - '--no-ri', '--no-rdoc', '--bindir', - '/usr/bin', '-q'], capture=False) + subp.subp( + [ + "/usr/bin/gem", + "install", + "chef", + "--no-ri", + "--no-rdoc", + "--bindir", + "/usr/bin", + "-q", + ], + capture=False, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 4d5a6aa2..d09fc129 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -30,18 +30,16 @@ location that this cloud-init has been configured with when running. 
import copy from io import StringIO -from cloudinit import type_utils -from cloudinit import util -from cloudinit import safeyaml +from cloudinit import safeyaml, type_utils, util -SKIP_KEYS = frozenset(['log_cfgs']) +SKIP_KEYS = frozenset(["log_cfgs"]) def _make_header(text): header = StringIO() header.write("-" * 80) header.write("\n") - header.write(text.center(80, ' ')) + header.write(text.center(80, " ")) header.write("\n") header.write("-" * 80) header.write("\n") @@ -56,17 +54,16 @@ def _dumps(obj): def handle(name, cfg, cloud, log, args): """Handler method activated by cloud-init.""" - verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True) + verbose = util.get_cfg_by_path(cfg, ("debug", "verbose"), default=True) if args: # if args are provided (from cmdline) then explicitly set verbose out_file = args[0] verbose = True else: - out_file = util.get_cfg_by_path(cfg, ('debug', 'output')) + out_file = util.get_cfg_by_path(cfg, ("debug", "output")) if not verbose: - log.debug(("Skipping module named %s," - " verbose printing disabled"), name) + log.debug("Skipping module named %s, verbose printing disabled", name) return # Clean out some keys that we just don't care about showing... dump_cfg = copy.deepcopy(cfg) @@ -85,8 +82,9 @@ def handle(name, cfg, cloud, log, args): to_print.write(_dumps(cloud.datasource.metadata)) to_print.write("\n") to_print.write(_make_header("Misc")) - to_print.write("Datasource: %s\n" % - (type_utils.obj_name(cloud.datasource))) + to_print.write( + "Datasource: %s\n" % (type_utils.obj_name(cloud.datasource)) + ) to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro))) to_print.write("Hostname: %s\n" % (cloud.get_hostname(True))) to_print.write("Instance ID: %s\n" % (cloud.get_instance_id())) @@ -102,4 +100,5 @@ def handle(name, cfg, cloud, log, args): else: util.multi_log("".join(content_to_file), console=True, stderr=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py index 61c769b3..5e528e81 100644 --- a/cloudinit/config/cc_disable_ec2_metadata.py +++ b/cloudinit/config/cc_disable_ec2_metadata.py @@ -26,32 +26,35 @@ by default. 
disable_ec2_metadata: """ -from cloudinit import subp -from cloudinit import util - +from cloudinit import subp, util from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject'] -REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254'] +REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"] +REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"] def handle(name, cfg, _cloud, log, _args): disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) if disabled: reject_cmd = None - if subp.which('ip'): + if subp.which("ip"): reject_cmd = REJECT_CMD_IP - elif subp.which('ifconfig'): + elif subp.which("ifconfig"): reject_cmd = REJECT_CMD_IF else: - log.error(('Neither "route" nor "ip" command found, unable to ' - 'manipulate routing table')) + log.error( + 'Neither "route" nor "ip" command found, unable to ' + "manipulate routing table" + ) return subp.subp(reject_cmd, capture=False) else: - log.debug(("Skipping module named %s," - " disabling the ec2 route not enabled"), name) + log.debug( + "Skipping module named %s, disabling the ec2 route not enabled", + name, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 440f05f1..4d527c7a 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -100,13 +100,13 @@ A label can be specified for the filesystem using replace_fs: """ -from cloudinit.settings import PER_INSTANCE -from cloudinit import util -from cloudinit import subp import logging import os import shlex +from cloudinit import subp, util +from cloudinit.settings import PER_INSTANCE + frequency = PER_INSTANCE # Define the commands to use @@ -118,7 +118,7 @@ BLKDEV_CMD = subp.which("blockdev") PARTPROBE_CMD = subp.which("partprobe") WIPEFS_CMD = subp.which("wipefs") -LANG_C_ENV = {'LANG': 'C'} +LANG_C_ENV = {"LANG": "C"} LOG = logging.getLogger(__name__) @@ -145,9 +145,12 @@ def handle(_name, cfg, cloud, log, _args): try: log.debug("Creating new partition table/disk") - util.log_time(logfunc=LOG.debug, - msg="Creating partition on %s" % disk, - func=mkpart, args=(disk, definition)) + util.log_time( + logfunc=LOG.debug, + msg="Creating partition on %s" % disk, + func=mkpart, + args=(disk, definition), + ) except Exception as e: util.logexc(LOG, "Failed partitioning operation\n%s" % e) @@ -162,10 +165,13 @@ def handle(_name, cfg, cloud, log, _args): try: log.debug("Creating new filesystem.") - device = definition.get('device') - util.log_time(logfunc=LOG.debug, - msg="Creating fs for %s" % device, - func=mkfs, args=(definition,)) + device = definition.get("device") + util.log_time( + logfunc=LOG.debug, + msg="Creating fs for %s" % device, + func=mkfs, + args=(definition,), + ) except Exception as e: util.logexc(LOG, "Failed during filesystem operation\n%s" % e) @@ -178,16 +184,22 @@ def update_disk_setup_devices(disk_setup, tformer): if transformed is None or transformed == origname: continue if transformed in disk_setup: - LOG.info("Replacing %s in disk_setup for translation of %s", - origname, transformed) + LOG.info( + "Replacing %s in disk_setup for translation of %s", + origname, + transformed, + ) del disk_setup[transformed] disk_setup[transformed] = disk_setup[origname] if isinstance(disk_setup[transformed], dict): - disk_setup[transformed]['_origname'] = origname + disk_setup[transformed]["_origname"] = origname del disk_setup[origname] - LOG.debug("updated 
disk_setup device entry '%s' to '%s'", - origname, transformed) + LOG.debug( + "updated disk_setup device entry '%s' to '%s'", + origname, + transformed, + ) def update_fs_setup_devices(disk_setup, tformer): @@ -198,7 +210,7 @@ def update_fs_setup_devices(disk_setup, tformer): LOG.warning("entry in disk_setup not a dict: %s", definition) continue - origname = definition.get('device') + origname = definition.get("device") if origname is None: continue @@ -208,19 +220,24 @@ def update_fs_setup_devices(disk_setup, tformer): tformed = tformer(dev) if tformed is not None: dev = tformed - LOG.debug("%s is mapped to disk=%s part=%s", - origname, tformed, part) - definition['_origname'] = origname - definition['device'] = tformed + LOG.debug( + "%s is mapped to disk=%s part=%s", origname, tformed, part + ) + definition["_origname"] = origname + definition["device"] = tformed if part: # In origname with .N, N overrides 'partition' key. - if 'partition' in definition: - LOG.warning("Partition '%s' from dotted device name '%s' " - "overrides 'partition' key in %s", part, origname, - definition) - definition['_partition'] = definition['partition'] - definition['partition'] = part + if "partition" in definition: + LOG.warning( + "Partition '%s' from dotted device name '%s' " + "overrides 'partition' key in %s", + part, + origname, + definition, + ) + definition["_partition"] = definition["partition"] + definition["partition"] = part def value_splitter(values, start=None): @@ -232,7 +249,7 @@ def value_splitter(values, start=None): if start: _values = _values[start:] - for key, value in [x.split('=') for x in _values]: + for key, value in [x.split("=") for x in _values]: yield key, value @@ -251,11 +268,16 @@ def enumerate_disk(device, nodeps=False): name: the device name, i.e. 
sda """ - lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL', - device] + lsblk_cmd = [ + LSBLK_CMD, + "--pairs", + "--output", + "NAME,TYPE,FSTYPE,LABEL", + device, + ] if nodeps: - lsblk_cmd.append('--nodeps') + lsblk_cmd.append("--nodeps") info = None try: @@ -269,10 +291,10 @@ def enumerate_disk(device, nodeps=False): for part in parts: d = { - 'name': None, - 'type': None, - 'fstype': None, - 'label': None, + "name": None, + "type": None, + "fstype": None, + "label": None, } for key, value in value_splitter(part): @@ -303,9 +325,9 @@ def is_device_valid(name, partition=False): LOG.warning("Query against device %s failed", name) return False - if partition and d_type == 'part': + if partition and d_type == "part": return True - elif not partition and d_type == 'disk': + elif not partition and d_type == "disk": return True return False @@ -321,7 +343,7 @@ def check_fs(device): """ out, label, fs_type, uuid = None, None, None, None - blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device] + blkid_cmd = [BLKID_CMD, "-c", "/dev/null", device] try: out, _err = subp.subp(blkid_cmd, rcs=[0, 2]) except Exception as e: @@ -332,11 +354,11 @@ def check_fs(device): if out: if len(out.splitlines()) == 1: for key, value in value_splitter(out, start=1): - if key.lower() == 'label': + if key.lower() == "label": label = value - elif key.lower() == 'type': + elif key.lower() == "type": fs_type = value - elif key.lower() == 'uuid': + elif key.lower() == "uuid": uuid = value return label, fs_type, uuid @@ -350,8 +372,14 @@ def is_filesystem(device): return fs_type -def find_device_node(device, fs_type=None, label=None, valid_targets=None, - label_match=True, replace_fs=None): +def find_device_node( + device, + fs_type=None, + label=None, + valid_targets=None, + label_match=True, + replace_fs=None, +): """ Find a device that is either matches the spec, or the first @@ -366,31 +394,32 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None, label = "" if not valid_targets: - valid_targets = ['disk', 'part'] + valid_targets = ["disk", "part"] raw_device_used = False for d in enumerate_disk(device): - if d['fstype'] == replace_fs and label_match is False: + if d["fstype"] == replace_fs and label_match is False: # We found a device where we want to replace the FS - return ('/dev/%s' % d['name'], False) + return ("/dev/%s" % d["name"], False) - if (d['fstype'] == fs_type and - ((label_match and d['label'] == label) or not label_match)): + if d["fstype"] == fs_type and ( + (label_match and d["label"] == label) or not label_match + ): # If we find a matching device, we return that - return ('/dev/%s' % d['name'], True) + return ("/dev/%s" % d["name"], True) - if d['type'] in valid_targets: + if d["type"] in valid_targets: - if d['type'] != 'disk' or d['fstype']: + if d["type"] != "disk" or d["fstype"]: raw_device_used = True - if d['type'] == 'disk': + if d["type"] == "disk": # Skip the raw disk, its the default pass - elif not d['fstype']: - return ('/dev/%s' % d['name'], False) + elif not d["fstype"]: + return ("/dev/%s" % d["name"], False) if not raw_device_used: return (device, False) @@ -433,7 +462,7 @@ def get_dyn_func(*args): if len(args) < 2: raise Exception("Unable to determine dynamic funcation name") - func_name = (args[0] % args[1]) + func_name = args[0] % args[1] func_args = args[2:] try: @@ -448,8 +477,8 @@ def get_dyn_func(*args): def get_hdd_size(device): try: - size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device]) - sector_size, _ = 
subp.subp([BLKDEV_CMD, '--getss', device]) + size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device]) + sector_size, _ = subp.subp([BLKDEV_CMD, "--getss", device]) except Exception as e: raise Exception("Failed to get %s size\n%s" % (device, e)) from e @@ -481,13 +510,13 @@ def check_partition_mbr_layout(device, layout): if device in _line[0]: # We don't understand extended partitions yet - if _line[-1].lower() in ['extended', 'empty']: + if _line[-1].lower() in ["extended", "empty"]: continue # Find the partition types type_label = None for x in sorted(range(1, len(_line)), reverse=True): - if _line[x].isdigit() and _line[x] != '/': + if _line[x].isdigit() and _line[x] != "/": type_label = _line[x] break @@ -496,7 +525,7 @@ def check_partition_mbr_layout(device, layout): def check_partition_gpt_layout(device, layout): - prt_cmd = [SGDISK_CMD, '-p', device] + prt_cmd = [SGDISK_CMD, "-p", device] try: out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV) except Exception as e: @@ -522,7 +551,7 @@ def check_partition_gpt_layout(device, layout): # Number Start (sector) End (sector) Size Code Name # 1 2048 206847 100.0 MiB 0700 Microsoft basic data for line in out_lines: - if line.strip().startswith('Number'): + if line.strip().startswith("Number"): break codes = [line.strip().split()[5] for line in out_lines] @@ -545,10 +574,16 @@ def check_partition_layout(table_type, device, layout): function called check_partition_%s_layout """ found_layout = get_dyn_func( - "check_partition_%s_layout", table_type, device, layout) - - LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s", - table_type, device, layout, found_layout) + "check_partition_%s_layout", table_type, device, layout + ) + + LOG.debug( + "called check_partition_%s_layout(%s, %s), returned: %s", + table_type, + device, + layout, + found_layout, + ) if isinstance(layout, bool): # if we are using auto partitioning, or "True" be happy # if a single partition exists. @@ -559,10 +594,12 @@ def check_partition_layout(table_type, device, layout): elif len(found_layout) == len(layout): # This just makes sure that the number of requested # partitions and the type labels are right - layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None - for x in layout] - LOG.debug("Layout types=%s. Found types=%s", - layout_types, found_layout) + layout_types = [ + str(x[1]) if isinstance(x, (tuple, list)) else None for x in layout + ] + LOG.debug( + "Layout types=%s. 
Found types=%s", layout_types, found_layout + ) for itype, ftype in zip(layout_types, found_layout): if itype is not None and str(ftype) != str(itype): return False @@ -588,8 +625,9 @@ def get_partition_mbr_layout(size, layout): # Create a single partition return "0," - if ((len(layout) == 0 and isinstance(layout, list)) or - not isinstance(layout, list)): + if (len(layout) == 0 and isinstance(layout, list)) or not isinstance( + layout, list + ): raise Exception("Partition layout is invalid") last_part_num = len(layout) @@ -617,8 +655,10 @@ def get_partition_mbr_layout(size, layout): sfdisk_definition = "\n".join(part_definition) if len(part_definition) > 4: - raise Exception("Calculated partition definition is too big\n%s" % - sfdisk_definition) + raise Exception( + "Calculated partition definition is too big\n%s" + % sfdisk_definition + ) return sfdisk_definition @@ -632,14 +672,15 @@ def get_partition_gpt_layout(size, layout): if isinstance(partition, list): if len(partition) != 2: raise Exception( - "Partition was incorrectly defined: %s" % partition) + "Partition was incorrectly defined: %s" % partition + ) percent, partition_type = partition else: percent = partition partition_type = None part_size = int(float(size) * (float(percent) / 100)) - partition_specs.append((partition_type, [0, '+{}'.format(part_size)])) + partition_specs.append((partition_type, [0, "+{}".format(part_size)])) # The last partition should use up all remaining space partition_specs[-1][-1][-1] = 0 @@ -649,7 +690,7 @@ def get_partition_gpt_layout(size, layout): def purge_disk_ptable(device): # wipe the first and last megabyte of a disk (or file) # gpt stores partition table both at front and at end. - null = '\0' + null = "\0" start_len = 1024 * 1024 end_len = 1024 * 1024 with open(device, "rb+") as fp: @@ -668,14 +709,14 @@ def purge_disk(device): # wipe any file systems first for d in enumerate_disk(device): - if d['type'] not in ["disk", "crypt"]: - wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']] + if d["type"] not in ["disk", "crypt"]: + wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d["name"]] try: - LOG.info("Purging filesystem on /dev/%s", d['name']) + LOG.info("Purging filesystem on /dev/%s", d["name"]) subp.subp(wipefs_cmd) except Exception as e: raise Exception( - "Failed FS purge of /dev/%s" % d['name'] + "Failed FS purge of /dev/%s" % d["name"] ) from e purge_disk_ptable(device) @@ -701,7 +742,7 @@ def read_parttbl(device): if PARTPROBE_CMD is not None: probe_cmd = [PARTPROBE_CMD, device] else: - probe_cmd = [BLKDEV_CMD, '--rereadpt', device] + probe_cmd = [BLKDEV_CMD, "--rereadpt", device] util.udevadm_settle() try: subp.subp(probe_cmd) @@ -730,17 +771,24 @@ def exec_mkpart_mbr(device, layout): def exec_mkpart_gpt(device, layout): try: - subp.subp([SGDISK_CMD, '-Z', device]) + subp.subp([SGDISK_CMD, "-Z", device]) for index, (partition_type, (start, end)) in enumerate(layout): index += 1 - subp.subp([SGDISK_CMD, - '-n', '{}:{}:{}'.format(index, start, end), device]) + subp.subp( + [ + SGDISK_CMD, + "-n", + "{}:{}:{}".format(index, start, end), + device, + ] + ) if partition_type is not None: # convert to a 4 char (or more) string right padded with 0 # 82 -> 8200. 
'Linux' -> 'Linux' pinput = str(partition_type).ljust(4, "0") subp.subp( - [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device]) + [SGDISK_CMD, "-t", "{}:{}".format(index, pinput), device] + ) except Exception: LOG.warning("Failed to partition device %s", device) raise @@ -766,8 +814,10 @@ def assert_and_settle_device(device): if not os.path.exists(device): util.udevadm_settle() if not os.path.exists(device): - raise RuntimeError("Device %s did not exist and was not created " - "with a udevadm settle." % device) + raise RuntimeError( + "Device %s did not exist and was not created " + "with a udevadm settle." % device + ) # Whether or not the device existed above, it is possible that udev # events that would populate udev database (for reading by lsdname) have @@ -794,9 +844,9 @@ def mkpart(device, definition): device = os.path.realpath(device) LOG.debug("Checking values for %s definition", device) - overwrite = definition.get('overwrite', False) - layout = definition.get('layout', False) - table_type = definition.get('table_type', 'mbr') + overwrite = definition.get("overwrite", False) + layout = definition.get("layout", False) + table_type = definition.get("table_type", "mbr") # Check if the default device is a partition or not LOG.debug("Checking against default devices") @@ -809,7 +859,8 @@ def mkpart(device, definition): LOG.debug("Checking if device %s is a valid device", device) if not is_device_valid(device): raise Exception( - 'Device {device} is not a disk device!'.format(device=device)) + "Device {device} is not a disk device!".format(device=device) + ) # Remove the partition table entries if isinstance(layout, str) and layout.lower() == "remove": @@ -845,21 +896,21 @@ def lookup_force_flag(fs): A force flag might be -F or -f; this looks it up """ flags = { - 'ext': '-F', - 'btrfs': '-f', - 'xfs': '-f', - 'reiserfs': '-f', - 'swap': '-f', + "ext": "-F", + "btrfs": "-f", + "xfs": "-f", + "reiserfs": "-f", + "swap": "-f", } - if 'ext' in fs.lower(): - fs = 'ext' + if "ext" in fs.lower(): + fs = "ext" if fs.lower() in flags: return flags[fs] LOG.warning("Force flag for %s is unknown.", fs) - return '' + return "" def mkfs(fs_cfg): @@ -883,14 +934,14 @@ def mkfs(fs_cfg): When 'cmd' is provided then no other parameter is required.
""" - label = fs_cfg.get('label') - device = fs_cfg.get('device') - partition = str(fs_cfg.get('partition', 'any')) - fs_type = fs_cfg.get('filesystem') - fs_cmd = fs_cfg.get('cmd', []) - fs_opts = fs_cfg.get('extra_opts', []) - fs_replace = fs_cfg.get('replace_fs', False) - overwrite = fs_cfg.get('overwrite', False) + label = fs_cfg.get("label") + device = fs_cfg.get("device") + partition = str(fs_cfg.get("partition", "any")) + fs_type = fs_cfg.get("filesystem") + fs_cmd = fs_cfg.get("cmd", []) + fs_opts = fs_cfg.get("extra_opts", []) + fs_replace = fs_cfg.get("replace_fs", False) + overwrite = fs_cfg.get("overwrite", False) # ensure that we get a real device rather than a symbolic link assert_and_settle_device(device) @@ -903,14 +954,19 @@ def mkfs(fs_cfg): # Handle manual definition of partition if partition.isdigit(): device = "%s%s" % (device, partition) - LOG.debug("Manual request of partition %s for %s", - partition, device) + LOG.debug( + "Manual request of partition %s for %s", partition, device + ) # Check to see if the fs already exists LOG.debug("Checking device %s", device) check_label, check_fstype, _ = check_fs(device) - LOG.debug("Device '%s' has check_label='%s' check_fstype=%s", - device, check_label, check_fstype) + LOG.debug( + "Device '%s' has check_label='%s' check_fstype=%s", + device, + check_label, + check_fstype, + ) if check_label == label and check_fstype == fs_type: LOG.debug("Existing file system found at %s", device) @@ -924,19 +980,23 @@ def mkfs(fs_cfg): else: LOG.debug("Device %s is cleared for formating", device) - elif partition and str(partition).lower() in ('auto', 'any'): + elif partition and str(partition).lower() in ("auto", "any"): # For auto devices, we match if the filesystem does exist odevice = device LOG.debug("Identifying device to create %s filesytem on", label) # any mean pick the first match on the device with matching fs_type label_match = True - if partition.lower() == 'any': + if partition.lower() == "any": label_match = False - device, reuse = find_device_node(device, fs_type=fs_type, label=label, - label_match=label_match, - replace_fs=fs_replace) + device, reuse = find_device_node( + device, + fs_type=fs_type, + label=label, + label_match=label_match, + replace_fs=fs_replace, + ) LOG.debug("Automatic device for %s identified as %s", odevice, device) if reuse: @@ -947,18 +1007,25 @@ def mkfs(fs_cfg): LOG.debug("Replacing file system on %s as instructed.", device) if not device: - LOG.debug("No device aviable that matches request. " - "Skipping fs creation for %s", fs_cfg) + LOG.debug( + "No device aviable that matches request. " + "Skipping fs creation for %s", + fs_cfg, + ) return - elif not partition or str(partition).lower() == 'none': + elif not partition or str(partition).lower() == "none": LOG.debug("Using the raw device to place filesystem %s on", label) else: LOG.debug("Error in device identification handling.") return - LOG.debug("File system type '%s' with label '%s' will be created on %s", - fs_type, label, device) + LOG.debug( + "File system type '%s' with label '%s' will be created on %s", + fs_type, + label, + device, + ) # Make sure the device is defined if not device: @@ -969,26 +1036,29 @@ def mkfs(fs_cfg): if not (fs_type or fs_cmd): raise Exception( "No way to create filesystem '{label}'. 
fs_type or fs_cmd " - "must be set.".format(label=label)) + "must be set.".format(label=label) + ) # Create the commands shell = False if fs_cmd: - fs_cmd = fs_cfg['cmd'] % { - 'label': label, - 'filesystem': fs_type, - 'device': device, + fs_cmd = fs_cfg["cmd"] % { + "label": label, + "filesystem": fs_type, + "device": device, } shell = True if overwrite: LOG.warning( "fs_setup:overwrite ignored because cmd was specified: %s", - fs_cmd) + fs_cmd, + ) if fs_opts: LOG.warning( "fs_setup:extra_opts ignored because cmd was specified: %s", - fs_cmd) + fs_cmd, + ) else: # Find the mkfs command mkfs_cmd = subp.which("mkfs.%s" % fs_type) @@ -996,8 +1066,11 @@ def mkfs(fs_cfg): mkfs_cmd = subp.which("mk%s" % fs_type) if not mkfs_cmd: - LOG.warning("Cannot create fstype '%s'. No mkfs.%s command", - fs_type, fs_type) + LOG.warning( + "Cannot create fstype '%s'. No mkfs.%s command", + fs_type, + fs_type, + ) return fs_cmd = [mkfs_cmd, device] @@ -1022,4 +1095,5 @@ def mkfs(fs_cfg): except Exception as e: raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 40eee052..a928082b 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -24,12 +24,12 @@ user configuration should be required. import os from cloudinit import log as logging -from cloudinit.settings import PER_ALWAYS from cloudinit import subp +from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] LOG = logging.getLogger(__name__) @@ -39,15 +39,18 @@ def is_upstart_system(): return False myenv = os.environ.copy() - if 'UPSTART_SESSION' in myenv: - del myenv['UPSTART_SESSION'] - check_cmd = ['initctl', 'version'] + if "UPSTART_SESSION" in myenv: + del myenv["UPSTART_SESSION"] + check_cmd = ["initctl", "version"] try: (out, _err) = subp.subp(check_cmd, env=myenv) - return 'upstart' in out + return "upstart" in out except subp.ProcessExecutionError as e: - LOG.debug("'%s' returned '%s', not using upstart", - ' '.join(check_cmd), e.exit_code) + LOG.debug( + "'%s' returned '%s', not using upstart", + " ".join(check_cmd), + e.exit_code, + ) return False @@ -56,7 +59,7 @@ def handle(name, _cfg, cloud, log, args): if not event_names: # Default to the 'cloud-config' # event for backwards compat. - event_names = ['cloud-config'] + event_names = ["cloud-config"] if not is_upstart_system(): log.debug("not upstart system, '%s' disabled", name) @@ -64,11 +67,12 @@ def handle(name, _cfg, cloud, log, args): cfgpath = cloud.paths.get_ipath_cur("cloud_config") for n in event_names: - cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath] + cmd = ["initctl", "emit", str(n), "CLOUD_CFG=%s" % cfgpath] try: subp.subp(cmd) except Exception as e: # TODO(harlowja), use log exception from utils?? 
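
When no explicit cmd is configured, the tail of mkfs() above assembles the command from which("mkfs.<fs_type>"), the device, and the force flag from lookup_force_flag(). A sketch of that assembly; the -L label flag here is an assumption based on common mkfs.* tooling, since the label handling falls outside the excerpt:

    # Sketch of mkfs()'s command assembly; force flags mirror
    # lookup_force_flag() above, and -L for the label is an assumption.
    FORCE_FLAGS = {"ext": "-F", "btrfs": "-f", "xfs": "-f", "swap": "-f"}

    def build_mkfs_cmd(fs_type, device, label=None, force=False, opts=()):
        cmd = ["mkfs.%s" % fs_type, device]
        if label:
            cmd.extend(["-L", label])
        if force:
            # ext2/3/4 all collapse to the 'ext' entry, as above.
            key = "ext" if "ext" in fs_type else fs_type
            if key in FORCE_FLAGS:
                cmd.append(FORCE_FLAGS[key])
        cmd.extend(opts)
        return cmd

    assert build_mkfs_cmd("ext4", "/dev/sdb1", label="data", force=True) == [
        "mkfs.ext4", "/dev/sdb1", "-L", "data", "-F"
    ]
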
log.warning("Emission of upstart event %s failed due to: %s", n, e) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py index 91f50e22..50a81744 100644 --- a/cloudinit/config/cc_fan.py +++ b/cloudinit/config/cc_fan.py @@ -38,60 +38,62 @@ If cloud-init sees a ``fan`` entry in cloud-config it will: """ from cloudinit import log as logging +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util LOG = logging.getLogger(__name__) frequency = PER_INSTANCE BUILTIN_CFG = { - 'config': None, - 'config_path': '/etc/network/fan', + "config": None, + "config_path": "/etc/network/fan", } def stop_update_start(distro, service, config_file, content): try: - distro.manage_service('stop', service) + distro.manage_service("stop", service) stop_failed = False except subp.ProcessExecutionError as e: stop_failed = True LOG.warning("failed to stop %s: %s", service, e) - if not content.endswith('\n'): - content += '\n' + if not content.endswith("\n"): + content += "\n" util.write_file(config_file, content, omode="w") try: - distro.manage_service('start', service) + distro.manage_service("start", service) if stop_failed: LOG.warning("success: %s started", service) except subp.ProcessExecutionError as e: LOG.warning("failed to start %s: %s", service, e) - distro.manage_service('enable', service) + distro.manage_service("enable", service) def handle(name, cfg, cloud, log, args): - cfgin = cfg.get('fan') + cfgin = cfg.get("fan") if not cfgin: cfgin = {} mycfg = util.mergemanydict([cfgin, BUILTIN_CFG]) - if not mycfg.get('config'): + if not mycfg.get("config"): LOG.debug("%s: no 'fan' config entry. disabling", name) return - util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w") + util.write_file(mycfg.get("config_path"), mycfg.get("config"), omode="w") distro = cloud.distro - if not subp.which('fanctl'): - distro.install_packages(['ubuntu-fan']) + if not subp.which("fanctl"): + distro.install_packages(["ubuntu-fan"]) stop_update_start( distro, - service='ubuntu-fan', config_file=mycfg.get('config_path'), - content=mycfg.get('config')) + service="ubuntu-fan", + config_file=mycfg.get("config_path"), + content=mycfg.get("config"), + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index 4fa5297e..f443ccd8 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -31,10 +31,7 @@ specified as a jinja template with the following variables set: """ -from cloudinit import templater -from cloudinit import util -from cloudinit import version - +from cloudinit import templater, util, version from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS @@ -49,7 +46,7 @@ FINAL_MESSAGE_DEF = ( def handle(_name, cfg, cloud, log, args): - msg_in = '' + msg_in = "" if len(args) != 0: msg_in = str(args[0]) else: @@ -64,14 +61,18 @@ def handle(_name, cfg, cloud, log, args): cver = version.version_string() try: subs = { - 'uptime': uptime, - 'timestamp': ts, - 'version': cver, - 'datasource': str(cloud.datasource), + "uptime": uptime, + "timestamp": ts, + "version": cver, + "datasource": str(cloud.datasource), } subs.update(dict([(k.upper(), v) for k, v in subs.items()])) - util.multi_log("%s\n" % (templater.render_string(msg_in, subs)), - console=False, stderr=True, log=log) + util.multi_log( + "%s\n" % (templater.render_string(msg_in, subs)), + console=False, + stderr=True, + log=log, + ) 
except Exception: util.logexc(log, "Failed to render final message template") @@ -85,4 +86,5 @@ def handle(_name, cfg, cloud, log, args): if cloud.datasource.is_disconnected: log.warning("Used fallback datasource") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py index 924b967c..3c307153 100644 --- a/cloudinit/config/cc_foo.py +++ b/cloudinit/config/cc_foo.py @@ -53,4 +53,5 @@ frequency = PER_INSTANCE def handle(name, _cfg, _cloud, log, _args): log.debug("Hi from module %s", name) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 1ddc9dc7..43334caa 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -70,17 +70,15 @@ import re import stat from cloudinit import log as logging +from cloudinit import subp, temp_utils, util from cloudinit.settings import PER_ALWAYS -from cloudinit import subp -from cloudinit import temp_utils -from cloudinit import util frequency = PER_ALWAYS DEFAULT_CONFIG = { - 'mode': 'auto', - 'devices': ['/'], - 'ignore_growroot_disabled': False, + "mode": "auto", + "devices": ["/"], + "ignore_growroot_disabled": False, } @@ -131,7 +129,7 @@ class ResizeFailedException(Exception): class ResizeGrowPart(object): def available(self): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" try: (out, _err) = subp.subp(["growpart", "--help"], env=myenv) @@ -144,7 +142,7 @@ class ResizeGrowPart(object): def resize(self, diskdev, partnum, partdev): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" before = get_size(partdev) # growpart uses tmp dir to store intermediate states @@ -153,14 +151,19 @@ class ResizeGrowPart(object): growpart_tmp = os.path.join(tmpd, "growpart") if not os.path.exists(growpart_tmp): os.mkdir(growpart_tmp, 0o700) - myenv['TMPDIR'] = growpart_tmp + myenv["TMPDIR"] = growpart_tmp try: - subp.subp(["growpart", '--dry-run', diskdev, partnum], - env=myenv) + subp.subp( + ["growpart", "--dry-run", diskdev, partnum], env=myenv + ) except subp.ProcessExecutionError as e: if e.exit_code != 1: - util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", - diskdev, partnum) + util.logexc( + LOG, + "Failed growpart --dry-run for (%s, %s)", + diskdev, + partnum, + ) raise ResizeFailedException(e) from e return (before, before) @@ -176,7 +179,7 @@ class ResizeGrowPart(object): class ResizeGpart(object): def available(self): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" try: (_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1]) @@ -234,11 +237,11 @@ def device_part_info(devpath): # the device, like /dev/vtbd0p2. if util.is_FreeBSD(): freebsd_part = "/dev/" + util.find_freebsd_part(devpath) - m = re.search('^(/dev/.+)p([0-9])$', freebsd_part) + m = re.search("^(/dev/.+)p([0-9])$", freebsd_part) return (m.group(1), m.group(2)) elif util.is_DragonFlyBSD(): dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath) - m = re.search('^(/dev/.+)s([0-9])$', dragonflybsd_part) + m = re.search("^(/dev/.+)s([0-9])$", dragonflybsd_part) return (m.group(1), m.group(2)) if not os.path.exists(syspath): @@ -275,7 +278,7 @@ def devent2dev(devent): container = util.is_container() # Ensure the path is a block device. 
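
The BSD branches of device_part_info() above recover the (disk, partition-number) pair with a single anchored regex; the FreeBSD pattern splits a device path like so:

    # The FreeBSD disk/partition split from device_part_info() above.
    import re

    m = re.search("^(/dev/.+)p([0-9])$", "/dev/vtbd0p2")
    assert m is not None
    assert (m.group(1), m.group(2)) == ("/dev/vtbd0", "2")
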
- if (dev == "/dev/root" and not container): + if dev == "/dev/root" and not container: dev = util.rootdev_from_cmdline(util.get_cmdline()) if dev is None: if os.path.exists(dev): @@ -293,65 +296,102 @@ def resize_devices(resizer, devices): try: blockdev = devent2dev(devent) except ValueError as e: - info.append((devent, RESIZE.SKIPPED, - "unable to convert to device: %s" % e,)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "unable to convert to device: %s" % e, + ) + ) continue try: statret = os.stat(blockdev) except OSError as e: - info.append((devent, RESIZE.SKIPPED, - "stat of '%s' failed: %s" % (blockdev, e),)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "stat of '%s' failed: %s" % (blockdev, e), + ) + ) continue - if (not stat.S_ISBLK(statret.st_mode) and - not stat.S_ISCHR(statret.st_mode)): - info.append((devent, RESIZE.SKIPPED, - "device '%s' not a block device" % blockdev,)) + if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR( + statret.st_mode + ): + info.append( + ( + devent, + RESIZE.SKIPPED, + "device '%s' not a block device" % blockdev, + ) + ) continue try: (disk, ptnum) = device_part_info(blockdev) except (TypeError, ValueError) as e: - info.append((devent, RESIZE.SKIPPED, - "device_part_info(%s) failed: %s" % (blockdev, e),)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "device_part_info(%s) failed: %s" % (blockdev, e), + ) + ) continue try: (old, new) = resizer.resize(disk, ptnum, blockdev) if old == new: - info.append((devent, RESIZE.NOCHANGE, - "no change necessary (%s, %s)" % (disk, ptnum),)) + info.append( + ( + devent, + RESIZE.NOCHANGE, + "no change necessary (%s, %s)" % (disk, ptnum), + ) + ) else: - info.append((devent, RESIZE.CHANGED, - "changed (%s, %s) from %s to %s" % - (disk, ptnum, old, new),)) + info.append( + ( + devent, + RESIZE.CHANGED, + "changed (%s, %s) from %s to %s" + % (disk, ptnum, old, new), + ) + ) except ResizeFailedException as e: - info.append((devent, RESIZE.FAILED, - "failed to resize: disk=%s, ptnum=%s: %s" % - (disk, ptnum, e),)) + info.append( + ( + devent, + RESIZE.FAILED, + "failed to resize: disk=%s, ptnum=%s: %s" + % (disk, ptnum, e), + ) + ) return info def handle(_name, cfg, _cloud, log, _args): - if 'growpart' not in cfg: - log.debug("No 'growpart' entry in cfg. Using default: %s" % - DEFAULT_CONFIG) - cfg['growpart'] = DEFAULT_CONFIG + if "growpart" not in cfg: + log.debug( + "No 'growpart' entry in cfg. 
Using default: %s" % DEFAULT_CONFIG + ) + cfg["growpart"] = DEFAULT_CONFIG - mycfg = cfg.get('growpart') + mycfg = cfg.get("growpart") if not isinstance(mycfg, dict): log.warning("'growpart' in config was not a dict") return - mode = mycfg.get('mode', "auto") + mode = mycfg.get("mode", "auto") if util.is_false(mode): log.debug("growpart disabled: mode=%s" % mode) return - if util.is_false(mycfg.get('ignore_growroot_disabled', False)): + if util.is_false(mycfg.get("ignore_growroot_disabled", False)): if os.path.isfile("/etc/growroot-disabled"): log.debug("growpart disabled: /etc/growroot-disabled exists") log.debug("use ignore_growroot_disabled to ignore") @@ -370,8 +410,12 @@ def handle(_name, cfg, _cloud, log, _args): raise e return - resized = util.log_time(logfunc=log.debug, msg="resize_devices", - func=resize_devices, args=(resizer, devices)) + resized = util.log_time( + logfunc=log.debug, + msg="resize_devices", + func=resize_devices, + args=(resizer, devices), + ) for (entry, action, msg) in resized: if action == RESIZE.CHANGED: log.info("'%s' resized: %s" % (entry, msg)) @@ -379,6 +423,6 @@ def handle(_name, cfg, _cloud, log, _args): log.debug("'%s' %s: %s" % (entry, action, msg)) -RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart)) +RESIZERS = (("growpart", ResizeGrowPart), ("gpart", ResizeGpart)) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py index eb03c664..ad7243d9 100644 --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -43,11 +43,10 @@ seeded with empty values, and install_devices_empty is set to true. import os -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.subp import ProcessExecutionError -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] def fetch_idevs(log): @@ -60,8 +59,9 @@ def fetch_idevs(log): try: # get the root disk where the /boot directory resides. 
- disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'], - capture=True)[0].strip() + disk = subp.subp(["grub-probe", "-t", "disk", "/boot"], capture=True)[ + 0 + ].strip() except ProcessExecutionError as e: # grub-common may not be installed, especially on containers # FileNotFoundError is a nested exception of ProcessExecutionError @@ -81,26 +81,30 @@ def fetch_idevs(log): if not disk or not os.path.exists(disk): # If we failed to detect a disk, we can return early - return '' + return "" try: # check if disk exists and use udevadm to fetch symlinks - devices = subp.subp( - ['udevadm', 'info', '--root', '--query=symlink', disk], - capture=True - )[0].strip().split() + devices = ( + subp.subp( + ["udevadm", "info", "--root", "--query=symlink", disk], + capture=True, + )[0] + .strip() + .split() + ) except Exception: util.logexc( log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk ) - log.debug('considering these device symlinks: %s', ','.join(devices)) + log.debug("considering these device symlinks: %s", ",".join(devices)) # filter symlinks for /dev/disk/by-id entries - devices = [dev for dev in devices if 'disk/by-id' in dev] - log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices)) + devices = [dev for dev in devices if "disk/by-id" in dev] + log.debug("filtered to these disk/by-id symlinks: %s", ",".join(devices)) # select first device if there is one, else fall back to plain name idevs = sorted(devices)[0] if devices else disk - log.debug('selected %s', idevs) + log.debug("selected %s", idevs) return idevs @@ -111,14 +115,15 @@ def handle(name, cfg, _cloud, log, _args): if not mycfg: mycfg = {} - enabled = mycfg.get('enabled', True) + enabled = mycfg.get("enabled", True) if util.is_false(enabled): log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled) return idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None) idevs_empty = util.get_cfg_option_str( - mycfg, "grub-pc/install_devices_empty", None) + mycfg, "grub-pc/install_devices_empty", None + ) if idevs is None: idevs = fetch_idevs(log) @@ -128,16 +133,21 @@ def handle(name, cfg, _cloud, log, _args): # now idevs and idevs_empty are set to determined values # or, those set by user - dconf_sel = (("grub-pc grub-pc/install_devices string %s\n" - "grub-pc grub-pc/install_devices_empty boolean %s\n") % - (idevs, idevs_empty)) + dconf_sel = ( + "grub-pc grub-pc/install_devices string %s\n" + "grub-pc grub-pc/install_devices_empty boolean %s\n" + % (idevs, idevs_empty) + ) - log.debug("Setting grub debconf-set-selections with '%s','%s'" % - (idevs, idevs_empty)) + log.debug( + "Setting grub debconf-set-selections with '%s','%s'" + % (idevs, idevs_empty) + ) try: - subp.subp(['debconf-set-selections'], dconf_sel) + subp.subp(["debconf-set-selections"], dconf_sel) except Exception: util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py index 9b4075cc..952d9f13 100644 --- a/cloudinit/config/cc_install_hotplug.py +++ b/cloudinit/config/cc_install_hotplug.py @@ -3,15 +3,12 @@ import os from textwrap import dedent -from cloudinit import util -from cloudinit import subp -from cloudinit import stages +from cloudinit import stages, subp, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.distros import ALL_DISTROS -from cloudinit.event import EventType, EventScope +from cloudinit.event import EventScope, 
EventType from cloudinit.settings import PER_INSTANCE - frequency = PER_INSTANCE distros = [ALL_DISTROS] @@ -19,7 +16,8 @@ meta = { "id": "cc_install_hotplug", "name": "Install Hotplug", "title": "Install hotplug if supported and enabled", - "description": dedent("""\ + "description": dedent( + """\ This module will install the udev rules to enable hotplug if supported by the datasource and enabled in the userdata. The udev rules will be installed as @@ -32,21 +30,26 @@ meta = { network configuration. Currently supported datasources: Openstack, EC2 - """), + """ + ), "distros": distros, "examples": [ - dedent("""\ + dedent( + """\ # Enable hotplug of network devices updates: network: when: ["hotplug"] - """), - dedent("""\ + """ + ), + dedent( + """\ # Enable network hotplug alongside boot event updates: network: when: ["boot", "hotplug"] - """), + """ + ), ], "frequency": frequency, } @@ -74,14 +77,14 @@ schema = { "boot-legacy", "boot", "hotplug", - ] - } + ], + }, } - } + }, } - } + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -100,14 +103,15 @@ LABEL="cloudinit_end" def handle(_name, cfg, cloud, log, _args): validate_cloudconfig_schema(cfg, schema) network_hotplug_enabled = ( - 'updates' in cfg and - 'network' in cfg['updates'] and - 'when' in cfg['updates']['network'] and - 'hotplug' in cfg['updates']['network']['when'] + "updates" in cfg + and "network" in cfg["updates"] + and "when" in cfg["updates"]["network"] + and "hotplug" in cfg["updates"]["network"]["when"] ) hotplug_supported = EventType.HOTPLUG in ( - cloud.datasource.get_supported_events( - [EventType.HOTPLUG]).get(EventScope.NETWORK, set()) + cloud.datasource.get_supported_events([EventType.HOTPLUG]).get( + EventScope.NETWORK, set() + ) ) hotplug_enabled = stages.update_event_enabled( datasource=cloud.datasource, diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index d72b5244..ab35e136 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -38,49 +38,53 @@ host keys are not written to console. 
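
The network_hotplug_enabled expression in cc_install_hotplug's handle() is a null-safe walk of cfg["updates"]["network"]["when"]; against the first example from the module's docs it evaluates to True:

    # The nested-key walk from cc_install_hotplug, applied to the first
    # example in the module's documentation above.
    cfg = {"updates": {"network": {"when": ["hotplug"]}}}

    network_hotplug_enabled = (
        "updates" in cfg
        and "network" in cfg["updates"]
        and "when" in cfg["updates"]["network"]
        and "hotplug" in cfg["updates"]["network"]["when"]
    )
    assert network_hotplug_enabled is True
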
import os +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util frequency = PER_INSTANCE # This is a tool that cloud init provides -HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints' +HELPER_TOOL_TPL = "%s/cloud-init/write-ssh-key-fingerprints" def _get_helper_tool_path(distro): try: base_lib = distro.usr_lib_exec except AttributeError: - base_lib = '/usr/lib' + base_lib = "/usr/lib" return HELPER_TOOL_TPL % base_lib def handle(name, cfg, cloud, log, _args): if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)): - log.debug(("Skipping module named %s, " - "logging of SSH host keys disabled"), name) + log.debug( + "Skipping module named %s, logging of SSH host keys disabled", name + ) return helper_path = _get_helper_tool_path(cloud.distro) if not os.path.exists(helper_path): - log.warning(("Unable to activate module %s," - " helper tool not found at %s"), name, helper_path) + log.warning( + "Unable to activate module %s, helper tool not found at %s", + name, + helper_path, + ) return - fp_blacklist = util.get_cfg_option_list(cfg, - "ssh_fp_console_blacklist", []) - key_blacklist = util.get_cfg_option_list(cfg, - "ssh_key_console_blacklist", - ["ssh-dss"]) + fp_blacklist = util.get_cfg_option_list( + cfg, "ssh_fp_console_blacklist", [] + ) + key_blacklist = util.get_cfg_option_list( + cfg, "ssh_key_console_blacklist", ["ssh-dss"] + ) try: - cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)] + cmd = [helper_path, ",".join(fp_blacklist), ",".join(key_blacklist)] (stdout, _stderr) = subp.subp(cmd) - util.multi_log("%s\n" % (stdout.strip()), - stderr=False, console=True) + util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True) except Exception: log.warning("Writing keys to the system console failed!") raise + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 299c4d01..03ebf411 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -60,10 +60,7 @@ from io import BytesIO from configobj import ConfigObj -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util - +from cloudinit import subp, type_utils, util from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE @@ -71,15 +68,15 @@ frequency = PER_INSTANCE LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf" LS_DEFAULT_FILE = "/etc/default/landscape-client" -distros = ['ubuntu'] +distros = ["ubuntu"] # defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2 LSC_BUILTIN_CFG = { - 'client': { - 'log_level': "info", - 'url': "https://landscape.canonical.com/message-system", - 'ping_url': "http://landscape.canonical.com/ping", - 'data_path': "/var/lib/landscape/client", + "client": { + "log_level": "info", + "url": "https://landscape.canonical.com/message-system", + "ping_url": "http://landscape.canonical.com/ping", + "data_path": "/var/lib/landscape/client", } } @@ -97,11 +94,13 @@ def handle(_name, cfg, cloud, log, _args): raise RuntimeError( "'landscape' key existed in config, but not a dictionary type," " is a {_type} instead".format( - _type=type_utils.obj_name(ls_cloudcfg))) + _type=type_utils.obj_name(ls_cloudcfg) + ) + ) if not ls_cloudcfg: return - cloud.distro.install_packages(('landscape-client',)) + cloud.distro.install_packages(("landscape-client",)) merge_data = [ LSC_BUILTIN_CFG, @@ -135,4 +134,5 @@ def merge_together(objs): 
cfg.merge(ConfigObj(obj)) return cfg + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py index 7fed9abd..487f58f7 100644 --- a/cloudinit/config/cc_locale.py +++ b/cloudinit/config/cc_locale.py @@ -14,45 +14,48 @@ from cloudinit import util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE - frequency = PER_INSTANCE -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_locale', - 'name': 'Locale', - 'title': 'Set system locale', - 'description': dedent( + "id": "cc_locale", + "name": "Locale", + "title": "Set system locale", + "description": dedent( """\ Configure the system locale and apply it system wide. By default use the locale specified by the datasource.""" ), - 'distros': distros, - 'examples': [ - dedent("""\ + "distros": distros, + "examples": [ + dedent( + """\ # Set the locale to ar_AE locale: ar_AE - """), - dedent("""\ + """ + ), + dedent( + """\ # Set the locale to fr_CA in /etc/alternate_path/locale locale: fr_CA locale_configfile: /etc/alternate_path/locale - """), + """ + ), ], - 'frequency': frequency, + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'locale': { - 'type': 'string', - 'description': ( + "type": "object", + "properties": { + "locale": { + "type": "string", + "description": ( "The locale to set as the system's locale (e.g. ar_PS)" ), }, - 'locale_configfile': { - 'type': 'string', - 'description': ( + "locale_configfile": { + "type": "string", + "description": ( "The file in which to write the locale configuration (defaults" " to the distro's default location)" ), @@ -70,8 +73,9 @@ def handle(name, cfg, cloud, log, args): locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale()) if util.is_false(locale): - log.debug("Skipping module named %s, disabled by config: %s", - name, locale) + log.debug( + "Skipping module named %s, disabled by config: %s", name, locale + ) return validate_cloudconfig_schema(cfg, schema) @@ -80,4 +84,5 @@ def handle(name, cfg, cloud, log, args): locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile") cloud.distro.apply_locale(locale, locale_cfgfile) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 486037d9..13ddcbe9 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -47,12 +47,12 @@ lxd-bridge will be configured accordingly. domain: """ -from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util import os -distros = ['ubuntu'] +from cloudinit import log as logging +from cloudinit import subp, util + +distros = ["ubuntu"] LOG = logging.getLogger(__name__) @@ -61,36 +61,42 @@ _DEFAULT_NETWORK_NAME = "lxdbr0" def handle(name, cfg, cloud, log, args): # Get config - lxd_cfg = cfg.get('lxd') + lxd_cfg = cfg.get("lxd") if not lxd_cfg: - log.debug("Skipping module named %s, not present or disabled by cfg", - name) + log.debug( + "Skipping module named %s, not present or disabled by cfg", name + ) return if not isinstance(lxd_cfg, dict): - log.warning("lxd config must be a dictionary. found a '%s'", - type(lxd_cfg)) + log.warning( + "lxd config must be a dictionary. found a '%s'", type(lxd_cfg) + ) return # Grab the configuration - init_cfg = lxd_cfg.get('init') + init_cfg = lxd_cfg.get("init") if not isinstance(init_cfg, dict): - log.warning("lxd/init config must be a dictionary. 
found a '%s'", - type(init_cfg)) + log.warning( + "lxd/init config must be a dictionary. found a '%s'", + type(init_cfg), + ) init_cfg = {} - bridge_cfg = lxd_cfg.get('bridge', {}) + bridge_cfg = lxd_cfg.get("bridge", {}) if not isinstance(bridge_cfg, dict): - log.warning("lxd/bridge config must be a dictionary. found a '%s'", - type(bridge_cfg)) + log.warning( + "lxd/bridge config must be a dictionary. found a '%s'", + type(bridge_cfg), + ) bridge_cfg = {} # Install the needed packages packages = [] if not subp.which("lxd"): - packages.append('lxd') + packages.append("lxd") - if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'): - packages.append('zfsutils-linux') + if init_cfg.get("storage_backend") == "zfs" and not subp.which("zfs"): + packages.append("zfsutils-linux") if len(packages): try: @@ -102,23 +108,30 @@ def handle(name, cfg, cloud, log, args): # Set up lxd if init config is given if init_cfg: init_keys = ( - 'network_address', 'network_port', 'storage_backend', - 'storage_create_device', 'storage_create_loop', - 'storage_pool', 'trust_password') - subp.subp(['lxd', 'waitready', '--timeout=300']) - cmd = ['lxd', 'init', '--auto'] + "network_address", + "network_port", + "storage_backend", + "storage_create_device", + "storage_create_loop", + "storage_pool", + "trust_password", + ) + subp.subp(["lxd", "waitready", "--timeout=300"]) + cmd = ["lxd", "init", "--auto"] for k in init_keys: if init_cfg.get(k): - cmd.extend(["--%s=%s" % - (k.replace('_', '-'), str(init_cfg[k]))]) + cmd.extend( + ["--%s=%s" % (k.replace("_", "-"), str(init_cfg[k]))] + ) subp.subp(cmd) # Set up lxd-bridge if bridge config is given dconf_comm = "debconf-communicate" if bridge_cfg: net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) - if os.path.exists("/etc/default/lxd-bridge") \ - and subp.which(dconf_comm): + if os.path.exists("/etc/default/lxd-bridge") and subp.which( + dconf_comm + ): # Bridge configured through packaging debconf = bridge_to_debconf(bridge_cfg) @@ -126,39 +139,47 @@ def handle(name, cfg, cloud, log, args): # Update debconf database try: log.debug("Setting lxd debconf via " + dconf_comm) - data = "\n".join(["set %s %s" % (k, v) - for k, v in debconf.items()]) + "\n" - subp.subp(['debconf-communicate'], data) + data = ( + "\n".join( + ["set %s %s" % (k, v) for k, v in debconf.items()] + ) + + "\n" + ) + subp.subp(["debconf-communicate"], data) except Exception: - util.logexc(log, "Failed to run '%s' for lxd with" % - dconf_comm) + util.logexc( + log, "Failed to run '%s' for lxd with" % dconf_comm + ) # Remove the existing configuration file (forces re-generation) util.del_file("/etc/default/lxd-bridge") # Run reconfigure log.debug("Running dpkg-reconfigure for lxd") - subp.subp(['dpkg-reconfigure', 'lxd', - '--frontend=noninteractive']) + subp.subp(["dpkg-reconfigure", "lxd", "--frontend=noninteractive"]) else: # Built-in LXD bridge support cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) maybe_cleanup_default( - net_name=net_name, did_init=bool(init_cfg), - create=bool(cmd_create), attach=bool(cmd_attach)) + net_name=net_name, + did_init=bool(init_cfg), + create=bool(cmd_create), + attach=bool(cmd_attach), + ) if cmd_create: - log.debug("Creating lxd bridge: %s" % - " ".join(cmd_create)) + log.debug("Creating lxd bridge: %s" % " ".join(cmd_create)) _lxc(cmd_create) if cmd_attach: - log.debug("Setting up default lxd bridge: %s" % - " ".join(cmd_attach)) + log.debug( + "Setting up default lxd bridge: %s" % " ".join(cmd_attach) + ) _lxc(cmd_attach) elif bridge_cfg: 
raise RuntimeError( - "Unable to configure lxd bridge without %s." + dconf_comm) + "Unable to configure lxd bridge without %s." + dconf_comm + ) def bridge_to_debconf(bridge_cfg): @@ -180,33 +201,32 @@ def bridge_to_debconf(bridge_cfg): if bridge_cfg.get("ipv4_address"): debconf["lxd/bridge-ipv4"] = "true" - debconf["lxd/bridge-ipv4-address"] = \ - bridge_cfg.get("ipv4_address") - debconf["lxd/bridge-ipv4-netmask"] = \ - bridge_cfg.get("ipv4_netmask") - debconf["lxd/bridge-ipv4-dhcp-first"] = \ - bridge_cfg.get("ipv4_dhcp_first") - debconf["lxd/bridge-ipv4-dhcp-last"] = \ - bridge_cfg.get("ipv4_dhcp_last") - debconf["lxd/bridge-ipv4-dhcp-leases"] = \ - bridge_cfg.get("ipv4_dhcp_leases") - debconf["lxd/bridge-ipv4-nat"] = \ - bridge_cfg.get("ipv4_nat", "true") + debconf["lxd/bridge-ipv4-address"] = bridge_cfg.get("ipv4_address") + debconf["lxd/bridge-ipv4-netmask"] = bridge_cfg.get("ipv4_netmask") + debconf["lxd/bridge-ipv4-dhcp-first"] = bridge_cfg.get( + "ipv4_dhcp_first" + ) + debconf["lxd/bridge-ipv4-dhcp-last"] = bridge_cfg.get( + "ipv4_dhcp_last" + ) + debconf["lxd/bridge-ipv4-dhcp-leases"] = bridge_cfg.get( + "ipv4_dhcp_leases" + ) + debconf["lxd/bridge-ipv4-nat"] = bridge_cfg.get("ipv4_nat", "true") if bridge_cfg.get("ipv6_address"): debconf["lxd/bridge-ipv6"] = "true" - debconf["lxd/bridge-ipv6-address"] = \ - bridge_cfg.get("ipv6_address") - debconf["lxd/bridge-ipv6-netmask"] = \ - bridge_cfg.get("ipv6_netmask") - debconf["lxd/bridge-ipv6-nat"] = \ - bridge_cfg.get("ipv6_nat", "false") + debconf["lxd/bridge-ipv6-address"] = bridge_cfg.get("ipv6_address") + debconf["lxd/bridge-ipv6-netmask"] = bridge_cfg.get("ipv6_netmask") + debconf["lxd/bridge-ipv6-nat"] = bridge_cfg.get( + "ipv6_nat", "false" + ) if bridge_cfg.get("domain"): debconf["lxd/bridge-domain"] = bridge_cfg.get("domain") else: - raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode")) return debconf @@ -217,37 +237,41 @@ def bridge_to_cmd(bridge_cfg): bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) cmd_create = [] - cmd_attach = ["network", "attach-profile", bridge_name, - "default", "eth0"] + cmd_attach = ["network", "attach-profile", bridge_name, "default", "eth0"] if bridge_cfg.get("mode") == "existing": return None, cmd_attach if bridge_cfg.get("mode") != "new": - raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode")) cmd_create = ["network", "create", bridge_name] if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): - cmd_create.append("ipv4.address=%s/%s" % - (bridge_cfg.get("ipv4_address"), - bridge_cfg.get("ipv4_netmask"))) + cmd_create.append( + "ipv4.address=%s/%s" + % (bridge_cfg.get("ipv4_address"), bridge_cfg.get("ipv4_netmask")) + ) if bridge_cfg.get("ipv4_nat", "true") == "true": cmd_create.append("ipv4.nat=true") - if bridge_cfg.get("ipv4_dhcp_first") and \ - bridge_cfg.get("ipv4_dhcp_last"): - dhcp_range = "%s-%s" % (bridge_cfg.get("ipv4_dhcp_first"), - bridge_cfg.get("ipv4_dhcp_last")) + if bridge_cfg.get("ipv4_dhcp_first") and bridge_cfg.get( + "ipv4_dhcp_last" + ): + dhcp_range = "%s-%s" % ( + bridge_cfg.get("ipv4_dhcp_first"), + bridge_cfg.get("ipv4_dhcp_last"), + ) cmd_create.append("ipv4.dhcp.ranges=%s" % dhcp_range) else: cmd_create.append("ipv4.address=none") if bridge_cfg.get("ipv6_address") and bridge_cfg.get("ipv6_netmask"): - cmd_create.append("ipv6.address=%s/%s" % - 
(bridge_cfg.get("ipv6_address"), - bridge_cfg.get("ipv6_netmask"))) + cmd_create.append( + "ipv6.address=%s/%s" + % (bridge_cfg.get("ipv6_address"), bridge_cfg.get("ipv6_netmask")) + ) if bridge_cfg.get("ipv6_nat", "false") == "true": cmd_create.append("ipv6.nat=true") @@ -262,14 +286,17 @@ def bridge_to_cmd(bridge_cfg): def _lxc(cmd): - env = {'LC_ALL': 'C', - 'HOME': os.environ.get('HOME', '/root'), - 'USER': os.environ.get('USER', 'root')} - subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) + env = { + "LC_ALL": "C", + "HOME": os.environ.get("HOME", "/root"), + "USER": os.environ.get("USER", "root"), + } + subp.subp(["lxc"] + list(cmd) + ["--force-local"], update_env=env) -def maybe_cleanup_default(net_name, did_init, create, attach, - profile="default", nic_name="eth0"): +def maybe_cleanup_default( + net_name, did_init, create, attach, profile="default", nic_name="eth0" +): """Newer versions of lxc (3.0.1+) create a lxdbr0 network when 'lxd init --auto' is run. Older versions did not. @@ -306,4 +333,5 @@ def maybe_cleanup_default(net_name, did_init, create, attach, raise e LOG.debug(msg, nic_name, profile, fail_assume_enoent) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index 41ea4fc9..1b0158ec 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -56,18 +56,21 @@ import io from configobj import ConfigObj from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" -SERVER_CFG = '/etc/mcollective/server.cfg' +SERVER_CFG = "/etc/mcollective/server.cfg" LOG = logging.getLogger(__name__) -def configure(config, server_cfg=SERVER_CFG, - pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE): +def configure( + config, + server_cfg=SERVER_CFG, + pubcert_file=PUBCERT_FILE, + pricert_file=PRICERT_FILE, +): # Read server.cfg (if it exists) values from the # original file in order to be able to mix the rest up. 
try: @@ -77,20 +80,20 @@ def configure(config, server_cfg=SERVER_CFG, if e.errno != errno.ENOENT: raise else: - LOG.debug("Did not find file %s (starting with an empty" - " config)", server_cfg) + LOG.debug( + "Did not find file %s (starting with an empty config)", + server_cfg, + ) mcollective_config = ConfigObj() for (cfg_name, cfg) in config.items(): - if cfg_name == 'public-cert': + if cfg_name == "public-cert": util.write_file(pubcert_file, cfg, mode=0o644) - mcollective_config[ - 'plugin.ssl_server_public'] = pubcert_file - mcollective_config['securityprovider'] = 'ssl' - elif cfg_name == 'private-cert': + mcollective_config["plugin.ssl_server_public"] = pubcert_file + mcollective_config["securityprovider"] = "ssl" + elif cfg_name == "private-cert": util.write_file(pricert_file, cfg, mode=0o600) - mcollective_config[ - 'plugin.ssl_server_private'] = pricert_file - mcollective_config['securityprovider'] = 'ssl' + mcollective_config["plugin.ssl_server_private"] = pricert_file + mcollective_config["securityprovider"] = "ssl" else: if isinstance(cfg, str): # Just set it in the 'main' section @@ -126,21 +129,24 @@ def configure(config, server_cfg=SERVER_CFG, def handle(name, cfg, cloud, log, _args): # If there isn't a mcollective key in the configuration don't do anything - if 'mcollective' not in cfg: - log.debug(("Skipping module named %s, " - "no 'mcollective' key in configuration"), name) + if "mcollective" not in cfg: + log.debug( + "Skipping module named %s, no 'mcollective' key in configuration", + name, + ) return - mcollective_cfg = cfg['mcollective'] + mcollective_cfg = cfg["mcollective"] # Start by installing the mcollective package ... cloud.distro.install_packages(("mcollective",)) # ... and then update the mcollective configuration - if 'conf' in mcollective_cfg: - configure(config=mcollective_cfg['conf']) + if "conf" in mcollective_cfg: + configure(config=mcollective_cfg["conf"]) # restart mcollective to handle updated config - subp.subp(['service', 'mcollective', 'restart'], capture=False) + subp.subp(["service", "mcollective", "restart"], capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index 79bcc27d..4fafb4af 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -29,16 +29,14 @@ false`` in config. 
import os import shutil -from cloudinit import helpers -from cloudinit import util - +from cloudinit import helpers, util from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS def _migrate_canon_sems(cloud): - paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem')) + paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem")) am_adjusted = 0 for sem_path in paths: if not sem_path or not os.path.exists(sem_path): @@ -57,12 +55,12 @@ def _migrate_canon_sems(cloud): def _migrate_legacy_sems(cloud, log): legacy_adjust = { - 'apt-update-upgrade': [ - 'apt-configure', - 'package-update-upgrade-install', + "apt-update-upgrade": [ + "apt-configure", + "package-update-upgrade-install", ], } - paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem')) + paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem")) for sem_path in paths: if not sem_path or not os.path.exists(sem_path): continue @@ -78,8 +76,9 @@ def _migrate_legacy_sems(cloud, log): util.del_file(os.path.join(sem_path, p)) (_name, freq) = os.path.splitext(p) for m in migrate_to: - log.debug("Migrating %s => %s with the same frequency", - p, m) + log.debug( + "Migrating %s => %s with the same frequency", p, m + ) with sem_helper.lock(m, freq): pass @@ -90,8 +89,10 @@ def handle(name, cfg, cloud, log, _args): log.debug("Skipping module named %s, migration disabled", name) return sems_moved = _migrate_canon_sems(cloud) - log.debug("Migrated %s semaphore files to there canonicalized names", - sems_moved) + log.debug( + "Migrated %s semaphore files to there canonicalized names", sems_moved + ) _migrate_legacy_sems(cloud, log) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index eeb008d2..ec2e46ff 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -62,15 +62,12 @@ swap file is created. 
maxsize: """ -from string import whitespace - import logging import os import re +from string import whitespace -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, type_utils, util # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0 DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" @@ -105,21 +102,25 @@ def is_network_device(name): def _get_nth_partition_for_device(device_path, partition_number): - potential_suffixes = [str(partition_number), 'p%s' % (partition_number,), - '-part%s' % (partition_number,)] + potential_suffixes = [ + str(partition_number), + "p%s" % (partition_number,), + "-part%s" % (partition_number,), + ] for suffix in potential_suffixes: - potential_partition_device = '%s%s' % (device_path, suffix) + potential_partition_device = "%s%s" % (device_path, suffix) if os.path.exists(potential_partition_device): return potential_partition_device return None def _is_block_device(device_path, partition_path=None): - device_name = os.path.realpath(device_path).split('/')[-1] - sys_path = os.path.join('/sys/block/', device_name) + device_name = os.path.realpath(device_path).split("/")[-1] + sys_path = os.path.join("/sys/block/", device_name) if partition_path is not None: sys_path = os.path.join( - sys_path, os.path.realpath(partition_path).split('/')[-1]) + sys_path, os.path.realpath(partition_path).split("/")[-1] + ) return os.path.exists(sys_path) @@ -159,8 +160,9 @@ def sanitize_devname(startname, transformer, log, aliases=None): if partition_number is None: partition_path = _get_nth_partition_for_device(device_path, 1) else: - partition_path = _get_nth_partition_for_device(device_path, - partition_number) + partition_path = _get_nth_partition_for_device( + device_path, partition_number + ) if partition_path is None: return None @@ -174,12 +176,12 @@ def sanitize_devname(startname, transformer, log, aliases=None): def suggested_swapsize(memsize=None, maxsize=None, fsys=None): # make a suggestion on the size of swap for this system. 
if memsize is None: - memsize = util.read_meminfo()['total'] + memsize = util.read_meminfo()["total"] GB = 2 ** 30 sugg_max = 8 * GB - info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize} + info = {"avail": "na", "max_in": maxsize, "mem": memsize} if fsys is None and maxsize is None: # set max to 8GB default if no filesystem given @@ -187,18 +189,18 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): elif fsys: statvfs = os.statvfs(fsys) avail = statvfs.f_frsize * statvfs.f_bfree - info['avail'] = avail + info["avail"] = avail if maxsize is None: # set to 25% of filesystem space maxsize = min(int(avail / 4), sugg_max) - elif maxsize > ((avail * .9)): + elif maxsize > ((avail * 0.9)): # set to 90% of available disk space - maxsize = int(avail * .9) + maxsize = int(avail * 0.9) elif maxsize is None: maxsize = sugg_max - info['max'] = maxsize + info["max"] = maxsize formulas = [ # < 1G: swap = double memory @@ -226,7 +228,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): if size is not None: size = maxsize - info['size'] = size + info["size"] = size MB = 2 ** 20 pinfo = {} @@ -236,9 +238,14 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): else: pinfo[k] = v - LOG.debug("suggest %s swap for %s memory with '%s'" - " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'], - pinfo['avail'], pinfo['max_in'], pinfo['max']) + LOG.debug( + "suggest %s swap for %s memory with '%s' disk given max=%s [max=%s]'", + pinfo["size"], + pinfo["mem"], + pinfo["avail"], + pinfo["max_in"], + pinfo["max"], + ) return size @@ -248,14 +255,23 @@ def create_swapfile(fname: str, size: str) -> None: errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s" def create_swap(fname, size, method): - LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'", - fname, fstype, method) + LOG.debug( + "Creating swapfile in '%s' on fstype '%s' using '%s'", + fname, + fstype, + method, + ) if method == "fallocate": - cmd = ['fallocate', '-l', '%sM' % size, fname] + cmd = ["fallocate", "-l", "%sM" % size, fname] elif method == "dd": - cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M', - 'count=%s' % size] + cmd = [ + "dd", + "if=/dev/zero", + "of=%s" % fname, + "bs=1M", + "count=%s" % size, + ] try: subp.subp(cmd, capture=True) @@ -269,8 +285,9 @@ def create_swapfile(fname: str, size: str) -> None: fstype = util.get_mount_info(swap_dir)[1] - if (fstype == "xfs" and - util.kernel_version() < (4, 18)) or fstype == "btrfs": + if ( + fstype == "xfs" and util.kernel_version() < (4, 18) + ) or fstype == "btrfs": create_swap(fname, size, "dd") else: try: @@ -282,7 +299,7 @@ def create_swapfile(fname: str, size: str) -> None: if os.path.exists(fname): util.chmod(fname, 0o600) try: - subp.subp(['mkswap', fname]) + subp.subp(["mkswap", fname]) except subp.ProcessExecutionError: util.del_file(fname) raise @@ -297,37 +314,42 @@ def setup_swapfile(fname, size=None, maxsize=None): swap_dir = os.path.dirname(fname) if str(size).lower() == "auto": try: - memsize = util.read_meminfo()['total'] + memsize = util.read_meminfo()["total"] except IOError: LOG.debug("Not creating swap: failed to read meminfo") return util.ensure_dir(swap_dir) - size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize, - memsize=memsize) + size = suggested_swapsize( + fsys=swap_dir, maxsize=maxsize, memsize=memsize + ) mibsize = str(int(size / (2 ** 20))) if not size: LOG.debug("Not creating swap: suggested size was 0") return - util.log_time(LOG.debug, msg="Setting up swap file", 
func=create_swapfile, - args=[fname, mibsize]) + util.log_time( + LOG.debug, + msg="Setting up swap file", + func=create_swapfile, + args=[fname, mibsize], + ) return fname def handle_swapcfg(swapcfg): """handle the swap config, calling setup_swap if necessary. - return None or (filename, size) + return None or (filename, size) """ if not isinstance(swapcfg, dict): LOG.warning("input for swap config was not a dict.") return None - fname = swapcfg.get('filename', '/swap.img') - size = swapcfg.get('size', 0) - maxsize = swapcfg.get('maxsize', None) + fname = swapcfg.get("filename", "/swap.img") + size = swapcfg.get("size", 0) + maxsize = swapcfg.get("maxsize", None) if not (size and fname): LOG.debug("no need to setup swap") @@ -335,8 +357,10 @@ def handle_swapcfg(swapcfg): if os.path.exists(fname): if not os.path.exists("/proc/swaps"): - LOG.debug("swap file %s exists, but no /proc/swaps exists, " - "being safe", fname) + LOG.debug( + "swap file %s exists, but no /proc/swaps exists, being safe", + fname, + ) return fname try: for line in util.load_file("/proc/swaps").splitlines(): @@ -345,8 +369,9 @@ def handle_swapcfg(swapcfg): return fname LOG.debug("swap file %s exists, but not in /proc/swaps", fname) except Exception: - LOG.warning("swap file %s exists. Error reading /proc/swaps", - fname) + LOG.warning( + "swap file %s exists. Error reading /proc/swaps", fname + ) return fname try: @@ -373,8 +398,10 @@ def handle(_name, cfg, cloud, log, _args): defvals = cfg.get("mount_default_fields", defvals) # these are our default set of mounts - defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], - ["swap", "none", "swap", "sw", "0", "0"]] + defmnts = [ + ["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], + ["swap", "none", "swap", "sw", "0", "0"], + ] cfgmnt = [] if "mounts" in cfg: @@ -404,13 +431,17 @@ def handle(_name, cfg, cloud, log, _args): for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): - log.warning("Mount option %s not a list, got a %s instead", - (i + 1), type_utils.obj_name(cfgmnt[i])) + log.warning( + "Mount option %s not a list, got a %s instead", + (i + 1), + type_utils.obj_name(cfgmnt[i]), + ) continue start = str(cfgmnt[i][0]) - sanitized = sanitize_devname(start, cloud.device_name_to_device, log, - aliases=device_aliases) + sanitized = sanitize_devname( + start, cloud.device_name_to_device, log, aliases=device_aliases + ) if sanitized != start: log.debug("changed %s => %s" % (start, sanitized)) @@ -418,8 +449,11 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Ignoring nonexistent named mount %s", start) continue elif sanitized in fstab_devs: - log.info("Device %s already defined in fstab: %s", - sanitized, fstab_devs[sanitized]) + log.info( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) continue cfgmnt[i][0] = sanitized @@ -452,8 +486,9 @@ def handle(_name, cfg, cloud, log, _args): # entry has the same device name for defmnt in defmnts: start = defmnt[0] - sanitized = sanitize_devname(start, cloud.device_name_to_device, log, - aliases=device_aliases) + sanitized = sanitize_devname( + start, cloud.device_name_to_device, log, aliases=device_aliases + ) if sanitized != start: log.debug("changed default device %s => %s" % (start, sanitized)) @@ -461,8 +496,11 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Ignoring nonexistent default named mount %s", start) continue elif sanitized in fstab_devs: - log.debug("Device %s already defined in fstab: %s", - 
sanitized, fstab_devs[sanitized]) + log.debug( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) continue defmnt[0] = sanitized @@ -474,8 +512,7 @@ def handle(_name, cfg, cloud, log, _args): break if cfgmnt_has: - log.debug(("Not including %s, already" - " previously included"), start) + log.debug("Not including %s, already previously included", start) continue cfgmnt.append(defmnt) @@ -488,7 +525,7 @@ def handle(_name, cfg, cloud, log, _args): else: actlist.append(x) - swapret = handle_swapcfg(cfg.get('swap', {})) + swapret = handle_swapcfg(cfg.get("swap", {})) if swapret: actlist.append([swapret, "none", "swap", "sw", "0", "0"]) @@ -507,10 +544,11 @@ def handle(_name, cfg, cloud, log, _args): needswap = True if line[1].startswith("/"): dirs.append(line[1]) - cc_lines.append('\t'.join(line)) + cc_lines.append("\t".join(line)) - mount_points = [v['mountpoint'] for k, v in util.mounts().items() - if 'mountpoint' in v] + mount_points = [ + v["mountpoint"] for k, v in util.mounts().items() if "mountpoint" in v + ] for d in dirs: try: util.ensure_dir(d) @@ -525,11 +563,12 @@ def handle(_name, cfg, cloud, log, _args): sadds = [WS.sub(" ", n) for n in cc_lines] sdrops = [WS.sub(" ", n) for n in fstab_removed] - sops = (["- " + drop for drop in sdrops if drop not in sadds] + - ["+ " + add for add in sadds if add not in sdrops]) + sops = ["- " + drop for drop in sdrops if drop not in sadds] + [ + "+ " + add for add in sadds if add not in sdrops + ] fstab_lines.extend(cc_lines) - contents = "%s\n" % ('\n'.join(fstab_lines)) + contents = "%s\n" % "\n".join(fstab_lines) util.write_file(FSTAB_PATH, contents) activate_cmds = [] @@ -549,7 +588,7 @@ def handle(_name, cfg, cloud, log, _args): fmt = "Activating swap and mounts with: %s" for cmd in activate_cmds: - fmt = "Activate mounts: %s:" + ' '.join(cmd) + fmt = "Activate mounts: %s:" + " ".join(cmd) try: subp.subp(cmd) log.debug(fmt, "PASS") @@ -557,4 +596,5 @@ def handle(_name, cfg, cloud, log, _args): log.warning(fmt, "FAIL") util.logexc(log, fmt, "FAIL") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index c55d5d86..a31da9bb 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -11,124 +11,132 @@ import os from textwrap import dedent from cloudinit import log as logging -from cloudinit import temp_utils -from cloudinit import templater -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, temp_utils, templater, type_utils, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) frequency = PER_INSTANCE -NTP_CONF = '/etc/ntp.conf' +NTP_CONF = "/etc/ntp.conf" NR_POOL_SERVERS = 4 -distros = ['almalinux', 'alpine', 'centos', 'cloudlinux', 'debian', - 'eurolinux', 'fedora', 'miraclelinux', 'openEuler', 'opensuse', - 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] +distros = [ + "almalinux", + "alpine", + "centos", + "cloudlinux", + "debian", + "eurolinux", + "fedora", + "miraclelinux", + "openEuler", + "opensuse", + "photon", + "rhel", + "rocky", + "sles", + "ubuntu", + "virtuozzo", +] NTP_CLIENT_CONFIG = { - 'chrony': { - 'check_exe': 'chronyd', - 'confpath': '/etc/chrony.conf', - 'packages': ['chrony'], - 'service_name': 'chrony', - 'template_name': 'chrony.conf.{distro}', - 'template': None, + "chrony": { + "check_exe": "chronyd", + "confpath": "/etc/chrony.conf", + 
"packages": ["chrony"], + "service_name": "chrony", + "template_name": "chrony.conf.{distro}", + "template": None, }, - 'ntp': { - 'check_exe': 'ntpd', - 'confpath': NTP_CONF, - 'packages': ['ntp'], - 'service_name': 'ntp', - 'template_name': 'ntp.conf.{distro}', - 'template': None, + "ntp": { + "check_exe": "ntpd", + "confpath": NTP_CONF, + "packages": ["ntp"], + "service_name": "ntp", + "template_name": "ntp.conf.{distro}", + "template": None, }, - 'ntpdate': { - 'check_exe': 'ntpdate', - 'confpath': NTP_CONF, - 'packages': ['ntpdate'], - 'service_name': 'ntpdate', - 'template_name': 'ntp.conf.{distro}', - 'template': None, + "ntpdate": { + "check_exe": "ntpdate", + "confpath": NTP_CONF, + "packages": ["ntpdate"], + "service_name": "ntpdate", + "template_name": "ntp.conf.{distro}", + "template": None, }, - 'systemd-timesyncd': { - 'check_exe': '/lib/systemd/systemd-timesyncd', - 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf', - 'packages': [], - 'service_name': 'systemd-timesyncd', - 'template_name': 'timesyncd.conf', - 'template': None, + "systemd-timesyncd": { + "check_exe": "/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf.d/cloud-init.conf", + "packages": [], + "service_name": "systemd-timesyncd", + "template_name": "timesyncd.conf", + "template": None, }, } # This is Distro-specific configuration overrides of the base config DISTRO_CLIENT_CONFIG = { - 'alpine': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', - 'service_name': 'chronyd', + "alpine": { + "chrony": { + "confpath": "/etc/chrony/chrony.conf", + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'packages': [], - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "packages": [], + "service_name": "ntpd", }, }, - 'debian': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', + "debian": { + "chrony": { + "confpath": "/etc/chrony/chrony.conf", }, }, - 'opensuse': { - 'chrony': { - 'service_name': 'chronyd', + "opensuse": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "service_name": "ntpd", }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", }, }, - 'photon': { - 'chrony': { - 'service_name': 'chronyd', + "photon": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'service_name': 'ntpd', - 'confpath': '/etc/ntp.conf' - }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', - 'confpath': '/etc/systemd/timesyncd.conf', + "ntp": {"service_name": "ntpd", "confpath": "/etc/ntp.conf"}, + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf", }, }, - 'rhel': { - 'ntp': { - 'service_name': 'ntpd', + "rhel": { + "ntp": { + "service_name": "ntpd", }, - 'chrony': { - 'service_name': 'chronyd', + "chrony": { + "service_name": "chronyd", }, }, - 'sles': { - 'chrony': { - 'service_name': 'chronyd', + "sles": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "service_name": "ntpd", }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", }, }, - 'ubuntu': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', + "ubuntu": 
{ + "chrony": { + "confpath": "/etc/chrony/chrony.conf", }, }, } @@ -141,10 +149,11 @@ DISTRO_CLIENT_CONFIG = { # configuration. meta = { - 'id': 'cc_ntp', - 'name': 'NTP', - 'title': 'enable and configure ntp', - 'description': dedent("""\ + "id": "cc_ntp", + "name": "NTP", + "title": "enable and configure ntp", + "description": dedent( + """\ Handle ntp configuration. If ntp is not installed on the system and ntp configuration is specified, ntp will be installed. If there is a default ntp config file in the image or one is present in the @@ -152,16 +161,20 @@ meta = { appended to the filename before any changes are made. A list of ntp pools and ntp servers can be provided under the ``ntp`` config key. If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used - in the format ``{0-3}.{distro}.pool.ntp.org``."""), - 'distros': distros, - 'examples': [ - dedent("""\ + in the format ``{0-3}.{distro}.pool.ntp.org``.""" + ), + "distros": distros, + "examples": [ + dedent( + """\ # Override ntp with chrony configuration on Ubuntu ntp: enabled: true ntp_client: chrony # Uses cloud-init default chrony configuration - """), - dedent("""\ + """ + ), + dedent( + """\ # Provide a custom ntp client configuration ntp: enabled: true @@ -188,120 +201,137 @@ meta = { servers: - ntp.server.local - ntp.ubuntu.com - - 192.168.23.2""")], - 'frequency': PER_INSTANCE, + - 192.168.23.2""" + ), + ], + "frequency": PER_INSTANCE, } schema = { - 'type': 'object', - 'properties': { - 'ntp': { - 'type': ['object', 'null'], - 'properties': { - 'pools': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'hostname' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "type": "object", + "properties": { + "ntp": { + "type": ["object", "null"], + "properties": { + "pools": { + "type": "array", + "items": {"type": "string", "format": "hostname"}, + "uniqueItems": True, + "description": dedent( + """\ List of ntp pools. If both pools and servers are empty, 4 default pool servers will be provided of the format ``{0-3}.{distro}.pool.ntp.org``. NOTE: for Alpine Linux when using the Busybox NTP client this setting will be ignored due to the limited - functionality of Busybox's ntpd.""") + functionality of Busybox's ntpd.""" + ), }, - 'servers': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'hostname' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "servers": { + "type": "array", + "items": {"type": "string", "format": "hostname"}, + "uniqueItems": True, + "description": dedent( + """\ List of ntp servers. If both pools and servers are empty, 4 default pool servers will be provided with - the format ``{0-3}.{distro}.pool.ntp.org``.""") + the format ``{0-3}.{distro}.pool.ntp.org``.""" + ), }, - 'ntp_client': { - 'type': 'string', - 'default': 'auto', - 'description': dedent("""\ + "ntp_client": { + "type": "string", + "default": "auto", + "description": dedent( + """\ Name of an NTP client to use to configure system NTP. When unprovided or 'auto' the default client preferred by the distribution will be used. The following built-in client names can be used to override existing configuration defaults: chrony, ntp, ntpdate, - systemd-timesyncd."""), + systemd-timesyncd.""" + ), }, - 'enabled': { - 'type': 'boolean', - 'default': True, - 'description': dedent("""\ + "enabled": { + "type": "boolean", + "default": True, + "description": dedent( + """\ Attempt to enable ntp clients if set to True. 
If set to False, ntp client will not be configured or - installed"""), + installed""" + ), }, - 'config': { - 'description': dedent("""\ + "config": { + "description": dedent( + """\ Configuration settings or overrides for the - ``ntp_client`` specified."""), - 'type': ['object'], - 'properties': { - 'confpath': { - 'type': 'string', - 'description': dedent("""\ + ``ntp_client`` specified.""" + ), + "type": ["object"], + "properties": { + "confpath": { + "type": "string", + "description": dedent( + """\ The path to where the ``ntp_client`` - configuration is written."""), + configuration is written.""" + ), }, - 'check_exe': { - 'type': 'string', - 'description': dedent("""\ + "check_exe": { + "type": "string", + "description": dedent( + """\ The executable name for the ``ntp_client``. For example, ntp service ``check_exe`` is - 'ntpd' because it runs the ntpd binary."""), + 'ntpd' because it runs the ntpd binary.""" + ), }, - 'packages': { - 'type': 'array', - 'items': { - 'type': 'string', + "packages": { + "type": "array", + "items": { + "type": "string", }, - 'uniqueItems': True, - 'description': dedent("""\ + "uniqueItems": True, + "description": dedent( + """\ List of packages needed to be installed for the - selected ``ntp_client``."""), + selected ``ntp_client``.""" + ), }, - 'service_name': { - 'type': 'string', - 'description': dedent("""\ + "service_name": { + "type": "string", + "description": dedent( + """\ The systemd or sysvinit service name used to start and stop the ``ntp_client`` - service."""), + service.""" + ), }, - 'template': { - 'type': 'string', - 'description': dedent("""\ + "template": { + "type": "string", + "description": dedent( + """\ Inline template allowing users to define their own ``ntp_client`` configuration template. The value must start with '## template:jinja' to enable use of templating support. - """), + """ + ), }, }, # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override # of builtin client values. 
- 'minProperties': 1, # If we have config, define something - 'additionalProperties': False + "minProperties": 1, # If we have config, define something + "additionalProperties": False, }, }, - 'additionalProperties': False + "additionalProperties": False, } - } + }, } -REQUIRED_NTP_CONFIG_KEYS = frozenset([ - 'check_exe', 'confpath', 'packages', 'service_name']) +REQUIRED_NTP_CONFIG_KEYS = frozenset( + ["check_exe", "confpath", "packages", "service_name"] +) __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -334,21 +364,23 @@ def select_ntp_client(ntp_client, distro): distro_cfg = distro_ntp_client_configs(distro.name) # user specified client, return its config - if ntp_client and ntp_client != 'auto': - LOG.debug('Selected NTP client "%s" via user-data configuration', - ntp_client) + if ntp_client and ntp_client != "auto": + LOG.debug( + 'Selected NTP client "%s" via user-data configuration', ntp_client + ) return distro_cfg.get(ntp_client, {}) # default to auto if unset in distro - distro_ntp_client = distro.get_option('ntp_client', 'auto') + distro_ntp_client = distro.get_option("ntp_client", "auto") clientcfg = {} if distro_ntp_client == "auto": for client in distro.preferred_ntp_clients: cfg = distro_cfg.get(client) - if subp.which(cfg.get('check_exe')): - LOG.debug('Selected NTP client "%s", already installed', - client) + if subp.which(cfg.get("check_exe")): + LOG.debug( + 'Selected NTP client "%s", already installed', client + ) clientcfg = cfg break @@ -356,11 +388,14 @@ def select_ntp_client(ntp_client, distro): client = distro.preferred_ntp_clients[0] LOG.debug( 'Selected distro preferred NTP client "%s", not yet installed', - client) + client, + ) clientcfg = distro_cfg.get(client) else: - LOG.debug('Selected NTP client "%s" via distro system config', - distro_ntp_client) + LOG.debug( + 'Selected NTP client "%s" via distro system config', + distro_ntp_client, + ) clientcfg = distro_cfg.get(distro_ntp_client, {}) return clientcfg @@ -378,7 +413,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"): if subp.which(check_exe): return if packages is None: - packages = ['ntp'] + packages = ["ntp"] install_func(packages) @@ -403,25 +438,34 @@ def generate_server_names(distro): names = [] pool_distro = distro - if distro == 'sles': + if distro == "sles": # For legal reasons x.pool.sles.ntp.org does not exist, # use the opensuse pool - pool_distro = 'opensuse' - elif distro == 'alpine' or distro == 'eurolinux': + pool_distro = "opensuse" + elif distro == "alpine" or distro == "eurolinux": # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist # so use general x.pool.ntp.org instead. The same applies to EuroLinux - pool_distro = '' + pool_distro = "" for x in range(0, NR_POOL_SERVERS): - names.append(".".join( - [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n])) + names.append( + ".".join( + [n for n in [str(x)] + [pool_distro] + ["pool.ntp.org"] if n] + ) + ) return names -def write_ntp_config_template(distro_name, service_name=None, servers=None, - pools=None, path=None, template_fn=None, - template=None): +def write_ntp_config_template( + distro_name, + service_name=None, + servers=None, + pools=None, + path=None, + template_fn=None, + template=None, +): """Render a ntp client configuration for the specified client. @param distro_name: string. The distro class name. 
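generate_server_names() above yields the four default pool entries, remapping or dropping the distro segment for the special cases called out in its comments. A minimal sketch of that naming rule:

def pool_names_sketch(distro, count=4):
    if distro == "sles":
        distro = "opensuse"        # no x.pool.sles.ntp.org for legal reasons
    elif distro in ("alpine", "eurolinux"):
        distro = ""                # no distro-specific pool exists
    # Empty segments are filtered out, so alpine gets x.pool.ntp.org.
    return [
        ".".join(part for part in (str(i), distro, "pool.ntp.org") if part)
        for i in range(count)
    ]

assert pool_names_sketch("ubuntu")[0] == "0.ubuntu.pool.ntp.org"
assert pool_names_sketch("alpine")[3] == "3.pool.ntp.org"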
@@ -444,27 +488,30 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None, if not pools: pools = [] - if (len(servers) == 0 and distro_name == 'alpine' and - service_name == 'ntpd'): + if ( + len(servers) == 0 + and distro_name == "alpine" + and service_name == "ntpd" + ): # Alpine's Busybox ntpd only understands "servers" configuration # and not "pool" configuration. servers = generate_server_names(distro_name) - LOG.debug( - 'Adding distro default ntp servers: %s', ','.join(servers)) + LOG.debug("Adding distro default ntp servers: %s", ",".join(servers)) elif len(servers) == 0 and len(pools) == 0: pools = generate_server_names(distro_name) LOG.debug( - 'Adding distro default ntp pool servers: %s', ','.join(pools)) + "Adding distro default ntp pool servers: %s", ",".join(pools) + ) if not path: - raise ValueError('Invalid value for path parameter') + raise ValueError("Invalid value for path parameter") if not template_fn and not template: - raise ValueError('Not template_fn or template provided') + raise ValueError("Not template_fn or template provided") - params = {'servers': servers, 'pools': pools} + params = {"servers": servers, "pools": pools} if template: - tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl") template_fn = tfile[1] # filepath is second item in tuple util.write_file(template_fn, content=template) @@ -487,50 +534,62 @@ def supplemental_schema_validation(ntp_config): errors = [] missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys())) if missing: - keys = ', '.join(sorted(missing)) + keys = ", ".join(sorted(missing)) errors.append( - 'Missing required ntp:config keys: {keys}'.format(keys=keys)) - elif not any([ntp_config.get('template'), - ntp_config.get('template_name')]): + "Missing required ntp:config keys: {keys}".format(keys=keys) + ) + elif not any( + [ntp_config.get("template"), ntp_config.get("template_name")] + ): errors.append( - 'Either ntp:config:template or ntp:config:template_name values' - ' are required') + "Either ntp:config:template or ntp:config:template_name values" + " are required" + ) for key, value in sorted(ntp_config.items()): - keypath = 'ntp:config:' + key - if key == 'confpath': + keypath = "ntp:config:" + key + if key == "confpath": if not all([value, isinstance(value, str)]): errors.append( - 'Expected a config file path {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) - elif key == 'packages': + "Expected a config file path {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) + elif key == "packages": if not isinstance(value, list): errors.append( - 'Expected a list of required package names for {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) - elif key in ('template', 'template_name'): + "Expected a list of required package names for {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) + elif key in ("template", "template_name"): if value is None: # Either template or template_name can be none continue if not isinstance(value, str): errors.append( - 'Expected a string type for {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) + "Expected a string type for {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) elif not isinstance(value, str): errors.append( - 'Expected a string type for {keypath}.' 
- ' Found ({value})'.format(keypath=keypath, value=value)) + "Expected a string type for {keypath}. Found ({value})".format( + keypath=keypath, value=value + ) + ) if errors: - raise ValueError(r'Invalid ntp configuration:\n{errors}'.format( - errors='\n'.join(errors))) + raise ValueError( + r"Invalid ntp configuration:\n{errors}".format( + errors="\n".join(errors) + ) + ) def handle(name, cfg, cloud, log, _args): """Enable and configure ntp.""" - if 'ntp' not in cfg: + if "ntp" not in cfg: LOG.debug( - "Skipping module named %s, not present or disabled by cfg", name) + "Skipping module named %s, not present or disabled by cfg", name + ) return - ntp_cfg = cfg['ntp'] + ntp_cfg = cfg["ntp"] if ntp_cfg is None: ntp_cfg = {} # Allow empty config which will install the package @@ -538,52 +597,61 @@ def handle(name, cfg, cloud, log, _args): if not isinstance(ntp_cfg, (dict)): raise RuntimeError( "'ntp' key existed in config, but not a dictionary type," - " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) + " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)) + ) validate_cloudconfig_schema(cfg, schema) # Allow users to explicitly enable/disable - enabled = ntp_cfg.get('enabled', True) + enabled = ntp_cfg.get("enabled", True) if util.is_false(enabled): LOG.debug("Skipping module named %s, disabled by cfg", name) return # Select which client is going to be used and get the configuration - ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'), - cloud.distro) + ntp_client_config = select_ntp_client( + ntp_cfg.get("ntp_client"), cloud.distro + ) # Allow user ntp config to override distro configurations ntp_client_config = util.mergemanydict( - [ntp_client_config, ntp_cfg.get('config', {})], reverse=True) + [ntp_client_config, ntp_cfg.get("config", {})], reverse=True + ) supplemental_schema_validation(ntp_client_config) - rename_ntp_conf(confpath=ntp_client_config.get('confpath')) + rename_ntp_conf(confpath=ntp_client_config.get("confpath")) template_fn = None - if not ntp_client_config.get('template'): - template_name = ( - ntp_client_config.get('template_name').replace('{distro}', - cloud.distro.name)) + if not ntp_client_config.get("template"): + template_name = ntp_client_config.get("template_name").replace( + "{distro}", cloud.distro.name + ) template_fn = cloud.get_template_filename(template_name) if not template_fn: - msg = ('No template found, not rendering %s' % - ntp_client_config.get('template_name')) + msg = ( + "No template found, not rendering %s" + % ntp_client_config.get("template_name") + ) raise RuntimeError(msg) - write_ntp_config_template(cloud.distro.name, - service_name=ntp_client_config.get( - 'service_name'), - servers=ntp_cfg.get('servers', []), - pools=ntp_cfg.get('pools', []), - path=ntp_client_config.get('confpath'), - template_fn=template_fn, - template=ntp_client_config.get('template')) - - install_ntp_client(cloud.distro.install_packages, - packages=ntp_client_config['packages'], - check_exe=ntp_client_config['check_exe']) + write_ntp_config_template( + cloud.distro.name, + service_name=ntp_client_config.get("service_name"), + servers=ntp_cfg.get("servers", []), + pools=ntp_cfg.get("pools", []), + path=ntp_client_config.get("confpath"), + template_fn=template_fn, + template=ntp_client_config.get("template"), + ) + + install_ntp_client( + cloud.distro.install_packages, + packages=ntp_client_config["packages"], + check_exe=ntp_client_config["check_exe"], + ) try: - cloud.distro.manage_service('reload', - 
ntp_client_config.get('service_name')) + cloud.distro.manage_service( + "reload", ntp_client_config.get("service_name") + ) except subp.ProcessExecutionError as e: LOG.exception("Failed to reload/start ntp service: %s", e) raise diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index 036baf85..14cdfab8 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -43,8 +43,7 @@ import os import time from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util REBOOT_FILE = "/var/run/reboot-required" REBOOT_CMD = ["/sbin/reboot"] @@ -68,17 +67,19 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): log.debug("Rebooted, but still running after %s seconds", int(elapsed)) # If we got here, not good elapsed = time.time() - start - raise RuntimeError(("Reboot did not happen" - " after %s seconds!") % (int(elapsed))) + raise RuntimeError( + "Reboot did not happen after %s seconds!" % (int(elapsed)) + ) def handle(_name, cfg, cloud, log, _args): # Handle the old style + new config names - update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update') - upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade') - reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required', - 'package_reboot_if_required') - pkglist = util.get_cfg_option_list(cfg, 'packages', []) + update = _multi_cfg_bool_get(cfg, "apt_update", "package_update") + upgrade = _multi_cfg_bool_get(cfg, "package_upgrade", "apt_upgrade") + reboot_if_required = _multi_cfg_bool_get( + cfg, "apt_reboot_if_required", "package_reboot_if_required" + ) + pkglist = util.get_cfg_option_list(cfg, "packages", []) errors = [] if update or len(pkglist) or upgrade: @@ -109,8 +110,9 @@ def handle(_name, cfg, cloud, log, _args): reboot_fn_exists = os.path.isfile(REBOOT_FILE) if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists: try: - log.warning("Rebooting after upgrade or install per " - "%s", REBOOT_FILE) + log.warning( + "Rebooting after upgrade or install per %s", REBOOT_FILE + ) # Flush the above warning + anything else out... logging.flushLoggers(log) _fire_reboot(log) @@ -119,8 +121,10 @@ def handle(_name, cfg, cloud, log, _args): errors.append(e) if len(errors): - log.warning("%s failed with exceptions, re-raising the last one", - len(errors)) + log.warning( + "%s failed with exceptions, re-raising the last one", len(errors) + ) raise errors[-1] + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 733c3910..cc1fe53e 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -41,22 +41,19 @@ keys to post. 
Available keys are: tries: 10 """ -from cloudinit import templater -from cloudinit import url_helper -from cloudinit import util - +from cloudinit import templater, url_helper, util from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE POST_LIST_ALL = [ - 'pub_key_dsa', - 'pub_key_rsa', - 'pub_key_ecdsa', - 'pub_key_ed25519', - 'instance_id', - 'hostname', - 'fqdn' + "pub_key_dsa", + "pub_key_rsa", + "pub_key_ecdsa", + "pub_key_ed25519", + "instance_id", + "hostname", + "fqdn", ] @@ -74,48 +71,58 @@ def handle(name, cfg, cloud, log, args): if len(args) != 0: ph_cfg = util.read_conf(args[0]) else: - if 'phone_home' not in cfg: - log.debug(("Skipping module named %s, " - "no 'phone_home' configuration found"), name) + if "phone_home" not in cfg: + log.debug( + "Skipping module named %s, " + "no 'phone_home' configuration found", + name, + ) return - ph_cfg = cfg['phone_home'] - - if 'url' not in ph_cfg: - log.warning(("Skipping module named %s, " - "no 'url' found in 'phone_home' configuration"), name) + ph_cfg = cfg["phone_home"] + + if "url" not in ph_cfg: + log.warning( + "Skipping module named %s, " + "no 'url' found in 'phone_home' configuration", + name, + ) return - url = ph_cfg['url'] - post_list = ph_cfg.get('post', 'all') - tries = ph_cfg.get('tries') + url = ph_cfg["url"] + post_list = ph_cfg.get("post", "all") + tries = ph_cfg.get("tries") try: tries = int(tries) except Exception: tries = 10 - util.logexc(log, "Configuration entry 'tries' is not an integer, " - "using %s instead", tries) + util.logexc( + log, + "Configuration entry 'tries' is not an integer, using %s instead", + tries, + ) if post_list == "all": post_list = POST_LIST_ALL all_keys = {} - all_keys['instance_id'] = cloud.get_instance_id() - all_keys['hostname'] = cloud.get_hostname() - all_keys['fqdn'] = cloud.get_hostname(fqdn=True) + all_keys["instance_id"] = cloud.get_instance_id() + all_keys["hostname"] = cloud.get_hostname() + all_keys["fqdn"] = cloud.get_hostname(fqdn=True) pubkeys = { - 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub', - 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub', - 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub', - 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub', + "pub_key_dsa": "/etc/ssh/ssh_host_dsa_key.pub", + "pub_key_rsa": "/etc/ssh/ssh_host_rsa_key.pub", + "pub_key_ecdsa": "/etc/ssh/ssh_host_ecdsa_key.pub", + "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub", } for (n, path) in pubkeys.items(): try: all_keys[n] = util.load_file(path) except Exception: - util.logexc(log, "%s: failed to open, can not phone home that " - "data!", path) + util.logexc( + log, "%s: failed to open, can not phone home that data!", path + ) submit_keys = {} for k in post_list: @@ -123,28 +130,37 @@ def handle(name, cfg, cloud, log, args): submit_keys[k] = all_keys[k] else: submit_keys[k] = None - log.warning(("Requested key %s from 'post'" - " configuration list not available"), k) + log.warning( + "Requested key %s from 'post'" + " configuration list not available", + k, + ) # Get them read to be posted real_submit_keys = {} for (k, v) in submit_keys.items(): if v is None: - real_submit_keys[k] = 'N/A' + real_submit_keys[k] = "N/A" else: real_submit_keys[k] = str(v) # Incase the url is parameterized url_params = { - 'INSTANCE_ID': all_keys['instance_id'], + "INSTANCE_ID": all_keys["instance_id"], } url = templater.render_string(url, url_params) try: url_helper.read_file_or_url( - url, data=real_submit_keys, retries=tries, sec_between=3, - 
ssl_details=util.fetch_ssl_details(cloud.paths)) + url, + data=real_submit_keys, + retries=tries, + sec_between=3, + ssl_details=util.fetch_ssl_details(cloud.paths), + ) except Exception: - util.logexc(log, "Failed to post phone home data to %s in %s tries", - url, tries) + util.logexc( + log, "Failed to post phone home data to %s in %s tries", url, tries + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 5780a7e9..d4eb68c0 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -58,9 +58,8 @@ import re import subprocess import time +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util frequency = PER_INSTANCE @@ -75,9 +74,9 @@ def givecmdline(pid): # PID COMM ARGS # 1 init /bin/init -- if util.is_FreeBSD(): - (output, _err) = subp.subp(['procstat', '-c', str(pid)]) + (output, _err) = subp.subp(["procstat", "-c", str(pid)]) line = output.splitlines()[1] - m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line) + m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line) return m.group(2) else: return util.load_file("/proc/%s/cmdline" % pid) @@ -106,8 +105,9 @@ def check_condition(cond, log=None): return False else: if log: - log.warning(pre + "unexpected exit %s. " % ret + - "do not apply change.") + log.warning( + pre + "unexpected exit %s. " % ret + "do not apply change." + ) return False except Exception as e: if log: @@ -138,16 +138,24 @@ def handle(_name, cfg, cloud, log, _args): devnull_fp = open(os.devnull, "w") - log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) + log.debug("After pid %s ends, will execute: %s" % (mypid, " ".join(args))) - util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, - condition, execmd, [args, devnull_fp]) + util.fork_cb( + run_after_pid_gone, + mypid, + cmdline, + timeout, + log, + condition, + execmd, + [args, devnull_fp], + ) def load_power_state(cfg, distro): # returns a tuple of shutdown_command, timeout # shutdown_command is None if no config found - pstate = cfg.get('power_state') + pstate = cfg.get("power_state") if pstate is None: return (None, None, None) @@ -155,22 +163,25 @@ def load_power_state(cfg, distro): if not isinstance(pstate, dict): raise TypeError("power_state is not a dict.") - modes_ok = ['halt', 'poweroff', 'reboot'] + modes_ok = ["halt", "poweroff", "reboot"] mode = pstate.get("mode") if mode not in distro.shutdown_options_map: raise TypeError( - "power_state[mode] required, must be one of: %s. found: '%s'." % - (','.join(modes_ok), mode)) + "power_state[mode] required, must be one of: %s. found: '%s'." + % (",".join(modes_ok), mode) + ) - args = distro.shutdown_command(mode=mode, - delay=pstate.get("delay", "now"), - message=pstate.get("message")) + args = distro.shutdown_command( + mode=mode, + delay=pstate.get("delay", "now"), + message=pstate.get("message"), + ) try: - timeout = float(pstate.get('timeout', 30.0)) + timeout = float(pstate.get("timeout", 30.0)) except ValueError as e: raise ValueError( - "failed to convert timeout '%s' to float." % pstate['timeout'] + "failed to convert timeout '%s' to float." 
% pstate["timeout"] ) from e condition = pstate.get("condition", True) @@ -186,8 +197,12 @@ def doexit(sysexit): def execmd(exe_args, output=None, data_in=None): ret = 1 try: - proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, - stdout=output, stderr=subprocess.STDOUT) + proc = subprocess.Popen( + exe_args, + stdin=subprocess.PIPE, + stdout=output, + stderr=subprocess.STDOUT, + ) proc.communicate(data_in) ret = proc.returncode except Exception: @@ -230,7 +245,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args): except Exception as e: fatal("Unexpected Exception: %s" % e) - time.sleep(.25) + time.sleep(0.25) if not msg: fatal("Unexpected error in run_after_pid_gone") @@ -246,4 +261,5 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args): func(*args) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index dc20fc44..f51f49bc 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -108,23 +108,20 @@ key (by default the agent will execute with the ``--test`` flag). import os import socket -import yaml from io import StringIO -from cloudinit import helpers -from cloudinit import subp -from cloudinit import temp_utils -from cloudinit import util -from cloudinit import url_helper +import yaml -AIO_INSTALL_URL = 'https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh' # noqa: E501 -PUPPET_AGENT_DEFAULT_ARGS = ['--test'] +from cloudinit import helpers, subp, temp_utils, url_helper, util +AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501 +PUPPET_AGENT_DEFAULT_ARGS = ["--test"] -class PuppetConstants(object): - def __init__(self, puppet_conf_file, puppet_ssl_dir, - csr_attributes_path, log): +class PuppetConstants(object): + def __init__( + self, puppet_conf_file, puppet_ssl_dir, csr_attributes_path, log + ): self.conf_path = puppet_conf_file self.ssl_dir = puppet_ssl_dir self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs") @@ -134,18 +131,27 @@ class PuppetConstants(object): def _autostart_puppet(log): # Set puppet to automatically start - if os.path.exists('/etc/default/puppet'): - subp.subp(['sed', '-i', - '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False) - elif os.path.exists('/bin/systemctl'): - subp.subp(['/bin/systemctl', 'enable', 'puppet.service'], - capture=False) - elif os.path.exists('/sbin/chkconfig'): - subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) + if os.path.exists("/etc/default/puppet"): + subp.subp( + [ + "sed", + "-i", + "-e", + "s/^START=.*/START=yes/", + "/etc/default/puppet", + ], + capture=False, + ) + elif os.path.exists("/bin/systemctl"): + subp.subp( + ["/bin/systemctl", "enable", "puppet.service"], capture=False + ) + elif os.path.exists("/sbin/chkconfig"): + subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False) else: - log.warning(("Sorry we do not know how to enable" - " puppet services on this system")) + log.warning( + "Sorry we do not know how to enable puppet services on this system" + ) def get_config_value(puppet_bin, setting): @@ -153,12 +159,13 @@ def get_config_value(puppet_bin, setting): :param puppet_bin: path to puppet binary :param setting: setting to query """ - out, _ = subp.subp([puppet_bin, 'config', 'print', setting]) + out, _ = subp.subp([puppet_bin, "config", "print", setting]) return out.rstrip() -def install_puppet_aio(url=AIO_INSTALL_URL, version=None, - collection=None, cleanup=True): 
+def install_puppet_aio( + url=AIO_INSTALL_URL, version=None, collection=None, cleanup=True +): """Install puppet-agent from the puppetlabs repositories using the one-shot shell script @@ -169,62 +176,70 @@ def install_puppet_aio(url=AIO_INSTALL_URL, version=None, """ args = [] if version is not None: - args = ['-v', version] + args = ["-v", version] if collection is not None: - args += ['-c', collection] + args += ["-c", collection] # Purge puppetlabs repos after installation if cleanup: - args += ['--cleanup'] + args += ["--cleanup"] content = url_helper.readurl(url=url, retries=5).contents # Use tmpdir over tmpfile to avoid 'text file busy' on execute with temp_utils.tempdir(needs_exe=True) as tmpd: - tmpf = os.path.join(tmpd, 'puppet-install') + tmpf = os.path.join(tmpd, "puppet-install") util.write_file(tmpf, content, mode=0o700) return subp.subp([tmpf] + args, capture=False) def handle(name, cfg, cloud, log, _args): # If there isn't a puppet key in the configuration don't do anything - if 'puppet' not in cfg: - log.debug(("Skipping module named %s," - " no 'puppet' configuration found"), name) + if "puppet" not in cfg: + log.debug( + "Skipping module named %s, no 'puppet' configuration found", name + ) return - puppet_cfg = cfg['puppet'] + puppet_cfg = cfg["puppet"] # Start by installing the puppet package if necessary... - install = util.get_cfg_option_bool(puppet_cfg, 'install', True) - version = util.get_cfg_option_str(puppet_cfg, 'version', None) - collection = util.get_cfg_option_str(puppet_cfg, 'collection', None) + install = util.get_cfg_option_bool(puppet_cfg, "install", True) + version = util.get_cfg_option_str(puppet_cfg, "version", None) + collection = util.get_cfg_option_str(puppet_cfg, "collection", None) install_type = util.get_cfg_option_str( - puppet_cfg, 'install_type', 'packages') - cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True) - run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False) - start_puppetd = util.get_cfg_option_bool(puppet_cfg, - 'start_service', - default=True) + puppet_cfg, "install_type", "packages" + ) + cleanup = util.get_cfg_option_bool(puppet_cfg, "cleanup", True) + run = util.get_cfg_option_bool(puppet_cfg, "exec", default=False) + start_puppetd = util.get_cfg_option_bool( + puppet_cfg, "start_service", default=True + ) aio_install_url = util.get_cfg_option_str( - puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL) + puppet_cfg, "aio_install_url", default=AIO_INSTALL_URL + ) # AIO and distro packages use different paths - if install_type == 'aio': - puppet_user = 'root' - puppet_bin = '/opt/puppetlabs/bin/puppet' - puppet_package = 'puppet-agent' + if install_type == "aio": + puppet_user = "root" + puppet_bin = "/opt/puppetlabs/bin/puppet" + puppet_package = "puppet-agent" else: # default to 'packages' - puppet_user = 'puppet' - puppet_bin = 'puppet' - puppet_package = 'puppet' + puppet_user = "puppet" + puppet_bin = "puppet" + puppet_package = "puppet" package_name = util.get_cfg_option_str( - puppet_cfg, 'package_name', puppet_package) + puppet_cfg, "package_name", puppet_package + ) if not install and version: - log.warning(("Puppet install set to false but version supplied," - " doing nothing.")) + log.warning( + "Puppet install set to false but version supplied, doing nothing." 
+ ) elif install: - log.debug(("Attempting to install puppet %s from %s"), - version if version else 'latest', install_type) + log.debug( + "Attempting to install puppet %s from %s", + version if version else "latest", + install_type, + ) if install_type == "packages": cloud.distro.install_packages((package_name, version)) @@ -235,17 +250,21 @@ def handle(name, cfg, cloud, log, _args): run = False conf_file = util.get_cfg_option_str( - puppet_cfg, 'conf_file', get_config_value(puppet_bin, 'config')) + puppet_cfg, "conf_file", get_config_value(puppet_bin, "config") + ) ssl_dir = util.get_cfg_option_str( - puppet_cfg, 'ssl_dir', get_config_value(puppet_bin, 'ssldir')) + puppet_cfg, "ssl_dir", get_config_value(puppet_bin, "ssldir") + ) csr_attributes_path = util.get_cfg_option_str( - puppet_cfg, 'csr_attributes_path', - get_config_value(puppet_bin, 'csr_attributes')) + puppet_cfg, + "csr_attributes_path", + get_config_value(puppet_bin, "csr_attributes"), + ) p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log) # ... and then update the puppet configuration - if 'conf' in puppet_cfg: + if "conf" in puppet_cfg: # Add all sections from the conf object to puppet.conf contents = util.load_file(p_constants.conf_path) # Create object for reading puppet.conf values @@ -254,30 +273,31 @@ def handle(name, cfg, cloud, log, _args): # mix the rest up. First clean them up # (TODO(harlowja) is this really needed??) cleaned_lines = [i.lstrip() for i in contents.splitlines()] - cleaned_contents = '\n'.join(cleaned_lines) + cleaned_contents = "\n".join(cleaned_lines) # Move to puppet_config.read_file when dropping py2.7 puppet_config.read_file( - StringIO(cleaned_contents), - source=p_constants.conf_path) - for (cfg_name, cfg) in puppet_cfg['conf'].items(): + StringIO(cleaned_contents), source=p_constants.conf_path + ) + for (cfg_name, cfg) in puppet_cfg["conf"].items(): # Cert configuration is a special case # Dump the puppetserver ca certificate in the correct place - if cfg_name == 'ca_cert': + if cfg_name == "ca_cert": # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership util.ensure_dir(p_constants.ssl_dir, 0o771) - util.chownbyname(p_constants.ssl_dir, puppet_user, 'root') + util.chownbyname(p_constants.ssl_dir, puppet_user, "root") util.ensure_dir(p_constants.ssl_cert_dir) - util.chownbyname(p_constants.ssl_cert_dir, puppet_user, 'root') + util.chownbyname(p_constants.ssl_cert_dir, puppet_user, "root") util.write_file(p_constants.ssl_cert_path, cfg) - util.chownbyname(p_constants.ssl_cert_path, - puppet_user, 'root') + util.chownbyname( + p_constants.ssl_cert_path, puppet_user, "root" + ) else: # Iterate through the config items, we'll use ConfigParser.set # to overwrite or create new items as needed for (o, v) in cfg.items(): - if o == 'certname': + if o == "certname": # Expand %f as the fqdn # TODO(harlowja) should this use the cloud fqdn?? 
v = v.replace("%f", socket.getfqdn()) @@ -288,14 +308,16 @@ def handle(name, cfg, cloud, log, _args): puppet_config.set(cfg_name, o, v) # We got all our config as wanted we'll rename # the previous puppet.conf and create our new one - util.rename(p_constants.conf_path, "%s.old" - % (p_constants.conf_path)) + util.rename( + p_constants.conf_path, "%s.old" % (p_constants.conf_path) + ) util.write_file(p_constants.conf_path, puppet_config.stringify()) - if 'csr_attributes' in puppet_cfg: - util.write_file(p_constants.csr_attributes_path, - yaml.dump(puppet_cfg['csr_attributes'], - default_flow_style=False)) + if "csr_attributes" in puppet_cfg: + util.write_file( + p_constants.csr_attributes_path, + yaml.dump(puppet_cfg["csr_attributes"], default_flow_style=False), + ) # Set it up so it autostarts if start_puppetd: @@ -303,18 +325,21 @@ def handle(name, cfg, cloud, log, _args): # Run the agent if needed if run: - log.debug('Running puppet-agent') - cmd = [puppet_bin, 'agent'] - if 'exec_args' in puppet_cfg: - cmd_args = puppet_cfg['exec_args'] + log.debug("Running puppet-agent") + cmd = [puppet_bin, "agent"] + if "exec_args" in puppet_cfg: + cmd_args = puppet_cfg["exec_args"] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) elif isinstance(cmd_args, str): cmd.extend(cmd_args.split()) else: - log.warning("Unknown type %s provided for puppet" - " 'exec_args' expected list, tuple," - " or string", type(cmd_args)) + log.warning( + "Unknown type %s provided for puppet" + " 'exec_args' expected list, tuple," + " or string", + type(cmd_args), + ) cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) else: cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) @@ -322,6 +347,7 @@ def handle(name, cfg, cloud, log, _args): if start_puppetd: # Start puppetd - subp.subp(['service', 'puppet', 'start'], capture=False) + subp.subp(["service", "puppet", "start"], capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py index d5e0ecb2..87be5348 100644 --- a/cloudinit/config/cc_refresh_rmc_and_interface.py +++ b/cloudinit/config/cc_refresh_rmc_and_interface.py @@ -34,20 +34,18 @@ This module handles """ +import errno + from cloudinit import log as logging +from cloudinit import netinfo, subp, util from cloudinit.settings import PER_ALWAYS -from cloudinit import util -from cloudinit import subp -from cloudinit import netinfo - -import errno frequency = PER_ALWAYS LOG = logging.getLogger(__name__) # Ensure that /opt/rsct/bin has been added to standard PATH of the # distro. The symlink to rmcctrl is /usr/sbin/rsct/bin/rmcctrl . -RMCCTRL = 'rmcctrl' +RMCCTRL = "rmcctrl" def handle(name, _cfg, _cloud, _log, _args): @@ -56,10 +54,11 @@ def handle(name, _cfg, _cloud, _log, _args): return LOG.debug( - 'Making the IPv6 up explicitly. ' - 'Ensuring IPv6 interface is not being handled by NetworkManager ' - 'and it is restarted to re-establish the communication with ' - 'the hypervisor') + "Making the IPv6 up explicitly. 
'
-        'Ensuring IPv6 interface is not being handled by NetworkManager '
-        'and it is restarted to re-establish the communication with '
-        'the hypervisor')
+        "Making the IPv6 up explicitly. 
" + "Ensuring IPv6 interface is not being handled by NetworkManager " + "and it is restarted to re-establish the communication with " + "the hypervisor" + ) ifaces = find_ipv6_ifaces() @@ -80,7 +79,7 @@ def find_ipv6_ifaces(): ifaces = [] for iface, data in info.items(): if iface == "lo": - LOG.debug('Skipping localhost interface') + LOG.debug("Skipping localhost interface") if len(data.get("ipv4", [])) != 0: # skip this interface, as it has ipv4 addrs continue @@ -92,16 +91,16 @@ def refresh_ipv6(interface): # IPv6 interface is explicitly brought up, subsequent to which the # RMC services are restarted to re-establish the communication with # the hypervisor. - subp.subp(['ip', 'link', 'set', interface, 'down']) - subp.subp(['ip', 'link', 'set', interface, 'up']) + subp.subp(["ip", "link", "set", interface, "down"]) + subp.subp(["ip", "link", "set", interface, "up"]) def sysconfig_path(iface): - return '/etc/sysconfig/network-scripts/ifcfg-' + iface + return "/etc/sysconfig/network-scripts/ifcfg-" + iface def restart_network_manager(): - subp.subp(['systemctl', 'restart', 'NetworkManager']) + subp.subp(["systemctl", "restart", "NetworkManager"]) def disable_ipv6(iface_file): @@ -113,12 +112,11 @@ def disable_ipv6(iface_file): contents = util.load_file(iface_file) except IOError as e: if e.errno == errno.ENOENT: - LOG.debug("IPv6 interface file %s does not exist\n", - iface_file) + LOG.debug("IPv6 interface file %s does not exist\n", iface_file) else: raise e - if 'IPV6INIT' not in contents: + if "IPV6INIT" not in contents: LOG.debug("Interface file %s did not have IPV6INIT", iface_file) return @@ -135,11 +133,12 @@ def disable_ipv6(iface_file): def search(contents): # Search for any NM_CONTROLLED or IPV6 lines in IPv6 interface file. - return( - contents.startswith("IPV6ADDR") or - contents.startswith("IPADDR6") or - contents.startswith("IPV6INIT") or - contents.startswith("NM_CONTROLLED")) + return ( + contents.startswith("IPV6ADDR") + or contents.startswith("IPADDR6") + or contents.startswith("IPV6INIT") + or contents.startswith("NM_CONTROLLED") + ) def refresh_rmc(): @@ -152,8 +151,8 @@ def refresh_rmc(): # until the subsystem and all resource managers are stopped. # -s : start Resource Monitoring & Control subsystem. try: - subp.subp([RMCCTRL, '-z']) - subp.subp([RMCCTRL, '-s']) + subp.subp([RMCCTRL, "-z"]) + subp.subp([RMCCTRL, "-s"]) except Exception: - util.logexc(LOG, 'Failed to refresh the RMC subsystem.') + util.logexc(LOG, "Failed to refresh the RMC subsystem.") raise diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py index 1cd72774..3b929903 100644 --- a/cloudinit/config/cc_reset_rmc.py +++ b/cloudinit/config/cc_reset_rmc.py @@ -39,9 +39,8 @@ Prerequisite of using this module is to install RSCT packages. import os from cloudinit import log as logging +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import util -from cloudinit import subp frequency = PER_INSTANCE @@ -49,34 +48,34 @@ frequency = PER_INSTANCE # The symlink for RMCCTRL and RECFGCT are # /usr/sbin/rsct/bin/rmcctrl and # /usr/sbin/rsct/install/bin/recfgct respectively. 
-RSCT_PATH = '/opt/rsct/install/bin' -RMCCTRL = 'rmcctrl' -RECFGCT = 'recfgct' +RSCT_PATH = "/opt/rsct/install/bin" +RMCCTRL = "rmcctrl" +RECFGCT = "recfgct" LOG = logging.getLogger(__name__) -NODE_ID_FILE = '/etc/ct_node_id' +NODE_ID_FILE = "/etc/ct_node_id" def handle(name, _cfg, cloud, _log, _args): # Ensuring node id has to be generated only once during first boot - if cloud.datasource.platform_type == 'none': - LOG.debug('Skipping creation of new ct_node_id node') + if cloud.datasource.platform_type == "none": + LOG.debug("Skipping creation of new ct_node_id node") return if not os.path.isdir(RSCT_PATH): LOG.debug("module disabled, RSCT_PATH not present") return - orig_path = os.environ.get('PATH') + orig_path = os.environ.get("PATH") try: add_path(orig_path) reset_rmc() finally: if orig_path: - os.environ['PATH'] = orig_path + os.environ["PATH"] = orig_path else: - del os.environ['PATH'] + del os.environ["PATH"] def reconfigure_rsct_subsystems(): @@ -88,17 +87,17 @@ def reconfigure_rsct_subsystems(): LOG.debug(out.strip()) return out except subp.ProcessExecutionError: - util.logexc(LOG, 'Failed to reconfigure the RSCT subsystems.') + util.logexc(LOG, "Failed to reconfigure the RSCT subsystems.") raise def get_node_id(): try: fp = util.load_file(NODE_ID_FILE) - node_id = fp.split('\n')[0] + node_id = fp.split("\n")[0] return node_id except Exception: - util.logexc(LOG, 'Failed to get node ID from file %s.' % NODE_ID_FILE) + util.logexc(LOG, "Failed to get node ID from file %s." % NODE_ID_FILE) raise @@ -107,25 +106,25 @@ def add_path(orig_path): # So thet cloud init automatically find and # run RECFGCT to create new node_id. suff = ":" + orig_path if orig_path else "" - os.environ['PATH'] = RSCT_PATH + suff - return os.environ['PATH'] + os.environ["PATH"] = RSCT_PATH + suff + return os.environ["PATH"] def rmcctrl(): # Stop the RMC subsystem and all resource managers so that we can make # some changes to it try: - return subp.subp([RMCCTRL, '-z']) + return subp.subp([RMCCTRL, "-z"]) except Exception: - util.logexc(LOG, 'Failed to stop the RMC subsystem.') + util.logexc(LOG, "Failed to stop the RMC subsystem.") raise def reset_rmc(): - LOG.debug('Attempting to reset RMC.') + LOG.debug("Attempting to reset RMC.") node_id_before = get_node_id() - LOG.debug('Node ID at beginning of module: %s', node_id_before) + LOG.debug("Node ID at beginning of module: %s", node_id_before) # Stop the RMC subsystem and all resource managers so that we can make # some changes to it @@ -133,11 +132,11 @@ def reset_rmc(): reconfigure_rsct_subsystems() node_id_after = get_node_id() - LOG.debug('Node ID at end of module: %s', node_id_after) + LOG.debug("Node ID at end of module: %s", node_id_after) # Check if new node ID is generated or not # by comparing old and new node ID if node_id_after == node_id_before: - msg = 'New node ID did not get generated.' + msg = "New node ID did not get generated." 
LOG.error(msg) raise Exception(msg) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 00bb7ae7..b009c392 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -13,21 +13,21 @@ import os import stat from textwrap import dedent +from cloudinit import subp, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS -from cloudinit import subp -from cloudinit import util NOBLOCK = "noblock" frequency = PER_ALWAYS -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_resizefs', - 'name': 'Resizefs', - 'title': 'Resize filesystem', - 'description': dedent("""\ + "id": "cc_resizefs", + "name": "Resizefs", + "title": "Resize filesystem", + "description": dedent( + """\ Resize a filesystem to use all avaliable space on partition. This module is useful along with ``cc_growpart`` and will ensure that if the root partition has been resized the root filesystem will be resized @@ -36,22 +36,26 @@ meta = { running. Optionally, the resize operation can be performed in the background while cloud-init continues running modules. This can be enabled by setting ``resize_rootfs`` to ``true``. This module can be - disabled altogether by setting ``resize_rootfs`` to ``false``."""), - 'distros': distros, - 'examples': [ - 'resize_rootfs: false # disable root filesystem resize operation'], - 'frequency': PER_ALWAYS, + disabled altogether by setting ``resize_rootfs`` to ``false``.""" + ), + "distros": distros, + "examples": [ + "resize_rootfs: false # disable root filesystem resize operation" + ], + "frequency": PER_ALWAYS, } schema = { - 'type': 'object', - 'properties': { - 'resize_rootfs': { - 'enum': [True, False, NOBLOCK], - 'description': dedent("""\ - Whether to resize the root partition. Default: 'true'""") + "type": "object", + "properties": { + "resize_rootfs": { + "enum": [True, False, NOBLOCK], + "description": dedent( + """\ + Whether to resize the root partition. Default: 'true'""" + ), } - } + }, } __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -63,32 +67,38 @@ def _resize_btrfs(mount_point, devpth): # Use a subvolume that is not ro to trick the resize operation to do the # "right" thing. The use of ".snapshot" is specific to "snapper" a generic # solution would be walk the subvolumes and find a rw mounted subvolume. 
- if (not util.mount_is_read_write(mount_point) and - os.path.isdir("%s/.snapshots" % mount_point)): - return ('btrfs', 'filesystem', 'resize', 'max', - '%s/.snapshots' % mount_point) + if not util.mount_is_read_write(mount_point) and os.path.isdir( + "%s/.snapshots" % mount_point + ): + return ( + "btrfs", + "filesystem", + "resize", + "max", + "%s/.snapshots" % mount_point, + ) else: - return ('btrfs', 'filesystem', 'resize', 'max', mount_point) + return ("btrfs", "filesystem", "resize", "max", mount_point) def _resize_ext(mount_point, devpth): - return ('resize2fs', devpth) + return ("resize2fs", devpth) def _resize_xfs(mount_point, devpth): - return ('xfs_growfs', mount_point) + return ("xfs_growfs", mount_point) def _resize_ufs(mount_point, devpth): - return ('growfs', '-y', mount_point) + return ("growfs", "-y", mount_point) def _resize_zfs(mount_point, devpth): - return ('zpool', 'online', '-e', mount_point, devpth) + return ("zpool", "online", "-e", mount_point, devpth) def _resize_hammer2(mount_point, devpth): - return ('hammer2', 'growfs', mount_point) + return ("hammer2", "growfs", mount_point) def _can_skip_resize_ufs(mount_point, devpth): @@ -100,7 +110,7 @@ def _can_skip_resize_ufs(mount_point, devpth): # growfs exits with 1 for almost all cases up to this one. # This means we can't just use rcs=[0, 1] as subp parameter: try: - subp.subp(['growfs', '-N', devpth]) + subp.subp(["growfs", "-N", devpth]) except subp.ProcessExecutionError as e: if e.stderr.startswith(skip_start) and skip_contain in e.stderr: # This FS is already at the desired size @@ -114,17 +124,15 @@ def _can_skip_resize_ufs(mount_point, devpth): # for multiple filesystem types if possible, e.g. one command for # ext2, ext3 and ext4. RESIZE_FS_PREFIXES_CMDS = [ - ('btrfs', _resize_btrfs), - ('ext', _resize_ext), - ('xfs', _resize_xfs), - ('ufs', _resize_ufs), - ('zfs', _resize_zfs), - ('hammer2', _resize_hammer2), + ("btrfs", _resize_btrfs), + ("ext", _resize_ext), + ("xfs", _resize_xfs), + ("ufs", _resize_ufs), + ("zfs", _resize_zfs), + ("hammer2", _resize_hammer2), ] -RESIZE_FS_PRECHECK_CMDS = { - 'ufs': _can_skip_resize_ufs -} +RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs} def can_skip_resize(fs_type, resize_what, devpth): @@ -148,52 +156,66 @@ def maybe_get_writable_device_path(devpath, info, log): container = util.is_container() # Ensure the path is a block device. - if (devpath == "/dev/root" and not os.path.exists(devpath) and - not container): + if ( + devpath == "/dev/root" + and not os.path.exists(devpath) + and not container + ): devpath = util.rootdev_from_cmdline(util.get_cmdline()) if devpath is None: log.warning("Unable to find device '/dev/root'") return None log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath) - if devpath == 'overlayroot': + if devpath == "overlayroot": log.debug("Not attempting to resize devpath '%s': %s", devpath, info) return None # FreeBSD zpool can also just use gpt/