From 3cee0bf85fbf12d272422c8eeed63bf06e64570b Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Tue, 31 Jul 2018 18:44:12 +0000
Subject: oracle: fix detect_openstack to report True on OracleCloud.com DMI data

The OpenStack datasource in 18.3 changed to detect data in the init-local
stage instead of init-network and attempted to redetect the OpenStackLocal
datasource on Oracle across reboots. The function detect_openstack was added
to quickly detect whether a platform is OpenStack based on dmi product_name
or chassis_asset_tag, and it was a bit too strict for Oracle in checking for
'OpenStack Nova'/'Compute' DMI product_name.

Oracle's DMI product_name reports 'Standard PC (i440FX + PIIX, 1996)' and DMI
chassis_asset_tag is 'OracleCloud.com'.

The detect_openstack function now adds 'OracleCloud.com' as a supported value
to the valid chassis-asset-tags for the OpenStack datasource.

LP: #1784685
---
 tests/unittests/test_datasource/test_openstack.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
(limited to 'tests/unittests/test_datasource')

diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 585acc33..d862f4bc 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -510,6 +510,24 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
 ds.detect_openstack(),
 'Expected detect_openstack == True on OpenTelekomCloud')

+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi,
+ m_is_x86):
+ """Return True on OpenStack reporting Oracle cloud asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == 'system-product-name':
+ return 'Standard PC (i440FX + PIIX, 1996)' # No match
+ if dmi_key == 'chassis-asset-tag':
+ return 'OracleCloud.com'
+ assert False, 'Unexpected dmi read of %s' % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ 'Expected detect_openstack == True on OracleCloud.com')
+
 @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
 @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
 def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env,
-- cgit v1.2.3

From 01cfa711bcf31c6c0019d1e936b5b07aa6abc2bc Mon Sep 17 00:00:00 2001
From: Akihiko Ota
Date: Fri, 3 Aug 2018 20:44:59 +0000
Subject: OpenNebula: Fix null gateway6

The OpenNebula data source generates an invalid netplan yaml file if the
IPv6 gateway is not defined in context.sh.
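As a rough sketch of the failure mode (illustrative only, not the datasource's
actual rendering code): the per-device guard must test the IPv6 gateway value
itself, because testing the IPv4 gateway, as the old code did, emits a
gateway6 key even when get_gateway6() returned nothing:

    # Minimal sketch, assuming 'gateway' and 'gateway6' are the values already
    # looked up from context.sh for one device; the function name is illustrative.
    def render_gateways(gateway, gateway6):
        devconf = {}
        if gateway:
            devconf['gateway4'] = gateway
        if gateway6:  # the fix below: previously this condition tested 'gateway'
            devconf['gateway6'] = gateway6
        return devconf

    render_gateways('1.2.3.5', None)  # -> {'gateway4': '1.2.3.5'}, no null gateway6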
LP: #1768547 --- cloudinit/sources/DataSourceOpenNebula.py | 2 +- tests/unittests/test_datasource/test_opennebula.py | 408 ++++++++++++++++++++- 2 files changed, 407 insertions(+), 3 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 16c10785..77ccd128 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -232,7 +232,7 @@ class OpenNebulaNetwork(object): # Set IPv6 default gateway gateway6 = self.get_gateway6(c_dev) - if gateway: + if gateway6: devconf['gateway6'] = gateway6 # Set DNS servers and search domains diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index ab42f344..36b4d771 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -354,6 +354,412 @@ class TestOpenNebulaNetwork(unittest.TestCase): system_nics = ('eth0', 'ens3') + def test_context_devname(self): + """Verify context_devname correctly returns mac and name.""" + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH1_MAC': '02:00:0a:12:0f:0f', } + expected = { + '02:00:0a:12:01:01': 'ETH0', + '02:00:0a:12:0f:0f': 'ETH1', } + net = ds.OpenNebulaNetwork(context) + self.assertEqual(expected, net.context_devname) + + def test_get_nameservers(self): + """ + Verify get_nameservers('device') correctly returns DNS server addresses + and search domains. + """ + context = { + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + expected = { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']} + net = ds.OpenNebulaNetwork(context) + val = net.get_nameservers('eth0') + self.assertEqual(expected, val) + + def test_get_mtu(self): + """Verify get_mtu('device') correctly returns MTU size.""" + context = {'ETH0_MTU': '1280'} + net = ds.OpenNebulaNetwork(context) + val = net.get_mtu('eth0') + self.assertEqual('1280', val) + + def test_get_ip(self): + """Verify get_ip('device') correctly returns IPv4 address.""" + context = {'ETH0_IP': PUBLIC_IP} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip('eth0', MACADDR) + self.assertEqual(PUBLIC_IP, val) + + def test_get_ip_emptystring(self): + """ + Verify get_ip('device') correctly returns IPv4 address. + It returns IP address created by MAC address if ETH0_IP has empty + string. + """ + context = {'ETH0_IP': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip('eth0', MACADDR) + self.assertEqual(IP_BY_MACADDR, val) + + def test_get_ip6(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 address is Given by ETH0_IP6. + """ + context = { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_ULA': '', } + expected = [IP6_GLOBAL] + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_ula(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 address is Given by ETH0_IP6_ULA. + """ + context = { + 'ETH0_IP6': '', + 'ETH0_IP6_ULA': IP6_ULA, } + expected = [IP6_ULA] + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_dual(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 addresses are Given by ETH0_IP6 and ETH0_IP6_ULA. 
+ """ + context = { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_ULA': IP6_ULA, } + expected = [IP6_GLOBAL, IP6_ULA] + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_prefix(self): + """ + Verify get_ip6_prefix('device') correctly returns IPv6 prefix. + """ + context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6_prefix('eth0') + self.assertEqual(IP6_PREFIX, val) + + def test_get_ip6_prefix_emptystring(self): + """ + Verify get_ip6_prefix('device') correctly returns IPv6 prefix. + It returns default value '64' if ETH0_IP6_PREFIX_LENGTH has empty + string. + """ + context = {'ETH0_IP6_PREFIX_LENGTH': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6_prefix('eth0') + self.assertEqual('64', val) + + def test_get_gateway(self): + """ + Verify get_gateway('device') correctly returns IPv4 default gateway + address. + """ + context = {'ETH0_GATEWAY': '1.2.3.5'} + net = ds.OpenNebulaNetwork(context) + val = net.get_gateway('eth0') + self.assertEqual('1.2.3.5', val) + + def test_get_gateway6(self): + """ + Verify get_gateway6('device') correctly returns IPv6 default gateway + address. + """ + context = {'ETH0_GATEWAY6': IP6_GW} + net = ds.OpenNebulaNetwork(context) + val = net.get_gateway6('eth0') + self.assertEqual(IP6_GW, val) + + def test_get_mask(self): + """ + Verify get_mask('device') correctly returns IPv4 subnet mask. + """ + context = {'ETH0_MASK': '255.255.0.0'} + net = ds.OpenNebulaNetwork(context) + val = net.get_mask('eth0') + self.assertEqual('255.255.0.0', val) + + def test_get_mask_emptystring(self): + """ + Verify get_mask('device') correctly returns IPv4 subnet mask. + It returns default value '255.255.255.0' if ETH0_MASK has empty string. + """ + context = {'ETH0_MASK': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_mask('eth0') + self.assertEqual('255.255.255.0', val) + + def test_get_network(self): + """ + Verify get_network('device') correctly returns IPv4 network address. + """ + context = {'ETH0_NETWORK': '1.2.3.0'} + net = ds.OpenNebulaNetwork(context) + val = net.get_network('eth0', MACADDR) + self.assertEqual('1.2.3.0', val) + + def test_get_network_emptystring(self): + """ + Verify get_network('device') correctly returns IPv4 network address. + It returns network address created by MAC address if ETH0_NETWORK has + empty string. + """ + context = {'ETH0_NETWORK': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_network('eth0', MACADDR) + self.assertEqual('10.18.1.0', val) + + def test_get_field(self): + """ + Verify get_field('device', 'name') returns *context* value. + """ + context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy') + self.assertEqual('DUMMY_VALUE', val) + + def test_get_field_withdefaultvalue(self): + """ + Verify get_field('device', 'name', 'default value') returns *context* + value. + """ + context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') + self.assertEqual('DUMMY_VALUE', val) + + def test_get_field_withdefaultvalue_emptycontext(self): + """ + Verify get_field('device', 'name', 'default value') returns *default* + value if context value is empty string. 
+ """ + context = {'ETH9_DUMMY': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') + self.assertEqual('DEFAULT_VALUE', val) + + def test_get_field_emptycontext(self): + """ + Verify get_field('device', 'name') returns None if context value is + empty string. + """ + context = {'ETH9_DUMMY': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy') + self.assertEqual(None, val) + + def test_get_field_nonecontext(self): + """ + Verify get_field('device', 'name') returns None if context value is + None. + """ + context = {'ETH9_DUMMY': None} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy') + self.assertEqual(None, val) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_gateway(self, m_get_phys_by_mac): + """Test rendering with/without IPv4 gateway""" + self.maxDiff = None + # empty ETH0_GATEWAY + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_GATEWAY + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY': '1.2.3.5', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'gateway4': '1.2.3.5', + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_gateway6(self, m_get_phys_by_mac): + """Test rendering with/without IPv6 gateway""" + self.maxDiff = None + # empty ETH0_GATEWAY6 + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY6': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_GATEWAY6 + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY6': IP6_GW, } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'gateway6': IP6_GW, + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_ipv6address(self, m_get_phys_by_mac): + """Test rendering with/without IPv6 address""" + self.maxDiff = None + # empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_IP6': '', + 'ETH0_IP6_ULA': '', + 'ETH0_IP6_PREFIX_LENGTH': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 
'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, + 'ETH0_IP6_ULA': IP6_ULA, } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [ + IP_BY_MACADDR + '/' + IP4_PREFIX, + IP6_GLOBAL + '/' + IP6_PREFIX, + IP6_ULA + '/' + IP6_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_dns(self, m_get_phys_by_mac): + """Test rendering with/without DNS server, search domain""" + self.maxDiff = None + # empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'DNS': '', + 'ETH0_DNS': '', + 'ETH0_SEARCH_DOMAIN': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']}, + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_mtu(self, m_get_phys_by_mac): + """Test rendering with/without MTU""" + self.maxDiff = None + # empty ETH0_MTU + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MTU': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_MTU + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MTU': '1280', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'mtu': '1280', + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_eth0(self, m_get_phys_by_mac): for nic in self.system_nics: @@ -395,7 +801,6 @@ class TestOpenNebulaNetwork(unittest.TestCase): 'match': {'macaddress': MACADDR}, 'addresses': [IP_BY_MACADDR + '/16'], 'gateway4': '1.2.3.5', - 'gateway6': None, 'nameservers': { 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}} @@ -494,7 +899,6 @@ class TestOpenNebulaNetwork(unittest.TestCase): 'match': {'macaddress': MAC_1}, 'addresses': ['10.3.1.3/16'], 'gateway4': '10.3.0.1', - 'gateway6': None, 'nameservers': { 'addresses': ['10.3.1.2', '1.2.3.8'], 'search': [ -- cgit v1.2.3 From 51f49dc1da0c045d01ddc977874c9bed70cb510f Mon Sep 17 00:00:00 2001 From: Louis Bouchard Date: Fri, 17 Aug 2018 18:43:44 +0000 Subject: Scaleway: Add network configuration to the DataSource 
DEP_NETWORK is removed since the network_config must run at each boot. New EventType.BOOT event is used for that. Network is brought up early to fetch the metadata which is required to configure the network (ipv4 and/or v6). Adds unittests for the following and fixes test_common for LOCAL and NETWORK sets. --- cloudinit/sources/DataSourceScaleway.py | 54 +++++++++++++--- tests/unittests/test_datasource/test_common.py | 2 +- tests/unittests/test_datasource/test_scaleway.py | 79 +++++++++++++++++++++++- 3 files changed, 124 insertions(+), 11 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index e2502b02..9dc4ab23 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -29,7 +29,9 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper from cloudinit import util - +from cloudinit import net +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError +from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -168,8 +170,8 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): - dsname = "Scaleway" + update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) @@ -185,11 +187,10 @@ class DataSourceScaleway(sources.DataSource): self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES)) self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)) + self._fallback_interface = None + self._network_config = None - def _get_data(self): - if not on_scaleway(): - return False - + def _crawl_metadata(self): resp = url_helper.readurl(self.metadata_address, timeout=self.timeout, retries=self.retries) @@ -203,8 +204,47 @@ class DataSourceScaleway(sources.DataSource): 'vendor-data', self.vendordata_address, self.retries, self.timeout ) + + def _get_data(self): + if not on_scaleway(): + return False + + if self._fallback_interface is None: + self._fallback_interface = net.find_fallback_nic() + try: + with EphemeralDHCPv4(self._fallback_interface): + util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self._crawl_metadata) + except (NoDHCPLeaseError) as e: + util.logexc(LOG, str(e)) + return False return True + @property + def network_config(self): + """ + Configure networking according to data received from the + metadata API. 
+ """ + if self._network_config: + return self._network_config + + if self._fallback_interface is None: + self._fallback_interface = net.find_fallback_nic() + + netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface} + subnets = [{'type': 'dhcp4'}] + if self.metadata['ipv6']: + subnets += [{'type': 'static', + 'address': '%s' % self.metadata['ipv6']['address'], + 'gateway': '%s' % self.metadata['ipv6']['gateway'], + 'netmask': '%s' % self.metadata['ipv6']['netmask'], + }] + netcfg['subnets'] = subnets + self._network_config = {'version': 1, 'config': [netcfg]} + return self._network_config + @property def launch_index(self): return None @@ -228,7 +268,7 @@ class DataSourceScaleway(sources.DataSource): datasources = [ - (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceScaleway, (sources.DEP_FILESYSTEM,)), ] diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 0d35dc29..1a5a3db2 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -41,6 +41,7 @@ DEFAULT_LOCAL = [ SmartOS.DataSourceSmartOS, Ec2.DataSourceEc2Local, OpenStack.DataSourceOpenStackLocal, + Scaleway.DataSourceScaleway, ] DEFAULT_NETWORK = [ @@ -55,7 +56,6 @@ DEFAULT_NETWORK = [ NoCloud.DataSourceNoCloudNet, OpenStack.DataSourceOpenStack, OVF.DataSourceOVFNet, - Scaleway.DataSourceScaleway, ] diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index e4e9bb20..c2bc7a00 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -176,11 +176,18 @@ class TestDataSourceScaleway(HttprettyTestCase): self.vendordata_url = \ DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] + self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway', + '_m_on_scaleway', return_value=True) + self.add_patch( + 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic', + '_m_find_fallback_nic', return_value='scalewaynic0') + + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @mock.patch('time.sleep', return_value=None) - def test_metadata_ok(self, sleep, m_get_cmdline): + def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4): """ get_data() returns metadata, user data and vendor data. """ @@ -211,11 +218,12 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.region) self.assertEqual(sleep.call_count, 0) + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @mock.patch('time.sleep', return_value=None) - def test_metadata_404(self, sleep, m_get_cmdline): + def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4): """ get_data() returns metadata, but no user data nor vendor data. 
""" @@ -234,11 +242,12 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.get_vendordata_raw()) self.assertEqual(sleep.call_count, 0) + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @mock.patch('time.sleep', return_value=None) - def test_metadata_rate_limit(self, sleep, m_get_cmdline): + def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4): """ get_data() is rate limited two times by the metadata API when fetching user data. @@ -262,3 +271,67 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertEqual(self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA) self.assertEqual(sleep.call_count, 2) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_ok(self, m_get_cmdline, fallback_nic): + """ + network_config will only generate IPv4 config if no ipv6 data is + available in the metadata + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + + netcfg = self.datasource.network_config + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}]}] + } + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic): + """ + network_config will only generate IPv4/v6 configs if ipv6 data is + available in the metadata + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = { + 'address': '2000:abc:4444:9876::42:999', + 'gateway': '2000:abc:4444:9876::42:000', + 'netmask': '127', + } + + netcfg = self.datasource.network_config + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}, + {'type': 'static', + 'address': '2000:abc:4444:9876::42:999', + 'gateway': '2000:abc:4444:9876::42:000', + 'netmask': '127', } + ] + + }] + } + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_existing(self, m_get_cmdline, fallback_nic): + """ + network_config() should return the same data if a network config + already exists + """ + m_get_cmdline.return_value = 'scaleway' + self.datasource._network_config = '0xdeadbeef' + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, '0xdeadbeef') -- cgit v1.2.3 From 47548df9ded4ad4088d3d846f1876b29b16aa7d1 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 17 Aug 2018 20:24:58 +0000 Subject: azure: allow azure to generate network configuration from IMDS per boot. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Azure datasource now queries IMDS metadata service for network configuration at link local address http://169.254.169.254/metadata/instance?api-version=2017-12-01. The azure metadata service presents a list of macs and allocated ip addresses associated with this instance. Azure will now also regenerate network configuration on every boot because it subscribes to EventType.BOOT maintenance events as well as the 'first boot' EventType.BOOT_NEW_INSTANCE. 
For testing add azure-imds --kind to cloud-init devel net_convert tool for debugging IMDS metadata. Also refactor _get_data into 3 discrete methods:   - is_platform_viable: check quickly whether the datasource is     potentially compatible with the platform on which is is running   - crawl_metadata: walk all potential metadata candidates, returning a     structured dict of all metadata and userdata. Raise InvalidMetaData on     error.   - _get_data: call crawl_metadata and process results or error. Cache     instance data on class attributes: metadata, userdata_raw etc. --- cloudinit/cmd/devel/net_convert.py | 9 +- cloudinit/sources/DataSourceAzure.py | 256 +++++++++++++++-- tests/unittests/test_datasource/test_azure.py | 399 ++++++++++++++++++++++++-- 3 files changed, 605 insertions(+), 59 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 1ec08a3c..271dc5ed 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -8,6 +8,7 @@ import sys import yaml from cloudinit.sources.helpers import openstack +from cloudinit.sources import DataSourceAzure as azure from cloudinit.net import eni, netplan, network_state, sysconfig from cloudinit import log @@ -28,7 +29,8 @@ def get_parser(parser=None): parser.add_argument("-p", "--network-data", type=open, metavar="PATH", required=True) parser.add_argument("-k", "--kind", - choices=['eni', 'network_data.json', 'yaml'], + choices=['eni', 'network_data.json', 'yaml', + 'azure-imds'], required=True) parser.add_argument("-d", "--directory", metavar="PATH", @@ -78,10 +80,13 @@ def handle_args(name, args): ["Input YAML", yaml.dump(pre_ns, default_flow_style=False, indent=4), ""])) ns = network_state.parse_net_config_data(pre_ns) - else: + elif args.kind == 'network_data.json': pre_ns = openstack.convert_net_json( json.loads(net_data), known_macs=known_macs) ns = network_state.parse_net_config_data(pre_ns) + elif args.kind == 'azure-imds': + pre_ns = azure.parse_network_config(json.loads(net_data)) + ns = network_state.parse_net_config_data(pre_ns) if not ns: raise RuntimeError("No valid network_state object created from" diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 7007d9ea..783445e1 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -8,6 +8,7 @@ import base64 import contextlib import crypt from functools import partial +import json import os import os.path import re @@ -17,6 +18,7 @@ import xml.etree.ElementTree as ET from cloudinit import log as logging from cloudinit import net +from cloudinit.event import EventType from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources from cloudinit.sources.helpers.azure import get_metadata_from_fabric @@ -49,7 +51,17 @@ DEFAULT_FS = 'ext4' AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" -IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" +AGENT_SEED_DIR = '/var/lib/waagent' +IMDS_URL = "http://169.254.169.254/metadata/" + +# List of static scripts and network config artifacts created by +# stock ubuntu suported images. 
+UBUNTU_EXTENDED_NETWORK_SCRIPTS = [ + '/etc/netplan/90-azure-hotplug.yaml', + '/usr/local/sbin/ephemeral_eth.sh', + '/etc/udev/rules.d/10-net-device-added.rules', + '/run/network/interfaces.ephemeral.d', +] def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): @@ -185,7 +197,7 @@ if util.is_FreeBSD(): BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, - 'data_dir': "/var/lib/waagent", + 'data_dir': AGENT_SEED_DIR, 'set_hostname': True, 'hostname_bounce': { 'interface': DEFAULT_PRIMARY_NIC, @@ -252,6 +264,7 @@ class DataSourceAzure(sources.DataSource): dsname = 'Azure' _negotiated = False + _metadata_imds = sources.UNSET def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -263,6 +276,8 @@ class DataSourceAzure(sources.DataSource): BUILTIN_DS_CONFIG]) self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file') self._network_config = None + # Regenerate network config new_instance boot and every boot + self.update_events['network'].add(EventType.BOOT) def __str__(self): root = sources.DataSource.__str__(self) @@ -336,15 +351,17 @@ class DataSourceAzure(sources.DataSource): metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata - def _get_data(self): + def crawl_metadata(self): + """Walk all instance metadata sources returning a dict on success. + + @return: A dictionary of any metadata content for this instance. + @raise: InvalidMetaDataException when the expected metadata service is + unavailable, broken or disabled. + """ + crawled_data = {} # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. So, in order to successfully reboot we # need to look in the datadir and consider that valid - asset_tag = util.read_dmi_data('chassis-asset-tag') - if asset_tag != AZURE_CHASSIS_ASSET_TAG: - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - return False - ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] @@ -373,41 +390,84 @@ class DataSourceAzure(sources.DataSource): except NonAzureDataSource: continue except BrokenAzureDataSource as exc: - raise exc + msg = 'BrokenAzureDataSource: %s' % exc + raise sources.InvalidMetaDataException(msg) except util.MountFailedError: LOG.warning("%s was not mountable", cdev) continue if reprovision or self._should_reprovision(ret): ret = self._reprovision() - (md, self.userdata_raw, cfg, files) = ret + imds_md = get_metadata_from_imds( + self.fallback_interface, retries=3) + (md, userdata_raw, cfg, files) = ret self.seed = cdev - self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) - self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG]) + crawled_data.update({ + 'cfg': cfg, + 'files': files, + 'metadata': util.mergemanydict( + [md, {'imds': imds_md}]), + 'userdata_raw': userdata_raw}) found = cdev LOG.debug("found datasource in %s", cdev) break if not found: - return False + raise sources.InvalidMetaDataException('No Azure metadata found') if found == ddir: LOG.debug("using files cached in %s", ddir) seed = _get_random_seed() if seed: - self.metadata['random_seed'] = seed + crawled_data['metadata']['random_seed'] = seed + crawled_data['metadata']['instance-id'] = util.read_dmi_data( + 'system-uuid') + return crawled_data + + def _is_platform_viable(self): + """Check platform environment to report if this datasource may run.""" + return _is_platform_viable(self.seed_dir) + + def clear_cached_attrs(self, attr_defaults=()): + """Reset any cached class attributes to defaults.""" + 
super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) + self._metadata_imds = sources.UNSET + + def _get_data(self): + """Crawl and process datasource metadata caching metadata as attrs. + + @return: True on success, False on error, invalid or disabled + datasource. + """ + if not self._is_platform_viable(): + return False + try: + crawled_data = util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self.crawl_metadata) + except sources.InvalidMetaDataException as e: + LOG.warning('Could not crawl Azure metadata: %s', e) + return False + if self.distro and self.distro.name == 'ubuntu': + maybe_remove_ubuntu_network_config_scripts() + + # Process crawled data and augment with various config defaults + self.cfg = util.mergemanydict( + [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG]) + self._metadata_imds = crawled_data['metadata']['imds'] + self.metadata = util.mergemanydict( + [crawled_data['metadata'], DEFAULT_METADATA]) + self.userdata_raw = crawled_data['userdata_raw'] user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) # walinux agent writes files world readable, but expects # the directory to be protected. - write_files(ddir, files, dirmode=0o700) - - self.metadata['instance-id'] = util.read_dmi_data('system-uuid') - + write_files( + self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700) return True def device_name_to_device(self, name): @@ -436,7 +496,7 @@ class DataSourceAzure(sources.DataSource): def _poll_imds(self): """Poll IMDS for the new provisioning data until we get a valid response. Then return the returned JSON object.""" - url = IMDS_URL + "?api-version=2017-04-02" + url = IMDS_URL + "reprovisiondata?api-version=2017-04-02" headers = {"Metadata": "true"} report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) LOG.debug("Start polling IMDS") @@ -487,7 +547,7 @@ class DataSourceAzure(sources.DataSource): jump back into the polling loop in order to retrieve the ovf_env.""" if not ret: return False - (_md, self.userdata_raw, cfg, _files) = ret + (_md, _userdata_raw, cfg, _files) = ret path = REPROVISION_MARKER_FILE if (cfg.get('PreprovisionedVm') is True or os.path.isfile(path)): @@ -543,22 +603,15 @@ class DataSourceAzure(sources.DataSource): @property def network_config(self): """Generate a network config like net.generate_fallback_network() with - the following execptions. + the following exceptions. 1. Probe the drivers of the net-devices present and inject them in the network configuration under params: driver: value 2. Generate a fallback network config that does not include any of the blacklisted devices. """ - blacklist = ['mlx4_core'] if not self._network_config: - LOG.debug('Azure: generating fallback configuration') - # generate a network config, blacklist picking any mlx4_core devs - netconfig = net.generate_fallback_config( - blacklist_drivers=blacklist, config_driver=True) - - self._network_config = netconfig - + self._network_config = parse_network_config(self._metadata_imds) return self._network_config @@ -1025,6 +1078,151 @@ def load_azure_ds_dir(source_dir): return (md, ud, cfg, {'ovf-env.xml': contents}) +def parse_network_config(imds_metadata): + """Convert imds_metadata dictionary to network v2 configuration. + + Parses network configuration from imds metadata if present or generate + fallback network config excluding mlx4_core devices. + + @param: imds_metadata: Dict of content read from IMDS network service. 
+ @return: Dictionary containing network version 2 standard configuration. + """ + if imds_metadata != sources.UNSET and imds_metadata: + netconfig = {'version': 2, 'ethernets': {}} + LOG.debug('Azure: generating network configuration from IMDS') + network_metadata = imds_metadata['network'] + for idx, intf in enumerate(network_metadata['interface']): + nicname = 'eth{idx}'.format(idx=idx) + dev_config = {} + for addr4 in intf['ipv4']['ipAddress']: + privateIpv4 = addr4['privateIpAddress'] + if privateIpv4: + if dev_config.get('dhcp4', False): + # Append static address config for nic > 1 + netPrefix = intf['ipv4']['subnet'][0].get( + 'prefix', '24') + if not dev_config.get('addresses'): + dev_config['addresses'] = [] + dev_config['addresses'].append( + '{ip}/{prefix}'.format( + ip=privateIpv4, prefix=netPrefix)) + else: + dev_config['dhcp4'] = True + for addr6 in intf['ipv6']['ipAddress']: + privateIpv6 = addr6['privateIpAddress'] + if privateIpv6: + dev_config['dhcp6'] = True + break + if dev_config: + mac = ':'.join(re.findall(r'..', intf['macAddress'])) + dev_config.update( + {'match': {'macaddress': mac.lower()}, + 'set-name': nicname}) + netconfig['ethernets'][nicname] = dev_config + else: + blacklist = ['mlx4_core'] + LOG.debug('Azure: generating fallback configuration') + # generate a network config, blacklist picking mlx4_core devs + netconfig = net.generate_fallback_config( + blacklist_drivers=blacklist, config_driver=True) + return netconfig + + +def get_metadata_from_imds(fallback_nic, retries): + """Query Azure's network metadata service, returning a dictionary. + + If network is not up, setup ephemeral dhcp on fallback_nic to talk to the + IMDS. For more info on IMDS: + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service + + @param fallback_nic: String. The name of the nic which requires active + network in order to query IMDS. + @param retries: The number of retries of the IMDS_URL. + + @return: A dict of instance metadata containing compute and network + info. + """ + kwargs = {'logfunc': LOG.debug, + 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)', + 'func': _get_metadata_from_imds, 'args': (retries,)} + if net.is_up(fallback_nic): + return util.log_time(**kwargs) + else: + with EphemeralDHCPv4(fallback_nic): + return util.log_time(**kwargs) + + +def _get_metadata_from_imds(retries): + + def retry_on_url_error(msg, exception): + if isinstance(exception, UrlError) and exception.code == 404: + return True # Continue retries + return False # Stop retries on all other exceptions + + url = IMDS_URL + "instance?api-version=2017-12-01" + headers = {"Metadata": "true"} + try: + response = readurl( + url, timeout=1, headers=headers, retries=retries, + exception_cb=retry_on_url_error) + except Exception as e: + LOG.debug('Ignoring IMDS instance metadata: %s', e) + return {} + try: + return util.load_json(str(response)) + except json.decoder.JSONDecodeError: + LOG.warning( + 'Ignoring non-json IMDS instance metadata: %s', str(response)) + return {} + + +def maybe_remove_ubuntu_network_config_scripts(paths=None): + """Remove Azure-specific ubuntu network config for non-primary nics. + + @param paths: List of networking scripts or directories to remove when + present. + + In certain supported ubuntu images, static udev rules or netplan yaml + config is delivered in the base ubuntu image to support dhcp on any + additional interfaces which get attached by a customer at some point + after initial boot. 
Since the Azure datasource can now regenerate + network configuration as metadata reports these new devices, we no longer + want the udev rules or netplan's 90-azure-hotplug.yaml to configure + networking on eth1 or greater as it might collide with cloud-init's + configuration. + + Remove the any existing extended network scripts if the datasource is + enabled to write network per-boot. + """ + if not paths: + paths = UBUNTU_EXTENDED_NETWORK_SCRIPTS + logged = False + for path in paths: + if os.path.exists(path): + if not logged: + LOG.info( + 'Removing Ubuntu extended network scripts because' + ' cloud-init updates Azure network configuration on the' + ' following event: %s.', + EventType.BOOT) + logged = True + if os.path.isdir(path): + util.del_dir(path) + else: + util.del_file(path) + + +def _is_platform_viable(seed_dir): + """Check platform environment to report if this datasource may run.""" + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag == AZURE_CHASSIS_ASSET_TAG: + return True + LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) + if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): + return True + return False + + class BrokenAzureDataSource(Exception): pass diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index e82716eb..4e428b71 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1,15 +1,21 @@ # This file is part of cloud-init. See LICENSE file for license information. +from cloudinit import distros from cloudinit import helpers -from cloudinit.sources import DataSourceAzure as dsaz +from cloudinit import url_helper +from cloudinit.sources import ( + UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) from cloudinit.util import (b64e, decode_binary, load_file, write_file, find_freebsd_part, get_path_dev_freebsd, MountFailedError) from cloudinit.version import version_string as vs -from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, - ExitStack, PY26, SkipTest) +from cloudinit.tests.helpers import ( + HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, + ExitStack, PY26, SkipTest) import crypt +import httpretty +import json import os import stat import xml.etree.ElementTree as ET @@ -77,6 +83,106 @@ def construct_valid_ovf_env(data=None, pubkeys=None, return content +NETWORK_METADATA = { + "network": { + "interface": [ + { + "macAddress": "000D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.0.0" + } + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "104.46.124.81" + } + ] + } + } + ] + } +} + + +class TestGetMetadataFromIMDS(HttprettyTestCase): + + with_logs = True + + def setUp(self): + super(TestGetMetadataFromIMDS, self).setUp() + self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2017-12-01" + + @mock.patch('cloudinit.sources.DataSourceAzure.readurl') + @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + def test_get_metadata_does_not_dhcp_if_network_is_up( + self, m_net_is_up, m_dhcp, m_readurl): + """Do not perform DHCP setup when nic is already up.""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(NETWORK_METADATA).encode('utf-8')) + self.assertEqual( + NETWORK_METADATA, + dsaz.get_metadata_from_imds('eth9', retries=3)) + + 
m_net_is_up.assert_called_with('eth9') + m_dhcp.assert_not_called() + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + @mock.patch('cloudinit.sources.DataSourceAzure.readurl') + @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + def test_get_metadata_performs_dhcp_when_network_is_down( + self, m_net_is_up, m_dhcp, m_readurl): + """Perform DHCP setup when nic is not up.""" + m_net_is_up.return_value = False + m_readurl.return_value = url_helper.StringResponse( + json.dumps(NETWORK_METADATA).encode('utf-8')) + + self.assertEqual( + NETWORK_METADATA, + dsaz.get_metadata_from_imds('eth9', retries=2)) + + m_net_is_up.assert_called_with('eth9') + m_dhcp.assert_called_with('eth9') + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + m_readurl.assert_called_with( + self.network_md_url, exception_cb=mock.ANY, + headers={'Metadata': 'true'}, retries=2, timeout=1) + + @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + def test_get_metadata_from_imds_empty_when_no_imds_present( + self, m_net_is_up, m_sleep): + """Return empty dict when IMDS network metadata is absent.""" + httpretty.register_uri( + httpretty.GET, + dsaz.IMDS_URL + 'instance?api-version=2017-12-01', + body={}, status=404) + + m_net_is_up.return_value = True # skips dhcp + + self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2)) + + m_net_is_up.assert_called_with('eth9') + self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list) + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + class TestAzureDataSource(CiTestCase): with_logs = True @@ -95,8 +201,19 @@ class TestAzureDataSource(CiTestCase): self.patches = ExitStack() self.addCleanup(self.patches.close) - self.patches.enter_context(mock.patch.object(dsaz, '_get_random_seed')) - + self.patches.enter_context(mock.patch.object( + dsaz, '_get_random_seed', return_value='wild')) + self.m_get_metadata_from_imds = self.patches.enter_context( + mock.patch.object( + dsaz, 'get_metadata_from_imds', + mock.MagicMock(return_value=NETWORK_METADATA))) + self.m_fallback_nic = self.patches.enter_context( + mock.patch('cloudinit.sources.net.find_fallback_nic', + return_value='eth9')) + self.m_remove_ubuntu_network_scripts = self.patches.enter_context( + mock.patch.object( + dsaz, 'maybe_remove_ubuntu_network_config_scripts', + mock.MagicMock())) super(TestAzureDataSource, self).setUp() def apply_patches(self, patches): @@ -137,7 +254,7 @@ scbus-1 on xpt0 bus 0 ]) return dsaz - def _get_ds(self, data, agent_command=None): + def _get_ds(self, data, agent_command=None, distro=None): def dsdevs(): return data.get('dsdevs', []) @@ -186,8 +303,11 @@ scbus-1 on xpt0 bus 0 side_effect=_wait_for_files)), ]) + if distro is not None: + distro_cls = distros.fetch(distro) + distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths) dsrc = dsaz.DataSourceAzure( - data.get('sys_cfg', {}), distro=None, paths=self.paths) + data.get('sys_cfg', {}), distro=distro, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command @@ -260,29 +380,20 @@ fdescfs /dev/fd fdescfs rw 0 0 res = get_path_dev_freebsd('/etc', mnt_list) self.assertIsNotNone(res) - @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') - def 
test_non_azure_dmi_chassis_asset_tag(self, m_read_dmi_data): - """Report non-azure when DMI's chassis asset tag doesn't match. - - Return False when the asset tag doesn't match Azure's static - AZURE_CHASSIS_ASSET_TAG. - """ + @mock.patch('cloudinit.sources.DataSourceAzure._is_platform_viable') + def test_call_is_platform_viable_seed(self, m_is_platform_viable): + """Check seed_dir using _is_platform_viable and return False.""" # Return a non-matching asset tag value - nonazure_tag = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' - m_read_dmi_data.return_value = nonazure_tag + m_is_platform_viable.return_value = False dsrc = dsaz.DataSourceAzure( {}, distro=None, paths=self.paths) self.assertFalse(dsrc.get_data()) - self.assertEqual( - "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format( - nonazure_tag), - self.logs.getvalue()) + m_is_platform_viable.assert_called_with(dsrc.seed_dir) def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), 'sys_cfg': {}} - dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) @@ -291,6 +402,82 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertTrue(os.path.isfile( os.path.join(self.waagent_d, 'ovf-env.xml'))) + def test_get_data_non_ubuntu_will_not_remove_network_scripts(self): + """get_data on non-Ubuntu will not remove ubuntu net scripts.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + dsrc = self._get_ds(data, distro='debian') + dsrc.get_data() + self.m_remove_ubuntu_network_scripts.assert_not_called() + + def test_get_data_on_ubuntu_will_remove_network_scripts(self): + """get_data will remove ubuntu net scripts on Ubuntu distro.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + dsrc = self._get_ds(data, distro='ubuntu') + dsrc.get_data() + self.m_remove_ubuntu_network_scripts.assert_called_once_with() + + def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): + """Return all structured metadata and cache no class attributes.""" + yaml_cfg = "{agent_command: my_command}\n" + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'}, + 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + dsrc = self._get_ds(data) + expected_cfg = { + 'PreprovisionedVm': False, + 'datasource': {'Azure': {'agent_command': 'my_command'}}, + 'system_info': {'default_user': {'name': u'myuser'}}} + expected_metadata = { + 'azure_data': { + 'configurationsettype': 'LinuxProvisioningConfiguration'}, + 'imds': {'network': {'interface': [{ + 'ipv4': {'ipAddress': [ + {'privateIpAddress': '10.0.0.4', + 'publicIpAddress': '104.46.124.81'}], + 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]}, + 'ipv6': {'ipAddress': []}, + 'macAddress': '000D3A047598'}]}}, + 'instance-id': 'test-instance-id', + 'local-hostname': u'myhost', + 'random_seed': 'wild'} + + crawled_metadata = dsrc.crawl_metadata() + + self.assertItemsEqual( + crawled_metadata.keys(), + ['cfg', 'files', 'metadata', 'userdata_raw']) + self.assertEqual(crawled_metadata['cfg'], expected_cfg) + self.assertEqual( + list(crawled_metadata['files'].keys()), ['ovf-env.xml']) + self.assertIn( + b'myhost', + crawled_metadata['files']['ovf-env.xml']) + self.assertEqual(crawled_metadata['metadata'], expected_metadata) + 
self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR') + self.assertEqual(dsrc.userdata_raw, None) + self.assertEqual(dsrc.metadata, {}) + self.assertEqual(dsrc._metadata_imds, UNSET) + self.assertFalse(os.path.isfile( + os.path.join(self.waagent_d, 'ovf-env.xml'))) + + def test_crawl_metadata_raises_invalid_metadata_on_error(self): + """crawl_metadata raises an exception on invalid ovf-env.xml.""" + data = {'ovfcontent': "BOGUS", 'sys_cfg': {}} + dsrc = self._get_ds(data) + error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:' + ' syntax error: line 1, column 0') + with self.assertRaises(InvalidMetaDataException) as cm: + dsrc.crawl_metadata() + self.assertEqual(str(cm.exception), error_msg) + def test_waagent_d_has_0700_perms(self): # we expect /var/lib/waagent to be created 0700 dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) @@ -314,6 +501,20 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertTrue(ret) self.assertEqual(data['agent_invoked'], cfg['agent_command']) + def test_network_config_set_from_imds(self): + """Datasource.network_config returns IMDS network data.""" + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp4': True}}, + 'version': 2} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + def test_user_cfg_set_agent_command(self): # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} @@ -579,12 +780,34 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertEqual( [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list) + @mock.patch('cloudinit.net.generate_fallback_config') + def test_imds_network_config(self, mock_fallback): + """Network config is generated from IMDS network data when present.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + + expected_cfg = { + 'ethernets': { + 'eth0': {'dhcp4': True, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, + 'version': 2} + + self.assertEqual(expected_cfg, dsrc.network_config) + mock_fallback.assert_not_called() + @mock.patch('cloudinit.net.get_interface_mac') @mock.patch('cloudinit.net.get_devicelist') @mock.patch('cloudinit.net.device_driver') @mock.patch('cloudinit.net.generate_fallback_config') - def test_network_config(self, mock_fallback, mock_dd, - mock_devlist, mock_get_mac): + def test_fallback_network_config(self, mock_fallback, mock_dd, + mock_devlist, mock_get_mac): + """On absent IMDS network data, generate network fallback config.""" odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), 'sys_cfg': {}} @@ -605,6 +828,8 @@ fdescfs /dev/fd fdescfs rw 0 0 mock_get_mac.return_value = '00:11:22:33:44:55' dsrc = self._get_ds(data) + # Represent empty response from network imds + self.m_get_metadata_from_imds.return_value = {} ret = dsrc.get_data() self.assertTrue(ret) @@ -617,8 +842,9 @@ fdescfs /dev/fd fdescfs rw 0 0 @mock.patch('cloudinit.net.get_devicelist') @mock.patch('cloudinit.net.device_driver') @mock.patch('cloudinit.net.generate_fallback_config') - def test_network_config_blacklist(self, mock_fallback, mock_dd, - mock_devlist, mock_get_mac): + def test_fallback_network_config_blacklist(self, mock_fallback, mock_dd, + 
mock_devlist, mock_get_mac): + """On absent network metadata, blacklist mlx from fallback config.""" odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), 'sys_cfg': {}} @@ -649,6 +875,8 @@ fdescfs /dev/fd fdescfs rw 0 0 mock_get_mac.return_value = '00:11:22:33:44:55' dsrc = self._get_ds(data) + # Represent empty response from network imds + self.m_get_metadata_from_imds.return_value = {} ret = dsrc.get_data() self.assertTrue(ret) @@ -689,9 +917,12 @@ class TestAzureBounce(CiTestCase): mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) self.patches.enter_context( - mock.patch.object(dsaz.util, 'which', lambda x: True)) + mock.patch.object(dsaz, 'get_metadata_from_imds', + mock.MagicMock(return_value={}))) self.patches.enter_context( - mock.patch.object(dsaz, '_get_random_seed')) + mock.patch.object(dsaz.util, 'which', lambda x: True)) + self.patches.enter_context(mock.patch.object( + dsaz, '_get_random_seed', return_value='wild')) def _dmi_mocks(key): if key == 'system-uuid': @@ -719,9 +950,12 @@ class TestAzureBounce(CiTestCase): mock.patch.object(dsaz, 'set_hostname')) self.subp = self.patches.enter_context( mock.patch('cloudinit.sources.DataSourceAzure.util.subp')) + self.find_fallback_nic = self.patches.enter_context( + mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9')) def tearDown(self): self.patches.close() + super(TestAzureBounce, self).tearDown() def _get_ds(self, ovfcontent=None, agent_command=None): if ovfcontent is not None: @@ -927,7 +1161,7 @@ class TestLoadAzureDsDir(CiTestCase): str(context_manager.exception)) -class TestReadAzureOvf(TestCase): +class TestReadAzureOvf(CiTestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "" + construct_valid_ovf_env(data={}) @@ -1188,6 +1422,25 @@ class TestCanDevBeReformatted(CiTestCase): "(datasource.Azure.never_destroy_ntfs)", msg) +class TestClearCachedData(CiTestCase): + + def test_clear_cached_attrs_clears_imds(self): + """All class attributes are reset to defaults, including imds data.""" + tmp = self.tmp_dir() + paths = helpers.Paths( + {'cloud_dir': tmp, 'run_dir': tmp}) + dsrc = dsaz.DataSourceAzure({}, distro=None, paths=paths) + clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds] + dsrc.metadata = 'md' + dsrc.userdata = 'ud' + dsrc._metadata_imds = 'imds' + dsrc._dirty_cache = True + dsrc.clear_cached_attrs() + self.assertEqual( + [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], + clean_values) + + class TestAzureNetExists(CiTestCase): def test_azure_net_must_exist_for_legacy_objpkl(self): @@ -1398,4 +1651,94 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): self.assertEqual(m_net.call_count, 1) +class TestRemoveUbuntuNetworkConfigScripts(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestRemoveUbuntuNetworkConfigScripts, self).setUp() + self.tmp = self.tmp_dir() + + def test_remove_network_scripts_removes_both_files_and_directories(self): + """Any files or directories in paths are removed when present.""" + file1 = self.tmp_path('file1', dir=self.tmp) + subdir = self.tmp_path('sub1', dir=self.tmp) + subfile = self.tmp_path('leaf1', dir=subdir) + write_file(file1, 'file1content') + write_file(subfile, 'leafcontent') + dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1]) + + for path in (file1, subdir, subfile): + self.assertFalse(os.path.exists(path), + 'Found unremoved: %s' % path) + + expected_logs = [ + 'INFO: Removing Ubuntu extended 
network scripts because cloud-init' + ' updates Azure network configuration on the following event:' + ' System boot.', + 'Recursively deleting %s' % subdir, + 'Attempting to remove %s' % file1] + for log in expected_logs: + self.assertIn(log, self.logs.getvalue()) + + def test_remove_network_scripts_only_attempts_removal_if_path_exists(self): + """Any files or directories absent are skipped without error.""" + dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[ + self.tmp_path('nodirhere/', dir=self.tmp), + self.tmp_path('notfilehere', dir=self.tmp)]) + self.assertNotIn('/not/a', self.logs.getvalue()) # No delete logs + + @mock.patch('cloudinit.sources.DataSourceAzure.os.path.exists') + def test_remove_network_scripts_default_removes_stock_scripts(self, + m_exists): + """Azure's stock ubuntu image scripts and artifacts are removed.""" + # Report path absent on all to avoid delete operation + m_exists.return_value = False + dsaz.maybe_remove_ubuntu_network_config_scripts() + calls = m_exists.call_args_list + for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS: + self.assertIn(mock.call(path), calls) + + +class TestWBIsPlatformViable(CiTestCase): + """White box tests for _is_platform_viable.""" + with_logs = True + + @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') + def test_true_on_non_azure_chassis(self, m_read_dmi_data): + """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG.""" + m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + self.assertTrue(dsaz._is_platform_viable('doesnotmatter')) + + @mock.patch('cloudinit.sources.DataSourceAzure.os.path.exists') + @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') + def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist): + """Return True if ovf-env.xml exists in known seed dirs.""" + # Non-matching Azure chassis-asset-tag + m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' + + m_exist.return_value = True + self.assertTrue(dsaz._is_platform_viable('/some/seed/dir')) + m_exist.called_once_with('/other/seed/dir') + + def test_false_on_no_matching_azure_criteria(self): + """Report non-azure on unmatched asset tag, ovf-env absent and no dev. + + Return False when the asset tag doesn't match Azure's static + AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs + and no devices have a label starting with prefix 'rd_rdfe_'. + """ + self.assertFalse(wrap_and_call( + 'cloudinit.sources.DataSourceAzure', + {'os.path.exists': False, + # Non-matching Azure chassis-asset-tag + 'util.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X', + 'util.which': None}, + dsaz._is_platform_viable, 'doesnotmatter')) + self.assertIn( + "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format( + dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'), + self.logs.getvalue()) + + # vi: ts=4 expandtab -- cgit v1.2.3 From aaffd59431fe05932a66016db941fe197c4e7620 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Aug 2018 20:25:31 +0000 Subject: Add datasource Oracle Compute Infrastructure (OCI). This adds a Oracle specific datasource that functions with OCI. It is a simplified version of the OpenStack metadata server with support for vendor-data. It does not support the OCI-C (classic) platform. Also here is a move of BrokenMetadata to common 'sources' as this was the third occurrence of that class. 
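For review context, an illustrative usage sketch (not part of this patch) of the new read_metadata() helper: the endpoint, version string and field names below simply mirror the docstrings added in cloudinit/sources/DataSourceOracle.py, and the snippet assumes it runs on an OCI instance where the link-local metadata service is reachable and the DMI system-uuid is readable (i.e. as root).

    # Hypothetical usage sketch, assuming the 2013-10-17 OpenStack-style
    # index at http://169.254.169.254/openstack/ is reachable.
    from cloudinit.sources import DataSourceOracle as oracle

    data = oracle.read_metadata()      # {'2013-10-17': {...}}
    md = data['2013-10-17']
    print(md['system_uuid'])           # lower-cased DMI system-uuid
    print(md['meta_data']['uuid'])     # OCID used as instance-id
    print(md.get('user_data'))         # raw bytes, None when not provided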
--- .pylintrc | 3 +- cloudinit/apport.py | 1 + cloudinit/settings.py | 1 + cloudinit/sources/DataSourceIBMCloud.py | 13 +- cloudinit/sources/DataSourceOpenStack.py | 12 +- cloudinit/sources/DataSourceOracle.py | 233 +++++++++++++++ cloudinit/sources/__init__.py | 4 + cloudinit/sources/helpers/openstack.py | 6 +- cloudinit/sources/tests/test_oracle.py | 331 ++++++++++++++++++++++ doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/oracle.rst | 26 ++ tests/unittests/test_datasource/test_common.py | 2 + tests/unittests/test_datasource/test_openstack.py | 13 +- tests/unittests/test_ds_identify.py | 19 ++ tools/ds-identify | 8 +- 15 files changed, 650 insertions(+), 23 deletions(-) create mode 100644 cloudinit/sources/DataSourceOracle.py create mode 100644 cloudinit/sources/tests/test_oracle.py create mode 100644 doc/rtd/topics/datasources/oracle.rst (limited to 'tests/unittests/test_datasource') diff --git a/.pylintrc b/.pylintrc index 3bfa0c81..e376b48b 100644 --- a/.pylintrc +++ b/.pylintrc @@ -61,7 +61,8 @@ ignored-modules= # List of class names for which member attributes should not be checked (useful # for classes with dynamically set attributes). This supports the use of # qualified names. -ignored-classes=optparse.Values,thread._local +# argparse.Namespace from https://github.com/PyCQA/pylint/issues/2413 +ignored-classes=argparse.Namespace,optparse.Values,thread._local # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 130ff269..22cb7fde 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -30,6 +30,7 @@ KNOWN_CLOUD_NAMES = [ 'NoCloud', 'OpenNebula', 'OpenStack', + 'Oracle', 'OVF', 'OpenTelekomCloud', 'Scaleway', diff --git a/cloudinit/settings.py b/cloudinit/settings.py index dde5749d..ea367cb7 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -38,6 +38,7 @@ CFG_BUILTIN = { 'Scaleway', 'Hetzner', 'IBMCloud', + 'Oracle', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index 01106ec0..a5358148 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -295,7 +295,7 @@ def read_md(): results = metadata_from_dir(path) else: results = util.mount_cb(path, metadata_from_dir) - except BrokenMetadata as e: + except sources.BrokenMetadata as e: raise RuntimeError( "Failed reading IBM config disk (platform=%s path=%s): %s" % (platform, path, e)) @@ -304,10 +304,6 @@ def read_md(): return ret -class BrokenMetadata(IOError): - pass - - def metadata_from_dir(source_dir): """Walk source_dir extracting standardized metadata. 
@@ -352,12 +348,13 @@ def metadata_from_dir(source_dir): try: data = transl(raw) except Exception as e: - raise BrokenMetadata("Failed decoding %s: %s" % (path, e)) + raise sources.BrokenMetadata( + "Failed decoding %s: %s" % (path, e)) results[name] = data if results.get('metadata_raw') is None: - raise BrokenMetadata( + raise sources.BrokenMetadata( "%s missing required file 'meta_data.json'" % source_dir) results['metadata'] = {} @@ -368,7 +365,7 @@ def metadata_from_dir(source_dir): try: md['random_seed'] = base64.b64decode(md_raw['random_seed']) except (ValueError, TypeError) as e: - raise BrokenMetadata( + raise sources.BrokenMetadata( "Badly formatted metadata random_seed entry: %s" % e) renames = ( diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index b9ade90d..4a015240 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -13,6 +13,7 @@ from cloudinit import url_helper from cloudinit import util from cloudinit.sources.helpers import openstack +from cloudinit.sources import DataSourceOracle as oracle LOG = logging.getLogger(__name__) @@ -28,8 +29,7 @@ DMI_PRODUCT_NOVA = 'OpenStack Nova' DMI_PRODUCT_COMPUTE = 'OpenStack Compute' VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE] DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud' -DMI_ASSET_TAG_ORACLE_CLOUD = 'OracleCloud.com' -VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_ORACLE_CLOUD] +VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM] class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): @@ -122,8 +122,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): False when unable to contact metadata service or when metadata format is invalid or disabled. """ - if not detect_openstack(): + oracle_considered = 'Oracle' in self.sys_cfg.get('datasource_list') + if not detect_openstack(accept_oracle=not oracle_considered): return False + if self.perform_dhcp_setup: # Setup networking in init-local stage. try: with EphemeralDHCPv4(self.fallback_interface): @@ -215,7 +217,7 @@ def read_metadata_service(base_url, ssl_details=None, return reader.read_v2() -def detect_openstack(): +def detect_openstack(accept_oracle=False): """Return True when a potential OpenStack platform is detected.""" if not util.is_x86(): return True # Non-Intel cpus don't properly report dmi product names @@ -224,6 +226,8 @@ def detect_openstack(): return True elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS: return True + elif accept_oracle and oracle._is_platform_viable(): + return True elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA: return True return False diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py new file mode 100644 index 00000000..fab39af3 --- /dev/null +++ b/cloudinit/sources/DataSourceOracle.py @@ -0,0 +1,233 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Datasource for Oracle (OCI/Oracle Cloud Infrastructure) + +OCI provides a OpenStack like metadata service which provides only +'2013-10-17' and 'latest' versions.. + +Notes: + * This datasource does not support the OCI-Classic. OCI-Classic + provides an EC2 lookalike metadata service. + * The uuid provided in DMI data is not the same as the meta-data provided + instance-id, but has an equivalent lifespan. + * We do need to support upgrade from an instance that cloud-init + identified as OpenStack. 
+ * Both bare-metal and vms use iscsi root + * Both bare-metal and vms provide chassis-asset-tag of OracleCloud.com +""" + +from cloudinit.url_helper import combine_url, readurl, UrlError +from cloudinit.net import dhcp +from cloudinit import net +from cloudinit import sources +from cloudinit import util +from cloudinit.net import cmdline +from cloudinit import log as logging + +import json +import re + +LOG = logging.getLogger(__name__) + +CHASSIS_ASSET_TAG = "OracleCloud.com" +METADATA_ENDPOINT = "http://169.254.169.254/openstack/" + + +class DataSourceOracle(sources.DataSource): + + dsname = 'Oracle' + system_uuid = None + vendordata_pure = None + _network_config = sources.UNSET + + def _is_platform_viable(self): + """Check platform environment to report if this datasource may run.""" + return _is_platform_viable() + + def _get_data(self): + if not self._is_platform_viable(): + return False + + # network may be configured if iscsi root. If that is the case + # then read_kernel_cmdline_config will return non-None. + if _is_iscsi_root(): + data = self.crawl_metadata() + else: + with dhcp.EphemeralDHCPv4(net.find_fallback_nic()): + data = self.crawl_metadata() + + self._crawled_metadata = data + vdata = data['2013-10-17'] + + self.userdata_raw = vdata.get('user_data') + self.system_uuid = vdata['system_uuid'] + + vd = vdata.get('vendor_data') + if vd: + self.vendordata_pure = vd + try: + self.vendordata_raw = sources.convert_vendordata(vd) + except ValueError as e: + LOG.warning("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None + + mdcopies = ('public_keys',) + md = dict([(k, vdata['meta_data'].get(k)) + for k in mdcopies if k in vdata['meta_data']]) + + mdtrans = ( + # oracle meta_data.json name, cloudinit.datasource.metadata name + ('availability_zone', 'availability-zone'), + ('hostname', 'local-hostname'), + ('launch_index', 'launch-index'), + ('uuid', 'instance-id'), + ) + for dsname, ciname in mdtrans: + if dsname in vdata['meta_data']: + md[ciname] = vdata['meta_data'][dsname] + + self.metadata = md + return True + + def crawl_metadata(self): + return read_metadata() + + def check_instance_id(self, sys_cfg): + """quickly check (local only) if self.instance_id is still valid + + On Oracle, the dmi-provided system uuid differs from the instance-id + but has the same life-span.""" + return sources.instance_id_matches_system_uuid(self.system_uuid) + + def get_public_ssh_keys(self): + return sources.normalize_pubkey_data(self.metadata.get('public_keys')) + + @property + def network_config(self): + """Network config is read from initramfs provided files + If none is present, then we fall back to fallback configuration. + + One thing to note here is that this method is not currently + considered at all if there is is kernel/initramfs provided + data. In that case, stages considers that the cmdline data + overrides datasource provided data and does not consult here. 
+
+        We nonetheless return cmdline provided config if present
+        and fallback to generate fallback."""
+        if self._network_config == sources.UNSET:
+            cmdline_cfg = cmdline.read_kernel_cmdline_config()
+            if cmdline_cfg:
+                self._network_config = cmdline_cfg
+            else:
+                self._network_config = self.distro.generate_fallback_config()
+        return self._network_config
+
+
+def _read_system_uuid():
+    sys_uuid = util.read_dmi_data('system-uuid')
+    return None if sys_uuid is None else sys_uuid.lower()
+
+
+def _is_platform_viable():
+    asset_tag = util.read_dmi_data('chassis-asset-tag')
+    return asset_tag == CHASSIS_ASSET_TAG
+
+
+def _is_iscsi_root():
+    return bool(cmdline.read_kernel_cmdline_config())
+
+
+def _load_index(content):
+    """Return a list of entries parsed from content.
+
+    OpenStack's metadata service returns a newline delimited list
+    of items. Oracle's implementation has html formatted list of links.
+    The parser here just grabs targets from <a href="target">
+    and throws away "../".
+
+    Oracle has accepted that to be buggy and may fix in the future
+    to instead return a '\n' delimited plain text list. This function
+    will continue to work if that change is made."""
+    if not content.lower().startswith("<html>"):
+        return content.splitlines()
+    items = re.findall(
+        r'href="(?P<target>[^"]*)"', content, re.MULTILINE | re.IGNORECASE)
+    return [i for i in items if not i.startswith(".")]
+
+
+def read_metadata(endpoint_base=METADATA_ENDPOINT, sys_uuid=None,
+                  version='2013-10-17'):
+    """Read metadata, return a dictionary.
+
+    Each path listed in the index will be represented in the dictionary.
+    If the path ends in .json, then the content will be decoded and
+    populated into the dictionary.
+
+    The system uuid (/sys/class/dmi/id/product_uuid) is also populated.
+    Example: given paths = ('user_data', 'meta_data.json')
+    This would return:
+      {version: {'user_data': b'blob', 'meta_data': json.loads(blob.decode()),
+                 'system_uuid': '3b54f2e0-3ab2-458d-b770-af9926eee3b2'}}
+    """
+    endpoint = combine_url(endpoint_base, version) + "/"
+    if sys_uuid is None:
+        sys_uuid = _read_system_uuid()
+    if not sys_uuid:
+        raise sources.BrokenMetadata("Failed to read system uuid.")
+
+    try:
+        resp = readurl(endpoint)
+        if not resp.ok():
+            raise sources.BrokenMetadata(
+                "Bad response from %s: %s" % (endpoint, resp.code))
+    except UrlError as e:
+        raise sources.BrokenMetadata(
+            "Failed to read index at %s: %s" % (endpoint, e))
+
+    entries = _load_index(resp.contents.decode('utf-8'))
+    LOG.debug("index url %s contained: %s", endpoint, entries)
+
+    # meta_data.json is required.
+ mdj = 'meta_data.json' + if mdj not in entries: + raise sources.BrokenMetadata( + "Required field '%s' missing in index at %s" % (mdj, endpoint)) + + ret = {'system_uuid': sys_uuid} + for path in entries: + response = readurl(combine_url(endpoint, path)) + if path.endswith(".json"): + ret[path.rpartition(".")[0]] = ( + json.loads(response.contents.decode('utf-8'))) + else: + ret[path] = response.contents + + return {version: ret} + + +# Used to match classes to dependencies +datasources = [ + (DataSourceOracle, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +if __name__ == "__main__": + import argparse + import os + + parser = argparse.ArgumentParser(description='Query Oracle Cloud Metadata') + parser.add_argument("--endpoint", metavar="URL", + help="The url of the metadata service.", + default=METADATA_ENDPOINT) + args = parser.parse_args() + sys_uuid = "uuid-not-available-not-root" if os.geteuid() != 0 else None + + data = read_metadata(endpoint_base=args.endpoint, sys_uuid=sys_uuid) + data['is_platform_viable'] = _is_platform_viable() + print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 06e613f8..41fde9ba 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -671,6 +671,10 @@ def convert_vendordata(data, recurse=True): raise ValueError("Unknown data type for vendordata: %s" % type(data)) +class BrokenMetadata(IOError): + pass + + # 'depends' is a list of dependencies (DEP_FILESYSTEM) # ds_list is a list of 2 item lists # ds_list = [ diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index a4cf0667..8f9c1441 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -21,6 +21,8 @@ from cloudinit import sources from cloudinit import url_helper from cloudinit import util +from cloudinit.sources import BrokenMetadata + # See https://docs.openstack.org/user-guide/cli-config-drive.html LOG = logging.getLogger(__name__) @@ -68,10 +70,6 @@ class NonReadable(IOError): pass -class BrokenMetadata(IOError): - pass - - class SourceMixin(object): def _ec2_name_to_device(self, name): if not self.ec2_metadata: diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py new file mode 100644 index 00000000..7599126c --- /dev/null +++ b/cloudinit/sources/tests/test_oracle.py @@ -0,0 +1,331 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.sources import DataSourceOracle as oracle +from cloudinit.sources import BrokenMetadata +from cloudinit import helpers + +from cloudinit.tests import helpers as test_helpers + +from textwrap import dedent +import argparse +import httpretty +import json +import mock +import os +import six +import uuid + +DS_PATH = "cloudinit.sources.DataSourceOracle" +MD_VER = "2013-10-17" + + +class TestDataSourceOracle(test_helpers.CiTestCase): + """Test datasource DataSourceOracle.""" + + ds_class = oracle.DataSourceOracle + + my_uuid = str(uuid.uuid4()) + my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj", + "name": "ci-vm1", "availability_zone": "phx-ad-3", + "hostname": "ci-vm1hostname", + "launch_index": 0, "files": [], + "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"}, + "meta": {}} + + def _patch_instance(self, inst, patches): + """Patch an instance of a class 'inst'. + for each name, kwargs in patches: + inst.name = mock.Mock(**kwargs) + returns a namespace object that has + namespace.name = mock.Mock(**kwargs) + Do not bother with cleanup as instance is assumed transient.""" + mocks = argparse.Namespace() + for name, kwargs in patches.items(): + imock = mock.Mock(name=name, spec=getattr(inst, name), **kwargs) + setattr(mocks, name, imock) + setattr(inst, name, imock) + return mocks + + def _get_ds(self, sys_cfg=None, distro=None, paths=None, ud_proc=None, + patches=None): + if sys_cfg is None: + sys_cfg = {} + if patches is None: + patches = {} + if paths is None: + tmpd = self.tmp_dir() + dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd), + 'run_dir': self.tmp_path('run_dir')} + for d in dirs.values(): + os.mkdir(d) + paths = helpers.Paths(dirs) + + ds = self.ds_class(sys_cfg=sys_cfg, distro=distro, + paths=paths, ud_proc=ud_proc) + + return ds, self._patch_instance(ds, patches) + + def test_platform_not_viable_returns_false(self): + ds, mocks = self._get_ds( + patches={'_is_platform_viable': {'return_value': False}}) + self.assertFalse(ds._get_data()) + mocks._is_platform_viable.assert_called_once_with() + + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_without_userdata(self, m_is_iscsi_root): + """If no user-data is provided, it should not be in return dict.""" + ds, mocks = self._get_ds(patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + self.assertTrue(ds._get_data()) + mocks._is_platform_viable.assert_called_once_with() + mocks.crawl_metadata.assert_called_once_with() + self.assertEqual(self.my_uuid, ds.system_uuid) + self.assertEqual(self.my_md['availability_zone'], ds.availability_zone) + self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys()) + self.assertEqual(self.my_md['uuid'], ds.get_instance_id()) + self.assertIsNone(ds.userdata_raw) + + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_with_vendordata(self, m_is_iscsi_root): + """Test with vendor data.""" + vd = {'cloud-init': '#cloud-config\nkey: value'} + ds, mocks = self._get_ds(patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md, + 'vendor_data': vd}}}}) + self.assertTrue(ds._get_data()) + mocks._is_platform_viable.assert_called_once_with() + mocks.crawl_metadata.assert_called_once_with() + self.assertEqual(vd, ds.vendordata_pure) + self.assertEqual(vd['cloud-init'], ds.vendordata_raw) + + 
@mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_with_userdata(self, m_is_iscsi_root): + """Ensure user-data is populated if present and is binary.""" + my_userdata = b'abcdefg' + ds, mocks = self._get_ds(patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md, + 'user_data': my_userdata}}}}) + self.assertTrue(ds._get_data()) + mocks._is_platform_viable.assert_called_once_with() + mocks.crawl_metadata.assert_called_once_with() + self.assertEqual(self.my_uuid, ds.system_uuid) + self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys()) + self.assertEqual(self.my_md['uuid'], ds.get_instance_id()) + self.assertEqual(my_userdata, ds.userdata_raw) + + @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config") + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_network_cmdline(self, m_is_iscsi_root, m_cmdline_config): + """network_config should read kernel cmdline.""" + distro = mock.MagicMock() + ds, _ = self._get_ds(distro=distro, patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + ncfg = {'version': 1, 'config': [{'a': 'b'}]} + m_cmdline_config.return_value = ncfg + self.assertTrue(ds._get_data()) + self.assertEqual(ncfg, ds.network_config) + m_cmdline_config.assert_called_once_with() + self.assertFalse(distro.generate_fallback_config.called) + + @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config") + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_network_fallback(self, m_is_iscsi_root, m_cmdline_config): + """test that fallback network is generated if no kernel cmdline.""" + distro = mock.MagicMock() + ds, _ = self._get_ds(distro=distro, patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + ncfg = {'version': 1, 'config': [{'a': 'b'}]} + m_cmdline_config.return_value = None + self.assertTrue(ds._get_data()) + ncfg = {'version': 1, 'config': [{'distro1': 'value'}]} + distro.generate_fallback_config.return_value = ncfg + self.assertEqual(ncfg, ds.network_config) + m_cmdline_config.assert_called_once_with() + distro.generate_fallback_config.assert_called_once_with() + self.assertEqual(1, m_cmdline_config.call_count) + + # test that the result got cached, and the methods not re-called. + self.assertEqual(ncfg, ds.network_config) + self.assertEqual(1, m_cmdline_config.call_count) + + +@mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4())) +class TestReadMetaData(test_helpers.HttprettyTestCase): + """Test the read_metadata which interacts with http metadata service.""" + + mdurl = oracle.METADATA_ENDPOINT + my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj", + "name": "ci-vm1", "availability_zone": "phx-ad-3", + "hostname": "ci-vm1hostname", + "launch_index": 0, "files": [], + "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"}, + "meta": {}} + + def populate_md(self, data): + """call httppretty.register_url for each item dict 'data', + including valid indexes. 
Text values converted to bytes.""" + httpretty.register_uri( + httpretty.GET, self.mdurl + MD_VER + "/", + '\n'.join(data.keys()).encode('utf-8')) + for k, v in data.items(): + httpretty.register_uri( + httpretty.GET, self.mdurl + MD_VER + "/" + k, + v if not isinstance(v, six.text_type) else v.encode('utf-8')) + + def test_broken_no_sys_uuid(self, m_read_system_uuid): + """Datasource requires ability to read system_uuid and true return.""" + m_read_system_uuid.return_value = None + self.assertRaises(BrokenMetadata, oracle.read_metadata) + + def test_broken_no_metadata_json(self, m_read_system_uuid): + """Datasource requires meta_data.json.""" + httpretty.register_uri( + httpretty.GET, self.mdurl + MD_VER + "/", + '\n'.join(['user_data']).encode('utf-8')) + with self.assertRaises(BrokenMetadata) as cm: + oracle.read_metadata() + self.assertIn("Required field 'meta_data.json' missing", + str(cm.exception)) + + def test_with_userdata(self, m_read_system_uuid): + data = {'user_data': b'#!/bin/sh\necho hi world\n', + 'meta_data.json': json.dumps(self.my_md)} + self.populate_md(data) + result = oracle.read_metadata()[MD_VER] + self.assertEqual(data['user_data'], result['user_data']) + self.assertEqual(self.my_md, result['meta_data']) + + def test_without_userdata(self, m_read_system_uuid): + data = {'meta_data.json': json.dumps(self.my_md)} + self.populate_md(data) + result = oracle.read_metadata()[MD_VER] + self.assertNotIn('user_data', result) + self.assertEqual(self.my_md, result['meta_data']) + + def test_unknown_fields_included(self, m_read_system_uuid): + """Unknown fields listed in index should be included. + And those ending in .json should be decoded.""" + some_data = {'key1': 'data1', 'subk1': {'subd1': 'subv'}} + some_vendor_data = {'cloud-init': 'foo'} + data = {'meta_data.json': json.dumps(self.my_md), + 'some_data.json': json.dumps(some_data), + 'vendor_data.json': json.dumps(some_vendor_data), + 'other_blob': b'this is blob'} + self.populate_md(data) + result = oracle.read_metadata()[MD_VER] + self.assertNotIn('user_data', result) + self.assertEqual(self.my_md, result['meta_data']) + self.assertEqual(some_data, result['some_data']) + self.assertEqual(some_vendor_data, result['vendor_data']) + self.assertEqual(data['other_blob'], result['other_blob']) + + +class TestIsPlatformViable(test_helpers.CiTestCase): + @mock.patch(DS_PATH + ".util.read_dmi_data", + return_value=oracle.CHASSIS_ASSET_TAG) + def test_expected_viable(self, m_read_dmi_data): + """System with known chassis tag is viable.""" + self.assertTrue(oracle._is_platform_viable()) + m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) + + @mock.patch(DS_PATH + ".util.read_dmi_data", return_value=None) + def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data): + """System without known chassis tag is not viable.""" + self.assertFalse(oracle._is_platform_viable()) + m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) + + @mock.patch(DS_PATH + ".util.read_dmi_data", return_value="LetsGoCubs") + def test_expected_not_viable_other(self, m_read_dmi_data): + """System with unnown chassis tag is not viable.""" + self.assertFalse(oracle._is_platform_viable()) + m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) + + +class TestLoadIndex(test_helpers.CiTestCase): + """_load_index handles parsing of an index into a proper list. + The tests here guarantee correct parsing of html version or + a fixed version. 
See the function docstring for more doc."""
+
+    _known_html_api_versions = dedent("""\
+        <html>
+        <head><title>Index of /openstack/</title></head>
+        <body bgcolor="white">
+        <h1>Index of /openstack/</h1><hr><pre><a href="../">../</a>
+        <a href="2013-10-17/">2013-10-17/</a>   27-Jun-2018 12:22  -
+        <a href="latest/">latest/</a>           27-Jun-2018 12:22  -
+        </pre><hr></body>
+        </html>
+        """)
+
+    _known_html_contents = dedent("""\
+        <html>
+        <head><title>Index of /openstack/2013-10-17/</title></head>
+        <body bgcolor="white">
+        <h1>Index of /openstack/2013-10-17/</h1><hr><pre><a href="../">../</a>
+        <a href="meta_data.json">meta_data.json</a>  27-Jun-2018 12:22  679
+        <a href="user_data">user_data</a>            27-Jun-2018 12:22  146
+        </pre><hr></body>
+        </html>
+        """)
+
+    def test_parse_html(self):
+        """Test parsing of lower case html."""
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index(self._known_html_api_versions))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index(self._known_html_contents))
+
+    def test_parse_html_upper(self):
+        """Test parsing of upper case html, although known content is lower."""
+        def _toupper(data):
+            return data.replace("<html", "<HTML").replace("html>", "HTML>")
+
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index(_toupper(self._known_html_api_versions)))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index(_toupper(self._known_html_contents)))
+
+    def test_parse_newline_list_with_endl(self):
+        """Test parsing of newline separated list with ending newline."""
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index("\n".join(["2013-10-17/", "latest/", ""])))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index("\n".join(["meta_data.json", "user_data", ""])))
+
+    def test_parse_newline_list_without_endl(self):
+        """Test parsing of newline separated list with no ending newline.
+
+        Actual openstack implementation does not include trailing newline."""
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index("\n".join(["2013-10-17/", "latest/"])))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index("\n".join(["meta_data.json", "user_data"])))
+
+
+# vi: ts=4 expandtab
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 30e57d85..83034589 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -189,6 +189,7 @@ Follow for more information.
    datasources/nocloud.rst
    datasources/opennebula.rst
    datasources/openstack.rst
+   datasources/oracle.rst
    datasources/ovf.rst
    datasources/smartos.rst
    datasources/fallback.rst
diff --git a/doc/rtd/topics/datasources/oracle.rst b/doc/rtd/topics/datasources/oracle.rst
new file mode 100644
index 00000000..f2383cee
--- /dev/null
+++ b/doc/rtd/topics/datasources/oracle.rst
@@ -0,0 +1,26 @@
+.. _datasource_oracle:
+
+Oracle
+======
+
+This datasource reads metadata, vendor-data and user-data from
+`Oracle Compute Infrastructure`_ (OCI).
+
+Oracle Platform
+---------------
+OCI provides bare metal and virtual machines. In both cases,
+the platform identifies itself via DMI data in the chassis asset tag
+with the string 'OracleCloud.com'.
+
+Oracle's platform provides a metadata service that mimics the 2013-10-17
+version of OpenStack metadata service. Initially support for Oracle
+was done via the OpenStack datasource.
+
+Cloud-init has a specific datasource for Oracle in order to:
+ a. allow and support future growth of the OCI platform.
+ b. address small differences between OpenStack and Oracle metadata
+    implementation.
+
+
+.. _Oracle Compute Infrastructure: https://cloud.oracle.com/
+..
vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 1a5a3db2..6b01a4ea 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -20,6 +20,7 @@ from cloudinit.sources import ( DataSourceNoCloud as NoCloud, DataSourceOpenNebula as OpenNebula, DataSourceOpenStack as OpenStack, + DataSourceOracle as Oracle, DataSourceOVF as OVF, DataSourceScaleway as Scaleway, DataSourceSmartOS as SmartOS, @@ -37,6 +38,7 @@ DEFAULT_LOCAL = [ IBMCloud.DataSourceIBMCloud, NoCloud.DataSourceNoCloud, OpenNebula.DataSourceOpenNebula, + Oracle.DataSourceOracle, OVF.DataSourceOVF, SmartOS.DataSourceSmartOS, Ec2.DataSourceEc2Local, diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index d862f4bc..6e1e971b 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -16,7 +16,7 @@ from six import StringIO from cloudinit import helpers from cloudinit import settings -from cloudinit.sources import convert_vendordata, UNSET +from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET from cloudinit.sources import DataSourceOpenStack as ds from cloudinit.sources.helpers import openstack from cloudinit import util @@ -186,7 +186,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): if k.endswith('meta_data.json'): os_files[k] = json.dumps(os_meta) _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) + self.assertRaises(BrokenMetadata, _read_metadata_service) def test_userdata_empty(self): os_files = copy.deepcopy(OS_FILES) @@ -217,7 +217,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): if k.endswith('vendor_data.json'): os_files[k] = '{' # some invalid json _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) + self.assertRaises(BrokenMetadata, _read_metadata_service) def test_metadata_invalid(self): os_files = copy.deepcopy(OS_FILES) @@ -225,7 +225,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): if k.endswith('meta_data.json'): os_files[k] = '{' # some invalid json _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) + self.assertRaises(BrokenMetadata, _read_metadata_service) @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_datasource(self, m_dhcp): @@ -525,8 +525,11 @@ class TestDetectOpenStack(test_helpers.CiTestCase): m_dmi.side_effect = fake_dmi_read self.assertTrue( - ds.detect_openstack(), + ds.detect_openstack(accept_oracle=True), 'Expected detect_openstack == True on OracleCloud.com') + self.assertFalse( + ds.detect_openstack(accept_oracle=False), + 'Expected detect_openstack == False.') @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 64d9f9f8..e08e7908 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -12,6 +12,7 @@ from cloudinit.tests.helpers import ( from cloudinit.sources import DataSourceIBMCloud as ds_ibm from cloudinit.sources import DataSourceSmartOS as ds_smartos +from cloudinit.sources import DataSourceOracle as ds_oracle UNAME_MYSYS = ("Linux bart 
4.4.0-62-generic #83-Ubuntu " "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") @@ -598,6 +599,18 @@ class TestIsIBMProvisioning(DsIdentifyBase): self.assertIn("from current boot", ret.stderr) +class TestOracle(DsIdentifyBase): + def test_found_by_chassis(self): + """Simple positive test of Oracle by chassis id.""" + self._test_ds_found('Oracle') + + def test_not_found(self): + """Simple negative test of Oracle.""" + mycfg = copy.deepcopy(VALID_CFG['Oracle']) + mycfg['files'][P_CHASSIS_ASSET_TAG] = "Not Oracle" + self._check_via_dict(mycfg, rc=RC_NOT_FOUND) + + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" if disks is None: @@ -838,6 +851,12 @@ VALID_CFG = { }, ], }, + 'Oracle': { + 'ds': 'Oracle', + 'files': { + P_CHASSIS_ASSET_TAG: ds_oracle.CHASSIS_ASSET_TAG + '\n', + } + }, 'SmartOS-bhyve': { 'ds': 'SmartOS', 'mocks': [ diff --git a/tools/ds-identify b/tools/ds-identify index ce0477a5..fcc60149 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -116,7 +116,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -1036,6 +1036,12 @@ dscheck_Hetzner() { return ${DS_NOT_FOUND} } +dscheck_Oracle() { + local asset_tag="OracleCloud.com" + dmi_chassis_asset_tag_matches "${asset_tag}" && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + is_ibm_provisioning() { local pcfg="${PATH_ROOT}/root/provisioningConfiguration.cfg" local logf="${PATH_ROOT}/root/swinstall.log" -- cgit v1.2.3 From db50bc0d999e3a90136864a774f85e4e15b144e8 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 5 Sep 2018 14:17:16 +0000 Subject: sysconfig: refactor sysconfig to accept distro specific templates paths Multiple distros use sysconfig format but have different content and paths to certain files. Update distros to specify these template paths in their renderer_configs dictionary. 
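To make the shape of the change concrete, here is an illustrative sketch (not part of the diff below) of the per-distro sysconfig template paths; the values mirror the renderer_configs dictionary this patch adds to cloudinit/distros/rhel.py:

    # Sketch of the sysconfig renderer configuration a distro now supplies.
    renderer_configs = {
        'sysconfig': {
            # file receiving NETWORKING=yes and related globals
            'control': 'etc/sysconfig/network',
            # per-interface ifcfg file location
            'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s',
            # per-interface route files, one template per protocol
            'route_templates': {
                'ipv4': '%(base)s/network-scripts/route-%(name)s',
                'ipv6': '%(base)s/network-scripts/route6-%(name)s',
            },
        },
    }

The sysconfig Renderer consumes the 'control', 'iface_templates' and 'route_templates' keys instead of hard-coding Red Hat style paths, which is what lets openSUSE point the same renderer at its /etc/sysconfig/network/ifcfg-* and ifroute-* files.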
--- cloudinit/cmd/devel/net_convert.py | 14 +- cloudinit/distros/__init__.py | 2 +- cloudinit/distros/opensuse.py | 15 +- cloudinit/distros/rhel.py | 10 + cloudinit/net/eni.py | 2 +- cloudinit/net/netplan.py | 2 +- cloudinit/net/renderer.py | 9 +- cloudinit/net/sysconfig.py | 78 +- cloudinit/tests/helpers.py | 11 +- .../unittests/test_datasource/test_configdrive.py | 6 +- tests/unittests/test_distros/test_netconfig.py | 971 +++++++++------------ tests/unittests/test_net.py | 544 +++++++++++- 12 files changed, 1029 insertions(+), 635 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 271dc5ed..a0f58a0a 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -10,6 +10,7 @@ import yaml from cloudinit.sources.helpers import openstack from cloudinit.sources import DataSourceAzure as azure +from cloudinit import distros from cloudinit.net import eni, netplan, network_state, sysconfig from cloudinit import log @@ -36,6 +37,11 @@ def get_parser(parser=None): metavar="PATH", help="directory to place output in", required=True) + parser.add_argument("-D", "--distro", + choices=[item for sublist in + distros.OSFAMILIES.values() + for item in sublist], + required=True) parser.add_argument("-m", "--mac", metavar="name,mac", action='append', @@ -96,14 +102,20 @@ def handle_args(name, args): sys.stderr.write('\n'.join([ "", "Internal State", yaml.dump(ns, default_flow_style=False, indent=4), ""])) + distro_cls = distros.fetch(args.distro) + distro = distro_cls(args.distro, {}, None) + config = {} if args.output_kind == "eni": r_cls = eni.Renderer + config = distro.renderer_configs.get('eni') elif args.output_kind == "netplan": r_cls = netplan.Renderer + config = distro.renderer_configs.get('netplan') else: r_cls = sysconfig.Renderer + config = distro.renderer_configs.get('sysconfig') - r = r_cls() + r = r_cls(config=config) sys.stderr.write(''.join([ "Read input format '%s' from '%s'.\n" % ( args.kind, args.network_data.name), diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index fde054e9..d9101ce6 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -91,7 +91,7 @@ class Distro(object): LOG.debug("Selected renderer '%s' from priority list: %s", name, priority) renderer = render_cls(config=self.renderer_configs.get(name)) - renderer.render_network_config(network_config=network_config) + renderer.render_network_config(network_config) return [] def _find_tz_file(self, tz): diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index 9f90e95e..1fe896aa 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -28,13 +28,23 @@ class Distro(distros.Distro): hostname_conf_fn = '/etc/HOSTNAME' init_cmd = ['service'] locale_conf_fn = '/etc/sysconfig/language' - network_conf_fn = '/etc/sysconfig/network' + network_conf_fn = '/etc/sysconfig/network/config' network_script_tpl = '/etc/sysconfig/network/ifcfg-%s' resolve_conf_fn = '/etc/resolv.conf' route_conf_tpl = '/etc/sysconfig/network/ifroute-%s' systemd_hostname_conf_fn = '/etc/hostname' systemd_locale_conf_fn = '/etc/locale.conf' tz_local_fn = '/etc/localtime' + renderer_configs = { + 'sysconfig': { + 'control': 'etc/sysconfig/network/config', + 'iface_templates': '%(base)s/network/ifcfg-%(name)s', + 'route_templates': { + 'ipv4': '%(base)s/network/ifroute-%(name)s', + 'ipv6': '%(base)s/network/ifroute-%(name)s', + } + } + } def 
__init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -208,6 +218,9 @@ class Distro(distros.Distro): nameservers, searchservers) return dev_names + def _write_network_config(self, netconfig): + return self._supported_write_network_config(netconfig) + @property def preferred_ntp_clients(self): """The preferred ntp client is dependent on the version.""" diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 1fecb619..ff513438 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -39,6 +39,16 @@ class Distro(distros.Distro): resolve_conf_fn = "/etc/resolv.conf" tz_local_fn = "/etc/localtime" usr_lib_exec = "/usr/libexec" + renderer_configs = { + 'sysconfig': { + 'control': 'etc/sysconfig/network', + 'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s', + 'route_templates': { + 'ipv4': '%(base)s/network-scripts/route-%(name)s', + 'ipv6': '%(base)s/network-scripts/route6-%(name)s' + } + } + } def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 80be2429..c6f631a9 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -480,7 +480,7 @@ class Renderer(renderer.Renderer): return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n" - def render_network_state(self, network_state, target=None): + def render_network_state(self, network_state, templates=None, target=None): fpeni = util.target_path(target, self.eni_path) util.ensure_dir(os.path.dirname(fpeni)) header = self.eni_header if self.eni_header else "" diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 6352e78c..bc1087f9 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -189,7 +189,7 @@ class Renderer(renderer.Renderer): self._postcmds = config.get('postcmds', False) self.clean_default = config.get('clean_default', True) - def render_network_state(self, network_state, target): + def render_network_state(self, network_state, templates=None, target=None): # check network state for version # if v2, then extract network_state.config # else render_v2_from_state diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py index 57652e27..5f32e90f 100644 --- a/cloudinit/net/renderer.py +++ b/cloudinit/net/renderer.py @@ -45,11 +45,14 @@ class Renderer(object): return content.getvalue() @abc.abstractmethod - def render_network_state(self, network_state, target=None): + def render_network_state(self, network_state, templates=None, + target=None): """Render network state.""" - def render_network_config(self, network_config, target=None): + def render_network_config(self, network_config, templates=None, + target=None): return self.render_network_state( - network_state=parse_net_config_data(network_config), target=target) + network_state=parse_net_config_data(network_config), + templates=templates, target=target) # vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 3d719238..66e970e0 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -91,19 +91,20 @@ class ConfigMap(object): class Route(ConfigMap): """Represents a route configuration.""" - route_fn_tpl_ipv4 = '%(base)s/network-scripts/route-%(name)s' - route_fn_tpl_ipv6 = '%(base)s/network-scripts/route6-%(name)s' - - def __init__(self, route_name, base_sysconf_dir): + def __init__(self, route_name, base_sysconf_dir, + ipv4_tpl, ipv6_tpl): super(Route, self).__init__() self.last_idx = 1 self.has_set_default_ipv4 = 
False self.has_set_default_ipv6 = False self._route_name = route_name self._base_sysconf_dir = base_sysconf_dir + self.route_fn_tpl_ipv4 = ipv4_tpl + self.route_fn_tpl_ipv6 = ipv6_tpl def copy(self): - r = Route(self._route_name, self._base_sysconf_dir) + r = Route(self._route_name, self._base_sysconf_dir, + self.route_fn_tpl_ipv4, self.route_fn_tpl_ipv6) r._conf = self._conf.copy() r.last_idx = self.last_idx r.has_set_default_ipv4 = self.has_set_default_ipv4 @@ -169,18 +170,22 @@ class Route(ConfigMap): class NetInterface(ConfigMap): """Represents a sysconfig/networking-script (and its config + children).""" - iface_fn_tpl = '%(base)s/network-scripts/ifcfg-%(name)s' - iface_types = { 'ethernet': 'Ethernet', 'bond': 'Bond', 'bridge': 'Bridge', } - def __init__(self, iface_name, base_sysconf_dir, kind='ethernet'): + def __init__(self, iface_name, base_sysconf_dir, templates, + kind='ethernet'): super(NetInterface, self).__init__() self.children = [] - self.routes = Route(iface_name, base_sysconf_dir) + self.templates = templates + route_tpl = self.templates.get('route_templates') + self.routes = Route(iface_name, base_sysconf_dir, + ipv4_tpl=route_tpl.get('ipv4'), + ipv6_tpl=route_tpl.get('ipv6')) + self.iface_fn_tpl = self.templates.get('iface_templates') self.kind = kind self._iface_name = iface_name @@ -213,7 +218,8 @@ class NetInterface(ConfigMap): 'name': self.name}) def copy(self, copy_children=False, copy_routes=False): - c = NetInterface(self.name, self._base_sysconf_dir, kind=self._kind) + c = NetInterface(self.name, self._base_sysconf_dir, + self.templates, kind=self._kind) c._conf = self._conf.copy() if copy_children: c.children = list(self.children) @@ -251,6 +257,8 @@ class Renderer(renderer.Renderer): ('bridge_bridgeprio', 'PRIO'), ]) + templates = {} + def __init__(self, config=None): if not config: config = {} @@ -261,6 +269,11 @@ class Renderer(renderer.Renderer): nm_conf_path = 'etc/NetworkManager/conf.d/99-cloud-init.conf' self.networkmanager_conf_path = config.get('networkmanager_conf_path', nm_conf_path) + self.templates = { + 'control': config.get('control'), + 'iface_templates': config.get('iface_templates'), + 'route_templates': config.get('route_templates'), + } @classmethod def _render_iface_shared(cls, iface, iface_cfg): @@ -512,7 +525,7 @@ class Renderer(renderer.Renderer): return content_str @staticmethod - def _render_networkmanager_conf(network_state): + def _render_networkmanager_conf(network_state, templates=None): content = networkmanager_conf.NetworkManagerConf("") # If DNS server information is provided, configure @@ -556,14 +569,17 @@ class Renderer(renderer.Renderer): cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod - def _render_sysconfig(cls, base_sysconf_dir, network_state): + def _render_sysconfig(cls, base_sysconf_dir, network_state, + templates=None): '''Given state, return /etc/sysconfig files + contents''' + if not templates: + templates = cls.templates iface_contents = {} for iface in network_state.iter_interfaces(): if iface['type'] == "loopback": continue iface_name = iface['name'] - iface_cfg = NetInterface(iface_name, base_sysconf_dir) + iface_cfg = NetInterface(iface_name, base_sysconf_dir, templates) cls._render_iface_shared(iface, iface_cfg) iface_contents[iface_name] = iface_cfg cls._render_physical_interfaces(network_state, iface_contents) @@ -578,17 +594,21 @@ class Renderer(renderer.Renderer): if iface_cfg: contents[iface_cfg.path] = iface_cfg.to_string() if iface_cfg.routes: - 
contents[iface_cfg.routes.path_ipv4] = \ - iface_cfg.routes.to_string("ipv4") - contents[iface_cfg.routes.path_ipv6] = \ - iface_cfg.routes.to_string("ipv6") + for cpath, proto in zip([iface_cfg.routes.path_ipv4, + iface_cfg.routes.path_ipv6], + ["ipv4", "ipv6"]): + if cpath not in contents: + contents[cpath] = iface_cfg.routes.to_string(proto) return contents - def render_network_state(self, network_state, target=None): + def render_network_state(self, network_state, templates=None, target=None): + if not templates: + templates = self.templates file_mode = 0o644 base_sysconf_dir = util.target_path(target, self.sysconf_dir) for path, data in self._render_sysconfig(base_sysconf_dir, - network_state).items(): + network_state, + templates=templates).items(): util.write_file(path, data, file_mode) if self.dns_path: dns_path = util.target_path(target, self.dns_path) @@ -598,7 +618,8 @@ class Renderer(renderer.Renderer): if self.networkmanager_conf_path: nm_conf_path = util.target_path(target, self.networkmanager_conf_path) - nm_conf_content = self._render_networkmanager_conf(network_state) + nm_conf_content = self._render_networkmanager_conf(network_state, + templates) if nm_conf_content: util.write_file(nm_conf_path, nm_conf_content, file_mode) if self.netrules_path: @@ -606,13 +627,16 @@ class Renderer(renderer.Renderer): netrules_path = util.target_path(target, self.netrules_path) util.write_file(netrules_path, netrules_content, file_mode) - # always write /etc/sysconfig/network configuration - sysconfig_path = util.target_path(target, "etc/sysconfig/network") - netcfg = [_make_header(), 'NETWORKING=yes'] - if network_state.use_ipv6: - netcfg.append('NETWORKING_IPV6=yes') - netcfg.append('IPV6_AUTOCONF=no') - util.write_file(sysconfig_path, "\n".join(netcfg) + "\n", file_mode) + sysconfig_path = util.target_path(target, templates.get('control')) + # Distros configuring /etc/sysconfig/network as a file e.g. 
Centos + if sysconfig_path.endswith('network'): + util.ensure_dir(os.path.dirname(sysconfig_path)) + netcfg = [_make_header(), 'NETWORKING=yes'] + if network_state.use_ipv6: + netcfg.append('NETWORKING_IPV6=yes') + netcfg.append('IPV6_AUTOCONF=no') + util.write_file(sysconfig_path, + "\n".join(netcfg) + "\n", file_mode) def available(target=None): diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index de24e25d..9a21426e 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -16,9 +16,9 @@ import six import unittest2 try: - from contextlib import ExitStack + from contextlib import ExitStack, contextmanager except ImportError: - from contextlib2 import ExitStack + from contextlib2 import ExitStack, contextmanager try: from configparser import ConfigParser @@ -326,6 +326,13 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): self.patchOS(root) return root + @contextmanager + def reRooted(self, root=None): + try: + yield self.reRoot(root) + finally: + self.patched_funcs.close() + class HttprettyTestCase(CiTestCase): # necessary as http_proxy gets in the way of httpretty diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 68400f22..7e6fcbbf 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -642,7 +642,7 @@ class TestConvertNetworkData(CiTestCase): routes) eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), self.tmp) + network_state.parse_net_config_data(ncfg), target=self.tmp) with open(os.path.join(self.tmp, "etc", "network", "interfaces"), 'r') as f: eni_rendering = f.read() @@ -664,7 +664,7 @@ class TestConvertNetworkData(CiTestCase): eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), self.tmp) + network_state.parse_net_config_data(ncfg), target=self.tmp) with open(os.path.join(self.tmp, "etc", "network", "interfaces"), 'r') as f: eni_rendering = f.read() @@ -695,7 +695,7 @@ class TestConvertNetworkData(CiTestCase): known_macs=KNOWN_MACS) eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), self.tmp) + network_state.parse_net_config_data(ncfg), target=self.tmp) with open(os.path.join(self.tmp, "etc", "network", "interfaces"), 'r') as f: eni_rendering = f.read() diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 7765e408..740fb76c 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -2,24 +2,19 @@ import os from six import StringIO -import stat from textwrap import dedent try: from unittest import mock except ImportError: import mock -try: - from contextlib import ExitStack -except ImportError: - from contextlib2 import ExitStack from cloudinit import distros from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit import helpers -from cloudinit.net import eni from cloudinit import settings -from cloudinit.tests.helpers import FilesystemMockingTestCase +from cloudinit.tests.helpers import ( + FilesystemMockingTestCase, dir2dict, populate_dir) from cloudinit import util @@ -82,7 +77,7 @@ V1_NET_CFG = {'config': [{'name': 'eth0', 'type': 'physical'}], 'version': 1} -V1_NET_CFG_OUTPUT = """ +V1_NET_CFG_OUTPUT = """\ # This file is generated from information provided by # the datasource. 
Changes to it will not persist across an instance. # To disable cloud-init's network configuration capabilities, write a file @@ -116,7 +111,7 @@ V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', 'version': 1} -V1_TO_V2_NET_CFG_OUTPUT = """ +V1_TO_V2_NET_CFG_OUTPUT = """\ # This file is generated from information provided by # the datasource. Changes to it will not persist across an instance. # To disable cloud-init's network configuration capabilities, write a file @@ -145,7 +140,7 @@ V2_NET_CFG = { } -V2_TO_V2_NET_CFG_OUTPUT = """ +V2_TO_V2_NET_CFG_OUTPUT = """\ # This file is generated from information provided by # the datasource. Changes to it will not persist across an instance. # To disable cloud-init's network configuration capabilities, write a file @@ -176,21 +171,10 @@ class WriteBuffer(object): return self.buffer.getvalue() -class TestNetCfgDistro(FilesystemMockingTestCase): - - frbsd_ifout = """\ -hn0: flags=8843 metric 0 mtu 1500 - options=51b - ether 00:15:5d:4c:73:00 - inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2 - inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255 - nd6 options=23 - media: Ethernet autoselect (10Gbase-T ) - status: active -""" +class TestNetCfgDistroBase(FilesystemMockingTestCase): def setUp(self): - super(TestNetCfgDistro, self).setUp() + super(TestNetCfgDistroBase, self).setUp() self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') self.add_patch('cloudinit.util.system_info', 'm_sysinfo') self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')} @@ -204,144 +188,6 @@ hn0: flags=8843 metric 0 mtu 1500 paths = helpers.Paths({}) return cls(dname, cfg.get('system_info'), paths) - def test_simple_write_ub(self): - ub_distro = self._get_distro('ubuntu') - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - - ub_distro.apply_network(BASE_NET_CFG, False) - - self.assertEqual(len(write_bufs), 1) - eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg' - self.assertIn(eni_name, write_bufs) - write_buf = write_bufs[eni_name] - self.assertEqual(str(write_buf).strip(), BASE_NET_CFG.strip()) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_eni_ub(self): - ub_distro = self._get_distro('ubuntu') - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - # eni availability checks - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(eni, 'available', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'ensure_dir')) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - mocks.enter_context( - mock.patch("cloudinit.net.eni.glob.glob", - return_value=[])) - - ub_distro.apply_network_config(V1_NET_CFG, False) - - self.assertEqual(len(write_bufs), 2) - eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg' - self.assertIn(eni_name, write_bufs) - write_buf = write_bufs[eni_name] - 
self.assertEqual(str(write_buf).strip(), V1_NET_CFG_OUTPUT.strip()) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_v1_to_netplan_ub(self): - renderers = ['netplan'] - devlist = ['eth0', 'lo'] - ub_distro = self._get_distro('ubuntu', renderers=renderers) - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'ensure_dir')) - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=(0, 0))) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - mocks.enter_context( - mock.patch("cloudinit.net.netplan.get_devicelist", - return_value=devlist)) - - ub_distro.apply_network_config(V1_NET_CFG, False) - - self.assertEqual(len(write_bufs), 1) - netplan_name = '/etc/netplan/50-cloud-init.yaml' - self.assertIn(netplan_name, write_bufs) - write_buf = write_bufs[netplan_name] - self.assertEqual(str(write_buf).strip(), - V1_TO_V2_NET_CFG_OUTPUT.strip()) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_v2_passthrough_ub(self): - renderers = ['netplan'] - devlist = ['eth0', 'lo'] - ub_distro = self._get_distro('ubuntu', renderers=renderers) - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'ensure_dir')) - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=(0, 0))) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - # FreeBSD does not have '/sys/class/net' file, - # so we need mock here. 
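The tests removed above stacked many mock patches with contextlib.ExitStack and intercepted util.write_file into in-memory buffers; the replacements later in this patch render into a real temporary root instead. For reference, a small self-contained sketch of the ExitStack pattern being retired (generic example, not cloud-init code):

import os.path
from contextlib import ExitStack
from unittest import mock


def example_exitstack_patching():
    with ExitStack() as mocks:
        mocks.enter_context(
            mock.patch.object(os.path, 'isfile', return_value=False))
        mocks.enter_context(
            mock.patch.object(os.path, 'exists', return_value=True))
        # Every patch entered above is active inside the block ...
        assert os.path.isfile('/any/path') is False
        assert os.path.exists('/any/path') is True
    # ... and all of them are unwound together when the block exits.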
- mocks.enter_context( - mock.patch.object(os, 'listdir', return_value=devlist)) - ub_distro.apply_network_config(V2_NET_CFG, False) - - self.assertEqual(len(write_bufs), 1) - netplan_name = '/etc/netplan/50-cloud-init.yaml' - self.assertIn(netplan_name, write_bufs) - write_buf = write_bufs[netplan_name] - self.assertEqual(str(write_buf).strip(), - V2_TO_V2_NET_CFG_OUTPUT.strip()) - self.assertEqual(write_buf.mode, 0o644) - def assertCfgEquals(self, blob1, blob2): b1 = dict(SysConf(blob1.strip().splitlines())) b2 = dict(SysConf(blob2.strip().splitlines())) @@ -353,6 +199,20 @@ hn0: flags=8843 metric 0 mtu 1500 for (k, v) in b1.items(): self.assertEqual(v, b2[k]) + +class TestNetCfgDistroFreebsd(TestNetCfgDistroBase): + + frbsd_ifout = """\ +hn0: flags=8843 metric 0 mtu 1500 + options=51b + ether 00:15:5d:4c:73:00 + inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2 + inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255 + nd6 options=23 + media: Ethernet autoselect (10Gbase-T ) + status: active +""" + @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_list') @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out') def test_get_ip_nic_freebsd(self, ifname_out, iflist): @@ -376,349 +236,33 @@ hn0: flags=8843 metric 0 mtu 1500 res = frbsd_distro.generate_fallback_config() self.assertIsNotNone(res) - def test_simple_write_rh(self): - rh_distro = self._get_distro('rhel') - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - - rh_distro.apply_network(BASE_NET_CFG, False) - - self.assertEqual(len(write_bufs), 4) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo'] - expected_buf = ''' -DEVICE="lo" -ONBOOT=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -DEVICE="eth0" -BOOTPROTO="static" -NETMASK="255.255.255.0" -IPADDR="192.168.1.5" -ONBOOT=yes -GATEWAY="192.168.1.254" -BROADCAST="192.168.1.0" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -DEVICE="eth1" -BOOTPROTO="dhcp" -ONBOOT=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 
0.7 -NETWORKING=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_rh(self): - renderers = ['sysconfig'] - rh_distro = self._get_distro('rhel', renderers=renderers) - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - # sysconfig availability checks - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=True)) - - rh_distro.apply_network_config(V1_NET_CFG, False) - - self.assertEqual(len(write_bufs), 5) - - # eth0 - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=none -DEFROUTE=yes -DEVICE=eth0 -GATEWAY=192.168.1.254 -IPADDR=192.168.1.5 -NETMASK=255.255.255.0 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - # eth1 - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=dhcp -DEVICE=eth1 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 
0.7 -NETWORKING=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - def test_write_ipv6_rhel(self): - rh_distro = self._get_distro('rhel') - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - rh_distro.apply_network(BASE_NET_CFG_IPV6, False) - - self.assertEqual(len(write_bufs), 4) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo'] - expected_buf = ''' -DEVICE="lo" -ONBOOT=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -DEVICE="eth0" -BOOTPROTO="static" -NETMASK="255.255.255.0" -IPADDR="192.168.1.5" -ONBOOT=yes -GATEWAY="192.168.1.254" -BROADCAST="192.168.1.0" -IPV6INIT=yes -IPV6ADDR="2607:f0d0:1002:0011::2" -IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -DEVICE="eth1" -BOOTPROTO="static" -NETMASK="255.255.255.0" -IPADDR="192.168.1.6" -ONBOOT=no -GATEWAY="192.168.1.254" -BROADCAST="192.168.1.0" -IPV6INIT=yes -IPV6ADDR="2607:f0d0:1002:0011::3" -IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 0.7 -NETWORKING=yes -NETWORKING_IPV6=yes -IPV6_AUTOCONF=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_ipv6_rh(self): - renderers = ['sysconfig'] - rh_distro = self._get_distro('rhel', renderers=renderers) - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=True)) - - rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) - - self.assertEqual(len(write_bufs), 5) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. 
-# -BOOTPROTO=none -DEFROUTE=yes -DEVICE=eth0 -IPV6ADDR=2607:f0d0:1002:0011::2/64 -IPV6INIT=yes -IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=dhcp -DEVICE=eth1 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 0.7 -NETWORKING=yes -NETWORKING_IPV6=yes -IPV6_AUTOCONF=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - def test_simple_write_freebsd(self): fbsd_distro = self._get_distro('freebsd') - write_bufs = {} + rc_conf = '/etc/rc.conf' read_bufs = { - '/etc/rc.conf': '', - '/etc/resolv.conf': '', + rc_conf: 'initial-rc-conf-not-validated', + '/etc/resolv.conf': 'initial-resolv-conf-not-validated', } - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - def replace_read(fname, read_cb=None, quiet=False): - if fname not in read_bufs: - if fname in write_bufs: - return str(write_bufs[fname]) - raise IOError("%s not found" % fname) - else: - if fname in write_bufs: - return str(write_bufs[fname]) - return read_bufs[fname] - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=('vtnet0', ''))) - mocks.enter_context( - mock.patch.object(os.path, 'exists', return_value=False)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', replace_read)) - - fbsd_distro.apply_network(BASE_NET_CFG, False) - - self.assertIn('/etc/rc.conf', write_bufs) - write_buf = write_bufs['/etc/rc.conf'] - expected_buf = ''' -ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0" -ifconfig_vtnet1="DHCP" -defaultrouter="192.168.1.254" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_fallback(self): + tmpd = self.tmp_dir() + populate_dir(tmpd, read_bufs) + with self.reRooted(tmpd): + with mock.patch("cloudinit.distros.freebsd.util.subp", + return_value=('vtnet0', '')): + fbsd_distro.apply_network(BASE_NET_CFG, False) + results = dir2dict(tmpd) + + self.assertIn(rc_conf, results) + self.assertCfgEquals( + dedent('''\ + ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0" + ifconfig_vtnet1="DHCP" + defaultrouter="192.168.1.254" + '''), results[rc_conf]) + self.assertEqual(0o644, get_mode(rc_conf, tmpd)) + + def test_apply_network_config_fallback_freebsd(self): fbsd_distro = self._get_distro('freebsd') # a weak attempt to verify that we don't have an implementation @@ -735,68 +279,324 @@ defaultrouter="192.168.1.254" "subnets": [{"type": "dhcp"}]}], 'version': 1} - write_bufs = {} + rc_conf = '/etc/rc.conf' read_bufs = { - '/etc/rc.conf': '', - '/etc/resolv.conf': '', + rc_conf: 'initial-rc-conf-not-validated', + '/etc/resolv.conf': 'initial-resolv-conf-not-validated', } - def 
replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - def replace_read(fname, read_cb=None, quiet=False): - if fname not in read_bufs: - if fname in write_bufs: - return str(write_bufs[fname]) - raise IOError("%s not found" % fname) - else: - if fname in write_bufs: - return str(write_bufs[fname]) - return read_bufs[fname] - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=('vtnet0', ''))) - mocks.enter_context( - mock.patch.object(os.path, 'exists', return_value=False)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', replace_read)) - - fbsd_distro.apply_network_config(mynetcfg, bring_up=False) - - self.assertIn('/etc/rc.conf', write_bufs) - write_buf = write_bufs['/etc/rc.conf'] - expected_buf = ''' -ifconfig_vtnet0="DHCP" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) + tmpd = self.tmp_dir() + populate_dir(tmpd, read_bufs) + with self.reRooted(tmpd): + with mock.patch("cloudinit.distros.freebsd.util.subp", + return_value=('vtnet0', '')): + fbsd_distro.apply_network_config(mynetcfg, bring_up=False) + results = dir2dict(tmpd) - def test_simple_write_opensuse(self): - """Opensuse network rendering writes appropriate sysconfg files.""" - tmpdir = self.tmp_dir() - self.patchOS(tmpdir) - self.patchUtils(tmpdir) - distro = self._get_distro('opensuse') + self.assertIn(rc_conf, results) + self.assertCfgEquals('ifconfig_vtnet0="DHCP"', results[rc_conf]) + self.assertEqual(0o644, get_mode(rc_conf, tmpd)) - distro.apply_network(BASE_NET_CFG, False) - lo_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-lo') - eth0_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth0') - eth1_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth1') +class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroUbuntuEni, self).setUp() + self.distro = self._get_distro('ubuntu', renderers=['eni']) + + def eni_path(self): + return '/etc/network/interfaces.d/50-cloud-init.cfg' + + def _apply_and_verify_eni(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.eni.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def test_simple_write_ub(self): + expected_cfgs = { + self.eni_path(): BASE_NET_CFG, + } + + # ub_distro.apply_network(BASE_NET_CFG, False) + self._apply_and_verify_eni(self.distro.apply_network, + BASE_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_eni_ub(self): expected_cfgs = { - lo_path: dedent(''' + self.eni_path(): V1_NET_CFG_OUTPUT, + } + # ub_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify_eni(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): + def setUp(self): + 
super(TestNetCfgDistroUbuntuNetplan, self).setUp() + self.distro = self._get_distro('ubuntu', renderers=['netplan']) + self.devlist = ['eth0', 'lo'] + + def _apply_and_verify_netplan(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.netplan.available', + return_value=True): + with mock.patch("cloudinit.net.netplan.get_devicelist", + return_value=self.devlist): + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def netplan_path(self): + return '/etc/netplan/50-cloud-init.yaml' + + def test_apply_network_config_v1_to_netplan_ub(self): + expected_cfgs = { + self.netplan_path(): V1_TO_V2_NET_CFG_OUTPUT, + } + + # ub_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify_netplan(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_v2_passthrough_ub(self): + expected_cfgs = { + self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT, + } + # ub_distro.apply_network_config(V2_NET_CFG, False) + self._apply_and_verify_netplan(self.distro.apply_network_config, + V2_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroRedhat(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroRedhat, self).setUp() + self.distro = self._get_distro('rhel', renderers=['sysconfig']) + + def ifcfg_path(self, ifname): + return '/etc/sysconfig/network-scripts/ifcfg-%s' % ifname + + def control_path(self): + return '/etc/sysconfig/network' + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.sysconfig.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + self.assertCfgEquals(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def test_simple_write_rh(self): + expected_cfgs = { + self.ifcfg_path('lo'): dedent("""\ + DEVICE="lo" + ONBOOT=yes + """), + self.ifcfg_path('eth0'): dedent("""\ + DEVICE="eth0" + BOOTPROTO="static" + NETMASK="255.255.255.0" + IPADDR="192.168.1.5" + ONBOOT=yes + GATEWAY="192.168.1.254" + BROADCAST="192.168.1.0" + """), + self.ifcfg_path('eth1'): dedent("""\ + DEVICE="eth1" + BOOTPROTO="dhcp" + ONBOOT=yes + """), + self.control_path(): dedent("""\ + NETWORKING=yes + """), + } + # rh_distro.apply_network(BASE_NET_CFG, False) + self._apply_and_verify(self.distro.apply_network, + BASE_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_rh(self): + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + GATEWAY=192.168.1.254 + IPADDR=192.168.1.5 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.control_path(): dedent("""\ + NETWORKING=yes + """), + } + # 
rh_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_write_ipv6_rhel(self): + expected_cfgs = { + self.ifcfg_path('lo'): dedent("""\ + DEVICE="lo" + ONBOOT=yes + """), + self.ifcfg_path('eth0'): dedent("""\ + DEVICE="eth0" + BOOTPROTO="static" + NETMASK="255.255.255.0" + IPADDR="192.168.1.5" + ONBOOT=yes + GATEWAY="192.168.1.254" + BROADCAST="192.168.1.0" + IPV6INIT=yes + IPV6ADDR="2607:f0d0:1002:0011::2" + IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" + """), + self.ifcfg_path('eth1'): dedent("""\ + DEVICE="eth1" + BOOTPROTO="static" + NETMASK="255.255.255.0" + IPADDR="192.168.1.6" + ONBOOT=no + GATEWAY="192.168.1.254" + BROADCAST="192.168.1.0" + IPV6INIT=yes + IPV6ADDR="2607:f0d0:1002:0011::3" + IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" + """), + self.control_path(): dedent("""\ + NETWORKING=yes + NETWORKING_IPV6=yes + IPV6_AUTOCONF=no + """), + } + # rh_distro.apply_network(BASE_NET_CFG_IPV6, False) + self._apply_and_verify(self.distro.apply_network, + BASE_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_ipv6_rh(self): + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + IPV6ADDR=2607:f0d0:1002:0011::2/64 + IPV6INIT=yes + IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.control_path(): dedent("""\ + NETWORKING=yes + NETWORKING_IPV6=yes + IPV6_AUTOCONF=no + """), + } + # rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroOpensuse, self).setUp() + self.distro = self._get_distro('opensuse', renderers=['sysconfig']) + + def ifcfg_path(self, ifname): + return '/etc/sysconfig/network/ifcfg-%s' % ifname + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.sysconfig.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + self.assertCfgEquals(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def test_simple_write_opensuse(self): + """Opensuse network rendering writes appropriate sysconfig files.""" + expected_cfgs = { + self.ifcfg_path('lo'): dedent(''' STARTMODE="auto" USERCONTROL="no" FIREWALL="no" '''), - eth0_path: dedent(''' + self.ifcfg_path('eth0'): dedent(''' BOOTPROTO="static" BROADCAST="192.168.1.0" GATEWAY="192.168.1.254" @@ -806,18 +606,77 @@ ifconfig_vtnet0="DHCP" USERCONTROL="no" ETHTOOL_OPTIONS="" '''), - eth1_path: dedent(''' + self.ifcfg_path('eth1'): dedent(''' BOOTPROTO="dhcp" STARTMODE="auto" USERCONTROL="no" ETHTOOL_OPTIONS="" ''') } - for cfgpath in (lo_path, eth0_path, eth1_path): - self.assertCfgEquals( - expected_cfgs[cfgpath], - util.load_file(cfgpath)) - file_stat = os.stat(cfgpath) - self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + + # distro.apply_network(BASE_NET_CFG, False) + 
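The _apply_and_verify() helpers introduced in these new distro test classes all follow the same shape: apply the network config beneath a temporary root, read the rendered tree back, then compare content and file mode for each expected path. A stripped-down sketch of that comparison loop (hypothetical helper, simplified from what the tests above do):

import os
import stat


def verify_rendered_tree(rendered, expected_cfgs, root):
    """Check rendered {path: content} and 0o644 modes against expectations.

    'rendered' is assumed to come from a dir2dict()-style walk of the fake
    root; 'expected_cfgs' maps absolute config paths to expected content.
    """
    for path, expected in expected_cfgs.items():
        assert rendered[path] == expected
        mode = os.stat(os.path.join(root, path.lstrip('/'))).st_mode
        assert stat.S_IMODE(mode) == 0o644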
self._apply_and_verify(self.distro.apply_network, + BASE_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_opensuse(self): + """Opensuse uses apply_network_config and renders sysconfig""" + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + GATEWAY=192.168.1.254 + IPADDR=192.168.1.5 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + } + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_ipv6_opensuse(self): + """Opensuse uses apply_network_config and renders sysconfig w/ipv6""" + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + IPV6ADDR=2607:f0d0:1002:0011::2/64 + IPV6INIT=yes + IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + } + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + +def get_mode(path, target=None): + return os.stat(util.target_path(target, path)).st_mode & 0o777 # vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 58e5ea14..05d5c72c 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import net +from cloudinit import distros from cloudinit.net import cmdline from cloudinit.net import ( eni, interface_has_own_mac, natural_sort_key, netplan, network_state, @@ -129,7 +130,40 @@ OS_SAMPLES = [ 'in_macs': { 'fa:16:3e:ed:9a:59': 'eth0', }, - 'out_sysconfig': [ + 'out_sysconfig_opensuse': [ + ('etc/sysconfig/network/ifcfg-eth0', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +NETMASK=255.255.252.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip()), + ('etc/NetworkManager/conf.d/99-cloud-init.conf', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +[main] +dns = none +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], + 'out_sysconfig_rhel': [ ('etc/sysconfig/network-scripts/ifcfg-eth0', """ # Created by cloud-init on instance boot automatically, do not edit. @@ -162,6 +196,7 @@ dns = none ('etc/udev/rules.d/70-persistent-net.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] + }, { 'in_data': { @@ -195,7 +230,42 @@ dns = none 'in_macs': { 'fa:16:3e:ed:9a:59': 'eth0', }, - 'out_sysconfig': [ + 'out_sysconfig_opensuse': [ + ('etc/sysconfig/network/ifcfg-eth0', + """ +# Created by cloud-init on instance boot automatically, do not edit. 
+# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +IPADDR1=10.0.0.10 +NETMASK=255.255.252.0 +NETMASK1=255.255.255.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip()), + ('etc/NetworkManager/conf.d/99-cloud-init.conf', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +[main] +dns = none +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], + 'out_sysconfig_rhel': [ ('etc/sysconfig/network-scripts/ifcfg-eth0', """ # Created by cloud-init on instance boot automatically, do not edit. @@ -230,6 +300,7 @@ dns = none ('etc/udev/rules.d/70-persistent-net.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] + }, { 'in_data': { @@ -283,7 +354,44 @@ dns = none 'in_macs': { 'fa:16:3e:ed:9a:59': 'eth0', }, - 'out_sysconfig': [ + 'out_sysconfig_opensuse': [ + ('etc/sysconfig/network/ifcfg-eth0', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +IPV6ADDR=2001:DB8::10/64 +IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" +IPV6INIT=yes +IPV6_DEFAULTGW=2001:DB8::1 +NETMASK=255.255.252.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip()), + ('etc/NetworkManager/conf.d/99-cloud-init.conf', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +[main] +dns = none +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], + 'out_sysconfig_rhel': [ ('etc/sysconfig/network-scripts/ifcfg-eth0', """ # Created by cloud-init on instance boot automatically, do not edit. 
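Note that the sysconfig expectations above are not compared as raw strings everywhere: assertCfgEquals() and _compare_files_to_expected() parse both sides into KEY=value dicts, so key ordering cannot cause spurious failures. A tiny generic parser illustrating the idea (not the cloud-init helper itself):

def parse_sysconfig(blob):
    """Parse KEY=value lines, ignoring blanks and comments, into a dict."""
    conf = {}
    for line in blob.splitlines():
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        key, _sep, value = line.partition('=')
        conf[key] = value.strip('"')
    return conf


# Ordering differences then compare equal:
assert (parse_sysconfig('DEVICE="eth0"\nONBOOT=yes\n') ==
        parse_sysconfig('ONBOOT=yes\nDEVICE="eth0"\n'))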
@@ -1154,7 +1262,59 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true version: 2 """), - 'expected_sysconfig': { + 'expected_sysconfig_opensuse': { + 'ifcfg-bond0': textwrap.dedent("""\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" + BONDING_SLAVE0=bond0s0 + BONDING_SLAVE1=bond0s1 + BOOTPROTO=none + DEFROUTE=yes + DEVICE=bond0 + GATEWAY=192.168.0.1 + MACADDR=aa:bb:cc:dd:e8:ff + IPADDR=192.168.0.2 + IPADDR1=192.168.1.2 + IPV6ADDR=2001:1::1/92 + IPV6INIT=yes + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Bond + USERCTL=no + """), + 'ifcfg-bond0s0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=bond0s0 + HWADDR=aa:bb:cc:dd:e8:00 + MASTER=bond0 + NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """), + 'ifroute-bond0': textwrap.dedent("""\ + ADDRESS0=10.1.3.0 + GATEWAY0=192.168.0.3 + NETMASK0=255.255.255.0 + """), + 'ifcfg-bond0s1': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=bond0s1 + HWADDR=aa:bb:cc:dd:e8:01 + MASTER=bond0 + NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """), + }, + + 'expected_sysconfig_rhel': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" @@ -1527,7 +1687,7 @@ class TestGenerateFallbackConfig(CiTestCase): # don't set rulepath so eni writes them renderer = eni.Renderer( {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, 'interfaces'))) @@ -1591,7 +1751,7 @@ iface eth0 inet dhcp # don't set rulepath so eni writes them renderer = eni.Renderer( {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, 'interfaces'))) @@ -1682,7 +1842,7 @@ iface eth1 inet dhcp self.assertEqual(0, mock_settle.call_count) -class TestSysConfigRendering(CiTestCase): +class TestRhelSysConfigRendering(CiTestCase): with_logs = True @@ -1690,6 +1850,13 @@ class TestSysConfigRendering(CiTestCase): header = ('# Created by cloud-init on instance boot automatically, ' 'do not edit.\n#\n') + expected_name = 'expected_sysconfig' + + def _get_renderer(self): + distro_cls = distros.fetch('rhel') + return sysconfig.Renderer( + config=distro_cls.renderer_configs.get('sysconfig')) + def _render_and_read(self, network_config=None, state=None, dir=None): if dir is None: dir = self.tmp_dir() @@ -1701,8 +1868,8 @@ class TestSysConfigRendering(CiTestCase): else: raise ValueError("Expected data or state, got neither") - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=dir) return dir2dict(dir) def _compare_files_to_expected(self, expected, found): @@ -1745,8 +1912,8 @@ class TestSysConfigRendering(CiTestCase): render_dir = os.path.join(tmp_dir, "render") os.makedirs(render_dir) - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) render_file = 'etc/sysconfig/network-scripts/ifcfg-eth1000' with open(os.path.join(render_dir, render_file)) as fh: @@ -1797,9 +1964,9 @@ USERCTL=no network_cfg = 
openstack.convert_net_json(net_json, known_macs=macs) ns = network_state.parse_net_config_data(network_cfg, skip_broken=False) - renderer = sysconfig.Renderer() + renderer = self._get_renderer() with self.assertRaises(ValueError): - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertEqual([], os.listdir(render_dir)) def test_multiple_ipv6_default_gateways(self): @@ -1835,9 +2002,9 @@ USERCTL=no network_cfg = openstack.convert_net_json(net_json, known_macs=macs) ns = network_state.parse_net_config_data(network_cfg, skip_broken=False) - renderer = sysconfig.Renderer() + renderer = self._get_renderer() with self.assertRaises(ValueError): - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertEqual([], os.listdir(render_dir)) def test_openstack_rendering_samples(self): @@ -1849,12 +2016,13 @@ USERCTL=no ex_input, known_macs=ex_mac_addrs) ns = network_state.parse_net_config_data(network_cfg, skip_broken=False) - renderer = sysconfig.Renderer() + renderer = self._get_renderer() # render a multiple times to simulate reboots - renderer.render_network_state(ns, render_dir) - renderer.render_network_state(ns, render_dir) - renderer.render_network_state(ns, render_dir) - for fn, expected_content in os_sample.get('out_sysconfig', []): + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + for fn, expected_content in os_sample.get('out_sysconfig_rhel', + []): with open(os.path.join(render_dir, fn)) as fh: self.assertEqual(expected_content, fh.read()) @@ -1862,8 +2030,8 @@ USERCTL=no ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET) render_dir = self.tmp_path("render") os.makedirs(render_dir) - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) nspath = '/etc/sysconfig/network-scripts/' self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) @@ -1888,8 +2056,8 @@ USERCTL=no ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) render_dir = self.tmp_path("render") os.makedirs(render_dir) - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) nspath = '/etc/sysconfig/network-scripts/' self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) @@ -1906,33 +2074,331 @@ USERCTL=no self.assertEqual(expected, found[nspath + 'ifcfg-eth0']) def test_bond_config(self): + expected_name = 'expected_sysconfig_rhel' entry = NETWORK_CONFIGS['bond'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[expected_name], found) + self._assert_headers(found) + + def test_vlan_config(self): + entry = NETWORK_CONFIGS['vlan'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_bridge_config(self): + entry = NETWORK_CONFIGS['bridge'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_manual_config(self): + 
entry = NETWORK_CONFIGS['manual'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_all_config(self): + entry = NETWORK_CONFIGS['all'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + self.assertNotIn( + 'WARNING: Network config: ignoring eth0.101 device-level mtu', + self.logs.getvalue()) + + def test_small_config(self): + entry = NETWORK_CONFIGS['small'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_v4_and_v6_static_config(self): + entry = NETWORK_CONFIGS['v4_and_v6_static'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + expected_msg = ( + 'WARNING: Network config: ignoring iface0 device-level mtu:8999' + ' because ipv4 subnet-level mtu:9000 provided.') + self.assertIn(expected_msg, self.logs.getvalue()) + + def test_dhcpv6_only_config(self): + entry = NETWORK_CONFIGS['dhcpv6_only'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + +class TestOpenSuseSysConfigRendering(CiTestCase): + + with_logs = True + + scripts_dir = '/etc/sysconfig/network' + header = ('# Created by cloud-init on instance boot automatically, ' + 'do not edit.\n#\n') + + expected_name = 'expected_sysconfig' + + def _get_renderer(self): + distro_cls = distros.fetch('opensuse') + return sysconfig.Renderer( + config=distro_cls.renderer_configs.get('sysconfig')) + + def _render_and_read(self, network_config=None, state=None, dir=None): + if dir is None: + dir = self.tmp_dir() + + if network_config: + ns = network_state.parse_net_config_data(network_config) + elif state: + ns = state + else: + raise ValueError("Expected data or state, got neither") + + renderer = self._get_renderer() + renderer.render_network_state(ns, target=dir) + return dir2dict(dir) + + def _compare_files_to_expected(self, expected, found): + orig_maxdiff = self.maxDiff + expected_d = dict( + (os.path.join(self.scripts_dir, k), util.load_shell_content(v)) + for k, v in expected.items()) + + # only compare the files in scripts_dir + scripts_found = dict( + (k, util.load_shell_content(v)) for k, v in found.items() + if k.startswith(self.scripts_dir)) + try: + self.maxDiff = None + self.assertEqual(expected_d, scripts_found) + finally: + self.maxDiff = orig_maxdiff + + def _assert_headers(self, found): + missing = [f for f in found + if (f.startswith(self.scripts_dir) and + not found[f].startswith(self.header))] + if missing: + raise AssertionError("Missing headers in: %s" % missing) + + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_default_generation(self, mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path): + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path) + + network_cfg = net.generate_fallback_config() + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + + render_dir = os.path.join(tmp_dir, "render") + os.makedirs(render_dir) 
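Throughout these rendering tests the pattern is the same: render_network_state(ns, target=render_dir) writes the generated files under a throw-away directory, and the whole tree is read back for assertions. A dir2dict()-like walk can be sketched as (hypothetical helper, simplified):

import os


def dir_to_dict(root):
    """Return {'/relative/path': content} for every file under root."""
    found = {}
    for dirpath, _dirs, files in os.walk(root):
        for fname in files:
            path = os.path.join(dirpath, fname)
            with open(path) as fh:
                found['/' + os.path.relpath(path, root)] = fh.read()
    return found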
+ + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + + render_file = 'etc/sysconfig/network/ifcfg-eth1000' + with open(os.path.join(render_dir, render_file)) as fh: + content = fh.read() + expected_content = """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=dhcp +DEVICE=eth1000 +HWADDR=07-1C-C6-75-A4-BE +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip() + self.assertEqual(expected_content, content) + + def test_multiple_ipv4_default_gateways(self): + """ValueError is raised when duplicate ipv4 gateways exist.""" + net_json = { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", + "type": "ipv4", "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [{ + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + }, { + "netmask": "0.0.0.0", # A second default gateway + "network": "0.0.0.0", + "gateway": "172.20.3.254", + }], + "ip_address": "172.19.1.34", "id": "network0" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + } + macs = {'fa:16:3e:ed:9a:59': 'eth0'} + render_dir = self.tmp_dir() + network_cfg = openstack.convert_net_json(net_json, known_macs=macs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = self._get_renderer() + with self.assertRaises(ValueError): + renderer.render_network_state(ns, target=render_dir) + self.assertEqual([], os.listdir(render_dir)) + + def test_multiple_ipv6_default_gateways(self): + """ValueError is raised when duplicate ipv6 gateways exist.""" + net_json = { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "public-ipv6", + "type": "ipv6", "netmask": "", + "link": "tap1a81968a-79", + "routes": [{ + "gateway": "2001:DB8::1", + "netmask": "::", + "network": "::" + }, { + "gateway": "2001:DB9::1", + "netmask": "::", + "network": "::" + }], + "ip_address": "2001:DB8::10", "id": "network1" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + } + macs = {'fa:16:3e:ed:9a:59': 'eth0'} + render_dir = self.tmp_dir() + network_cfg = openstack.convert_net_json(net_json, known_macs=macs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = self._get_renderer() + with self.assertRaises(ValueError): + renderer.render_network_state(ns, target=render_dir) + self.assertEqual([], os.listdir(render_dir)) + + def test_openstack_rendering_samples(self): + for os_sample in OS_SAMPLES: + render_dir = self.tmp_dir() + ex_input = os_sample['in_data'] + ex_mac_addrs = os_sample['in_macs'] + network_cfg = openstack.convert_net_json( + ex_input, known_macs=ex_mac_addrs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = self._get_renderer() + # render a multiple times to simulate reboots + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + for fn, expected_content in os_sample.get('out_sysconfig_opensuse', + []): + with open(os.path.join(render_dir, fn)) as fh: + self.assertEqual(expected_content, fh.read()) + + def 
test_network_config_v1_samples(self): + ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET) + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + found = dir2dict(render_dir) + nspath = '/etc/sysconfig/network/' + self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + expected = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=interface0 +GATEWAY=10.0.2.2 +HWADDR=52:54:00:12:34:00 +IPADDR=10.0.2.15 +NETMASK=255.255.255.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected, found[nspath + 'ifcfg-interface0']) + + def test_config_with_explicit_loopback(self): + ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + found = dir2dict(render_dir) + nspath = '/etc/sysconfig/network/' + self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + expected = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=dhcp +DEVICE=eth0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected, found[nspath + 'ifcfg-eth0']) + + def test_bond_config(self): + expected_name = 'expected_sysconfig_opensuse' + entry = NETWORK_CONFIGS['bond'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + for fname, contents in entry[expected_name].items(): + print(fname) + print(contents) + print() + print('-- expected ^ | v rendered --') + for fname, contents in found.items(): + print(fname) + print(contents) + print() + self._compare_files_to_expected(entry[expected_name], found) self._assert_headers(found) def test_vlan_config(self): entry = NETWORK_CONFIGS['vlan'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_bridge_config(self): entry = NETWORK_CONFIGS['bridge'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_manual_config(self): entry = NETWORK_CONFIGS['manual'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_all_config(self): entry = NETWORK_CONFIGS['all'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) self.assertNotIn( 'WARNING: Network config: ignoring eth0.101 device-level mtu', @@ -1941,13 +2407,13 @@ USERCTL=no def test_small_config(self): entry = NETWORK_CONFIGS['small'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_v4_and_v6_static_config(self): entry = 
NETWORK_CONFIGS['v4_and_v6_static'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) expected_msg = ( 'WARNING: Network config: ignoring iface0 device-level mtu:8999' @@ -1957,7 +2423,7 @@ USERCTL=no def test_dhcpv6_only_config(self): entry = NETWORK_CONFIGS['dhcpv6_only'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) @@ -1982,7 +2448,7 @@ class TestEniNetRendering(CiTestCase): renderer = eni.Renderer( {'eni_path': 'interfaces', 'netrules_path': None}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, 'interfaces'))) @@ -2002,7 +2468,7 @@ iface eth1000 inet dhcp tmp_dir = self.tmp_dir() ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) renderer = eni.Renderer() - renderer.render_network_state(ns, tmp_dir) + renderer.render_network_state(ns, target=tmp_dir) expected = """\ auto lo iface lo inet loopback @@ -2038,7 +2504,7 @@ class TestNetplanNetRendering(CiTestCase): render_target = 'netplan.yaml' renderer = netplan.Renderer( {'netplan_path': render_target, 'postcmds': False}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, render_target))) @@ -2143,7 +2609,7 @@ class TestNetplanPostcommands(CiTestCase): render_target = 'netplan.yaml' renderer = netplan.Renderer( {'netplan_path': render_target, 'postcmds': True}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) mock_netplan_generate.assert_called_with(run=True) mock_net_setup_link.assert_called_with(run=True) @@ -2168,7 +2634,7 @@ class TestNetplanPostcommands(CiTestCase): '/sys/class/net/lo'], capture=True), ] with mock.patch.object(os.path, 'islink', return_value=True): - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) mock_subp.assert_has_calls(expected) @@ -2363,7 +2829,7 @@ class TestNetplanRoundTrip(CiTestCase): renderer = netplan.Renderer( config={'netplan_path': netplan_path}) - renderer.render_network_state(ns, target) + renderer.render_network_state(ns, target=target) return dir2dict(target) def testsimple_render_bond_netplan(self): @@ -2453,7 +2919,7 @@ class TestEniRoundTrip(CiTestCase): renderer = eni.Renderer( config={'eni_path': eni_path, 'netrules_path': netrules_path}) - renderer.render_network_state(ns, dir) + renderer.render_network_state(ns, target=dir) return dir2dict(dir) def testsimple_convert_and_render(self): -- cgit v1.2.3 From a8dcad9ac62bb1d2a4f7489960395bad6cac9382 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 5 Sep 2018 16:02:25 +0000 Subject: tests: Disallow use of util.subp except for where needed. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In many cases, cloud-init uses 'util.subp' to run a subprocess. This is not really desirable in our unit tests as it makes the tests dependent upon existance of those utilities. The change here is to modify the base test case class (CiTestCase) to raise exception any time subp is called. 
Then, fix all callers. For cases where subp is necessary or actually desired, we can use it via   a.) context hander CiTestCase.allow_subp(value)   b.) class level self.allowed_subp = value Both cases the value is a list of acceptable executable names that will be called (essentially argv[0]). Some cleanups in AltCloud were done as the code was being updated. --- cloudinit/analyze/tests/test_dump.py | 86 ++++++++------------ cloudinit/cmd/tests/test_status.py | 6 +- cloudinit/config/tests/test_snap.py | 7 +- cloudinit/config/tests/test_ubuntu_advantage.py | 7 +- cloudinit/net/tests/test_init.py | 2 + cloudinit/sources/DataSourceAltCloud.py | 24 +++--- cloudinit/sources/DataSourceSmartOS.py | 29 ++++--- cloudinit/tests/helpers.py | 43 ++++++++++ tests/unittests/test_datasource/test_altcloud.py | 44 +++++----- tests/unittests/test_datasource/test_cloudsigma.py | 3 + .../unittests/test_datasource/test_configdrive.py | 3 + tests/unittests/test_datasource/test_nocloud.py | 2 + tests/unittests/test_datasource/test_opennebula.py | 1 + tests/unittests/test_datasource/test_ovf.py | 8 +- tests/unittests/test_datasource/test_smartos.py | 94 +++++++++++++++------- tests/unittests/test_ds_identify.py | 1 + .../test_handler/test_handler_apt_source_v3.py | 11 ++- .../unittests/test_handler/test_handler_bootcmd.py | 10 ++- tests/unittests/test_handler/test_handler_chef.py | 18 +++-- .../test_handler/test_handler_resizefs.py | 8 +- tests/unittests/test_handler/test_schema.py | 12 ++- tests/unittests/test_net.py | 18 ++++- tests/unittests/test_util.py | 27 ++++--- 23 files changed, 289 insertions(+), 175 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py index f4c42841..db2a667b 100644 --- a/cloudinit/analyze/tests/test_dump.py +++ b/cloudinit/analyze/tests/test_dump.py @@ -5,8 +5,8 @@ from textwrap import dedent from cloudinit.analyze.dump import ( dump_events, parse_ci_logline, parse_timestamp) -from cloudinit.util import subp, write_file -from cloudinit.tests.helpers import CiTestCase +from cloudinit.util import which, write_file +from cloudinit.tests.helpers import CiTestCase, mock, skipIf class TestParseTimestamp(CiTestCase): @@ -15,21 +15,9 @@ class TestParseTimestamp(CiTestCase): """Logs with cloud-init detailed formats will be properly parsed.""" trusty_fmt = '%Y-%m-%d %H:%M:%S,%f' trusty_stamp = '2016-09-12 14:39:20,839' - - parsed = parse_timestamp(trusty_stamp) - - # convert ourselves dt = datetime.strptime(trusty_stamp, trusty_fmt) - expected = float(dt.strftime('%s.%f')) - - # use date(1) - out, _err = subp(['date', '+%s.%3N', '-d', trusty_stamp]) - timestamp = out.strip() - date_ts = float(timestamp) - - self.assertEqual(expected, parsed) - self.assertEqual(expected, date_ts) - self.assertEqual(date_ts, parsed) + self.assertEqual( + float(dt.strftime('%s.%f')), parse_timestamp(trusty_stamp)) def test_parse_timestamp_handles_syslog_adding_year(self): """Syslog timestamps lack a year. 
Add year and properly parse.""" @@ -39,17 +27,9 @@ class TestParseTimestamp(CiTestCase): # convert stamp ourselves by adding the missing year value year = datetime.now().year dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt) - expected = float(dt.strftime('%s.%f')) - parsed = parse_timestamp(syslog_stamp) - - # use date(1) - out, _ = subp(['date', '+%s.%3N', '-d', syslog_stamp]) - timestamp = out.strip() - date_ts = float(timestamp) - - self.assertEqual(expected, parsed) - self.assertEqual(expected, date_ts) - self.assertEqual(date_ts, parsed) + self.assertEqual( + float(dt.strftime('%s.%f')), + parse_timestamp(syslog_stamp)) def test_parse_timestamp_handles_journalctl_format_adding_year(self): """Journalctl precise timestamps lack a year. Add year and parse.""" @@ -59,37 +39,22 @@ class TestParseTimestamp(CiTestCase): # convert stamp ourselves by adding the missing year value year = datetime.now().year dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt) - expected = float(dt.strftime('%s.%f')) - parsed = parse_timestamp(journal_stamp) - - # use date(1) - out, _ = subp(['date', '+%s.%6N', '-d', journal_stamp]) - timestamp = out.strip() - date_ts = float(timestamp) - - self.assertEqual(expected, parsed) - self.assertEqual(expected, date_ts) - self.assertEqual(date_ts, parsed) + self.assertEqual( + float(dt.strftime('%s.%f')), parse_timestamp(journal_stamp)) + @skipIf(not which("date"), "'date' command not available.") def test_parse_unexpected_timestamp_format_with_date_command(self): - """Dump sends unexpected timestamp formats to data for processing.""" + """Dump sends unexpected timestamp formats to date for processing.""" new_fmt = '%H:%M %m/%d %Y' new_stamp = '17:15 08/08' - # convert stamp ourselves by adding the missing year value year = datetime.now().year dt = datetime.strptime(new_stamp + " " + str(year), new_fmt) - expected = float(dt.strftime('%s.%f')) - parsed = parse_timestamp(new_stamp) # use date(1) - out, _ = subp(['date', '+%s.%6N', '-d', new_stamp]) - timestamp = out.strip() - date_ts = float(timestamp) - - self.assertEqual(expected, parsed) - self.assertEqual(expected, date_ts) - self.assertEqual(date_ts, parsed) + with self.allow_subp(["date"]): + self.assertEqual( + float(dt.strftime('%s.%f')), parse_timestamp(new_stamp)) class TestParseCILogLine(CiTestCase): @@ -135,7 +100,9 @@ class TestParseCILogLine(CiTestCase): 'timestamp': timestamp} self.assertEqual(expected, parse_ci_logline(line)) - def test_parse_logline_returns_event_for_finish_events(self): + @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") + def test_parse_logline_returns_event_for_finish_events(self, + m_parse_from_date): """parse_ci_logline returns a finish event for a parsed log line.""" line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]' ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running' @@ -147,7 +114,10 @@ class TestParseCILogLine(CiTestCase): 'origin': 'cloudinit', 'result': 'SUCCESS', 'timestamp': 1472594005.972} + m_parse_from_date.return_value = "1472594005.972" self.assertEqual(expected, parse_ci_logline(line)) + m_parse_from_date.assert_has_calls( + [mock.call("2016-08-30 21:53:25.972325+00:00")]) SAMPLE_LOGS = dedent("""\ @@ -162,10 +132,16 @@ Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\ class TestDumpEvents(CiTestCase): maxDiff = None - def test_dump_events_with_rawdata(self): + @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") + def test_dump_events_with_rawdata(self, 
m_parse_from_date): """Rawdata is split and parsed into a tuple of events and data""" + m_parse_from_date.return_value = "1472594005.972" events, data = dump_events(rawdata=SAMPLE_LOGS) expected_data = SAMPLE_LOGS.splitlines() + self.assertEqual( + [mock.call("2016-08-30 21:53:25.972325+00:00")], + m_parse_from_date.call_args_list) + self.assertEqual(expected_data, data) year = datetime.now().year dt1 = datetime.strptime( 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') @@ -183,12 +159,14 @@ class TestDumpEvents(CiTestCase): 'result': 'SUCCESS', 'timestamp': 1472594005.972}] self.assertEqual(expected_events, events) - self.assertEqual(expected_data, data) - def test_dump_events_with_cisource(self): + @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") + def test_dump_events_with_cisource(self, m_parse_from_date): """Cisource file is read and parsed into a tuple of events and data.""" tmpfile = self.tmp_path('logfile') write_file(tmpfile, SAMPLE_LOGS) + m_parse_from_date.return_value = 1472594005.972 + events, data = dump_events(cisource=open(tmpfile)) year = datetime.now().year dt1 = datetime.strptime( @@ -208,3 +186,5 @@ class TestDumpEvents(CiTestCase): 'timestamp': 1472594005.972}] self.assertEqual(expected_events, events) self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data]) + m_parse_from_date.assert_has_calls( + [mock.call("2016-08-30 21:53:25.972325+00:00")]) diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py index 37a89936..aded8580 100644 --- a/cloudinit/cmd/tests/test_status.py +++ b/cloudinit/cmd/tests/test_status.py @@ -39,7 +39,8 @@ class TestStatus(CiTestCase): ensure_file(self.disable_file) # Create the ignored disable file (is_disabled, reason) = wrap_and_call( 'cloudinit.cmd.status', - {'uses_systemd': False}, + {'uses_systemd': False, + 'get_cmdline': "root=/dev/my-root not-important"}, status._is_cloudinit_disabled, self.disable_file, self.paths) self.assertFalse( is_disabled, 'expected enabled cloud-init on sysvinit') @@ -50,7 +51,8 @@ class TestStatus(CiTestCase): ensure_file(self.disable_file) # Create observed disable file (is_disabled, reason) = wrap_and_call( 'cloudinit.cmd.status', - {'uses_systemd': True}, + {'uses_systemd': True, + 'get_cmdline': "root=/dev/my-root not-important"}, status._is_cloudinit_disabled, self.disable_file, self.paths) self.assertTrue(is_disabled, 'expected disabled cloud-init') self.assertEqual( diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py index 34c80f1e..3c472891 100644 --- a/cloudinit/config/tests/test_snap.py +++ b/cloudinit/config/tests/test_snap.py @@ -162,6 +162,7 @@ class TestAddAssertions(CiTestCase): class TestRunCommands(CiTestCase): with_logs = True + allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] def setUp(self): super(TestRunCommands, self).setUp() @@ -424,8 +425,10 @@ class TestHandle(CiTestCase): 'snap': {'commands': ['echo "HI" >> %s' % outfile, 'echo "MOM" >> %s' % outfile]}} mock_path = 'cloudinit.config.cc_snap.sys.stderr' - with mock.patch(mock_path, new_callable=StringIO): - handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) + with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]): + with mock.patch(mock_path, new_callable=StringIO): + handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual('HI\nMOM\n', util.load_file(outfile)) @mock.patch('cloudinit.config.cc_snap.util.subp') diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py 
b/cloudinit/config/tests/test_ubuntu_advantage.py index f1beeff8..b7cf9bee 100644 --- a/cloudinit/config/tests/test_ubuntu_advantage.py +++ b/cloudinit/config/tests/test_ubuntu_advantage.py @@ -23,6 +23,7 @@ class FakeCloud(object): class TestRunCommands(CiTestCase): with_logs = True + allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] def setUp(self): super(TestRunCommands, self).setUp() @@ -234,8 +235,10 @@ class TestHandle(CiTestCase): 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile, 'echo "MOM" >> %s' % outfile]}} mock_path = '%s.sys.stderr' % MPATH - with mock.patch(mock_path, new_callable=StringIO): - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]): + with mock.patch(mock_path, new_callable=StringIO): + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, + args=None) self.assertEqual('HI\nMOM\n', util.load_file(outfile)) diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index 5c017d15..8b444f14 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -199,6 +199,8 @@ class TestGenerateFallbackConfig(CiTestCase): self.sysdir = self.tmp_dir() + '/' self.m_sys_path.return_value = self.sysdir self.addCleanup(sys_mock.stop) + self.add_patch('cloudinit.net.util.is_container', 'm_is_container', + return_value=False) self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') def test_generate_fallback_finds_connected_eth_with_mac(self): diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 24fd65ff..8cd312d0 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -181,27 +181,18 @@ class DataSourceAltCloud(sources.DataSource): # modprobe floppy try: - cmd = CMD_PROBE_FLOPPY - (cmd_out, _err) = util.subp(cmd) - LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) + modprobe_floppy() except ProcessExecutionError as e: - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) - return False - except OSError as e: - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) + util.logexc(LOG, 'Failed modprobe: %s', e) return False floppy_dev = '/dev/fd0' # udevadm settle for floppy device try: - (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5) - LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) - except ProcessExecutionError as e: - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) - return False - except OSError as e: - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) + util.udevadm_settle(exists=floppy_dev, timeout=5) + except (ProcessExecutionError, OSError) as e: + util.logexc(LOG, 'Failed udevadm_settle: %s\n', e) return False try: @@ -258,6 +249,11 @@ class DataSourceAltCloud(sources.DataSource): return False +def modprobe_floppy(): + out, _err = util.subp(CMD_PROBE_FLOPPY) + LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out) + + # Used to match classes to dependencies # Source DataSourceAltCloud does not really depend on networking. 
# In the future 'dsmode' like behavior can be added to offer user diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index ad8cfb9c..593ac91a 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -683,6 +683,18 @@ def jmc_client_factory( raise ValueError("Unknown value for smartos_type: %s" % smartos_type) +def identify_file(content_f): + cmd = ["file", "--brief", "--mime-type", content_f] + f_type = None + try: + (f_type, _err) = util.subp(cmd) + LOG.debug("script %s mime type is %s", content_f, f_type) + except util.ProcessExecutionError as e: + util.logexc( + LOG, ("Failed to identify script type for %s" % content_f, e)) + return None if f_type is None else f_type.strip() + + def write_boot_content(content, content_f, link=None, shebang=False, mode=0o400): """ @@ -715,18 +727,11 @@ def write_boot_content(content, content_f, link=None, shebang=False, util.write_file(content_f, content, mode=mode) if shebang and not content.startswith("#!"): - try: - cmd = ["file", "--brief", "--mime-type", content_f] - (f_type, _err) = util.subp(cmd) - LOG.debug("script %s mime type is %s", content_f, f_type) - if f_type.strip() == "text/plain": - new_content = "\n".join(["#!/bin/bash", content]) - util.write_file(content_f, new_content, mode=mode) - LOG.debug("added shebang to file %s", content_f) - - except Exception as e: - util.logexc(LOG, ("Failed to identify script type for %s" % - content_f, e)) + f_type = identify_file(content_f) + if f_type == "text/plain": + util.write_file( + content_f, "\n".join(["#!/bin/bash", content]), mode=mode) + LOG.debug("added shebang to file %s", content_f) if link: try: diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 9a21426e..e7e4182f 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -33,6 +33,8 @@ from cloudinit import helpers as ch from cloudinit.sources import DataSourceNone from cloudinit import util +_real_subp = util.subp + # Used for skipping tests SkipTest = unittest2.SkipTest skipIf = unittest2.skipIf @@ -143,6 +145,17 @@ class CiTestCase(TestCase): # Subclass overrides for specific test behavior # Whether or not a unit test needs logfile setup with_logs = False + allowed_subp = False + SUBP_SHELL_TRUE = "shell=true" + + @contextmanager + def allow_subp(self, allowed_subp): + orig = self.allowed_subp + try: + self.allowed_subp = allowed_subp + yield + finally: + self.allowed_subp = orig def setUp(self): super(CiTestCase, self).setUp() @@ -155,11 +168,41 @@ class CiTestCase(TestCase): handler.setFormatter(formatter) self.old_handlers = self.logger.handlers self.logger.handlers = [handler] + if self.allowed_subp is True: + util.subp = _real_subp + else: + util.subp = self._fake_subp + + def _fake_subp(self, *args, **kwargs): + if 'args' in kwargs: + cmd = kwargs['args'] + else: + cmd = args[0] + + if not isinstance(cmd, six.string_types): + cmd = cmd[0] + pass_through = False + if not isinstance(self.allowed_subp, (list, bool)): + raise TypeError("self.allowed_subp supports list or bool.") + if isinstance(self.allowed_subp, bool): + pass_through = self.allowed_subp + else: + pass_through = ( + (cmd in self.allowed_subp) or + (self.SUBP_SHELL_TRUE in self.allowed_subp and + kwargs.get('shell'))) + if pass_through: + return _real_subp(*args, **kwargs) + raise Exception( + "called subp. 
set self.allowed_subp=True to allow\n subp(%s)" % + ', '.join([str(repr(a)) for a in args] + + ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()])) def tearDown(self): if self.with_logs: # Remove the handler we setup logging.getLogger().handlers = self.old_handlers + util.subp = _real_subp super(CiTestCase, self).tearDown() def tmp_dir(self, dir=None, cleanup=True): diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py index 3253f3ad..ff35904e 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -262,64 +262,56 @@ class TestUserDataRhevm(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.user_data_rhevm() ''' - cmd_pass = ['true'] - cmd_fail = ['false'] - cmd_not_found = ['bogus bad command'] - def setUp(self): '''Set up.''' self.paths = helpers.Paths({'cloud_dir': '/tmp'}) - self.mount_dir = tempfile.mkdtemp() + self.mount_dir = self.tmp_dir() _write_user_data_files(self.mount_dir, 'test user data') - - def tearDown(self): - # Reset - - _remove_user_data_files(self.mount_dir) - - # Attempt to remove the temp dir ignoring errors - try: - shutil.rmtree(self.mount_dir) - except OSError: - pass - - dsac.CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info' - dsac.CMD_PROBE_FLOPPY = ['modprobe', 'floppy'] - dsac.CMD_UDEVADM_SETTLE = ['udevadm', 'settle', - '--quiet', '--timeout=5'] + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.modprobe_floppy', + 'm_modprobe_floppy', return_value=None) + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle', + 'm_udevadm_settle', return_value=('', '')) + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.util.mount_cb', + 'm_mount_cb') def test_mount_cb_fails(self): '''Test user_data_rhevm() where mount_cb fails.''' - dsac.CMD_PROBE_FLOPPY = self.cmd_pass + self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_modprobe_fails(self): '''Test user_data_rhevm() where modprobe fails.''' - dsac.CMD_PROBE_FLOPPY = self.cmd_fail + self.m_modprobe_floppy.side_effect = util.ProcessExecutionError( + "Failed modprobe") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_no_modprobe_cmd(self): '''Test user_data_rhevm() with no modprobe command.''' - dsac.CMD_PROBE_FLOPPY = self.cmd_not_found + self.m_modprobe_floppy.side_effect = util.ProcessExecutionError( + "No such file or dir") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_udevadm_fails(self): '''Test user_data_rhevm() where udevadm fails.''' - dsac.CMD_UDEVADM_SETTLE = self.cmd_fail + self.m_udevadm_settle.side_effect = util.ProcessExecutionError( + "Failed settle.") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_no_udevadm_cmd(self): '''Test user_data_rhevm() with no udevadm command.''' - dsac.CMD_UDEVADM_SETTLE = self.cmd_not_found + self.m_udevadm_settle.side_effect = OSError("No such file or dir") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index f6a59b6b..380ad1b5 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ 
b/tests/unittests/test_datasource/test_cloudsigma.py @@ -42,6 +42,9 @@ class CepkoMock(Cepko): class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def setUp(self): super(DataSourceCloudSigmaTest, self).setUp() + self.add_patch( + "cloudinit.sources.DataSourceCloudSigma.util.is_container", + "m_is_container", return_value=False) self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( "", "", paths=self.paths) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 7e6fcbbf..b0abfc5f 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -224,6 +224,9 @@ class TestConfigDriveDataSource(CiTestCase): def setUp(self): super(TestConfigDriveDataSource, self).setUp() + self.add_patch( + "cloudinit.sources.DataSourceConfigDrive.util.find_devs_with", + "m_find_devs_with", return_value=[]) self.tmp = self.tmp_dir() def test_ec2_metadata(self): diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index cdbd1e1a..21931eb7 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -25,6 +25,8 @@ class TestNoCloudDataSource(CiTestCase): self.mocks.enter_context( mock.patch.object(util, 'get_cmdline', return_value=self.cmdline)) + self.mocks.enter_context( + mock.patch.object(util, 'read_dmi_data', return_value=None)) def test_nocloud_seed_dir(self): md = {'instance-id': 'IID', 'dsmode': 'local'} diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 36b4d771..61591017 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -43,6 +43,7 @@ DS_PATH = "cloudinit.sources.DataSourceOpenNebula" class TestOpenNebulaDataSource(CiTestCase): parsed_user = None + allowed_subp = ['bash'] def setUp(self): super(TestOpenNebulaDataSource, self).setUp() diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index fc4eb36e..9d52eb99 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -124,7 +124,9 @@ class TestDatasourceOVF(CiTestCase): ds = self.datasource(sys_cfg={}, distro={}, paths=paths) retcode = wrap_and_call( 'cloudinit.sources.DataSourceOVF', - {'util.read_dmi_data': None}, + {'util.read_dmi_data': None, + 'transport_iso9660': (False, None, None), + 'transport_vmware_guestd': (False, None, None)}, ds.get_data) self.assertFalse(retcode, 'Expected False return from ds.get_data') self.assertIn( @@ -138,7 +140,9 @@ class TestDatasourceOVF(CiTestCase): paths=paths) retcode = wrap_and_call( 'cloudinit.sources.DataSourceOVF', - {'util.read_dmi_data': 'vmware'}, + {'util.read_dmi_data': 'vmware', + 'transport_iso9660': (False, None, None), + 'transport_vmware_guestd': (False, None, None)}, ds.get_data) self.assertFalse(retcode, 'Expected False return from ds.get_data') self.assertIn( diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index dca0b3d4..46d67b94 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -20,10 +20,8 @@ import multiprocessing import os import os.path import re -import shutil import signal import stat -import tempfile 
import unittest2 import uuid @@ -31,15 +29,27 @@ from cloudinit import serial from cloudinit.sources import DataSourceSmartOS from cloudinit.sources.DataSourceSmartOS import ( convert_smartos_network_data as convert_net, - SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ) + SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, + identify_file) import six from cloudinit import helpers as c_helpers -from cloudinit.util import (b64e, subp) +from cloudinit.util import ( + b64e, subp, ProcessExecutionError, which, write_file) -from cloudinit.tests.helpers import mock, FilesystemMockingTestCase, TestCase +from cloudinit.tests.helpers import ( + CiTestCase, mock, FilesystemMockingTestCase, skipIf) + +try: + import serial as _pyserial + assert _pyserial # avoid pyflakes error F401: import unused + HAS_PYSERIAL = True +except ImportError: + HAS_PYSERIAL = False + +DSMOS = 'cloudinit.sources.DataSourceSmartOS' SDC_NICS = json.loads(""" [ { @@ -366,37 +376,33 @@ class PsuedoJoyentClient(object): class TestSmartOSDataSource(FilesystemMockingTestCase): + jmc_cfact = None + get_smartos_environ = None + def setUp(self): super(TestSmartOSDataSource, self).setUp() - dsmos = 'cloudinit.sources.DataSourceSmartOS' - patcher = mock.patch(dsmos + ".jmc_client_factory") - self.jmc_cfact = patcher.start() - self.addCleanup(patcher.stop) - patcher = mock.patch(dsmos + ".get_smartos_environ") - self.get_smartos_environ = patcher.start() - self.addCleanup(patcher.stop) - - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.paths = c_helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - - self.legacy_user_d = os.path.join(self.tmp, 'legacy_user_tmp') + self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ") + self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact") + self.legacy_user_d = self.tmp_path('legacy_user_tmp') os.mkdir(self.legacy_user_d) - - self.orig_lud = DataSourceSmartOS.LEGACY_USER_D - DataSourceSmartOS.LEGACY_USER_D = self.legacy_user_d - - def tearDown(self): - DataSourceSmartOS.LEGACY_USER_D = self.orig_lud - super(TestSmartOSDataSource, self).tearDown() + self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d", + autospec=False, new=self.legacy_user_d) + self.add_patch(DSMOS + ".identify_file", "m_identify_file", + return_value="text/plain") def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM, sys_cfg=None, ds_cfg=None): self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata) self.get_smartos_environ.return_value = mode + tmpd = self.tmp_dir() + dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd), + 'run_dir': self.tmp_path('run_dir')} + for d in dirs.values(): + os.mkdir(d) + paths = c_helpers.Paths(dirs) + if sys_cfg is None: sys_cfg = {} @@ -405,7 +411,7 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): sys_cfg['datasource']['SmartOS'] = ds_cfg return DataSourceSmartOS.DataSourceSmartOS( - sys_cfg, distro=None, paths=self.paths) + sys_cfg, distro=None, paths=paths) def test_no_base64(self): ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} @@ -493,6 +499,7 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): dsrc.metadata['user-script']) legacy_script_f = "%s/user-script" % self.legacy_user_d + print("legacy_script_f=%s" % legacy_script_f) self.assertTrue(os.path.exists(legacy_script_f)) self.assertTrue(os.path.islink(legacy_script_f)) user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:] @@ -640,6 +647,28 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): 
mydscfg['disk_aliases']['FOO']) +class TestIdentifyFile(CiTestCase): + """Test the 'identify_file' utility.""" + @skipIf(not which("file"), "command 'file' not available.") + def test_file_happy_path(self): + """Test file is available and functional on plain text.""" + fname = self.tmp_path("myfile") + write_file(fname, "plain text content here\n") + with self.allow_subp(["file"]): + self.assertEqual("text/plain", identify_file(fname)) + + @mock.patch(DSMOS + ".util.subp") + def test_returns_none_on_error(self, m_subp): + """On 'file' execution error, None should be returned.""" + m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99) + fname = self.tmp_path("myfile") + write_file(fname, "plain text content here\n") + self.assertEqual(None, identify_file(fname)) + self.assertEqual( + [mock.call(["file", "--brief", "--mime-type", fname])], + m_subp.call_args_list) + + class ShortReader(object): """Implements a 'read' interface for bytes provided. much like io.BytesIO but the 'endbyte' acts as if EOF. @@ -893,7 +922,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): self.assertEqual(client.list(), []) -class TestNetworkConversion(TestCase): +class TestNetworkConversion(CiTestCase): def test_convert_simple(self): expected = { 'version': 1, @@ -1058,7 +1087,8 @@ class TestNetworkConversion(TestCase): "Only supported on KVM and bhyve guests under SmartOS") @unittest2.skipUnless(os.access(SERIAL_DEVICE, os.W_OK), "Requires write access to " + SERIAL_DEVICE) -class TestSerialConcurrency(TestCase): +@unittest2.skipUnless(HAS_PYSERIAL is True, "pyserial not available") +class TestSerialConcurrency(CiTestCase): """ This class tests locking on an actual serial port, and as such can only be run in a kvm or bhyve guest running on a SmartOS host. A test run on @@ -1066,7 +1096,11 @@ class TestSerialConcurrency(TestCase): there is only one session over a connection. In contrast, in the absence of proper locking multiple processes opening the same serial port can corrupt each others' exchanges with the metadata server. + + This takes on the order of 2 to 3 minutes to run. 
""" + allowed_subp = ['mdata-get'] + def setUp(self): self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop) self.mdata_proc.start() @@ -1097,7 +1131,7 @@ class TestSerialConcurrency(TestCase): keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()] keys.extend(ds.SMARTOS_ATTRIB_JSON.values()) - client = ds.jmc_client_factory() + client = ds.jmc_client_factory(smartos_type=SMARTOS_ENV_KVM) self.assertIsNotNone(client) # The behavior that we are testing for was observed mdata-get running diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index e08e7908..46778e95 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -89,6 +89,7 @@ CallReturn = namedtuple('CallReturn', class DsIdentifyBase(CiTestCase): dsid_path = os.path.realpath('tools/ds-identify') + allowed_subp = ['sh'] def call(self, rootd=None, mocks=None, func="main", args=None, files=None, policy_dmi=DI_DEFAULT_POLICY, diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index 7a64c230..a81c67c7 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -48,6 +48,10 @@ ADD_APT_REPO_MATCH = r"^[\w-]+:\w" TARGET = None +MOCK_LSB_RELEASE_DATA = { + 'id': 'Ubuntu', 'description': 'Ubuntu 18.04.1 LTS', + 'release': '18.04', 'codename': 'bionic'} + class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """TestAptSourceConfig @@ -64,6 +68,9 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list") self.join = os.path.join self.matcher = re.compile(ADD_APT_REPO_MATCH).search + self.add_patch( + 'cloudinit.config.cc_apt_configure.util.lsb_release', + 'm_lsb_release', return_value=MOCK_LSB_RELEASE_DATA.copy()) @staticmethod def _add_apt_sources(*args, **kwargs): @@ -76,7 +83,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): Get the most basic default mrror and release info to be used in tests """ params = {} - params['RELEASE'] = util.lsb_release()['codename'] + params['RELEASE'] = MOCK_LSB_RELEASE_DATA['release'] arch = 'amd64' params['MIRROR'] = cc_apt_configure.\ get_default_mirrors(arch)["PRIMARY"] @@ -464,7 +471,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): 'uri': 'http://testsec.ubuntu.com/%s/' % component}]} post = ("%s_dists_%s-updates_InRelease" % - (component, util.lsb_release()['codename'])) + (component, MOCK_LSB_RELEASE_DATA['codename'])) fromfn = ("%s/%s_%s" % (pre, archive, post)) tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py index b1375269..a76760fa 100644 --- a/tests/unittests/test_handler/test_handler_bootcmd.py +++ b/tests/unittests/test_handler/test_handler_bootcmd.py @@ -118,7 +118,8 @@ class TestBootcmd(CiTestCase): 'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]} with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - handle('cc_bootcmd', valid_config, cc, LOG, []) + with self.allow_subp(['/bin/sh']): + handle('cc_bootcmd', valid_config, cc, LOG, []) self.assertEqual(my_id + ' iid-datasource-none\n', util.load_file(out_file)) @@ -128,12 +129,13 @@ class TestBootcmd(CiTestCase): valid_config = {'bootcmd': ['exit 1']} # Script with error with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - with 
self.assertRaises(util.ProcessExecutionError) as ctxt_manager: - handle('does-not-matter', valid_config, cc, LOG, []) + with self.allow_subp(['/bin/sh']): + with self.assertRaises(util.ProcessExecutionError) as ctxt: + handle('does-not-matter', valid_config, cc, LOG, []) self.assertIn( 'Unexpected error while running command.\n' "Command: ['/bin/sh',", - str(ctxt_manager.exception)) + str(ctxt.exception)) self.assertIn( 'Failed to run bootcmd module does-not-matter', self.logs.getvalue()) diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index f4bbd66d..b16532ea 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -36,13 +36,21 @@ class TestInstallChefOmnibus(HttprettyTestCase): @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) def test_install_chef_from_omnibus_runs_chef_url_content(self): - """install_chef_from_omnibus runs downloaded OMNIBUS_URL as script.""" - chef_outfile = self.tmp_path('chef.out', self.new_root) - response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile) + """install_chef_from_omnibus calls subp_blob_in_tempfile.""" + response = b'#!/bin/bash\necho "Hi Mom"' httpretty.register_uri( httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200) - cc_chef.install_chef_from_omnibus() - self.assertEqual('Hi Mom\n', util.load_file(chef_outfile)) + ret = (None, None) # stdout, stderr but capture=False + + with mock.patch("cloudinit.config.cc_chef.util.subp_blob_in_tempfile", + return_value=ret) as m_subp_blob: + cc_chef.install_chef_from_omnibus() + # admittedly whitebox, but assuming subp_blob_in_tempfile works + # this should be fine. + self.assertEqual( + [mock.call(blob=response, args=[], basename='chef-omnibus-install', + capture=False)], + m_subp_blob.call_args_list) @mock.patch('cloudinit.config.cc_chef.url_helper.readurl') @mock.patch('cloudinit.config.cc_chef.util.subp_blob_in_tempfile') diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py index f92175fd..feca56c2 100644 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ b/tests/unittests/test_handler/test_handler_resizefs.py @@ -150,10 +150,12 @@ class TestResizefs(CiTestCase): self.assertEqual(('growfs', '-y', devpth), _resize_ufs(mount_point, devpth)) + @mock.patch('cloudinit.util.is_container', return_value=False) @mock.patch('cloudinit.util.get_mount_info') @mock.patch('cloudinit.util.get_device_info_from_zpool') @mock.patch('cloudinit.util.parse_mount') - def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount): + def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount, + is_container): devpth = 'vmzroot/ROOT/freebsd' disk = 'gpt/system' fs_type = 'zfs' @@ -354,8 +356,10 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase): ('btrfs', 'filesystem', 'resize', 'max', '/'), _resize_btrfs("/", "/dev/sda1")) + @mock.patch('cloudinit.util.is_container', return_value=True) @mock.patch('cloudinit.util.is_FreeBSD') - def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd): + def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd, + m_is_container): freebsd.return_value = True info = 'dev=gpt/system mnt_point=/ path=/' devpth = maybe_get_writable_device_path('gpt/system', info, LOG) diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index fb266faf..1bad07f6 100644 --- 
a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -4,7 +4,7 @@ from cloudinit.config.schema import ( CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file, get_schema_doc, get_schema, validate_cloudconfig_file, validate_cloudconfig_schema, main) -from cloudinit.util import subp, write_file +from cloudinit.util import write_file from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema @@ -406,8 +406,14 @@ class CloudTestsIntegrationTest(CiTestCase): integration_testdir = os.path.sep.join( [testsdir, 'cloud_tests', 'testcases']) errors = [] - out, _ = subp(['find', integration_testdir, '-name', '*yaml']) - for filename in out.splitlines(): + + yaml_files = [] + for root, _dirnames, filenames in os.walk(integration_testdir): + yaml_files.extend([os.path.join(root, f) + for f in filenames if f.endswith(".yaml")]) + self.assertTrue(len(yaml_files) > 0) + + for filename in yaml_files: test_cfg = safe_load(open(filename)) cloud_config = test_cfg.get('cloud_config') if cloud_config: diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 05d5c72c..f3165da9 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1653,6 +1653,12 @@ def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, class TestGenerateFallbackConfig(CiTestCase): + def setUp(self): + super(TestGenerateFallbackConfig, self).setUp() + self.add_patch( + "cloudinit.util.get_cmdline", "m_get_cmdline", + return_value="root=/dev/sda1") + @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") @@ -1895,12 +1901,13 @@ class TestRhelSysConfigRendering(CiTestCase): if missing: raise AssertionError("Missing headers in: %s" % missing) + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path): + mock_sys_dev_path, m_get_cmdline): tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) @@ -2183,12 +2190,13 @@ class TestOpenSuseSysConfigRendering(CiTestCase): if missing: raise AssertionError("Missing headers in: %s" % missing) + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path): + mock_sys_dev_path, m_get_cmdline): tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) @@ -2429,12 +2437,13 @@ USERCTL=no class TestEniNetRendering(CiTestCase): + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path): + mock_sys_dev_path, m_get_cmdline): tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) @@ -2482,6 +2491,7 @@ iface eth0 inet dhcp class TestNetplanNetRendering(CiTestCase): + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") 
@mock.patch("cloudinit.net.netplan._clean_default") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @@ -2489,7 +2499,7 @@ class TestNetplanNetRendering(CiTestCase): def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path, - mock_clean_default): + mock_clean_default, m_get_cmdline): tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 7a203ce2..5a14479a 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -24,6 +24,7 @@ except ImportError: BASH = util.which('bash') +BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name' class FakeSelinux(object): @@ -742,6 +743,8 @@ class TestReadSeeded(helpers.TestCase): class TestSubp(helpers.CiTestCase): with_logs = True + allowed_subp = [BASH, 'cat', helpers.CiTestCase.SUBP_SHELL_TRUE, + BOGUS_COMMAND, sys.executable] stdin2err = [BASH, '-c', 'cat >&2'] stdin2out = ['cat'] @@ -749,7 +752,6 @@ class TestSubp(helpers.CiTestCase): utf8_valid = b'start \xc3\xa9 end' utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] - bogus_command = 'this-is-not-expected-to-be-a-program-name' def printf_cmd(self, *args): # bash's printf supports \xaa. So does /usr/bin/printf @@ -848,9 +850,10 @@ class TestSubp(helpers.CiTestCase): util.write_file(noshebang, 'true\n') os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) - self.assertRaisesRegex(util.ProcessExecutionError, - r'Missing #! in script\?', - util.subp, (noshebang,)) + with self.allow_subp([noshebang]): + self.assertRaisesRegex(util.ProcessExecutionError, + r'Missing #! in script\?', + util.subp, (noshebang,)) def test_subp_combined_stderr_stdout(self): """Providing combine_capture as True redirects stderr to stdout.""" @@ -868,14 +871,14 @@ class TestSubp(helpers.CiTestCase): def test_exception_has_out_err_are_bytes_if_decode_false(self): """Raised exc should have stderr, stdout as bytes if no decode.""" with self.assertRaises(util.ProcessExecutionError) as cm: - util.subp([self.bogus_command], decode=False) + util.subp([BOGUS_COMMAND], decode=False) self.assertTrue(isinstance(cm.exception.stdout, bytes)) self.assertTrue(isinstance(cm.exception.stderr, bytes)) def test_exception_has_out_err_are_bytes_if_decode_true(self): """Raised exc should have stderr, stdout as string if no decode.""" with self.assertRaises(util.ProcessExecutionError) as cm: - util.subp([self.bogus_command], decode=True) + util.subp([BOGUS_COMMAND], decode=True) self.assertTrue(isinstance(cm.exception.stdout, six.string_types)) self.assertTrue(isinstance(cm.exception.stderr, six.string_types)) @@ -925,10 +928,10 @@ class TestSubp(helpers.CiTestCase): logs.append(log) with self.assertRaises(util.ProcessExecutionError): - util.subp([self.bogus_command], status_cb=status_cb) + util.subp([BOGUS_COMMAND], status_cb=status_cb) expected = [ - 'Begin run command: {cmd}\n'.format(cmd=self.bogus_command), + 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND), 'ERROR: End run command: invalid command provided\n'] self.assertEqual(expected, logs) @@ -940,13 +943,13 @@ class TestSubp(helpers.CiTestCase): logs.append(log) with self.assertRaises(util.ProcessExecutionError): - util.subp(['ls', '/I/dont/exist'], status_cb=status_cb) - util.subp(['ls'], status_cb=status_cb) + util.subp([BASH, '-c', 'exit 2'], status_cb=status_cb) + util.subp([BASH, '-c', 
'exit 0'], status_cb=status_cb) expected = [ - 'Begin run command: ls /I/dont/exist\n', + 'Begin run command: %s -c exit 2\n' % BASH, 'ERROR: End run command: exit(2)\n', - 'Begin run command: ls\n', + 'Begin run command: %s -c exit 0\n' % BASH, 'End run command: exit(0)\n'] self.assertEqual(expected, logs) -- cgit v1.2.3 From c75c582ed1824dc3ff39cefdbddccfc2924b868c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 12 Sep 2018 18:44:19 +0000 Subject: OpenStack: fix bug causing 'latest' version to be used from network. Cloud-init was reading a list of versions from the OpenStack metadata service (http://169.254.169.254/openstack/) and attempt to select the newest known supported version. The problem was that the list of versions was not being decoded, so we were comparing a list of bytes (found versions) to a list of strings (known versions). LP: #1792157 --- cloudinit/sources/helpers/openstack.py | 2 +- tests/unittests/test_datasource/test_openstack.py | 30 +++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 8f9c1441..e3d372b6 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -439,7 +439,7 @@ class MetadataReader(BaseReader): return self._versions found = [] version_path = self._path_join(self.base_path, "openstack") - content = self._path_read(version_path) + content = self._path_read(version_path, decode=True) for line in content.splitlines(): line = line.strip() if not line: diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 6e1e971b..ab5d3adc 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -555,4 +555,34 @@ class TestDetectOpenStack(test_helpers.CiTestCase): m_proc_env.assert_called_with(1) +class TestMetadataReader(test_helpers.HttprettyTestCase): + """Test the MetadataReader.""" + burl = 'http://169.254.169.254/' + + def register(self, path, body=None, status=200): + hp.register_uri( + hp.GET, self.burl + "openstack" + path, status=status, body=body) + + def register_versions(self, versions): + self.register("", '\n'.join(versions).encode('utf-8')) + + def test__find_working_version(self): + """Test a working version ignores unsupported.""" + unsup = "2016-11-09" + self.register_versions( + [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup, + openstack.OS_LATEST]) + self.assertEqual( + openstack.OS_LIBERTY, + openstack.MetadataReader(self.burl)._find_working_version()) + + def test__find_working_version_uses_latest(self): + """'latest' should be used if no supported versions.""" + unsup1, unsup2 = ("2016-11-09", '2017-06-06') + self.register_versions([unsup1, unsup2, openstack.OS_LATEST]) + self.assertEqual( + openstack.OS_LATEST, + openstack.MetadataReader(self.burl)._find_working_version()) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 4361e0e42a34ebf9f49dde2356b7261715cf0cd5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 13 Sep 2018 18:44:20 +0000 Subject: OpenStack: support reading of newer versions of metdata. 
Mark as supported for reading some newer versions of openstack metadata: 2016-06-30 : Newton one 2016-10-06 : Newton two 2017-02-22 : Ocata 2018-08-27 : Rocky --- cloudinit/sources/helpers/openstack.py | 16 ++++++ tests/unittests/test_datasource/test_openstack.py | 66 +++++++++++++++++++++-- 2 files changed, 79 insertions(+), 3 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index e3d372b6..1e651440 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -38,17 +38,33 @@ KEY_COPIES = ( ('local-hostname', 'hostname', False), ('instance-id', 'uuid', True), ) + +# Versions and names taken from nova source nova/api/metadata/base.py OS_LATEST = 'latest' OS_FOLSOM = '2012-08-10' OS_GRIZZLY = '2013-04-04' OS_HAVANA = '2013-10-17' OS_LIBERTY = '2015-10-15' +# NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan) +OS_NEWTON_ONE = '2016-06-30' +# NEWTON_TWO adds vendor_data2.json (vendordata-reboot) +OS_NEWTON_TWO = '2016-10-06' +# OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan) +OS_OCATA = '2017-02-22' +# OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs) +OS_ROCKY = '2018-08-27' + + # keep this in chronological order. new supported versions go at the end. OS_VERSIONS = ( OS_FOLSOM, OS_GRIZZLY, OS_HAVANA, OS_LIBERTY, + OS_NEWTON_ONE, + OS_NEWTON_TWO, + OS_OCATA, + OS_ROCKY, ) PHYSICAL_TYPES = ( diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index ab5d3adc..a731f1ed 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -12,7 +12,7 @@ import re from cloudinit.tests import helpers as test_helpers from six.moves.urllib.parse import urlparse -from six import StringIO +from six import StringIO, text_type from cloudinit import helpers from cloudinit import settings @@ -558,13 +558,36 @@ class TestDetectOpenStack(test_helpers.CiTestCase): class TestMetadataReader(test_helpers.HttprettyTestCase): """Test the MetadataReader.""" burl = 'http://169.254.169.254/' + md_base = { + 'availability_zone': 'myaz1', + 'hostname': 'sm-foo-test.novalocal', + "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}], + 'launch_index': 0, + 'name': 'sm-foo-test', + 'public_keys': {'mykey': PUBKEY}, + 'project_id': '6a103f813b774b9fb15a4fcd36e1c056', + 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} def register(self, path, body=None, status=200): + content = (body if not isinstance(body, text_type) + else body.encode('utf-8')) hp.register_uri( - hp.GET, self.burl + "openstack" + path, status=status, body=body) + hp.GET, self.burl + "openstack" + path, status=status, + body=content) def register_versions(self, versions): - self.register("", '\n'.join(versions).encode('utf-8')) + self.register("", '\n'.join(versions)) + self.register("/", '\n'.join(versions)) + + def register_version(self, version, data): + content = '\n'.join(sorted(data.keys())) + self.register(version, content) + self.register(version + "/", content) + for path, content in data.items(): + self.register("/%s/%s" % (version, path), content) + self.register("/%s/%s" % (version, path), content) + if 'user_data' not in data: + self.register("/%s/user_data" % version, "nodata", status=404) def test__find_working_version(self): """Test a working version ignores unsupported.""" @@ -584,5 +607,42 @@ class 
TestMetadataReader(test_helpers.HttprettyTestCase): openstack.OS_LATEST, openstack.MetadataReader(self.burl)._find_working_version()) + def test_read_v2_os_ocata(self): + """Validate return value of read_v2 for os_ocata data.""" + md = copy.deepcopy(self.md_base) + md['devices'] = [] + network_data = {'links': [], 'networks': [], 'services': []} + vendor_data = {} + vendor_data2 = {"static": {}} + + data = { + 'meta_data.json': json.dumps(md), + 'network_data.json': json.dumps(network_data), + 'vendor_data.json': json.dumps(vendor_data), + 'vendor_data2.json': json.dumps(vendor_data2), + } + + self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST]) + self.register_version(openstack.OS_OCATA, data) + + mock_read_ec2 = test_helpers.mock.MagicMock( + return_value={'instance-id': 'unused-ec2'}) + expected_md = copy.deepcopy(md) + expected_md.update( + {'instance-id': md['uuid'], 'local-hostname': md['hostname']}) + expected = { + 'userdata': '', # Annoying, no user-data results in empty string. + 'version': 2, + 'metadata': expected_md, + 'vendordata': vendor_data, + 'networkdata': network_data, + 'ec2-metadata': mock_read_ec2.return_value, + 'files': {}, + } + reader = openstack.MetadataReader(self.burl) + reader._read_ec2_metadata = mock_read_ec2 + self.assertEqual(expected, reader.read_v2()) + self.assertEqual(1, mock_read_ec2.call_count) + # vi: ts=4 expandtab -- cgit v1.2.3 From c6cfed7fb58be76a3f95963a5de469aa03542cd3 Mon Sep 17 00:00:00 2001 From: Fabian Wiesel Date: Fri, 14 Sep 2018 19:33:01 +0000 Subject: OpenStack: Support setting mac address on bond. Fix a bug where setting of mac address on a bond device was ignored when provided in OpenStack network_config.json. LP: #1682064 --- cloudinit/sources/helpers/openstack.py | 2 ++ tests/unittests/test_datasource/test_configdrive.py | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index d6f39a1e..76a6e310 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -604,6 +604,8 @@ def convert_net_json(network_json=None, known_macs=None): cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) elif link['type'] in ['bond']: params = {} + if link_mac_addr: + params['mac_address'] = link_mac_addr for k, v in link.items(): if k == 'bond_links': continue diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index b0abfc5f..231619c9 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -136,6 +136,7 @@ NETWORK_DATA_3 = { ] } +BOND_MAC = "fa:16:3e:b3:72:36" NETWORK_DATA_BOND = { "services": [ {"type": "dns", "address": "1.1.1.191"}, @@ -163,7 +164,7 @@ NETWORK_DATA_BOND = { {"bond_links": ["eth0", "eth1"], "bond_miimon": 100, "bond_mode": "4", "bond_xmit_hash_policy": "layer3+4", - "ethernet_mac_address": "0c:c4:7a:34:6e:3c", + "ethernet_mac_address": BOND_MAC, "id": "bond0", "type": "bond"}, {"ethernet_mac_address": "fa:16:3e:b3:72:30", "id": "vlan2", "type": "vlan", "vlan_id": 602, @@ -691,6 +692,9 @@ class TestConvertNetworkData(CiTestCase): self.assertIn("auto oeth0", eni_rendering) self.assertIn("auto oeth1", eni_rendering) self.assertIn("auto bond0", eni_rendering) + # The bond should have the given mac address + pos = eni_rendering.find("auto bond0") + self.assertIn(BOND_MAC, eni_rendering[pos:]) def 
test_vlan(self): # light testing of vlan config conversion and eni rendering -- cgit v1.2.3
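As a usage note on the subp-blocking helpers introduced in the first patch above (the CiTestCase.allowed_subp attribute and the allow_subp() context manager in cloudinit/tests/helpers.py): the sketch below is illustrative only and not part of the patch series. It assumes a POSIX test host with 'true' and 'date' on PATH, and the class name TestNeedsRealCommands is invented for the example.

    # Minimal sketch, assuming the CiTestCase helpers from the patch above;
    # not taken from any of the commits in this log.
    from cloudinit import util
    from cloudinit.tests.helpers import CiTestCase


    class TestNeedsRealCommands(CiTestCase):
        # Class-level allow-list: only these executables (argv[0]) may really
        # run; any other util.subp() call raises under the patched test base.
        allowed_subp = ['true']

        def test_true_exits_zero(self):
            util.subp(['true'])  # permitted by the class-level allowed_subp

        def test_date_inside_context_manager(self):
            # Scoped allow-list via the allow_subp() context manager; assumes
            # the 'date' command is available on the test host.
            with self.allow_subp(['date']):
                out, _err = util.subp(['date', '+%Y'])
            self.assertTrue(out.strip().isdigit())

Calls that are neither in the class-level list nor inside an allow_subp() scope fail loudly, which is the behavior the patch relies on to keep unit tests from shelling out by accident.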