From b56d7a191fc695be364430f8428a17591c523403 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 8 Aug 2016 15:46:36 -0700 Subject: Newer requests have strong type validation Only use strings in headers, as newer requests actually do stricter validation of this, so ensure that we comply by only having string objects in header dicts. --- cloudinit/sources/DataSourceGCE.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index c660a350..6c12d703 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -31,7 +31,7 @@ REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') class GoogleMetadataFetcher(object): - headers = {'X-Google-Metadata-Request': True} + headers = {'X-Google-Metadata-Request': 'True'} def __init__(self, metadata_address): self.metadata_address = metadata_address -- cgit v1.2.3 From c52b8eb9671be454d22283c81057f97b98bb49fe Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 10 Aug 2016 12:44:28 -0600 Subject: SmartOS: more improvements for network configuration This improves smart os network configuration - fix the SocketClient which was previously completely broken. - adds support for configuring dns servers and dns search (based off the sdc:dns_domain). - support 'sdc:gateways' information from the datasource for configuring default routes. - add converted network information to output when module is run as a main This does not support 'sdc:routes' as described at http://eng.joyent.com/mdata/datadict.html --- ChangeLog | 2 + cloudinit/sources/DataSourceSmartOS.py | 114 ++++++-- tests/unittests/test_datasource/test_smartos.py | 350 +++++++++++++++++++++++- 3 files changed, 438 insertions(+), 28 deletions(-) (limited to 'cloudinit/sources') diff --git a/ChangeLog b/ChangeLog index bae982e3..3d9b8ea4 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,5 @@ +0.7.8: + - SmartOS: more improvements for network configuration 0.7.7: - open 0.7.7 - Digital Ocean: add datasource for Digital Ocean. 
[Neal Shrader] diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index ccc86883..39e7bbd9 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -60,11 +60,15 @@ SMARTOS_ATTRIB_MAP = { 'availability_zone': ('sdc:datacenter_name', True), 'vendor-data': ('sdc:vendor-data', False), 'operator-script': ('sdc:operator-script', False), + 'hostname': ('sdc:hostname', True), + 'dns_domain': ('sdc:dns_domain', True), } SMARTOS_ATTRIB_JSON = { # Cloud-init Key : (SmartOS Key known JSON) 'network-data': 'sdc:nics', + 'dns_servers': 'sdc:resolvers', + 'routes': 'sdc:routes', } SMARTOS_ENV_LX_BRAND = "lx-brand" @@ -311,7 +315,10 @@ class DataSourceSmartOS(sources.DataSource): if self._network_config is None: if self.network_data is not None: self._network_config = ( - convert_smartos_network_data(self.network_data)) + convert_smartos_network_data( + network_data=self.network_data, + dns_servers=self.metadata['dns_servers'], + dns_domain=self.metadata['dns_domain'])) return self._network_config @@ -445,7 +452,8 @@ class JoyentMetadataClient(object): class JoyentMetadataSocketClient(JoyentMetadataClient): - def __init__(self, socketpath): + def __init__(self, socketpath, smartos_type=SMARTOS_ENV_LX_BRAND): + super(JoyentMetadataSocketClient, self).__init__(smartos_type) self.socketpath = socketpath def open_transport(self): @@ -461,7 +469,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient): class JoyentMetadataSerialClient(JoyentMetadataClient): - def __init__(self, device, timeout=10, smartos_type=None): + def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM): super(JoyentMetadataSerialClient, self).__init__(smartos_type) self.device = device self.timeout = timeout @@ -583,7 +591,8 @@ def jmc_client_factory( device=serial_device, timeout=serial_timeout, smartos_type=smartos_type) elif smartos_type == SMARTOS_ENV_LX_BRAND: - return JoyentMetadataSocketClient(socketpath=metadata_sockfile) + return JoyentMetadataSocketClient(socketpath=metadata_sockfile, + smartos_type=smartos_type) raise ValueError("Unknown value for smartos_type: %s" % smartos_type) @@ -671,8 +680,9 @@ def get_smartos_environ(uname_version=None, product_name=None, return None -# Covert SMARTOS 'sdc:nics' data to network_config yaml -def convert_smartos_network_data(network_data=None): +# Convert SMARTOS 'sdc:nics' data to network_config yaml +def convert_smartos_network_data(network_data=None, + dns_servers=None, dns_domain=None): """Return a dictionary of network_config by parsing provided SMARTOS sdc:nics configuration data @@ -706,9 +716,7 @@ def convert_smartos_network_data(network_data=None): 'broadcast', 'dns_nameservers', 'dns_search', - 'gateway', 'metric', - 'netmask', 'pointopoint', 'routes', 'scope', @@ -716,6 +724,29 @@ def convert_smartos_network_data(network_data=None): ], } + if dns_servers: + if not isinstance(dns_servers, (list, tuple)): + dns_servers = [dns_servers] + else: + dns_servers = [] + + if dns_domain: + if not isinstance(dns_domain, (list, tuple)): + dns_domain = [dns_domain] + else: + dns_domain = [] + + def is_valid_ipv4(addr): + return '.' 
in addr + + def is_valid_ipv6(addr): + return ':' in addr + + pgws = { + 'ipv4': {'match': is_valid_ipv4, 'gw': None}, + 'ipv6': {'match': is_valid_ipv6, 'gw': None}, + } + config = [] for nic in network_data: cfg = dict((k, v) for k, v in nic.items() @@ -727,18 +758,40 @@ def convert_smartos_network_data(network_data=None): cfg.update({'mac_address': nic['mac']}) subnets = [] - for ip, gw in zip(nic['ips'], nic['gateways']): - subnet = dict((k, v) for k, v in nic.items() - if k in valid_keys['subnet']) - subnet.update({ - 'type': 'static', - 'address': ip, - 'gateway': gw, - }) + for ip in nic.get('ips', []): + if ip == "dhcp": + subnet = {'type': 'dhcp4'} + else: + subnet = dict((k, v) for k, v in nic.items() + if k in valid_keys['subnet']) + subnet.update({ + 'type': 'static', + 'address': ip, + }) + + proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6' + # Only use gateways for 'primary' nics + if 'primary' in nic and nic.get('primary', False): + # the ips and gateways list may be N to M, here + # we map the ip index into the gateways list, + # and handle the case that we could have more ips + # than gateways. we only consume the first gateway + if not pgws[proto]['gw']: + gateways = [gw for gw in nic.get('gateways', []) + if pgws[proto]['match'](gw)] + if len(gateways): + pgws[proto]['gw'] = gateways[0] + subnet.update({'gateway': pgws[proto]['gw']}) + subnets.append(subnet) cfg.update({'subnets': subnets}) config.append(cfg) + if dns_servers: + config.append( + {'type': 'nameserver', 'address': dns_servers, + 'search': dns_domain}) + return {'version': 1, 'config': config} @@ -761,21 +814,36 @@ if __name__ == "__main__": sys.exit(1) if len(sys.argv) == 1: keys = (list(SMARTOS_ATTRIB_JSON.keys()) + - list(SMARTOS_ATTRIB_MAP.keys())) + list(SMARTOS_ATTRIB_MAP.keys()) + ['network_config']) else: keys = sys.argv[1:] - data = {} - for key in keys: + def load_key(client, key, data): + if key in data: + return data[key] + if key in SMARTOS_ATTRIB_JSON: keyname = SMARTOS_ATTRIB_JSON[key] - data[key] = jmc.get_json(keyname) + data[key] = client.get_json(keyname) + elif key == "network_config": + for depkey in ('network-data', 'dns_servers', 'dns_domain'): + load_key(client, depkey, data) + data[key] = convert_smartos_network_data( + network_data=data['network-data'], + dns_servers=data['dns_servers'], + dns_domain=data['dns_domain']) else: if key in SMARTOS_ATTRIB_MAP: keyname, strip = SMARTOS_ATTRIB_MAP[key] else: keyname, strip = (key, False) - val = jmc.get(keyname, strip=strip) - data[key] = jmc.get(keyname, strip=strip) + data[key] = client.get(keyname, strip=strip) + + return data[key] + + data = {} + for key in keys: + load_key(client=jmc, key=key, data=data) - print(json.dumps(data, indent=1)) + print(json.dumps(data, indent=1, sort_keys=True, + separators=(',', ': '))) diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 9c6c8768..0532f986 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -36,6 +36,8 @@ import uuid from cloudinit import serial from cloudinit.sources import DataSourceSmartOS +from cloudinit.sources.DataSourceSmartOS import ( + convert_smartos_network_data as convert_net) import six @@ -86,6 +88,229 @@ SDC_NICS = json.loads(""" ] """) + +SDC_NICS_ALT = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": 
"255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_DHCP = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "dhcp" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_MIP = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24", + "8.12.42.52/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24", + "10.210.1.151/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_MIP_IPV6 = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64", + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_IPV4_IPV6 = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": ["8.12.42.1", "2001::1", "2001::2"], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": ["2001::10/64", "8.12.42.51/24", "2001::11/64", + "8.12.42.52/32"], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": ["10.210.1.217/24"], + "gateways": ["10.210.1.210"], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_SINGLE_GATEWAY = json.loads(""" +[ + { + "interface":"net0", + "mac":"90:b8:d0:d8:82:b4", + 
"vlan_id":324, + "nic_tag":"external", + "gateway":"8.12.42.1", + "gateways":["8.12.42.1"], + "netmask":"255.255.255.0", + "ip":"8.12.42.26", + "ips":["8.12.42.26/24"], + "network_uuid":"992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model":"virtio", + "mtu":1500, + "primary":true + }, + { + "interface":"net1", + "mac":"90:b8:d0:0a:51:31", + "vlan_id":600, + "nic_tag":"internal", + "netmask":"255.255.255.0", + "ip":"10.210.1.27", + "ips":["10.210.1.27/24"], + "network_uuid":"98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model":"virtio", + "mtu":1500 + } +] +""") + + MOCK_RETURNS = { 'hostname': 'test-host', 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', @@ -524,20 +749,135 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): class TestNetworkConversion(TestCase): - def test_convert_simple(self): expected = { 'version': 1, 'config': [ {'name': 'net0', 'type': 'physical', 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'netmask': '255.255.255.0', 'address': '8.12.42.102/24'}], 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'}, {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '192.168.128.1', - 'netmask': '255.255.252.0', + 'subnets': [{'type': 'static', 'address': '192.168.128.93/22'}], 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]} - found = DataSourceSmartOS.convert_smartos_network_data(SDC_NICS) + found = convert_net(SDC_NICS) + self.assertEqual(expected, found) + + def test_convert_simple_alt(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_ALT) + self.assertEqual(expected, found) + + def test_convert_simple_dhcp(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'dhcp4'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_DHCP) + self.assertEqual(expected, found) + + def test_convert_simple_multi_ip(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}, + {'type': 'static', + 'address': '8.12.42.52/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}, + {'type': 'static', + 'address': '10.210.1.151/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_MIP) + self.assertEqual(expected, found) + + def test_convert_with_dns(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'dhcp4'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}, + {'type': 'nameserver', + 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]} + found = convert_net( + network_data=SDC_NICS_DHCP, 
dns_servers=['8.8.8.8', '8.8.8.1'], + dns_domain="local") + self.assertEqual(expected, found) + + def test_convert_simple_multi_ipv6(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'address': + '2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'}, + {'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_MIP_IPV6) + self.assertEqual(expected, found) + + def test_convert_simple_both_ipv4_ipv6(self): + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '2001::10/64', 'gateway': '2001::1', + 'type': 'static'}, + {'address': '8.12.42.51/24', + 'gateway': '8.12.42.1', + 'type': 'static'}, + {'address': '2001::11/64', 'type': 'static'}, + {'address': '8.12.42.52/32', 'type': 'static'}]}, + {'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.217/24', + 'type': 'static'}]}]} + found = convert_net(SDC_NICS_IPV4_IPV6) + self.assertEqual(expected, found) + + def test_gateways_not_on_all_nics(self): + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '8.12.42.26/24', + 'gateway': '8.12.42.1', 'type': 'static'}]}, + {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.27/24', + 'type': 'static'}]}]} + found = convert_net(SDC_NICS_SINGLE_GATEWAY) self.assertEqual(expected, found) -- cgit v1.2.3 From cdcac86848a570eb657af428fe4d2bd4ce3bceb1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 11 Aug 2016 06:43:17 -0600 Subject: NoCloud: fix bug providing network-interfaces via meta-data. This fixes an issue with the NoCloud datasource where it would not recognize the 'network-interfaces' key provided in meta-data. 
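For illustration, a minimal NoCloud seed exercising the fixed path might be built as in the sketch below (the seed location, addresses, and interface name are hypothetical, not taken from the patch; the ENI-style 'network-interfaces' value mirrors what the new tests use). With 'network-config' now defaulting to None rather than {}, the interfaces text from meta-data is no longer masked, as the tests that follow assert.

    # Hypothetical sketch: a NoCloud seed whose meta-data carries the
    # 'network-interfaces' key that this fix makes the datasource honor.
    import os
    import textwrap

    import yaml

    seed_dir = '/tmp/nocloud-seed'  # hypothetical location
    if not os.path.isdir(seed_dir):
        os.makedirs(seed_dir)
    meta = {
        'instance-id': 'i-example',
        'local-hostname': 'example-host',
        'network-interfaces': textwrap.dedent("""\
            auto eth0
            iface eth0 inet static
                address 192.0.2.10
                netmask 255.255.255.0
                gateway 192.0.2.1"""),
    }
    with open(os.path.join(seed_dir, 'meta-data'), 'w') as fp:
        fp.write(yaml.dump(meta))
    with open(os.path.join(seed_dir, 'user-data'), 'w') as fp:
        fp.write('#cloud-config\n')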
LP: 1577982 --- cloudinit/sources/DataSourceNoCloud.py | 2 +- tests/unittests/test_datasource/test_nocloud.py | 85 ++++++++++++++++++++++++- 2 files changed, 85 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index cdc9eef5..e6a0b5fe 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -52,7 +52,7 @@ class DataSourceNoCloud(sources.DataSource): found = [] mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "", - 'network-config': {}} + 'network-config': None} try: # Parse the kernel command line, getting data passed in diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index b0fa1130..f6a46ce9 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -6,7 +6,7 @@ from ..helpers import TestCase, populate_dir, mock, ExitStack import os import shutil import tempfile - +import textwrap import yaml @@ -129,6 +129,89 @@ class TestNoCloudDataSource(TestCase): self.assertFalse(dsrc.vendordata) self.assertTrue(ret) + def test_metadata_network_interfaces(self): + gateway = "103.225.10.1" + md = { + 'instance-id': 'i-abcd', + 'local-hostname': 'hostname1', + 'network-interfaces': textwrap.dedent("""\ + auto eth0 + iface eth0 inet static + hwaddr 00:16:3e:70:e1:04 + address 103.225.10.12 + netmask 255.255.255.0 + gateway """ + gateway + """ + dns-servers 8.8.8.8""")} + + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", + 'meta-data': yaml.dump(md) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + ds = DataSourceNoCloud.DataSourceNoCloud + + dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + # very simple check just for the strings above + self.assertIn(gateway, str(dsrc.network_config)) + + def test_metadata_network_config(self): + # network-config needs to get into network_config + netconf = {'version': 1, + 'config': [{'type': 'physical', 'name': 'interface0', + 'subnets': [{'type': 'dhcp'}]}]} + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", + 'meta-data': "instance-id: IID\n", + 'network-config': yaml.dump(netconf) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + ds = DataSourceNoCloud.DataSourceNoCloud + + dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(netconf, dsrc.network_config) + + def test_metadata_network_config_over_interfaces(self): + # network-config should override meta-data/network-interfaces + gateway = "103.225.10.1" + md = { + 'instance-id': 'i-abcd', + 'local-hostname': 'hostname1', + 'network-interfaces': textwrap.dedent("""\ + auto eth0 + iface eth0 inet static + hwaddr 00:16:3e:70:e1:04 + address 103.225.10.12 + netmask 255.255.255.0 + gateway """ + gateway + """ + dns-servers 8.8.8.8""")} + + netconf = {'version': 1, + 'config': [{'type': 'physical', 'name': 'interface0', + 'subnets': [{'type': 'dhcp'}]}]} + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", + 'meta-data': yaml.dump(md) + "\n", + 'network-config': yaml.dump(netconf) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + ds = DataSourceNoCloud.DataSourceNoCloud + + dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = 
dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(netconf, dsrc.network_config) + self.assertNotIn(gateway, str(dsrc.network_config)) + class TestParseCommandLineData(TestCase): -- cgit v1.2.3 From 8028c9234ec4260eda9431bffc6728ac3703e243 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 11 Aug 2016 15:03:09 -0600 Subject: ConfigDrive: recognize 'tap' as a link type. This just adds 'tap' to the list of types that are understood to be physical or virtual network devices. Openstack basically exposes the type of the host device through. LP: #1610784 --- cloudinit/sources/helpers/openstack.py | 2 +- .../unittests/test_datasource/test_configdrive.py | 44 ++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 2e7a1d47..461fbd0d 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -571,7 +571,7 @@ def convert_net_json(network_json=None, known_macs=None): subnet['ipv6'] = True subnets.append(subnet) cfg.update({'subnets': subnets}) - if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']: + if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge', 'tap']: cfg.update({ 'type': 'physical', 'mac_address': link['ethernet_mac_address']}) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 18551b92..d0269943 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -101,6 +101,41 @@ NETWORK_DATA_2 = { "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}] } +# This network data ha 'tap' type for a link. +NETWORK_DATA_3 = { + "services": [{"type": "dns", "address": "172.16.36.11"}, + {"type": "dns", "address": "172.16.36.12"}], + "networks": [ + {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", + "type": "ipv4", "netmask": "255.255.255.128", + "link": "tap77a0dc5b-72", "ip_address": "172.17.48.18", + "id": "network0", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "172.17.48.1"}]}, + {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", + "type": "ipv6", "netmask": "ffff:ffff:ffff:ffff::", + "link": "tap77a0dc5b-72", + "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d", + "id": "network1", + "routes": [{"netmask": "::", "network": "::", + "gateway": "fdb8:52d0:9d14::1"}]}, + {"network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54", + "type": "ipv4", "netmask": "255.255.255.128", + "link": "tap7d6b7bec-93", "ip_address": "172.16.48.13", + "id": "network2", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "172.16.48.1"}, + {"netmask": "255.255.0.0", "network": "172.16.0.0", + "gateway": "172.16.48.1"}]}], + "links": [ + {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": None, + "type": "tap", "id": "tap77a0dc5b-72", + "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48"}, + {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": None, + "type": "tap", "id": "tap7d6b7bec-93", + "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5"} + ] +} KNOWN_MACS = { 'fa:16:3e:69:b0:58': 'enp0s1', @@ -555,6 +590,15 @@ class TestConvertNetworkData(TestCase): eni_rendering = f.read() self.assertIn("route add default gw 2.2.2.9", eni_rendering) + def test_conversion_with_tap(self): + ncfg = openstack.convert_net_json(NETWORK_DATA_3, + known_macs=KNOWN_MACS) + physicals = set() + for i in ncfg['config']: + if i.get('type') == 
"physical": + physicals.add(i['name']) + self.assertEqual(physicals, set(('foo1', 'foo2'))) + def cfg_ds_from_dir(seed_d): cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None, -- cgit v1.2.3 From d9537aaa37f1e17db334c7cf8888ea3c4dcf1436 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 11 Aug 2016 06:34:06 -0600 Subject: MAAS: add vendor-data support Add vendor-data support to maas which will behave like the openstack vendor-data does. Data returned from maas must be yaml loadable. Also update the main in DataSourceMAAS to "just work" on a maas deployed system. LP: #1612313 --- cloudinit/sources/DataSourceConfigDrive.py | 2 +- cloudinit/sources/DataSourceMAAS.py | 199 ++++++++++++---------- cloudinit/sources/DataSourceOpenStack.py | 2 +- cloudinit/sources/__init__.py | 27 ++- cloudinit/sources/helpers/openstack.py | 25 --- tests/unittests/helpers.py | 4 +- tests/unittests/test_datasource/test_maas.py | 127 +++++++------- tests/unittests/test_datasource/test_openstack.py | 3 +- 8 files changed, 208 insertions(+), 181 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 91d6ff13..5c9edabe 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -134,7 +134,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): vd = results.get('vendordata') self.vendordata_pure = vd try: - self.vendordata_raw = openstack.convert_vendordata_json(vd) + self.vendordata_raw = sources.convert_vendordata(vd) except ValueError as e: LOG.warn("Invalid content in vendor-data: %s", e) self.vendordata_raw = None diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index d828f078..ab93c0a2 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -20,7 +20,6 @@ from __future__ import print_function -import errno import os import time @@ -32,7 +31,14 @@ from cloudinit import util LOG = logging.getLogger(__name__) MD_VERSION = "2012-03-01" -BINARY_FIELDS = ('user-data',) +DS_FIELDS = [ + # remote path, location in dictionary, binary data?, optional? 
+ ("meta-data/instance-id", 'meta-data/instance-id', False, False), + ("meta-data/local-hostname", 'meta-data/local-hostname', False, False), + ("meta-data/public-keys", 'meta-data/public-keys', False, True), + ('meta-data/vendor-data', 'vendor-data', True, True), + ('user-data', 'user-data', True, True), +] class DataSourceMAAS(sources.DataSource): @@ -43,6 +49,7 @@ class DataSourceMAAS(sources.DataSource): instance-id user-data hostname + vendor-data """ def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -71,10 +78,7 @@ class DataSourceMAAS(sources.DataSource): mcfg = self.ds_cfg try: - (userdata, metadata) = read_maas_seed_dir(self.seed_dir) - self.userdata_raw = userdata - self.metadata = metadata - self.base_url = self.seed_dir + self._set_data(self.seed_dir, read_maas_seed_dir(self.seed_dir)) return True except MAASSeedDirNone: pass @@ -95,18 +99,29 @@ class DataSourceMAAS(sources.DataSource): if not self.wait_for_metadata_service(url): return False - self.base_url = url - - (userdata, metadata) = read_maas_seed_url( - self.base_url, read_file_or_url=self.oauth_helper.readurl, - paths=self.paths, retries=1) - self.userdata_raw = userdata - self.metadata = metadata + self._set_data( + url, read_maas_seed_url( + url, read_file_or_url=self.oauth_helper.readurl, + paths=self.paths, retries=1)) return True except Exception: util.logexc(LOG, "Failed fetching metadata from url %s", url) return False + def _set_data(self, url, data): + # takes a url for base_url and a tuple of userdata, metadata, vd. + self.base_url = url + ud, md, vd = data + self.userdata_raw = ud + self.metadata = md + self.vendordata_pure = vd + if vd: + try: + self.vendordata_raw = sources.convert_vendordata(vd) + except ValueError as e: + LOG.warn("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None + def wait_for_metadata_service(self, url): mcfg = self.ds_cfg max_wait = 120 @@ -126,6 +141,8 @@ class DataSourceMAAS(sources.DataSource): LOG.warn("Failed to get timeout, using %s" % timeout) starttime = time.time() + if url.endswith("/"): + url = url[:-1] check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) urls = [check_url] url = self.oauth_helper.wait_for_url( @@ -141,27 +158,13 @@ class DataSourceMAAS(sources.DataSource): def read_maas_seed_dir(seed_d): - """ - Return user-data and metadata for a maas seed dir in seed_d. - Expected format of seed_d are the following files: - * instance-id - * local-hostname - * user-data - """ - if not os.path.isdir(seed_d): + if seed_d.startswith("file://"): + seed_d = seed_d[7:] + if not os.path.isdir(seed_d) or len(os.listdir(seed_d)) == 0: raise MAASSeedDirNone("%s: not a directory") - files = ('local-hostname', 'instance-id', 'user-data', 'public-keys') - md = {} - for fname in files: - try: - md[fname] = util.load_file(os.path.join(seed_d, fname), - decode=fname not in BINARY_FIELDS) - except IOError as e: - if e.errno != errno.ENOENT: - raise - - return check_seed_contents(md, seed_d) + # seed_dir looks in seed_dir, not seed_dir/VERSION + return read_maas_seed_url("file://%s" % seed_d, version=None) def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, @@ -175,73 +178,78 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, * //meta-data/instance-id * //meta-data/local-hostname * //user-data + If version is None, then / will not be used. 
""" - base_url = "%s/%s" % (seed_url, version) - file_order = [ - 'local-hostname', - 'instance-id', - 'public-keys', - 'user-data', - ] - files = { - 'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'), - 'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'), - 'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'), - 'user-data': "%s/%s" % (base_url, 'user-data'), - } - if read_file_or_url is None: read_file_or_url = util.read_file_or_url + if seed_url.endswith("/"): + seed_url = seed_url[:-1] + md = {} - for name in file_order: - url = files.get(name) - if name == 'user-data': - item_retries = 0 + for path, dictname, binary, optional in DS_FIELDS: + if version is None: + url = "%s/%s" % (seed_url, path) else: - item_retries = retries - + url = "%s/%s/%s" % (seed_url, version, path) try: ssl_details = util.fetch_ssl_details(paths) - resp = read_file_or_url(url, retries=item_retries, - timeout=timeout, ssl_details=ssl_details) + resp = read_file_or_url(url, retries=retries, timeout=timeout, + ssl_details=ssl_details) if resp.ok(): - if name in BINARY_FIELDS: - md[name] = resp.contents + if binary: + md[path] = resp.contents else: - md[name] = util.decode_binary(resp.contents) + md[path] = util.decode_binary(resp.contents) else: LOG.warn(("Fetching from %s resulted in" " an invalid http code %s"), url, resp.code) except url_helper.UrlError as e: - if e.code != 404: - raise + if e.code == 404 and not optional: + raise MAASSeedDirMalformed( + "Missing required %s: %s" % (path, e)) + elif e.code != 404: + raise e + return check_seed_contents(md, seed_url) def check_seed_contents(content, seed): - """Validate if content is Is the content a dict that is valid as a - return for a datasource. - Either return a (userdata, metadata) tuple or + """Validate if dictionary content valid as a return for a datasource. 
+ Either return a (userdata, metadata, vendordata) tuple or Raise MAASSeedDirMalformed or MAASSeedDirNone """ - md_required = ('instance-id', 'local-hostname') - if len(content) == 0: + ret = {} + missing = [] + for spath, dpath, _binary, optional in DS_FIELDS: + if spath not in content: + if not optional: + missing.append(spath) + continue + + if "/" in dpath: + top, _, p = dpath.partition("/") + if top not in ret: + ret[top] = {} + ret[top][p] = content[spath] + else: + ret[dpath] = content[spath] + + if len(ret) == 0: raise MAASSeedDirNone("%s: no data files found" % seed) - found = list(content.keys()) - missing = [k for k in md_required if k not in found] - if len(missing): + if missing: raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing)) - userdata = content.get('user-data', b"") - md = {} - for (key, val) in content.items(): - if key == 'user-data': - continue - md[key] = val + vd_data = None + if ret.get('vendor-data'): + err = object() + vd_data = util.load_yaml(ret.get('vendor-data'), default=err, + allowed=(object)) + if vd_data is err: + raise MAASSeedDirMalformed("vendor-data was not loadable as yaml.") - return (userdata, md) + return ret.get('user-data'), ret.get('meta-data'), vd_data class MAASSeedDirNone(Exception): @@ -272,6 +280,7 @@ if __name__ == "__main__": """ import argparse import pprint + import sys parser = argparse.ArgumentParser(description='Interact with MAAS DS') parser.add_argument("--config", metavar="file", @@ -289,17 +298,25 @@ if __name__ == "__main__": default=MD_VERSION) subcmds = parser.add_subparsers(title="subcommands", dest="subcmd") - subcmds.add_parser('crawl', help="crawl the datasource") - subcmds.add_parser('get', help="do a single GET of provided url") - subcmds.add_parser('check-seed', help="read andn verify seed at url") - - parser.add_argument("url", help="the data source to query") + for (name, help) in (('crawl', 'crawl the datasource'), + ('get', 'do a single GET of provided url'), + ('check-seed', 'read and verify seed at url')): + p = subcmds.add_parser(name, help=help) + p.add_argument("url", help="the datasource url", nargs='?', + default=None) args = parser.parse_args() creds = {'consumer_key': args.ckey, 'token_key': args.tkey, 'token_secret': args.tsec, 'consumer_secret': args.csec} + maaspkg_cfg = "/etc/cloud/cloud.cfg.d/90_dpkg_maas.cfg" + if (args.config is None and args.url is None and + os.path.exists(maaspkg_cfg) and + os.access(maaspkg_cfg, os.R_OK)): + sys.stderr.write("Used config in %s.\n" % maaspkg_cfg) + args.config = maaspkg_cfg + if args.config: cfg = util.read_conf(args.config) if 'datasource' in cfg: @@ -307,6 +324,12 @@ if __name__ == "__main__": for key in creds.keys(): if key in cfg and creds[key] is None: creds[key] = cfg[key] + if args.url is None and 'metadata_url' in cfg: + args.url = cfg['metadata_url'] + + if args.url is None: + sys.stderr.write("Must provide a url or a config with url.\n") + sys.exit(1) oauth_helper = url_helper.OauthUrlHelper(**creds) @@ -331,16 +354,20 @@ if __name__ == "__main__": printurl(url) if args.subcmd == "check-seed": + sys.stderr.write("Checking seed at %s\n" % args.url) readurl = oauth_helper.readurl if args.url[0] == "/" or args.url.startswith("file://"): - readurl = None - (userdata, metadata) = read_maas_seed_url( - args.url, version=args.apiver, read_file_or_url=readurl, - retries=2) - print("=== userdata ===") - print(userdata.decode()) - print("=== metadata ===") + (userdata, metadata, vd) = read_maas_seed_dir(args.url) + else: + (userdata, metadata, 
vd) = read_maas_seed_url( + args.url, version=args.apiver, read_file_or_url=readurl, + retries=2) + print("=== user-data ===") + print("N/A" if userdata is None else userdata.decode()) + print("=== meta-data ===") pprint.pprint(metadata) + print("=== vendor-data ===") + pprint.pprint("N/A" if vd is None else vd) elif args.subcmd == "get": printurl(args.url) diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index c06d17f3..82558214 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -138,7 +138,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): vd = results.get('vendordata') self.vendordata_pure = vd try: - self.vendordata_raw = openstack.convert_vendordata_json(vd) + self.vendordata_raw = sources.convert_vendordata(vd) except ValueError as e: LOG.warn("Invalid content in vendor-data: %s", e) self.vendordata_raw = None diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 87b8e524..d1395270 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -21,8 +21,8 @@ # along with this program. If not, see . import abc +import copy import os - import six from cloudinit import importer @@ -355,6 +355,31 @@ def instance_id_matches_system_uuid(instance_id, field='system-uuid'): return instance_id.lower() == dmi_value.lower() +def convert_vendordata(data, recurse=True): + """data: a loaded object (strings, arrays, dicts). + return something suitable for cloudinit vendordata_raw. + + if data is: + None: return None + string: return string + list: return data + the list is then processed in UserDataProcessor + dict: return convert_vendordata(data.get('cloud-init')) + """ + if not data: + return None + if isinstance(data, six.string_types): + return data + if isinstance(data, list): + return copy.deepcopy(data) + if isinstance(data, dict): + if recurse is True: + return convert_vendordata(data.get('cloud-init'), + recurse=False) + raise ValueError("vendordata['cloud-init'] cannot be dict") + raise ValueError("Unknown data type for vendordata: %s" % type(data)) + + # 'depends' is a list of dependencies (DEP_FILESYSTEM) # ds_list is a list of 2 item lists # ds_list = [ diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 461fbd0d..84322e0e 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -621,28 +621,3 @@ def convert_net_json(network_json=None, known_macs=None): config.append(cfg) return {'version': 1, 'config': config} - - -def convert_vendordata_json(data, recurse=True): - """data: a loaded json *object* (strings, arrays, dicts). - return something suitable for cloudinit vendordata_raw. 
- - if data is: - None: return None - string: return string - list: return data - the list is then processed in UserDataProcessor - dict: return convert_vendordata_json(data.get('cloud-init')) - """ - if not data: - return None - if isinstance(data, six.string_types): - return data - if isinstance(data, list): - return copy.deepcopy(data) - if isinstance(data, dict): - if recurse is True: - return convert_vendordata_json(data.get('cloud-init'), - recurse=False) - raise ValueError("vendordata['cloud-init'] cannot be dict") - raise ValueError("Unknown data type for vendordata: %s" % type(data)) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 972245df..de2cf638 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -256,7 +256,9 @@ def populate_dir(path, files): if not os.path.exists(path): os.makedirs(path) for (name, content) in files.items(): - with open(os.path.join(path, name), "wb") as fp: + p = os.path.join(path, name) + util.ensure_dir(os.path.dirname(p)) + with open(p, "wb") as fp: if isinstance(content, six.binary_type): fp.write(content) else: diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index f66f1c6d..0126c883 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -2,6 +2,7 @@ from copy import copy import os import shutil import tempfile +import yaml from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper @@ -24,41 +25,44 @@ class TestMAASDataSource(TestCase): def test_seed_dir_valid(self): """Verify a valid seeddir is read as such.""" - data = {'instance-id': 'i-valid01', - 'local-hostname': 'valid01-hostname', - 'user-data': b'valid01-userdata', + userdata = b'valid01-userdata' + data = {'meta-data/instance-id': 'i-valid01', + 'meta-data/local-hostname': 'valid01-hostname', + 'user-data': userdata, 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'} my_d = os.path.join(self.tmp, "valid") populate_dir(my_d, data) - (userdata, metadata) = DataSourceMAAS.read_maas_seed_dir(my_d) + ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d) - self.assertEqual(userdata, data['user-data']) + self.assertEqual(userdata, ud) for key in ('instance-id', 'local-hostname'): - self.assertEqual(data[key], metadata[key]) + self.assertEqual(data["meta-data/" + key], md[key]) # verify that 'userdata' is not returned as part of the metadata - self.assertFalse(('user-data' in metadata)) + self.assertFalse(('user-data' in md)) + self.assertEqual(vd, None) def test_seed_dir_valid_extra(self): """Verify extra files do not affect seed_dir validity.""" - data = {'instance-id': 'i-valid-extra', - 'local-hostname': 'valid-extra-hostname', - 'user-data': b'valid-extra-userdata', 'foo': 'bar'} + userdata = b'valid-extra-userdata' + data = {'meta-data/instance-id': 'i-valid-extra', + 'meta-data/local-hostname': 'valid-extra-hostname', + 'user-data': userdata, 'foo': 'bar'} my_d = os.path.join(self.tmp, "valid_extra") populate_dir(my_d, data) - (userdata, metadata) = DataSourceMAAS.read_maas_seed_dir(my_d) + ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d) - self.assertEqual(userdata, data['user-data']) + self.assertEqual(userdata, ud) for key in ('instance-id', 'local-hostname'): - self.assertEqual(data[key], metadata[key]) + self.assertEqual(data['meta-data/' + key], md[key]) # additional files should not just appear as keys in metadata atm - self.assertFalse(('foo' in metadata)) + self.assertFalse(('foo' in md)) def 
test_seed_dir_invalid(self): """Verify that invalid seed_dir raises MAASSeedDirMalformed.""" @@ -97,67 +101,60 @@ class TestMAASDataSource(TestCase): DataSourceMAAS.read_maas_seed_dir, os.path.join(self.tmp, "nonexistantdirectory")) + def mock_read_maas_seed_url(self, data, seed, version="19991231"): + """mock up readurl to appear as a web server at seed has provided data. + return what read_maas_seed_url returns.""" + def my_readurl(*args, **kwargs): + if len(args): + url = args[0] + else: + url = kwargs['url'] + prefix = "%s/%s/" % (seed, version) + if not url.startswith(prefix): + raise ValueError("unexpected call %s" % url) + + short = url[len(prefix):] + if short not in data: + raise url_helper.UrlError("not found", code=404, url=url) + return url_helper.StringResponse(data[short]) + + # Now do the actual call of the code under test. + with mock.patch("cloudinit.url_helper.readurl") as mock_readurl: + mock_readurl.side_effect = my_readurl + return DataSourceMAAS.read_maas_seed_url(seed, version=version) + def test_seed_url_valid(self): """Verify that valid seed_url is read as such.""" valid = { 'meta-data/instance-id': 'i-instanceid', 'meta-data/local-hostname': 'test-hostname', 'meta-data/public-keys': 'test-hostname', + 'meta-data/vendor-data': b'my-vendordata', 'user-data': b'foodata', } - valid_order = [ - 'meta-data/local-hostname', - 'meta-data/instance-id', - 'meta-data/public-keys', - 'user-data', - ] my_seed = "http://example.com/xmeta" my_ver = "1999-99-99" - my_headers = {'header1': 'value1', 'header2': 'value2'} - - def my_headers_cb(url): - return my_headers - - # Each time url_helper.readurl() is called, something different is - # returned based on the canned data above. We need to build up a list - # of side effect return values, which the mock will return. At the - # same time, we'll build up a list of expected call arguments for - # asserting after the code under test is run. - calls = [] - - def side_effect(): - for key in valid_order: - resp = valid.get(key) - url = "%s/%s/%s" % (my_seed, my_ver, key) - calls.append( - mock.call(url, headers=None, timeout=mock.ANY, - data=mock.ANY, sec_between=mock.ANY, - ssl_details=mock.ANY, retries=mock.ANY, - headers_cb=my_headers_cb, - exception_cb=mock.ANY)) - yield url_helper.StringResponse(resp) - - # Now do the actual call of the code under test. 
- with mock.patch.object(url_helper, 'readurl', - side_effect=side_effect()) as mockobj: - userdata, metadata = DataSourceMAAS.read_maas_seed_url( - my_seed, version=my_ver) - - self.assertEqual(b"foodata", userdata) - self.assertEqual(metadata['instance-id'], - valid['meta-data/instance-id']) - self.assertEqual(metadata['local-hostname'], - valid['meta-data/local-hostname']) - - mockobj.has_calls(calls) - - def test_seed_url_invalid(self): - """Verify that invalid seed_url raises MAASSeedDirMalformed.""" - pass - - def test_seed_url_missing(self): - """Verify seed_url with no found entries raises MAASSeedDirNone.""" - pass + ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver) + + self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) + self.assertEqual( + valid['meta-data/local-hostname'], md['local-hostname']) + self.assertEqual(valid['meta-data/public-keys'], md['public-keys']) + self.assertEqual(valid['user-data'], ud) + # vendor-data is yaml, which decodes a string + self.assertEqual(valid['meta-data/vendor-data'].decode(), vd) + + def test_seed_url_vendor_data_dict(self): + expected_vd = {'key1': 'value1'} + valid = { + 'meta-data/instance-id': 'i-instanceid', + 'meta-data/local-hostname': 'test-hostname', + 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(), + } + ud, md, vd = self.mock_read_maas_seed_url( + valid, "http://example.com/foo") + self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) + self.assertEqual(expected_vd, vd) # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 5c8592c5..97b99a18 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -27,6 +27,7 @@ from six import StringIO from cloudinit import helpers from cloudinit import settings +from cloudinit.sources import convert_vendordata from cloudinit.sources import DataSourceOpenStack as ds from cloudinit.sources.helpers import openstack from cloudinit import util @@ -318,7 +319,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): class TestVendorDataLoading(test_helpers.TestCase): def cvj(self, data): - return openstack.convert_vendordata_json(data) + return convert_vendordata(data) def test_vd_load_none(self): # non-existant vendor-data should return none -- cgit v1.2.3 From bc2c3267549b9067c017a34e22bbee18890aec06 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Mon, 1 Aug 2016 14:47:39 -0600 Subject: DigitalOcean: use the v1.json endpoint Per [1], DigitalOcean provides the metadata in multiple formats. The JSON document is the preferred endpoint. Changes: - Switch to the v1.json meta-data endpoint - Identify droplet identity from SMBIOS - Only poll for metadata when the instance is confirmed to be a droplet - Removal of hard-coded mirrors Additionally, centralize the gates on running 'dmidecode' on arm arches, and update tests to address. 
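As a sketch of the consumption model this enables, the whole metadata set now comes back as one JSON document rather than one request per key. Illustrative code only, runnable solely from inside a droplet; the URL is the one the patch hardcodes, and the keys follow the documentation referenced below:

    import json

    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen  # Python 2

    MD_URL = 'http://169.254.169.254/metadata/v1.json'
    md = json.loads(urlopen(MD_URL, timeout=2).read().decode())
    # A single round trip yields droplet_id, hostname, region,
    # public_keys, user_data, vendor_data, and so on.
    print(md.get('droplet_id'), md.get('region'))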
[1] https://developers.digitalocean.com/documentation/metadata/ --- cloudinit/sources/DataSourceAltCloud.py | 6 -- cloudinit/sources/DataSourceCloudSigma.py | 6 -- cloudinit/sources/DataSourceDigitalOcean.py | 106 ++++++++++++--------- cloudinit/sources/DataSourceSmartOS.py | 8 +- cloudinit/util.py | 7 ++ .../unittests/test_datasource/test_digitalocean.py | 67 ++++++------- tests/unittests/test_util.py | 26 ++++- 7 files changed, 122 insertions(+), 104 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index a3529609..48136f7c 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -110,12 +110,6 @@ class DataSourceAltCloud(sources.DataSource): ''' - uname_arch = os.uname()[4] - if uname_arch.startswith("arm") or uname_arch == "aarch64": - # Disabling because dmi data is not available on ARM processors - LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)") - return 'UNKNOWN' - system_name = util.read_dmi_data("system-product-name") if not system_name: return 'UNKNOWN' diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index d1f806d6..be74503b 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . from base64 import b64decode -import os import re from cloudinit.cs_utils import Cepko @@ -45,11 +44,6 @@ class DataSourceCloudSigma(sources.DataSource): Uses dmi data to detect if this instance of cloud-init is running in the CloudSigma's infrastructure. """ - uname_arch = os.uname()[4] - if uname_arch.startswith("arm") or uname_arch == "aarch64": - # Disabling because dmi data on ARM processors - LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)") - return False LOG.debug("determining hypervisor product name via dmi data") sys_product_name = util.read_dmi_data("system-product-name") diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 44a17a00..fc596e17 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -1,6 +1,7 @@ # vi: ts=4 expandtab # # Author: Neal Shrader +# Author: Ben Howard # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -14,22 +15,27 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from cloudinit import ec2_utils +# DigitalOcean Droplet API: +# https://developers.digitalocean.com/documentation/metadata/ + +import json + from cloudinit import log as logging from cloudinit import sources +from cloudinit import url_helper from cloudinit import util -import functools - - LOG = logging.getLogger(__name__) BUILTIN_DS_CONFIG = { - 'metadata_url': 'http://169.254.169.254/metadata/v1/', - 'mirrors_url': 'http://mirrors.digitalocean.com/' + 'metadata_url': 'http://169.254.169.254/metadata/v1.json', } -MD_RETRIES = 0 -MD_TIMEOUT = 1 + +# Wait for a up to a minute, retrying the meta-data server +# every 2 seconds. 
+MD_RETRIES = 30 +MD_TIMEOUT = 2 +MD_WAIT_RETRY = 2 class DataSourceDigitalOcean(sources.DataSource): @@ -40,43 +46,61 @@ class DataSourceDigitalOcean(sources.DataSource): util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}), BUILTIN_DS_CONFIG]) self.metadata_address = self.ds_cfg['metadata_url'] + self.retries = self.ds_cfg.get('retries', MD_RETRIES) + self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT) + self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY) - if self.ds_cfg.get('retries'): - self.retries = self.ds_cfg['retries'] - else: - self.retries = MD_RETRIES + def _get_sysinfo(self): + # DigitalOcean embeds vendor ID and instance/droplet_id in the + # SMBIOS information - if self.ds_cfg.get('timeout'): - self.timeout = self.ds_cfg['timeout'] - else: - self.timeout = MD_TIMEOUT + LOG.debug("checking if instance is a DigitalOcean droplet") + + # Detect if we are on DigitalOcean and return the Droplet's ID + vendor_name = util.read_dmi_data("system-manufacturer") + if vendor_name != "DigitalOcean": + return (False, None) - def get_data(self): - caller = functools.partial(util.read_file_or_url, - timeout=self.timeout, retries=self.retries) + LOG.info("running on DigitalOcean") - def mcaller(url): - return caller(url).contents + droplet_id = util.read_dmi_data("system-serial-number") + if droplet_id: + LOG.debug(("system identified via SMBIOS as DigitalOcean Droplet" + "{}").format(droplet_id)) + else: + LOG.critical(("system identified via SMBIOS as a DigitalOcean " + "Droplet, but did not provide an ID. Please file a " + "support ticket at: " + "https://cloud.digitalocean.com/support/tickets/" + "new")) - md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address), - base_url=self.metadata_address, - caller=mcaller) + return (True, droplet_id) - self.metadata = md.materialize() + def get_data(self, apply_filter=False): + (is_do, droplet_id) = self._get_sysinfo() - if self.metadata.get('id'): - return True - else: + # only proceed if we know we are on DigitalOcean + if not is_do: return False - def get_userdata_raw(self): - return "\n".join(self.metadata['user-data']) + LOG.debug("reading metadata from {}".format(self.metadata_address)) + response = url_helper.readurl(self.metadata_address, + timeout=self.timeout, + sec_between=self.wait_retry, + retries=self.retries) - def get_vendordata_raw(self): - return "\n".join(self.metadata['vendor-data']) + contents = util.decode_binary(response.contents) + decoded = json.loads(contents) + + self.metadata = decoded + self.metadata['instance-id'] = decoded.get('droplet_id', droplet_id) + self.metadata['local-hostname'] = decoded.get('hostname', droplet_id) + self.vendordata_raw = decoded.get("vendor_data", None) + self.userdata_raw = decoded.get("user_data", None) + return True def get_public_ssh_keys(self): - public_keys = self.metadata['public-keys'] + public_keys = self.metadata.get('public_keys', []) if isinstance(public_keys, list): return public_keys else: @@ -84,21 +108,17 @@ class DataSourceDigitalOcean(sources.DataSource): @property def availability_zone(self): - return self.metadata['region'] - - def get_instance_id(self): - return self.metadata['id'] - - def get_hostname(self, fqdn=False, resolve_ip=False): - return self.metadata['hostname'] - - def get_package_mirror_info(self): - return self.ds_cfg['mirrors_url'] + return self.metadata.get('region', 'default') @property def launch_index(self): return None + def check_instance_id(self, sys_cfg): + return sources.instance_id_matches_system_uuid( + 
self.get_instance_id(), 'system-serial-number') + + # Used to match classes to dependencies datasources = [ (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 39e7bbd9..143ab368 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -653,14 +653,8 @@ def write_boot_content(content, content_f, link=None, shebang=False, util.logexc(LOG, "failed establishing content link: %s", e) -def get_smartos_environ(uname_version=None, product_name=None, - uname_arch=None): +def get_smartos_environ(uname_version=None, product_name=None): uname = os.uname() - if uname_arch is None: - uname_arch = uname[4] - - if uname_arch.startswith("arm") or uname_arch == "aarch64": - return None # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but # report 'BrandZ virtual linux' as the kernel version diff --git a/cloudinit/util.py b/cloudinit/util.py index e5dd61a0..226628cc 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2227,10 +2227,17 @@ def read_dmi_data(key): If all of the above fail to find a value, None will be returned. """ + syspath_value = _read_dmi_syspath(key) if syspath_value is not None: return syspath_value + # running dmidecode can be problematic on some arches (LP: #1243287) + uname_arch = os.uname()[4] + if uname_arch.startswith("arm") or uname_arch == "aarch64": + LOG.debug("dmidata is not supported on %s", uname_arch) + return None + dmidecode_path = which('dmidecode') if dmidecode_path: return _call_dmidecode(key, dmidecode_path) diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index 8936a1e3..f5d2ef35 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -15,68 +15,58 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import re - -from six.moves.urllib_parse import urlparse +import json from cloudinit import helpers from cloudinit import settings from cloudinit.sources import DataSourceDigitalOcean from .. import helpers as test_helpers +from ..helpers import HttprettyTestCase httpretty = test_helpers.import_httpretty() -# Abbreviated for the test -DO_INDEX = """id - hostname - user-data - vendor-data - public-keys - region""" - -DO_MULTIPLE_KEYS = """ssh-rsa AAAAB3NzaC1yc2EAAAA... neal@digitalocean.com - ssh-rsa AAAAB3NzaC1yc2EAAAA... neal2@digitalocean.com""" -DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... neal@digitalocean.com" +DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"] +DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... 
test@do.co" DO_META = { - '': DO_INDEX, - 'user-data': '#!/bin/bash\necho "user-data"', - 'vendor-data': '#!/bin/bash\necho "vendor-data"', - 'public-keys': DO_SINGLE_KEY, + 'user_data': 'user_data_here', + 'vendor_data': 'vendor_data_here', + 'public_keys': DO_SINGLE_KEY, 'region': 'nyc3', 'id': '2000000', 'hostname': 'cloudinit-test', } -MD_URL_RE = re.compile(r'http://169.254.169.254/metadata/v1/.*') +MD_URL = 'http://169.254.169.254/metadata/v1.json' + + +def _mock_dmi(): + return (True, DO_META.get('id')) def _request_callback(method, uri, headers): - url_path = urlparse(uri).path - if url_path.startswith('/metadata/v1/'): - path = url_path.split('/metadata/v1/')[1:][0] - else: - path = None - if path in DO_META: - return (200, headers, DO_META.get(path)) - else: - return (404, headers, '') + return (200, headers, json.dumps(DO_META)) -class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase): +class TestDataSourceDigitalOcean(HttprettyTestCase): + """ + Test reading the meta-data + """ def setUp(self): self.ds = DataSourceDigitalOcean.DataSourceDigitalOcean( settings.CFG_BUILTIN, None, helpers.Paths({})) + self.ds._get_sysinfo = _mock_dmi super(TestDataSourceDigitalOcean, self).setUp() @httpretty.activate def test_connection(self): httpretty.register_uri( - httpretty.GET, MD_URL_RE, - body=_request_callback) + httpretty.GET, MD_URL, + body=json.dumps(DO_META)) success = self.ds.get_data() self.assertTrue(success) @@ -84,14 +74,14 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase): @httpretty.activate def test_metadata(self): httpretty.register_uri( - httpretty.GET, MD_URL_RE, + httpretty.GET, MD_URL, body=_request_callback) self.ds.get_data() - self.assertEqual(DO_META.get('user-data'), + self.assertEqual(DO_META.get('user_data'), self.ds.get_userdata_raw()) - self.assertEqual(DO_META.get('vendor-data'), + self.assertEqual(DO_META.get('vendor_data'), self.ds.get_vendordata_raw()) self.assertEqual(DO_META.get('region'), @@ -103,11 +93,8 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase): self.assertEqual(DO_META.get('hostname'), self.ds.get_hostname()) - self.assertEqual('http://mirrors.digitalocean.com/', - self.ds.get_package_mirror_info()) - # Single key - self.assertEqual([DO_META.get('public-keys')], + self.assertEqual([DO_META.get('public_keys')], self.ds.get_public_ssh_keys()) self.assertIsInstance(self.ds.get_public_ssh_keys(), list) @@ -116,12 +103,12 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase): def test_multiple_ssh_keys(self): DO_META['public_keys'] = DO_MULTIPLE_KEYS httpretty.register_uri( - httpretty.GET, MD_URL_RE, + httpretty.GET, MD_URL, body=_request_callback) self.ds.get_data() # Multiple keys - self.assertEqual(DO_META.get('public-keys').splitlines(), + self.assertEqual(DO_META.get('public_keys'), self.ds.get_public_ssh_keys()) self.assertIsInstance(self.ds.get_public_ssh_keys(), list) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 37a984ac..73369cd3 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -371,8 +371,30 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase): self._create_sysfs_parent_directory() expected_dmi_value = 'dmidecode-used' self._configure_dmidecode_return('use-dmidecode', expected_dmi_value) - self.assertEqual(expected_dmi_value, - util.read_dmi_data('use-dmidecode')) + with mock.patch("cloudinit.util.os.uname") as m_uname: + m_uname.return_value = ('x-sysname', 'x-nodename', + 'x-release', 'x-version', 'x86_64') + 
self.assertEqual(expected_dmi_value,
+                             util.read_dmi_data('use-dmidecode'))
+
+    def test_dmidecode_not_used_on_arm(self):
+        self.patch_mapping({})
+        self._create_sysfs_parent_directory()
+        dmi_val = 'from-dmidecode'
+        dmi_name = 'use-dmidecode'
+        self._configure_dmidecode_return(dmi_name, dmi_val)
+
+        expected = {'armel': None, 'aarch64': None, 'x86_64': dmi_val}
+        found = {}
+        # we do not run the 'dmi-decode' binary on some arches
+        # verify that anything requested that is not in the sysfs dir
+        # will return None on those arches.
+        with mock.patch("cloudinit.util.os.uname") as m_uname:
+            for arch in expected:
+                m_uname.return_value = ('x-sysname', 'x-nodename',
+                                        'x-release', 'x-version', arch)
+                found[arch] = util.read_dmi_data(dmi_name)
+        self.assertEqual(expected, found)

     def test_none_returned_if_neither_source_has_data(self):
         self.patch_mapping({})
-- 
cgit v1.2.3


From 648dbbf6b090c81e989f1ab70bf99f4de16a6a70 Mon Sep 17 00:00:00 2001
From: Brent Baude
Date: Wed, 10 Aug 2016 16:36:49 -0600
Subject: Get Azure endpoint server from DHCP client

It is more efficient and cross-distribution safe to use the hooks
function from dhclient to obtain the Azure endpoint server (DHCP option
245). This is done by providing shell scripts that are called by the
hooks infrastructure of both dhclient and NetworkManager. The hooks
then invoke 'cloud-init dhclient-hook' that maintains json data with
the dhclient options in /run/cloud-init/dhclient.hooks/<interface>.json.
The azure helper then pulls the value from
/run/cloud-init/dhclient.hooks/<interface>.json file(s). If that file
does not exist or the value is not present, it will then fall back to
the original method of scraping the dhcp client lease file.
---
 cloudinit/atomic_helper.py                         | 25 ++++++
 cloudinit/cmd/main.py                              | 45 ++++++----
 cloudinit/dhclient_hook.py                         | 50 +++++++++++
 cloudinit/sources/DataSourceAzure.py               | 15 ++--
 cloudinit/sources/helpers/azure.py                 | 99 +++++++++++++++++++---
 config/cloud.cfg                                   |  6 ++
 doc/sources/azure/README.rst                       | 32 ++++++-
 setup.py                                           |  2 +
 .../unittests/test_datasource/test_azure_helper.py | 15 +++-
 tools/hook-dhclient                                |  9 ++
 tools/hook-network-manager                         |  9 ++
 tools/hook-rhel.sh                                 | 12 +++
 12 files changed, 277 insertions(+), 42 deletions(-)
 create mode 100644 cloudinit/atomic_helper.py
 create mode 100644 cloudinit/dhclient_hook.py
 create mode 100755 tools/hook-dhclient
 create mode 100755 tools/hook-network-manager
 create mode 100755 tools/hook-rhel.sh

(limited to 'cloudinit/sources')

diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py
new file mode 100644
index 00000000..15319f71
--- /dev/null
+++ b/cloudinit/atomic_helper.py
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+# vi: ts=4 expandtab
+
+import json
+import os
+import tempfile
+
+
+def atomic_write_file(path, content, mode='w'):
+    tf = None
+    try:
+        tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(path),
+                                         delete=False, mode=mode)
+        tf.write(content)
+        tf.close()
+        os.rename(tf.name, path)
+    except Exception as e:
+        if tf is not None:
+            os.unlink(tf.name)
+        raise e
+
+
+def atomic_write_json(path, data):
+    return atomic_write_file(path, json.dumps(data, indent=1,
+                                              sort_keys=True) + "\n")
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 63621c1d..ba22b168 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -25,7 +25,6 @@ import argparse
 import json
 import os
 import sys
-import tempfile
 import time
 import traceback

@@ -47,6 +46,10 @@ from cloudinit.reporting import events

 from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
                                 CLOUD_CONFIG)
+from cloudinit.atomic_helper import atomic_write_json
+
+from cloudinit.dhclient_hook import LogDhclient
+

 # Pretty little cheetah formatted welcome message template
 WELCOME_MSG_TPL = ("Cloud-init v. ${version} running '${action}' at "
@@ -452,22 +455,10 @@ def main_single(name, args):
         return 0


-def atomic_write_file(path, content, mode='w'):
-    tf = None
-    try:
-        tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(path),
-                                         delete=False, mode=mode)
-        tf.write(content)
-        tf.close()
-        os.rename(tf.name, path)
-    except Exception as e:
-        if tf is not None:
-            os.unlink(tf.name)
-        raise e
-
-
-def atomic_write_json(path, data):
-    return atomic_write_file(path, json.dumps(data, indent=1) + "\n")
+def dhclient_hook(name, args):
+    record = LogDhclient(args)
+    record.check_hooks_dir()
+    record.record()


 def status_wrapper(name, args, data_d=None, link_d=None):
@@ -627,7 +618,6 @@ def main(sysv_args=None):
     # This subcommand allows you to run a single module
     parser_single = subparsers.add_parser('single',
                                           help=('run a single module '))
-    parser_single.set_defaults(action=('single', main_single))
     parser_single.add_argument("--name", '-n', action="store",
                                help="module name to run",
                                required=True)
@@ -644,6 +634,16 @@ def main(sysv_args=None):
                                      ' pass to this module'))
     parser_single.set_defaults(action=('single', main_single))

+    parser_dhclient = subparsers.add_parser('dhclient-hook',
+                                            help=('run the dhclient hook '
+                                                  'to record network info'))
+    parser_dhclient.add_argument("net_action",
+                                 help=('action taken on the interface'))
+    parser_dhclient.add_argument("net_interface",
+                                 help=('the network interface being acted'
+                                       ' upon'))
+    parser_dhclient.set_defaults(action=('dhclient_hook', dhclient_hook))
+
     args = parser.parse_args(args=sysv_args)

     try:
@@ -677,9 +677,18 @@ def main(sysv_args=None):
                           "running single module %s" % args.name)
             report_on = args.report

+    elif name == 'dhclient_hook':
+        rname, rdesc = ("dhclient-hook",
+                        "running dhclient-hook module")
+
     args.reporter = events.ReportEventStack(
         rname, rdesc, reporting_enabled=report_on)
+
     with args.reporter:
         return util.log_time(
             logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
             get_uptime=True, func=functor, args=(name, args))
+
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/cloudinit/dhclient_hook.py b/cloudinit/dhclient_hook.py
new file mode 100644
index 00000000..9dcbe39c
--- /dev/null
+++ b/cloudinit/dhclient_hook.py
@@ -0,0 +1,50 @@
+#!/usr/bin/python
+# vi: ts=4 expandtab
+
+import os
+
+from cloudinit.atomic_helper import atomic_write_json
+from cloudinit import log as logging
+from cloudinit import stages
+
+LOG = logging.getLogger(__name__)
+
+
+class LogDhclient(object):
+
+    def __init__(self, cli_args):
+        self.hooks_dir = self._get_hooks_dir()
+        self.net_interface = cli_args.net_interface
+        self.net_action = cli_args.net_action
+        self.hook_file = os.path.join(self.hooks_dir,
+                                      self.net_interface + ".json")
+
+    @staticmethod
+    def _get_hooks_dir():
+        i = stages.Init()
+        return os.path.join(i.paths.get_runpath(), 'dhclient.hooks')
+
+    def check_hooks_dir(self):
+        if not os.path.exists(self.hooks_dir):
+            os.makedirs(self.hooks_dir)
+        else:
+            # If the action is down and the json file exists, we need to
+            # delete the file
+            if self.net_action == 'down' and os.path.exists(self.hook_file):
+                os.remove(self.hook_file)
+
+    @staticmethod
+    def get_vals(info):
+        new_info = {}
+        for k, v in info.items():
+            if k.startswith("DHCP4_") or k.startswith("new_"):
+                key = (k.replace('DHCP4_', '').replace('new_', '')).lower()
+                new_info[key]
= v + return new_info + + def record(self): + envs = os.environ + if self.hook_file is None: + return + atomic_write_json(self.hook_file, self.get_vals(envs)) + LOG.debug("Wrote dhclient options in %s", self.hook_file) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 8c7e8673..a251fe01 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -20,18 +20,17 @@ import base64 import contextlib import crypt import fnmatch +from functools import partial import os import os.path import time -import xml.etree.ElementTree as ET - from xml.dom import minidom - -from cloudinit.sources.helpers.azure import get_metadata_from_fabric +import xml.etree.ElementTree as ET from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import sources +from cloudinit.sources.helpers.azure import get_metadata_from_fabric from cloudinit import util LOG = logging.getLogger(__name__) @@ -107,6 +106,8 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): class DataSourceAzureNet(sources.DataSource): + FALLBACK_LEASE = '/var/lib/dhcp/dhclient.eth0.leases' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'azure') @@ -115,6 +116,8 @@ class DataSourceAzureNet(sources.DataSource): self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]) + self.dhclient_lease_file = self.paths.cfgs.get('dhclient_lease', + self.FALLBACK_LEASE) def __str__(self): root = sources.DataSource.__str__(self) @@ -226,7 +229,9 @@ class DataSourceAzureNet(sources.DataSource): write_files(ddir, files, dirmode=0o700) if self.ds_cfg['agent_command'] == '__builtin__': - metadata_func = get_metadata_from_fabric + metadata_func = partial(get_metadata_from_fabric, + fallback_lease_file=self. 
+                                    dhclient_lease_file)
         else:
             metadata_func = self.get_metadata_from_agent
         try:
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 63ccf10e..6e43440f 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -1,3 +1,4 @@
+import json
 import logging
 import os
 import re
@@ -6,6 +7,7 @@ import struct
 import tempfile
 import time

+from cloudinit import stages
 from contextlib import contextmanager
 from xml.etree import ElementTree

@@ -187,19 +189,32 @@ class WALinuxAgentShim(object):
         ' ',
         ''])

-    def __init__(self):
+    def __init__(self, fallback_lease_file=None):
         LOG.debug('WALinuxAgentShim instantiated...')
-        self.endpoint = self.find_endpoint()
+        self.dhcpoptions = None
+        self._endpoint = None
         self.openssl_manager = None
         self.values = {}
+        self.lease_file = fallback_lease_file

     def clean_up(self):
         if self.openssl_manager is not None:
             self.openssl_manager.clean_up()

     @staticmethod
-    def get_ip_from_lease_value(lease_value):
-        unescaped_value = lease_value.replace('\\', '')
+    def _get_hooks_dir():
+        _paths = stages.Init()
+        return os.path.join(_paths.paths.get_runpath(), "dhclient.hooks")
+
+    @property
+    def endpoint(self):
+        if self._endpoint is None:
+            self._endpoint = self.find_endpoint(self.lease_file)
+        return self._endpoint
+
+    @staticmethod
+    def get_ip_from_lease_value(fallback_lease_value):
+        unescaped_value = fallback_lease_value.replace('\\', '')
         if len(unescaped_value) > 4:
             hex_string = ''
             for hex_pair in unescaped_value.split(':'):
@@ -213,15 +228,75 @@ class WALinuxAgentShim(object):
         return socket.inet_ntoa(packed_bytes)

     @staticmethod
-    def find_endpoint():
-        LOG.debug('Finding Azure endpoint...')
-        content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
-        value = None
+    def _get_value_from_leases_file(fallback_lease_file):
+        leases = []
+        content = util.load_file(fallback_lease_file)
+        LOG.debug("content is %s", content)
         for line in content.splitlines():
             if 'unknown-245' in line:
-                value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
+                # Example line from Ubuntu
+                # option unknown-245 a8:3f:81:10;
+                leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"'))
+        # Return the "most recent" one in the list
+        if len(leases) < 1:
+            return None
+        else:
+            return leases[-1]
+
+    @staticmethod
+    def _load_dhclient_json():
+        dhcp_options = {}
+        hooks_dir = WALinuxAgentShim._get_hooks_dir()
+        if not os.path.exists(hooks_dir):
+            LOG.debug("%s not found.", hooks_dir)
+            return None
+        hook_files = [os.path.join(hooks_dir, x)
+                      for x in os.listdir(hooks_dir)]
+        for hook_file in hook_files:
+            try:
+                name = os.path.basename(hook_file).replace('.json', '')
+                dhcp_options[name] = json.loads(util.load_file(hook_file))
+            except ValueError:
+                raise ValueError("%s is not valid JSON data" % hook_file)
+        return dhcp_options
+
+    @staticmethod
+    def _get_value_from_dhcpoptions(dhcp_options):
+        if dhcp_options is None:
+            return None
+        # the MS endpoint server is given to us as DHCP option 245
+        _value = None
+        for interface in dhcp_options:
+            _value = dhcp_options[interface].get('unknown_245', None)
+            if _value is not None:
+                LOG.debug("Endpoint server found in dhclient options")
+                break
+        return _value
+
+    @staticmethod
+    def find_endpoint(fallback_lease_file=None):
+        LOG.debug('Finding Azure endpoint...')
+        value = None
+        # Option-245 stored in /run/cloud-init/dhclient.hooks/<interface>.json
+        # by a dhclient exit hook that calls cloud-init-dhclient-hook
+        dhcp_options = WALinuxAgentShim._load_dhclient_json()
+        value = 
WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
         if value is None:
-            raise ValueError('No endpoint found in DHCP config.')
+            # Fallback and check the leases file if unsuccessful
+            LOG.debug("Unable to find endpoint in dhclient logs. "
+                      "Falling back to check lease files")
+            if fallback_lease_file is None:
+                LOG.warn("No fallback lease file was specified.")
+                value = None
+            else:
+                LOG.debug("Looking for endpoint in lease file %s",
+                          fallback_lease_file)
+                value = WALinuxAgentShim._get_value_from_leases_file(
+                    fallback_lease_file)
+
+        if value is None:
+            raise ValueError('No endpoint found.')
+
         endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
         LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
         return endpoint_ip_address
@@ -271,8 +346,8 @@ class WALinuxAgentShim(object):
         LOG.info('Reported ready to Azure fabric.')


-def get_metadata_from_fabric():
-    shim = WALinuxAgentShim()
+def get_metadata_from_fabric(fallback_lease_file=None):
+    shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file)
     try:
         return shim.register_with_azure_and_fetch_data()
     finally:
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 2d7fb473..93ef3423 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -98,6 +98,7 @@ system_info:
       cloud_dir: /var/lib/cloud/
       templates_dir: /etc/cloud/templates/
       upstart_dir: /etc/init/
+      dhclient_lease:
    package_mirrors:
      - arches: [i386, amd64]
        failsafe:
@@ -114,3 +115,8 @@ system_info:
          primary: http://ports.ubuntu.com/ubuntu-ports
          security: http://ports.ubuntu.com/ubuntu-ports
    ssh_svcname: ssh
+datasource:
+  Azure:
+    set_hostname: False
+    agent_command: __builtin__
+
diff --git a/doc/sources/azure/README.rst b/doc/sources/azure/README.rst
index 8239d1fa..48f3cc7a 100644
--- a/doc/sources/azure/README.rst
+++ b/doc/sources/azure/README.rst
@@ -9,10 +9,34 @@ Azure Platform
 The azure cloud-platform provides initial data to an instance via an attached
 CD formated in UDF.  That CD contains a 'ovf-env.xml' file that provides some
 information.  Additional information is obtained via interaction with the
-"endpoint".  The ip address of the endpoint is advertised to the instance
-inside of dhcp option 245.  On ubuntu, that can be seen in
-/var/lib/dhcp/dhclient.eth0.leases as a colon delimited hex value (example:
-``option unknown-245 64:41:60:82;`` is 100.65.96.130)
+"endpoint".
+
+To find the endpoint, we now leverage the dhcp client's ability to log its
+known values on exit.  The endpoint server is special DHCP option 245.
+Depending on your networking stack, this can be done
+by calling a script in /etc/dhcp/dhclient-exit-hooks or a file in
+/etc/NetworkManager/dispatcher.d.  Both of these call a sub-command
+'dhclient_hook' of cloud-init itself. This sub-command will write the client
+information in json format to /run/cloud-init/dhclient.hooks/<interface>.json.
+
+In order for cloud-init to leverage this method to find the endpoint, the
+cloud.cfg file must contain:
+
+datasource:
+  Azure:
+    set_hostname: False
+    agent_command: __builtin__
+
+If those files are not available, the fallback is to check the leases file
+for the endpoint server (again option 245).
+
+You can define the path to the lease file with the 'dhclient_lease' configuration
+value under system_info: and paths:. For example:
+
+  dhclient_lease: /var/lib/dhcp/dhclient.eth0.leases
+
+If no configuration value is provided, the dhclient_lease value will fallback to
+/var/lib/dhcp/dhclient.eth0.leases.
walinuxagent ------------ diff --git a/setup.py b/setup.py index 4abbb67e..bbadd7bf 100755 --- a/setup.py +++ b/setup.py @@ -176,6 +176,8 @@ else: (ETC + '/cloud', glob('config/*.cfg')), (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), (ETC + '/cloud/templates', glob('templates/*')), + (ETC + '/NetworkManager/dispatcher.d/', ['tools/hook-network-manager']), + (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init', 'tools/write-ssh-key-fingerprints']), (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 65202ff0..64523e16 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -54,13 +54,17 @@ class TestFindEndpoint(TestCase): self.load_file = patches.enter_context( mock.patch.object(azure_helper.util, 'load_file')) + self.dhcp_options = patches.enter_context( + mock.patch.object(azure_helper.WALinuxAgentShim, + '_load_dhclient_json')) + def test_missing_file(self): - self.load_file.side_effect = IOError - self.assertRaises(IOError, + self.assertRaises(ValueError, azure_helper.WALinuxAgentShim.find_endpoint) def test_missing_special_azure_line(self): self.load_file.return_value = '' + self.dhcp_options.return_value = {'eth0': {'key': 'value'}} self.assertRaises(ValueError, azure_helper.WALinuxAgentShim.find_endpoint) @@ -72,13 +76,18 @@ class TestFindEndpoint(TestCase): ' option unknown-245 {0};'.format(encoded_address), '}']) + def test_from_dhcp_client(self): + self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}} + self.assertEqual('5.4.3.2', + azure_helper.WALinuxAgentShim.find_endpoint(None)) + def test_latest_lease_used(self): encoded_addresses = ['5:4:3:2', '4:3:2:1'] file_content = '\n'.join([self._build_lease_content(encoded_address) for encoded_address in encoded_addresses]) self.load_file.return_value = file_content self.assertEqual(encoded_addresses[-1].replace(':', '.'), - azure_helper.WALinuxAgentShim.find_endpoint()) + azure_helper.WALinuxAgentShim.find_endpoint("foobar")) class TestExtractIpAddressFromLeaseValue(TestCase): diff --git a/tools/hook-dhclient b/tools/hook-dhclient new file mode 100755 index 00000000..d099979a --- /dev/null +++ b/tools/hook-dhclient @@ -0,0 +1,9 @@ +#!/bin/sh +# This script writes DHCP lease information into the cloud-init run directory +# It is sourced, not executed. For more information see dhclient-script(8). 
+
+case "$reason" in
+    BOUND) cloud-init dhclient-hook up "$interface";;
+    DOWN|RELEASE|REBOOT|STOP|EXPIRE)
+        cloud-init dhclient-hook down "$interface";;
+esac
diff --git a/tools/hook-network-manager b/tools/hook-network-manager
new file mode 100755
index 00000000..447b134e
--- /dev/null
+++ b/tools/hook-network-manager
@@ -0,0 +1,9 @@
+#!/bin/sh
+# This script hooks into NetworkManager(8) via its scripts
+# arguments are 'interface-name' and 'action'
+#
+
+case "$1:$2" in
+    *:up) exec cloud-init dhclient-hook up "$1";;
+    *:down) exec cloud-init dhclient-hook down "$1";;
+esac
diff --git a/tools/hook-rhel.sh b/tools/hook-rhel.sh
new file mode 100755
index 00000000..5e963a89
--- /dev/null
+++ b/tools/hook-rhel.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# Current versions of RHEL and CentOS do not honor the directory
+# /etc/dhcp/dhclient-exit-hooks.d so this file can be placed in
+# /etc/dhcp/dhclient.d instead
+
+hook-rhel_config(){
+    cloud-init dhclient-hook up "$interface"
+}
+
+hook-rhel_restore(){
+    cloud-init dhclient-hook down "$interface"
+}
-- 
cgit v1.2.3


From 64522efe710faf6fa1615dbb60a2fc4cc8a7c278 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 18 Aug 2016 12:25:29 -0400
Subject: azure dhclient-hook cleanups

This adds some function to the generator to maintain the presence of a
flag file '/run/cloud-init/enabled' indicating that cloud-init is
enabled.

Then, only run the dhclient hooks if on Azure and cloud-init is enabled.

The test for is_azure currently only checks to see that the board vendor
is Microsoft, not actually that we are on azure. Running should not be
harmful anywhere, other than slowing down dhclient.

The value of this additional code is that then dhclient having run
does not tax the system with the load of cloud-init.

Additionally, some changes to config are done here.
 * rename 'dhclient_leases' to 'dhclient_lease_file'
 * move that to the datasource config (datasource/Azure/dhclient_lease_file)

Also, it removes the config in config/cloud.cfg that set agent_command
to __builtin__.  This means that by default cloud-init still needs the
agent installed.  The suggested follow-on improvement is to use
__builtin__ if there is no walinux-agent installed.
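A minimal sketch of that follow-on (illustrative only; the 'waagent'
binary name and the use of util.which() here are assumptions, not part
of this commit):

    # hypothetical: prefer the builtin shim when no agent binary exists
    agent_cmd = self.ds_cfg['agent_command']
    if agent_cmd != '__builtin__' and util.which('waagent') is None:
        LOG.debug('walinux-agent not found; using builtin fabric shim')
        agent_cmd = '__builtin__'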
--- cloudinit/sources/DataSourceAzure.py | 13 +++++++------ cloudinit/sources/helpers/azure.py | 3 ++- config/cloud.cfg | 6 ------ doc/sources/azure/README.rst | 9 +++------ systemd/cloud-init-generator | 5 +++++ tools/hook-dhclient | 25 ++++++++++++++++++++----- tools/hook-network-manager | 23 +++++++++++++++++++---- tools/hook-rhel.sh | 15 +++++++++++++++ 8 files changed, 71 insertions(+), 28 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index a251fe01..dbc2bb68 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -54,6 +54,7 @@ BUILTIN_DS_CONFIG = { 'hostname_command': 'hostname', }, 'disk_aliases': {'ephemeral0': '/dev/sdb'}, + 'dhclient_lease_file': '/var/lib/dhcp/dhclient.eth0.leases', } BUILTIN_CLOUD_CONFIG = { @@ -106,8 +107,6 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): class DataSourceAzureNet(sources.DataSource): - FALLBACK_LEASE = '/var/lib/dhcp/dhclient.eth0.leases' - def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'azure') @@ -116,8 +115,7 @@ class DataSourceAzureNet(sources.DataSource): self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]) - self.dhclient_lease_file = self.paths.cfgs.get('dhclient_lease', - self.FALLBACK_LEASE) + self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file') def __str__(self): root = sources.DataSource.__str__(self) @@ -126,6 +124,9 @@ class DataSourceAzureNet(sources.DataSource): def get_metadata_from_agent(self): temp_hostname = self.metadata.get('local-hostname') hostname_command = self.ds_cfg['hostname_bounce']['hostname_command'] + agent_cmd = self.ds_cfg['agent_command'] + LOG.debug("Getting metadata via agent. 
hostname=%s cmd=%s", + temp_hostname, agent_cmd) with temporary_hostname(temp_hostname, self.ds_cfg, hostname_command=hostname_command) \ as previous_hostname: @@ -141,7 +142,7 @@ class DataSourceAzureNet(sources.DataSource): util.logexc(LOG, "handling set_hostname failed") try: - invoke_agent(self.ds_cfg['agent_command']) + invoke_agent(agent_cmd) except util.ProcessExecutionError: # claim the datasource even if the command failed util.logexc(LOG, "agent command '%s' failed.", @@ -234,13 +235,13 @@ class DataSourceAzureNet(sources.DataSource): dhclient_lease_file) else: metadata_func = self.get_metadata_from_agent + try: fabric_data = metadata_func() except Exception as exc: LOG.info("Error communicating with Azure fabric; assume we aren't" " on Azure.", exc_info=True) return False - self.metadata['instance-id'] = util.read_dmi_data('system-uuid') self.metadata.update(fabric_data) diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 6e43440f..689ed4cc 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -190,7 +190,8 @@ class WALinuxAgentShim(object): '']) def __init__(self, fallback_lease_file=None): - LOG.debug('WALinuxAgentShim instantiated...') + LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s', + fallback_lease_file) self.dhcpoptions = None self._endpoint = None self.openssl_manager = None diff --git a/config/cloud.cfg b/config/cloud.cfg index 93ef3423..2d7fb473 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -98,7 +98,6 @@ system_info: cloud_dir: /var/lib/cloud/ templates_dir: /etc/cloud/templates/ upstart_dir: /etc/init/ - dhclient_lease: package_mirrors: - arches: [i386, amd64] failsafe: @@ -115,8 +114,3 @@ system_info: primary: http://ports.ubuntu.com/ubuntu-ports security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh -datasource: - Azure: - set_hostname: False - agent_command: __builtin__ - diff --git a/doc/sources/azure/README.rst b/doc/sources/azure/README.rst index 48f3cc7a..ec7d9e84 100644 --- a/doc/sources/azure/README.rst +++ b/doc/sources/azure/README.rst @@ -30,13 +30,10 @@ datasource: If those files are not available, the fallback is to check the leases file for the endpoint server (again option 245). -You can define the path to the lease file with the 'dhclient_lease' configuration -value under system_info: and paths:. For example: +You can define the path to the lease file with the 'dhclient_lease_file' +configuration. The default value is /var/lib/dhcp/dhclient.eth0.leases. - dhclient_lease: /var/lib/dhcp/dhclient.eth0.leases - -If no configuration value is provided, the dhclient_lease value will fallback to -/var/lib/dhcp/dhclient.eth0.leases. 
+ dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases walinuxagent ------------ diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator index 2d319695..fedb6309 100755 --- a/systemd/cloud-init-generator +++ b/systemd/cloud-init-generator @@ -6,6 +6,7 @@ DEBUG_LEVEL=1 LOG_D="/run/cloud-init" ENABLE="enabled" DISABLE="disabled" +RUN_ENABLED_FILE="$LOG_D/$ENABLE" CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target" CLOUD_TARGET_NAME="cloud-init.target" # lxc sets 'container', but lets make that explicitly a global @@ -107,6 +108,7 @@ main() { "ln $CLOUD_SYSTEM_TARGET $link_path" fi fi + : > "$RUN_ENABLED_FILE" elif [ "$result" = "$DISABLE" ]; then if [ -f "$link_path" ]; then if rm -f "$link_path"; then @@ -118,6 +120,9 @@ main() { else debug 1 "already disabled: no change needed [no $link_path]" fi + if [ -e "$RUN_ENABLED_FILE" ]; then + rm -f "$RUN_ENABLED_FILE" + fi else debug 0 "unexpected result '$result'" ret=3 diff --git a/tools/hook-dhclient b/tools/hook-dhclient index d099979a..6a4626c6 100755 --- a/tools/hook-dhclient +++ b/tools/hook-dhclient @@ -1,9 +1,24 @@ #!/bin/sh # This script writes DHCP lease information into the cloud-init run directory # It is sourced, not executed. For more information see dhclient-script(8). +is_azure() { + local dmi_path="/sys/class/dmi/id/board_vendor" vendor="" + if [ -e "$dmi_path" ] && read vendor < "$dmi_path"; then + [ "$vendor" = "Microsoft Corporation" ] && return 0 + fi + return 1 +} -case "$reason" in - BOUND) cloud-init dhclient-hook up "$interface";; - DOWN|RELEASE|REBOOT|STOP|EXPIRE) - cloud-init dhclient-hook down "$interface";; -esac +is_enabled() { + # only execute hooks if cloud-init is enabled and on azure + [ -e /run/cloud-init/enabled ] || return 1 + is_azure +} + +if is_enabled; then + case "$reason" in + BOUND) cloud-init dhclient-hook up "$interface";; + DOWN|RELEASE|REBOOT|STOP|EXPIRE) + cloud-init dhclient-hook down "$interface";; + esac +fi diff --git a/tools/hook-network-manager b/tools/hook-network-manager index 447b134e..98a36c8a 100755 --- a/tools/hook-network-manager +++ b/tools/hook-network-manager @@ -2,8 +2,23 @@ # This script hooks into NetworkManager(8) via its scripts # arguments are 'interface-name' and 'action' # +is_azure() { + local dmi_path="/sys/class/dmi/id/board_vendor" vendor="" + if [ -e "$dmi_path" ] && read vendor < "$dmi_path"; then + [ "$vendor" = "Microsoft Corporation" ] && return 0 + fi + return 1 +} -case "$1:$2" in - *:up) exec cloud-init dhclient-hook up "$1";; - *:down) exec cloud-init dhclient-hook down "$1";; -esac +is_enabled() { + # only execute hooks if cloud-init is enabled and on azure + [ -e /run/cloud-init/enabled ] || return 1 + is_azure +} + +if is_enabled; then + case "$1:$2" in + *:up) exec cloud-init dhclient-hook up "$1";; + *:down) exec cloud-init dhclient-hook down "$1";; + esac +fi diff --git a/tools/hook-rhel.sh b/tools/hook-rhel.sh index 5e963a89..8232414c 100755 --- a/tools/hook-rhel.sh +++ b/tools/hook-rhel.sh @@ -2,11 +2,26 @@ # Current versions of RHEL and CentOS do not honor the directory # /etc/dhcp/dhclient-exit-hooks.d so this file can be placed in # /etc/dhcp/dhclient.d instead +is_azure() { + local dmi_path="/sys/class/dmi/id/board_vendor" vendor="" + if [ -e "$dmi_path" ] && read vendor < "$dmi_path"; then + [ "$vendor" = "Microsoft Corporation" ] && return 0 + fi + return 1 +} + +is_enabled() { + # only execute hooks if cloud-init is enabled and on azure + [ -e /run/cloud-init/enabled ] || return 1 + is_azure +} hook-rhel_config(){ + 
is_enabled || return 0
     cloud-init dhclient-hook up "$interface"
 }

 hook-rhel_restore(){
+    is_enabled || return 0
     cloud-init dhclient-hook down "$interface"
 }
-- 
cgit v1.2.3


From 6a8aa46863f1a4a5f3c0d37d34fd02d57790be01 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 3 Aug 2016 14:55:18 -0400
Subject: Generate a dummy bond name for OpenStack

The OpenStack network_data.json does not provide a name for bond links.
This change makes it so a dummy one is generated and used instead to
satisfy cloud-init, which does require one.

In order to write the correct link (underlying 'link' names) for the
bonds, we maintain a list of info by ids so we can easily get the
right device name.

Also:
 * add a vlan test case that similarly references an id rather than name.
 * make bond interfaces auto

LP: #1605749
---
 cloudinit/net/eni.py                               |   2 +-
 cloudinit/sources/helpers/openstack.py             |  58 ++++++++++--
 .../unittests/test_datasource/test_configdrive.py  | 105 +++++++++++++++++++++
 3 files changed, 156 insertions(+), 9 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index eff5b924..cd533ddb 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -399,7 +399,7 @@ class Renderer(renderer.Renderer):
         else:
             # ifenslave docs say to auto the slave devices
             lines = []
-            if 'bond-master' in iface:
+            if 'bond-master' in iface or 'bond-slaves' in iface:
                 lines.append("auto {name}".format(**iface))
             lines.append("iface {name} {inet} {mode}".format(**iface))
             lines.extend(_iface_add_attrs(iface, index=0))
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 84322e0e..a5a2a1d6 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -539,6 +539,10 @@ def convert_net_json(network_json=None, known_macs=None):
     networks = network_json.get('networks', [])
     services = network_json.get('services', [])

+    link_updates = []
+    link_id_info = {}
+    bond_name_fmt = "bond%d"
+    bond_number = 0
     config = []
     for link in links:
         subnets = []
@@ -551,6 +555,13 @@ def convert_net_json(network_json=None, known_macs=None):
         if 'name' in link:
             cfg['name'] = link['name']

+        if link.get('ethernet_mac_address'):
+            link_id_info[link['id']] = link.get('ethernet_mac_address')
+
+        curinfo = {'name': cfg.get('name'),
+                   'mac': link.get('ethernet_mac_address'),
+                   'id': link['id'], 'type': link['type']}
+
         for network in [n for n in networks
                         if n['link'] == link['id']]:
             subnet = dict((k, v) for k, v in network.items()
@@ -582,31 +593,56 @@ def convert_net_json(network_json=None, known_macs=None):
                     continue
                 elif k.startswith('bond'):
                     params.update({k: v})
-            cfg.update({
-                'bond_interfaces': copy.deepcopy(link['bond_links']),
-                'params': params,
-            })
+
+            # openstack does not provide a name for the bond.
+            # they do provide an 'id', but that is possibly nonsensical.
+            # so we just create our own name.
+            link_name = bond_name_fmt % bond_number
+            bond_number += 1
+
+            # bond_links reference links by their id, but we need to add
+            # to the network config by their nic name.
+            # store that in link_updates, and update these later.
+ link_updates.append( + (cfg, 'bond_interfaces', '%s', + copy.deepcopy(link['bond_links'])) + ) + cfg.update({'params': params, 'name': link_name}) + + curinfo['name'] = link_name elif link['type'] in ['vlan']: + name = "%s.%s" % (link['vlan_link'], link['vlan_id']) cfg.update({ - 'name': "%s.%s" % (link['vlan_link'], - link['vlan_id']), - 'vlan_link': link['vlan_link'], + 'name': name, 'vlan_id': link['vlan_id'], 'mac_address': link['vlan_mac_address'], }) + link_updates.append((cfg, 'vlan_link', '%s', link['vlan_link'])) + link_updates.append((cfg, 'name', "%%s.%s" % link['vlan_id'], + link['vlan_link'])) + curinfo.update({'mac': link['vlan_mac_address'], + 'name': name}) else: raise ValueError( 'Unknown network_data link type: %s' % link['type']) config.append(cfg) + link_id_info[curinfo['id']] = curinfo need_names = [d for d in config if d.get('type') == 'physical' and 'name' not in d] - if need_names: + if need_names or link_updates: if known_macs is None: known_macs = net.get_interfaces_by_mac() + # go through and fill out the link_id_info with names + for link_id, info in link_id_info.items(): + if info.get('name'): + continue + if info.get('mac') in known_macs: + info['name'] = known_macs[info['mac']] + for d in need_names: mac = d.get('mac_address') if not mac: @@ -615,6 +651,12 @@ def convert_net_json(network_json=None, known_macs=None): raise ValueError("Unable to find a system nic for %s" % d) d['name'] = known_macs[mac] + for cfg, key, fmt, target in link_updates: + if isinstance(target, (list, tuple)): + cfg[key] = [fmt % link_id_info[l]['name'] for l in target] + else: + cfg[key] = fmt % link_id_info[target]['name'] + for service in services: cfg = service cfg.update({'type': 'nameserver'}) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index d0269943..98ff97a7 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -137,12 +137,71 @@ NETWORK_DATA_3 = { ] } +NETWORK_DATA_BOND = { + "services": [ + {"type": "dns", "address": "1.1.1.191"}, + {"type": "dns", "address": "1.1.1.4"}, + ], + "networks": [ + {"id": "network2-ipv4", "ip_address": "2.2.2.13", + "link": "vlan2", "netmask": "255.255.255.248", + "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117", + "type": "ipv4", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "2.2.2.9"}]}, + {"id": "network3-ipv4", "ip_address": "10.0.1.5", + "link": "vlan3", "netmask": "255.255.255.248", + "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", + "type": "ipv4", + "routes": [{"netmask": "255.255.255.255", + "network": "192.168.1.0", "gateway": "10.0.1.1"}]} + ], + "links": [ + {"ethernet_mac_address": "0c:c4:7a:34:6e:3c", + "id": "eth0", "mtu": 1500, "type": "phy"}, + {"ethernet_mac_address": "0c:c4:7a:34:6e:3d", + "id": "eth1", "mtu": 1500, "type": "phy"}, + {"bond_links": ["eth0", "eth1"], + "bond_miimon": 100, "bond_mode": "4", + "bond_xmit_hash_policy": "layer3+4", + "ethernet_mac_address": "0c:c4:7a:34:6e:3c", + "id": "bond0", "type": "bond"}, + {"ethernet_mac_address": "fa:16:3e:b3:72:30", + "id": "vlan2", "type": "vlan", "vlan_id": 602, + "vlan_link": "bond0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, + {"ethernet_mac_address": "fa:16:3e:66:ab:a6", + "id": "vlan3", "type": "vlan", "vlan_id": 612, "vlan_link": "bond0", + "vlan_mac_address": "fa:16:3e:66:ab:a6"} + ] +} + +NETWORK_DATA_VLAN = { + "services": [{"type": "dns", "address": "1.1.1.191"}], + "networks": [ + 
{"id": "network1-ipv4", "ip_address": "10.0.1.5", + "link": "vlan1", "netmask": "255.255.255.248", + "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", + "type": "ipv4", + "routes": [{"netmask": "255.255.255.255", + "network": "192.168.1.0", "gateway": "10.0.1.1"}]} + ], + "links": [ + {"ethernet_mac_address": "fa:16:3e:69:b0:58", + "id": "eth0", "mtu": 1500, "type": "phy"}, + {"ethernet_mac_address": "fa:16:3e:b3:72:30", + "id": "vlan1", "type": "vlan", "vlan_id": 602, + "vlan_link": "eth0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, + ] +} + KNOWN_MACS = { 'fa:16:3e:69:b0:58': 'enp0s1', 'fa:16:3e:d4:57:ad': 'enp0s2', 'fa:16:3e:dd:50:9a': 'foo1', 'fa:16:3e:a8:14:69': 'foo2', 'fa:16:3e:ed:9a:59': 'foo3', + '0c:c4:7a:34:6e:3d': 'oeth1', + '0c:c4:7a:34:6e:3c': 'oeth0', } CFG_DRIVE_FILES_V2 = { @@ -599,6 +658,52 @@ class TestConvertNetworkData(TestCase): physicals.add(i['name']) self.assertEqual(physicals, set(('foo1', 'foo2'))) + def test_bond_conversion(self): + # light testing of bond conversion and eni rendering of bond + ncfg = openstack.convert_net_json(NETWORK_DATA_BOND, + known_macs=KNOWN_MACS) + eni_renderer = eni.Renderer() + eni_renderer.render_network_state( + self.tmp, network_state.parse_net_config_data(ncfg)) + with open(os.path.join(self.tmp, "etc", + "network", "interfaces"), 'r') as f: + eni_rendering = f.read() + + # Verify there are expected interfaces in the net config. + interfaces = sorted( + [i['name'] for i in ncfg['config'] + if i['type'] in ('vlan', 'bond', 'physical')]) + self.assertEqual( + sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]), + interfaces) + + words = eni_rendering.split() + # 'eth0' and 'eth1' are the ids. because their mac adresses + # map to other names, we should not see them in the ENI + self.assertNotIn('eth0', words) + self.assertNotIn('eth1', words) + + # oeth0 and oeth1 are the interface names for eni. + # bond0 will be generated for the bond. Each should be auto. + self.assertIn("auto oeth0", eni_rendering) + self.assertIn("auto oeth1", eni_rendering) + self.assertIn("auto bond0", eni_rendering) + + def test_vlan(self): + # light testing of vlan config conversion and eni rendering + ncfg = openstack.convert_net_json(NETWORK_DATA_VLAN, + known_macs=KNOWN_MACS) + eni_renderer = eni.Renderer() + eni_renderer.render_network_state( + self.tmp, network_state.parse_net_config_data(ncfg)) + with open(os.path.join(self.tmp, "etc", + "network", "interfaces"), 'r') as f: + eni_rendering = f.read() + + self.assertIn("iface enp0s1", eni_rendering) + self.assertIn("address 10.0.1.5", eni_rendering) + self.assertIn("auto enp0s1.602", eni_rendering) + def cfg_ds_from_dir(seed_d): cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None, -- cgit v1.2.3 From 3f1373a9785de5c5843c060835e444046ab50756 Mon Sep 17 00:00:00 2001 From: Matthew Thode Date: Thu, 18 Aug 2016 16:27:27 -0500 Subject: add install option for openrc Adds an install option for for OpenRC init scripts. I've also restricted installing tests more correctly. Also, don't hardcode the path to ip (/bin/ip on gentoo). 
--- cloudinit/sources/DataSourceOpenNebula.py | 2 +- setup.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 7b3a76b9..635a836c 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -407,7 +407,7 @@ def read_context_disk_dir(source_dir, asuser=None): # http://opennebula.org/documentation:rel3.8:cong#network_configuration for k in context: if re.match(r'^ETH\d+_IP$', k): - (out, _) = util.subp(['/sbin/ip', 'link']) + (out, _) = util.subp(['ip', 'link']) net = OpenNebulaNetwork(out, context) results['network-interfaces'] = net.gen_conf() break diff --git a/setup.py b/setup.py index bbadd7bf..8ff667d5 100755 --- a/setup.py +++ b/setup.py @@ -74,6 +74,7 @@ INITSYS_FILES = { 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)], 'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)], 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], + 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)], 'systemd': [f for f in (glob('systemd/*.service') + glob('systemd/*.target')) if is_f(f)], 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)], @@ -83,6 +84,7 @@ INITSYS_ROOTS = { 'sysvinit': '/etc/rc.d/init.d', 'sysvinit_freebsd': '/usr/local/etc/rc.d', 'sysvinit_deb': '/etc/init.d', + 'sysvinit_openrc': '/etc/init.d', 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'), 'systemd.generators': pkg_config_read('systemd', 'systemdsystemgeneratordir'), -- cgit v1.2.3