From c6a6f59e80f1fd62562b1fe9acfd45e1cee3cbe8 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Fri, 15 Dec 2017 15:24:53 -0700
Subject: lint: Fix lints seen by pylint version 1.8.1.

This branch resolves lints seen by pylint revision 1.8.1 and updates the
pinned pylint dependency used by our tox pylint target.
---
 cloudinit/net/cmdline.py       | 9 +++++----
 cloudinit/net/network_state.py | 8 +++++---
 2 files changed, 10 insertions(+), 7 deletions(-)

(limited to 'cloudinit/net')

diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 38b27a52..7b2cc9db 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -116,10 +116,11 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None):
             prev = names[name]['entry']
             if prev.get('mac_address') != entry.get('mac_address'):
                 raise ValueError(
-                    "device '%s' was defined multiple times (%s)"
-                    " but had differing mac addresses: %s -> %s.",
-                    (name, ' '.join(names[name]['files']),
-                     prev.get('mac_address'), entry.get('mac_address')))
+                    "device '{name}' was defined multiple times ({files})"
+                    " but had differing mac addresses: {old} -> {new}.".format(
+                        name=name, files=' '.join(names[name]['files']),
+                        old=prev.get('mac_address'),
+                        new=entry.get('mac_address')))
             prev['subnets'].extend(entry['subnets'])
             names[name]['files'].append(cfg_file)
         else:
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index e9e2cf4e..31738c73 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -474,8 +474,9 @@ class NetworkStateInterpreter(object):
         elif bridge_stp in ['off', '0', 0]:
             bridge_stp = False
         else:
-            raise ValueError("Cannot convert bridge_stp value"
-                             "(%s) to boolean", bridge_stp)
+            raise ValueError(
+                'Cannot convert bridge_stp value ({stp}) to'
+                ' boolean'.format(stp=bridge_stp))
         iface.update({'bridge_stp': bridge_stp})
         interfaces.update({iface['name']: iface})
 
@@ -692,7 +693,8 @@ class NetworkStateInterpreter(object):
         elif cmd_type == "bond":
             self.handle_bond(v1_cmd)
         else:
-            raise ValueError('Unknown command type: %s', cmd_type)
+            raise ValueError('Unknown command type: {cmd_type}'.format(
+                cmd_type=cmd_type))
 
     def _v2_to_v1_ipcfg(self, cfg):
         """Common ipconfig extraction from v2 to v1 subnets array."""
--
cgit v1.2.3


From 8a9421421497b3e7c05589c62389745d565c6633 Mon Sep 17 00:00:00 2001
From: Akihiko Ota
Date: Wed, 13 Dec 2017 23:46:02 +0900
Subject: OpenNebula: Improve network configuration support.

Network configuration in OpenNebula would only work if the host correctly
guessed the names of the devices in the guest.  OpenNebula provided data
in its context.sh like 'ETH0_NETWORK', but if the guest named devices
differently then results were not predictable.  This would occur with
Predictable Network Interface Names.

To address this, newer versions of OpenNebula provide the mac address
(ETH0_MAC).  This value is present in 4.14 and documented officially in
5.0 docs.

This provides support for reading the mac addresses from the context.sh.
It also fixes cases where context.sh provided a field (ETH0_NETWORK or
ETH0_MASK) with an empty string.  Previously the empty string would be
used rather than falling back to the default.
LP: #1719157, #1716397, #1736750 --- cloudinit/net/__init__.py | 4 +- cloudinit/sources/DataSourceOpenNebula.py | 112 ++++++----- tests/unittests/test_datasource/test_opennebula.py | 223 ++++++++++++++++----- tests/unittests/test_net.py | 6 +- 4 files changed, 241 insertions(+), 104 deletions(-) (limited to 'cloudinit/net') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index a1b0db10..c015e793 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -18,7 +18,7 @@ SYS_CLASS_NET = "/sys/class/net/" DEFAULT_PRIMARY_INTERFACE = 'eth0' -def _natural_sort_key(s, _nsre=re.compile('([0-9]+)')): +def natural_sort_key(s, _nsre=re.compile('([0-9]+)')): """Sorting for Humans: natural sort order. Can be use as the key to sort functions. This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as @@ -224,7 +224,7 @@ def find_fallback_nic(blacklist_drivers=None): # if eth0 exists use it above anything else, otherwise get the interface # that we can read 'first' (using the sorted defintion of first). - names = list(sorted(potential_interfaces, key=_natural_sort_key)) + names = list(sorted(potential_interfaces, key=natural_sort_key)) if DEFAULT_PRIMARY_INTERFACE in names: names.remove(DEFAULT_PRIMARY_INTERFACE) names.insert(0, DEFAULT_PRIMARY_INTERFACE) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index f66c95d7..ce47b6bd 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -12,6 +12,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import collections import os import pwd import re @@ -19,6 +20,7 @@ import string from cloudinit import log as logging from cloudinit import net +from cloudinit.net import eni from cloudinit import sources from cloudinit import util @@ -89,11 +91,18 @@ class DataSourceOpenNebula(sources.DataSource): return False self.seed = seed - self.network_eni = results.get("network_config") + self.network_eni = results.get('network-interfaces') self.metadata = md self.userdata_raw = results.get('userdata') return True + @property + def network_config(self): + if self.network_eni is not None: + return eni.convert_eni_data(self.network_eni) + else: + return None + def get_hostname(self, fqdn=False, resolve_ip=None): if resolve_ip is None: if self.dsmode == sources.DSMODE_NETWORK: @@ -116,58 +125,53 @@ class OpenNebulaNetwork(object): self.context = context if system_nics_by_mac is None: system_nics_by_mac = get_physical_nics_by_mac() - self.ifaces = system_nics_by_mac + self.ifaces = collections.OrderedDict( + [k for k in sorted(system_nics_by_mac.items(), + key=lambda k: net.natural_sort_key(k[1]))]) + + # OpenNebula 4.14+ provide macaddr for ETHX in variable ETH_MAC. 
+ # context_devname provides {mac.lower():ETHX, mac2.lower():ETHX} + self.context_devname = {} + for k, v in context.items(): + m = re.match(r'^(.+)_MAC$', k) + if m: + self.context_devname[v.lower()] = m.group(1) def mac2ip(self, mac): - components = mac.split(':')[2:] - return [str(int(c, 16)) for c in components] + return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]]) - def get_ip(self, dev, components): - var_name = dev.upper() + '_IP' - if var_name in self.context: - return self.context[var_name] - else: - return '.'.join(components) + def mac2network(self, mac): + return self.mac2ip(mac).rpartition(".")[0] + ".0" - def get_mask(self, dev): - var_name = dev.upper() + '_MASK' - if var_name in self.context: - return self.context[var_name] - else: - return '255.255.255.0' + def get_dns(self, dev): + return self.get_field(dev, "dns", "").split() - def get_network(self, dev, components): - var_name = dev.upper() + '_NETWORK' - if var_name in self.context: - return self.context[var_name] - else: - return '.'.join(components[:-1]) + '.0' + def get_domain(self, dev): + return self.get_field(dev, "domain") + + def get_ip(self, dev, mac): + return self.get_field(dev, "ip", self.mac2ip(mac)) def get_gateway(self, dev): - var_name = dev.upper() + '_GATEWAY' - if var_name in self.context: - return self.context[var_name] - else: - return None + return self.get_field(dev, "gateway") - def get_dns(self, dev): - var_name = dev.upper() + '_DNS' - if var_name in self.context: - return self.context[var_name] - else: - return None + def get_mask(self, dev): + return self.get_field(dev, "mask", "255.255.255.0") - def get_domain(self, dev): - var_name = dev.upper() + '_DOMAIN' - if var_name in self.context: - return self.context[var_name] - else: - return None + def get_network(self, dev, mac): + return self.get_field(dev, "network", self.mac2network(mac)) + + def get_field(self, dev, name, default=None): + """return the field name in context for device dev. + + context stores _ (example: eth0_DOMAIN). + an empty string for value will return default.""" + val = self.context.get('_'.join((dev, name,)).upper()) + # allow empty string to return the default. + return default if val in (None, "") else val def gen_conf(self): - global_dns = [] - if 'DNS' in self.context: - global_dns.append(self.context['DNS']) + global_dns = self.context.get('DNS', "").split() conf = [] conf.append('auto lo') @@ -175,29 +179,31 @@ class OpenNebulaNetwork(object): conf.append('') for mac, dev in self.ifaces.items(): - ip_components = self.mac2ip(mac) + mac = mac.lower() + + # c_dev stores name in context 'ETHX' for this device. + # dev stores the current system name. 
+ c_dev = self.context_devname.get(mac, dev) conf.append('auto ' + dev) conf.append('iface ' + dev + ' inet static') - conf.append(' address ' + self.get_ip(dev, ip_components)) - conf.append(' network ' + self.get_network(dev, ip_components)) - conf.append(' netmask ' + self.get_mask(dev)) + conf.append(' #hwaddress %s' % mac) + conf.append(' address ' + self.get_ip(c_dev, mac)) + conf.append(' network ' + self.get_network(c_dev, mac)) + conf.append(' netmask ' + self.get_mask(c_dev)) - gateway = self.get_gateway(dev) + gateway = self.get_gateway(c_dev) if gateway: conf.append(' gateway ' + gateway) - domain = self.get_domain(dev) + domain = self.get_domain(c_dev) if domain: conf.append(' dns-search ' + domain) # add global DNS servers to all interfaces - dns = self.get_dns(dev) + dns = self.get_dns(c_dev) if global_dns or dns: - all_dns = global_dns - if dns: - all_dns.append(dns) - conf.append(' dns-nameservers ' + ' '.join(all_dns)) + conf.append(' dns-nameservers ' + ' '.join(global_dns + dns)) conf.append('') diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 2326dd58..5c3ba012 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -4,6 +4,7 @@ from cloudinit import helpers from cloudinit.sources import DataSourceOpenNebula as ds from cloudinit import util from cloudinit.tests.helpers import mock, populate_dir, CiTestCase +from textwrap import dedent import os import pwd @@ -30,6 +31,8 @@ USER_DATA = '#cloud-config\napt_upgrade: true' SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i' HOSTNAME = 'foo.example.com' PUBLIC_IP = '10.0.0.3' +MACADDR = '02:00:0a:12:01:01' +IP_BY_MACADDR = '10.18.1.1' DS_PATH = "cloudinit.sources.DataSourceOpenNebula" @@ -195,24 +198,96 @@ class TestOpenNebulaDataSource(CiTestCase): @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_hostname(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'} - for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: PUBLIC_IP}) - results = ds.read_context_disk_dir(my_d) + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: PUBLIC_IP}) + results = ds.read_context_disk_dir(my_d) - self.assertTrue('metadata' in results) - self.assertTrue('local-hostname' in results['metadata']) - self.assertEqual(PUBLIC_IP, results['metadata']['local-hostname']) + self.assertTrue('metadata' in results) + self.assertTrue('local-hostname' in results['metadata']) + self.assertEqual( + PUBLIC_IP, results['metadata']['local-hostname']) @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_network_interfaces(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'} - populate_context_dir(self.seed_dir, {'ETH0_IP': '1.2.3.4'}) - results = ds.read_context_disk_dir(self.seed_dir) - - self.assertTrue('network-interfaces' in results) - self.assertTrue('1.2.3.4' in results['network-interfaces']) + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + + # without ETH0_MAC + # for Older OpenNebula? 
+ populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_IP and ETH0_MAC + populate_context_dir( + self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_IP with empty string and ETH0_MAC + # in the case of using Virtual Network contains + # "AR = [ TYPE = ETHER ]" + populate_context_dir( + self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_NETWORK + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_NETWORK': '10.18.0.0' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('10.18.0.0' in results['network-interfaces']) + + # ETH0_NETWORK with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_NETWORK': '' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('10.18.1.0' in results['network-interfaces']) + + # ETH0_MASK + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '255.255.0.0' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('255.255.0.0' in results['network-interfaces']) + + # ETH0_MASK with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('255.255.255.0' in results['network-interfaces']) def test_find_candidates(self): def my_devs_with(criteria): @@ -233,7 +308,7 @@ class TestOpenNebulaDataSource(CiTestCase): class TestOpenNebulaNetwork(unittest.TestCase): - system_nics = {'02:00:0a:12:01:01': 'eth0'} + system_nics = ('eth0', 'ens3') def test_lo(self): net = ds.OpenNebulaNetwork(context={}, system_nics_by_mac={}) @@ -244,45 +319,101 @@ iface lo inet loopback @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_eth0(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = self.system_nics - net = ds.OpenNebulaNetwork({}) - self.assertEqual(net.gen_conf(), u'''\ -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 10.18.1.1 - network 10.18.1.0 - netmask 255.255.255.0 -''') + for nic in self.system_nics: + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork({}) + self.assertEqual(net.gen_conf(), dedent("""\ + auto lo + iface lo inet loopback + + auto {dev} + iface {dev} inet static + #hwaddress {macaddr} + address 10.18.1.1 + network 10.18.1.0 + netmask 255.255.255.0 + """.format(dev=nic, macaddr=MACADDR))) def test_eth0_override(self): context = { 'DNS': '1.2.3.8', - 'ETH0_IP': '1.2.3.4', - 'ETH0_NETWORK': '1.2.3.0', + 'ETH0_IP': '10.18.1.1', + 'ETH0_NETWORK': '10.18.0.0', 'ETH0_MASK': '255.255.0.0', 'ETH0_GATEWAY': '1.2.3.5', 'ETH0_DOMAIN': 'example.com', - 'ETH0_DNS': 
'1.2.3.6 1.2.3.7' + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_MAC': '02:00:0a:12:01:01' } - - net = ds.OpenNebulaNetwork(context, - system_nics_by_mac=self.system_nics) - self.assertEqual(net.gen_conf(), u'''\ -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 1.2.3.4 - network 1.2.3.0 - netmask 255.255.0.0 - gateway 1.2.3.5 - dns-search example.com - dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 -''') + for nic in self.system_nics: + expected = dedent("""\ + auto lo + iface lo inet loopback + + auto {dev} + iface {dev} inet static + #hwaddress {macaddr} + address 10.18.1.1 + network 10.18.0.0 + netmask 255.255.0.0 + gateway 1.2.3.5 + dns-search example.com + dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 + """).format(dev=nic, macaddr=MACADDR) + net = ds.OpenNebulaNetwork(context, + system_nics_by_mac={MACADDR: nic}) + self.assertEqual(expected, net.gen_conf()) + + def test_multiple_nics(self): + """Test rendering multiple nics with names that differ from context.""" + MAC_1 = "02:00:0a:12:01:01" + MAC_2 = "02:00:0a:12:01:02" + context = { + 'DNS': '1.2.3.8', + 'ETH0_IP': '10.18.1.1', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_MASK': '255.255.0.0', + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_DOMAIN': 'example.com', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_MAC': MAC_2, + 'ETH3_IP': '10.3.1.3', + 'ETH3_NETWORK': '10.3.0.0', + 'ETH3_MASK': '255.255.0.0', + 'ETH3_GATEWAY': '10.3.0.1', + 'ETH3_DOMAIN': 'third.example.com', + 'ETH3_DNS': '10.3.1.2', + 'ETH3_MAC': MAC_1, + } + net = ds.OpenNebulaNetwork( + context, system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'}) + + expected = dedent("""\ + auto lo + iface lo inet loopback + + auto enp0s25 + iface enp0s25 inet static + #hwaddress 02:00:0a:12:01:01 + address 10.3.1.3 + network 10.3.0.0 + netmask 255.255.0.0 + gateway 10.3.0.1 + dns-search third.example.com + dns-nameservers 1.2.3.8 10.3.1.2 + + auto enp1s2 + iface enp1s2 inet static + #hwaddress 02:00:0a:12:01:02 + address 10.18.1.1 + network 10.18.0.0 + netmask 255.255.0.0 + gateway 1.2.3.5 + dns-search example.com + dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 + """) + + self.assertEqual(expected, net.gen_conf()) class TestParseShellConfig(unittest.TestCase): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index f3fa2a30..ddea13d7 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,9 +1,9 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import net -from cloudinit.net import _natural_sort_key from cloudinit.net import cmdline from cloudinit.net import eni +from cloudinit.net import natural_sort_key from cloudinit.net import netplan from cloudinit.net import network_state from cloudinit.net import renderers @@ -2708,11 +2708,11 @@ class TestInterfacesSorting(CiTestCase): def test_natural_order(self): data = ['ens5', 'ens6', 'ens3', 'ens20', 'ens13', 'ens2'] self.assertEqual( - sorted(data, key=_natural_sort_key), + sorted(data, key=natural_sort_key), ['ens2', 'ens3', 'ens5', 'ens6', 'ens13', 'ens20']) data2 = ['enp2s0', 'enp2s3', 'enp0s3', 'enp0s13', 'enp0s8', 'enp1s2'] self.assertEqual( - sorted(data2, key=_natural_sort_key), + sorted(data2, key=natural_sort_key), ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3']) -- cgit v1.2.3 From c03bdd3d8ed762cada813c5e95a40b14d2047b57 Mon Sep 17 00:00:00 2001 From: Douglas Jordan Date: Wed, 24 Jan 2018 16:10:08 -0700 Subject: Azure VM Preprovisioning support. 
This change will enable Azure VMs to report that provisioning has completed
twice: first to tell the fabric it has completed, then a second time to
enable customer settings.  The datasource for the second provisioning is
the Instance Metadata Service (IMDS), and the VM will poll indefinitely for
the new ovf-env.xml from IMDS.

This branch introduces EphemeralDHCPv4, which encapsulates common logic
used by both DataSourceEc2 and DataSourceAzure for temporary DHCP
interactions without side-effects.

LP: #1734991
---
 .gitignore                                     |   1 +
 cloudinit/net/dhcp.py                          |  43 ++++++-
 cloudinit/net/network_state.py                 |  12 ++
 cloudinit/sources/DataSourceAzure.py           | 138 ++++++++++++++++++++--
 cloudinit/sources/DataSourceEc2.py             |  23 ++--
 cloudinit/sources/helpers/azure.py             |  22 ++--
 cloudinit/temp_utils.py                        |  11 +-
 cloudinit/url_helper.py                        |  29 +++--
 tests/unittests/test_datasource/test_azure.py  | 157 +++++++++++++++++++++++++-
 tests/unittests/test_datasource/test_ec2.py    |   2 +-
 tests/unittests/test_net.py                    |  12 ++
 11 files changed, 397 insertions(+), 53 deletions(-)

(limited to 'cloudinit/net')

diff --git a/.gitignore b/.gitignore
index b0500a68..75565ed4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@ parts
 prime
 stage
 *.snap
+*.cover
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 875a4609..087c0c03 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -10,7 +10,9 @@ import os
 import re
 import signal
 
-from cloudinit.net import find_fallback_nic, get_devicelist
+from cloudinit.net import (
+    EphemeralIPv4Network, find_fallback_nic, get_devicelist)
+from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip
 from cloudinit import temp_utils
 from cloudinit import util
 from six import StringIO
@@ -29,6 +31,45 @@ class InvalidDHCPLeaseFileError(Exception):
     pass
 
 
+class NoDHCPLeaseError(Exception):
+    """Raised when unable to get a DHCP lease."""
+    pass
+
+
+class EphemeralDHCPv4(object):
+    def __init__(self, iface=None):
+        self.iface = iface
+        self._ephipv4 = None
+
+    def __enter__(self):
+        try:
+            leases = maybe_perform_dhcp_discovery(self.iface)
+        except InvalidDHCPLeaseFileError:
+            raise NoDHCPLeaseError()
+        if not leases:
+            raise NoDHCPLeaseError()
+        lease = leases[-1]
+        LOG.debug("Received dhcp lease on %s for %s/%s",
+                  lease['interface'], lease['fixed-address'],
+                  lease['subnet-mask'])
+        nmap = {'interface': 'interface', 'ip': 'fixed-address',
+                'prefix_or_mask': 'subnet-mask',
+                'broadcast': 'broadcast-address',
+                'router': 'routers'}
+        kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()])
+        if not kwargs['broadcast']:
+            kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip'])
+        ephipv4 = EphemeralIPv4Network(**kwargs)
+        ephipv4.__enter__()
+        self._ephipv4 = ephipv4
+        return lease
+
+    def __exit__(self, excp_type, excp_value, excp_traceback):
+        if not self._ephipv4:
+            return
+        self._ephipv4.__exit__(excp_type, excp_value, excp_traceback)
+
+
 def maybe_perform_dhcp_discovery(nic=None):
     """Perform dhcp discovery if nic valid and dhclient command exists.
 
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 31738c73..fe667d88 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -961,4 +961,16 @@ def mask_to_net_prefix(mask):
     return ipv4_mask_to_net_prefix(mask)
 
 
+def mask_and_ipv4_to_bcast_addr(mask, ip):
+    """Calculate the broadcast address from the subnet mask and ip addr.
+ + Supports ipv4 only.""" + ip_bin = int(''.join([bin(int(x) + 256)[3:] for x in ip.split('.')]), 2) + mask_dec = ipv4_mask_to_net_prefix(mask) + bcast_bin = ip_bin | (2**(32 - mask_dec) - 1) + bcast_str = '.'.join([str(bcast_bin >> (i << 3) & 0xFF) + for i in range(4)[::-1]]) + return bcast_str + + # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d1d09757..4bcbf3a4 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -11,13 +11,16 @@ from functools import partial import os import os.path import re +from time import time from xml.dom import minidom import xml.etree.ElementTree as ET from cloudinit import log as logging from cloudinit import net +from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources from cloudinit.sources.helpers.azure import get_metadata_from_fabric +from cloudinit.url_helper import readurl, wait_for_url, UrlError from cloudinit import util LOG = logging.getLogger(__name__) @@ -44,6 +47,9 @@ LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' DEFAULT_FS = 'ext4' # DMI chassis-asset-tag is set static for all azure instances AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' +REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" +IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" +IMDS_RETRIES = 5 def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): @@ -276,19 +282,20 @@ class DataSourceAzure(sources.DataSource): with temporary_hostname(azure_hostname, self.ds_cfg, hostname_command=hostname_command) \ - as previous_hostname: - if (previous_hostname is not None and + as previous_hn: + if (previous_hn is not None and util.is_true(self.ds_cfg.get('set_hostname'))): cfg = self.ds_cfg['hostname_bounce'] # "Bouncing" the network try: - perform_hostname_bounce(hostname=azure_hostname, - cfg=cfg, - prev_hostname=previous_hostname) + return perform_hostname_bounce(hostname=azure_hostname, + cfg=cfg, + prev_hostname=previous_hn) except Exception as e: LOG.warning("Failed publishing hostname: %s", e) util.logexc(LOG, "handling set_hostname failed") + return False def get_metadata_from_agent(self): temp_hostname = self.metadata.get('local-hostname') @@ -345,15 +352,20 @@ class DataSourceAzure(sources.DataSource): ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] + if os.path.isfile(REPROVISION_MARKER_FILE): + candidates.insert(0, "IMDS") candidates.extend(list_possible_azure_ds_devs()) if ddir: candidates.append(ddir) found = None - + reprovision = False for cdev in candidates: try: - if cdev.startswith("/dev/"): + if cdev == "IMDS": + ret = None + reprovision = True + elif cdev.startswith("/dev/"): if util.is_FreeBSD(): ret = util.mount_cb(cdev, load_azure_ds_dir, mtype="udf", sync=False) @@ -370,6 +382,8 @@ class DataSourceAzure(sources.DataSource): LOG.warning("%s was not mountable", cdev) continue + if reprovision or self._should_reprovision(ret): + ret = self._reprovision() (md, self.userdata_raw, cfg, files) = ret self.seed = cdev self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) @@ -428,6 +442,83 @@ class DataSourceAzure(sources.DataSource): LOG.debug("negotiating already done for %s", self.get_instance_id()) + def _poll_imds(self, report_ready=True): + """Poll IMDS for the new provisioning data until we get a valid + response. 
Then return the returned JSON object.""" + url = IMDS_URL + "?api-version=2017-04-02" + headers = {"Metadata": "true"} + LOG.debug("Start polling IMDS") + + def sleep_cb(response, loop_n): + return 1 + + def exception_cb(msg, exception): + if isinstance(exception, UrlError) and exception.code == 404: + return + LOG.warning("Exception during polling. Will try DHCP.", + exc_info=True) + + # If we get an exception while trying to call IMDS, we + # call DHCP and setup the ephemeral network to acquire the new IP. + raise exception + + need_report = report_ready + for i in range(IMDS_RETRIES): + try: + with EphemeralDHCPv4() as lease: + if need_report: + self._report_ready(lease=lease) + need_report = False + wait_for_url([url], max_wait=None, timeout=60, + status_cb=LOG.info, + headers_cb=lambda url: headers, sleep_time=1, + exception_cb=exception_cb, + sleep_time_cb=sleep_cb) + return str(readurl(url, headers=headers)) + except Exception: + LOG.debug("Exception during polling-retrying dhcp" + + " %d more time(s).", (IMDS_RETRIES - i), + exc_info=True) + + def _report_ready(self, lease): + """Tells the fabric provisioning has completed + before we go into our polling loop.""" + try: + get_metadata_from_fabric(None, lease['unknown-245']) + except Exception as exc: + LOG.warning( + "Error communicating with Azure fabric; You may experience." + "connectivity issues.", exc_info=True) + + def _should_reprovision(self, ret): + """Whether or not we should poll IMDS for reprovisioning data. + Also sets a marker file to poll IMDS. + + The marker file is used for the following scenario: the VM boots into + this polling loop, which we expect to be proceeding infinitely until + the VM is picked. If for whatever reason the platform moves us to a + new host (for instance a hardware issue), we need to keep polling. + However, since the VM reports ready to the Fabric, we will not attach + the ISO, thus cloud-init needs to have a way of knowing that it should + jump back into the polling loop in order to retrieve the ovf_env.""" + if not ret: + return False + (md, self.userdata_raw, cfg, files) = ret + path = REPROVISION_MARKER_FILE + if (cfg.get('PreprovisionedVm') is True or + os.path.isfile(path)): + if not os.path.isfile(path): + LOG.info("Creating a marker file to poll imds") + util.write_file(path, "%s: %s\n" % (os.getpid(), time())) + return True + return False + + def _reprovision(self): + """Initiate the reprovisioning workflow.""" + contents = self._poll_imds() + md, ud, cfg = read_azure_ovf(contents) + return (md, ud, cfg, {'ovf-env.xml': contents}) + def _negotiate(self): """Negotiate with fabric and return data from it. @@ -453,7 +544,7 @@ class DataSourceAzure(sources.DataSource): "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) return False - + util.del_file(REPROVISION_MARKER_FILE) return fabric_data def activate(self, cfg, is_new_instance): @@ -595,6 +686,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, def perform_hostname_bounce(hostname, cfg, prev_hostname): # set the hostname to 'hostname' if it is not already set to that. # then, if policy is not off, bounce the interface using command + # Returns True if the network was bounced, False otherwise. 
command = cfg['command'] interface = cfg['interface'] policy = cfg['policy'] @@ -614,7 +706,8 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): else: LOG.debug( "Skipping network bounce: ifupdown utils aren't present.") - return # Don't bounce as networkd handles hostname DDNS updates + # Don't bounce as networkd handles hostname DDNS updates + return False LOG.debug("pubhname: publishing hostname [%s]", msg) shell = not isinstance(command, (list, tuple)) # capture=False, see comments in bug 1202758 and bug 1206164. @@ -622,6 +715,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): get_uptime=True, func=util.subp, kwargs={'args': command, 'shell': shell, 'capture': False, 'env': env}) + return True def crtfile_to_pubkey(fname, data=None): @@ -838,9 +932,35 @@ def read_azure_ovf(contents): if 'ssh_pwauth' not in cfg and password: cfg['ssh_pwauth'] = True + cfg['PreprovisionedVm'] = _extract_preprovisioned_vm_setting(dom) + return (md, ud, cfg) +def _extract_preprovisioned_vm_setting(dom): + """Read the preprovision flag from the ovf. It should not + exist unless true.""" + platform_settings_section = find_child( + dom.documentElement, + lambda n: n.localName == "PlatformSettingsSection") + if not platform_settings_section or len(platform_settings_section) == 0: + LOG.debug("PlatformSettingsSection not found") + return False + platform_settings = find_child( + platform_settings_section[0], + lambda n: n.localName == "PlatformSettings") + if not platform_settings or len(platform_settings) == 0: + LOG.debug("PlatformSettings not found") + return False + preprovisionedVm = find_child( + platform_settings[0], + lambda n: n.localName == "PreprovisionedVm") + if not preprovisionedVm or len(preprovisionedVm) == 0: + LOG.debug("PreprovisionedVm not found") + return False + return util.translate_bool(preprovisionedVm[0].firstChild.nodeValue) + + def encrypt_pass(password, salt_id="$6$"): return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 0f89f34d..e14553b3 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -14,7 +14,7 @@ import time from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import net -from cloudinit.net import dhcp +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util @@ -102,22 +102,13 @@ class DataSourceEc2(sources.DataSource): if util.is_FreeBSD(): LOG.debug("FreeBSD doesn't support running dhclient with -sf") return False - dhcp_leases = dhcp.maybe_perform_dhcp_discovery( - self.fallback_interface) - if not dhcp_leases: - # DataSourceEc2Local failed in init-local stage. DataSourceEc2 - # will still run in init-network stage. 
+ try: + with EphemeralDHCPv4(self.fallback_interface): + return util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self._crawl_metadata) + except NoDHCPLeaseError: return False - dhcp_opts = dhcp_leases[-1] - net_params = {'interface': dhcp_opts.get('interface'), - 'ip': dhcp_opts.get('fixed-address'), - 'prefix_or_mask': dhcp_opts.get('subnet-mask'), - 'broadcast': dhcp_opts.get('broadcast-address'), - 'router': dhcp_opts.get('routers')} - with net.EphemeralIPv4Network(**net_params): - return util.log_time( - logfunc=LOG.debug, msg='Crawl of metadata service', - func=self._crawl_metadata) else: return self._crawl_metadata() diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 6cda5721..90c12df1 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -199,10 +199,10 @@ class WALinuxAgentShim(object): ' ', '']) - def __init__(self, fallback_lease_file=None): + def __init__(self, fallback_lease_file=None, dhcp_options=None): LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s', fallback_lease_file) - self.dhcpoptions = None + self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None self.values = {} @@ -220,7 +220,8 @@ class WALinuxAgentShim(object): @property def endpoint(self): if self._endpoint is None: - self._endpoint = self.find_endpoint(self.lease_file) + self._endpoint = self.find_endpoint(self.lease_file, + self.dhcpoptions) return self._endpoint @staticmethod @@ -292,10 +293,14 @@ class WALinuxAgentShim(object): return _value @staticmethod - def find_endpoint(fallback_lease_file=None): + def find_endpoint(fallback_lease_file=None, dhcp245=None): value = None - LOG.debug('Finding Azure endpoint from networkd...') - value = WALinuxAgentShim._networkd_get_value_from_leases() + if dhcp245 is not None: + value = dhcp245 + LOG.debug("Using Azure Endpoint from dhcp options") + if value is None: + LOG.debug('Finding Azure endpoint from networkd...') + value = WALinuxAgentShim._networkd_get_value_from_leases() if value is None: # Option-245 stored in /run/cloud-init/dhclient.hooks/.json # a dhclient exit hook that calls cloud-init-dhclient-hook @@ -367,8 +372,9 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None): - shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file) +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): + shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, + dhcp_options=dhcp_opts) try: return shim.register_with_azure_and_fetch_data() finally: diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py index 5d7adf70..c98a1b53 100644 --- a/cloudinit/temp_utils.py +++ b/cloudinit/temp_utils.py @@ -28,13 +28,18 @@ def _tempfile_dir_arg(odir=None, needs_exe=False): if odir is not None: return odir + if needs_exe: + tdir = _EXE_ROOT_TMPDIR + if not os.path.isdir(tdir): + os.makedirs(tdir) + os.chmod(tdir, 0o1777) + return tdir + global _TMPDIR if _TMPDIR: return _TMPDIR - if needs_exe: - tdir = _EXE_ROOT_TMPDIR - elif os.getuid() == 0: + if os.getuid() == 0: tdir = _ROOT_TMPDIR else: tdir = os.environ.get('TMPDIR', '/tmp') diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 0e0f5b4c..0a5be0b3 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -273,7 +273,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, def wait_for_url(urls, max_wait=None, timeout=None, 
status_cb=None, headers_cb=None, sleep_time=1, - exception_cb=None): + exception_cb=None, sleep_time_cb=None): """ urls: a list of urls to try max_wait: roughly the maximum time to wait before giving up @@ -286,6 +286,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, for request. exception_cb: call method with 2 arguments 'msg' (per status_cb) and 'exception', the exception that occurred. + sleep_time_cb: call method with 2 arguments (response, loop_n) that + generates the next sleep time. the idea of this routine is to wait for the EC2 metdata service to come up. On both Eucalyptus and EC2 we have seen the case where @@ -301,6 +303,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, service but is not going to find one. It is possible that the instance data host (169.254.169.254) may be firewalled off Entirely for a sytem, meaning that the connection will block forever unless a timeout is set. + + A value of None for max_wait will retry indefinitely. """ start_time = time.time() @@ -311,18 +315,24 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb = log_status_cb def timeup(max_wait, start_time): - return ((max_wait <= 0 or max_wait is None) or - (time.time() - start_time > max_wait)) + if (max_wait is None): + return False + return ((max_wait <= 0) or (time.time() - start_time > max_wait)) loop_n = 0 + response = None while True: - sleep_time = int(loop_n / 5) + 1 + if sleep_time_cb is not None: + sleep_time = sleep_time_cb(response, loop_n) + else: + sleep_time = int(loop_n / 5) + 1 for url in urls: now = time.time() if loop_n != 0: if timeup(max_wait, start_time): break - if timeout and (now + timeout > (start_time + max_wait)): + if (max_wait is not None and + timeout and (now + timeout > (start_time + max_wait))): # shorten timeout to not run way over max_time timeout = int((start_time + max_wait) - now) @@ -354,10 +364,11 @@ def wait_for_url(urls, max_wait=None, timeout=None, url_exc = e time_taken = int(time.time() - start_time) - status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url, - time_taken, - max_wait, - reason) + max_wait_str = "%ss" % max_wait if max_wait else "unlimited" + status_msg = "Calling '%s' failed [%s/%s]: %s" % (url, + time_taken, + max_wait_str, + reason) status_cb(status_msg) if exception_cb: # This can be used to alter the headers that will be sent diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 6341e1e8..254e9876 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -5,7 +5,7 @@ from cloudinit.util import b64e, decode_binary, load_file, write_file from cloudinit.sources import DataSourceAzure as dsaz from cloudinit.util import find_freebsd_part from cloudinit.util import get_path_dev_freebsd - +from cloudinit.version import version_string as vs from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, ExitStack, PY26, SkipTest) @@ -16,7 +16,8 @@ import xml.etree.ElementTree as ET import yaml -def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): +def construct_valid_ovf_env(data=None, pubkeys=None, + userdata=None, platform_settings=None): if data is None: data = {'HostName': 'FOOHOST'} if pubkeys is None: @@ -66,10 +67,12 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> kms.core.windows.net false - - - - """ + """ + if platform_settings: + for k, v in platform_settings.items(): + content += 
"<%s>%s\n" % (k, v, k) + content += """ +""" return content @@ -1107,4 +1110,146 @@ class TestAzureNetExists(CiTestCase): self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) +@mock.patch('cloudinit.sources.DataSourceAzure.util.subp') +@mock.patch.object(dsaz, 'get_hostname') +@mock.patch.object(dsaz, 'set_hostname') +class TestAzureDataSourcePreprovisioning(CiTestCase): + + def setUp(self): + super(TestAzureDataSourcePreprovisioning, self).setUp() + tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) + self.paths = helpers.Paths({'cloud_dir': tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + + def test_read_azure_ovf_with_true_flag(self, *args): + """The read_azure_ovf method should set the PreprovisionedVM + cfg flag if the proper setting is present.""" + content = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "True"}) + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertTrue(cfg['PreprovisionedVm']) + + def test_read_azure_ovf_with_false_flag(self, *args): + """The read_azure_ovf method should set the PreprovisionedVM + cfg flag to false if the proper setting is false.""" + content = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "False"}) + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertFalse(cfg['PreprovisionedVm']) + + def test_read_azure_ovf_without_flag(self, *args): + """The read_azure_ovf method should not set the + PreprovisionedVM cfg flag.""" + content = construct_valid_ovf_env() + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertFalse(cfg['PreprovisionedVm']) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('requests.Session.request') + def test_poll_imds_returns_ovf_env(self, fake_resp, m_dhcp, m_net, + m_is_bsd, *args): + """The _poll_imds method should return the ovf_env.xml.""" + m_is_bsd.return_value = False + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}] + url = 'http://{0}/metadata/reprovisiondata?api-version=2017-04-02' + host = "169.254.169.254" + full_url = url.format(host) + fake_resp.return_value = mock.MagicMock(status_code=200, text="ovf") + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(len(dsa._poll_imds()) > 0) + self.assertEqual(fake_resp.call_args_list, + [mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs() + }, method='GET', timeout=60.0, + url=full_url), + mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs() + }, method='GET', url=full_url)]) + self.assertEqual(m_dhcp.call_count, 1) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1') + self.assertEqual(m_net.call_count, 1) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('requests.Session.request') + def test__reprovision_calls__poll_imds(self, fake_resp, m_dhcp, m_net, + m_is_bsd, *args): + """The _reprovision method should call poll IMDS.""" + m_is_bsd.return_value = False + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 
'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'}] + url = 'http://{0}/metadata/reprovisiondata?api-version=2017-04-02' + host = "169.254.169.254" + full_url = url.format(host) + hostname = "myhost" + username = "myuser" + odata = {'HostName': hostname, 'UserName': username} + content = construct_valid_ovf_env(data=odata) + fake_resp.return_value = mock.MagicMock(status_code=200, text=content) + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + md, ud, cfg, d = dsa._reprovision() + self.assertEqual(md['local-hostname'], hostname) + self.assertEqual(cfg['system_info']['default_user']['name'], username) + self.assertEqual(fake_resp.call_args_list, + [mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs()}, + method='GET', timeout=60.0, url=full_url), + mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs()}, + method='GET', url=full_url)]) + self.assertEqual(m_dhcp.call_count, 1) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1') + self.assertEqual(m_net.call_count, 1) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + @mock.patch('os.path.isfile') + def test__should_reprovision_with_true_cfg(self, isfile, write_f, *args): + """The _should_reprovision method should return true with config + flag present.""" + isfile.return_value = False + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(dsa._should_reprovision( + (None, None, {'PreprovisionedVm': True}, None))) + + @mock.patch('os.path.isfile') + def test__should_reprovision_with_file_existing(self, isfile, *args): + """The _should_reprovision method should return True if the sentinal + exists.""" + isfile.return_value = True + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(dsa._should_reprovision( + (None, None, {'preprovisionedvm': False}, None))) + + @mock.patch('os.path.isfile') + def test__should_reprovision_returns_false(self, isfile, *args): + """The _should_reprovision method should return False + if config and sentinal are not present.""" + isfile.return_value = False + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertFalse(dsa._should_reprovision((None, None, {}, None))) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index f0dc8338..0f7267bb 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -425,7 +425,7 @@ class TestEc2(test_helpers.HttprettyTestCase): self.logs.getvalue()) @httpretty.activate - @mock.patch('cloudinit.net.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') @mock.patch('cloudinit.net.find_fallback_nic') @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index ddea13d7..ac33e8ef 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2948,4 +2948,16 @@ class TestRenameInterfaces(CiTestCase): mock_subp.assert_has_calls(expected) +class TestNetworkState(CiTestCase): + + def test_bcast_addr(self): + """Test mask_and_ipv4_to_bcast_addr proper execution.""" + bcast_addr = network_state.mask_and_ipv4_to_bcast_addr + 
self.assertEqual("192.168.1.255", + bcast_addr("255.255.255.0", "192.168.1.1")) + self.assertEqual("128.42.7.255", + bcast_addr("255.255.248.0", "128.42.5.4")) + self.assertEqual("10.1.21.255", + bcast_addr("255.255.255.0", "10.1.21.4")) + # vi: ts=4 expandtab -- cgit v1.2.3