From c92f02037afc6b0434c9498904f7d888e00cd55b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 29 Apr 2016 09:04:36 -0400 Subject: Config Drive: fix check_instance_id signature. After reboot cloud-init would fail as the previously pickled object would have a check_instance_id signature but it didn't match expected LP: #1575055 --- cloudinit/sources/DataSourceConfigDrive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources/DataSourceConfigDrive.py') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 3fa62ef3..52a9f543 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -155,7 +155,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): return True - def check_instance_id(self): + def check_instance_id(self, sys_cfg): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) -- cgit v1.2.3 From a2249942522d140a063acdc007aced991d4f0588 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 5 May 2016 14:57:48 -0700 Subject: Work on refactoring (and adding) network conversion tests --- cloudinit/net/network_state.py | 68 ----------- cloudinit/sources/DataSourceConfigDrive.py | 127 +-------------------- cloudinit/sources/helpers/openstack.py | 116 +++++++++++++++++++ .../unittests/test_datasource/test_configdrive.py | 104 ++++++++++++++--- tests/unittests/test_net.py | 1 + 5 files changed, 209 insertions(+), 207 deletions(-) (limited to 'cloudinit/sources/DataSourceConfigDrive.py') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index e32d2cdf..d08e94fe 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -376,71 +376,3 @@ def mask2cidr(mask): return ipv4mask2cidr(mask) else: return mask - - -if __name__ == '__main__': - import sys - import random - from cloudinit import net - - def load_config(nc): - version = nc.get('version') - config = nc.get('config') - return (version, config) - - def test_parse(network_config): - (version, config) = load_config(network_config) - ns1 = NetworkState(version=version, config=config) - ns1.parse_config() - random.shuffle(config) - ns2 = NetworkState(version=version, config=config) - ns2.parse_config() - print("----NS1-----") - print(ns1.dump_network_state()) - print() - print("----NS2-----") - print(ns2.dump_network_state()) - print("NS1 == NS2 ?=> {}".format( - ns1.network_state == ns2.network_state)) - eni = net.render_interfaces(ns2.network_state) - print(eni) - udev_rules = net.render_persistent_net(ns2.network_state) - print(udev_rules) - - def test_dump_and_load(network_config): - print("Loading network_config into NetworkState") - (version, config) = load_config(network_config) - ns1 = NetworkState(version=version, config=config) - ns1.parse_config() - print("Dumping state to file") - ns1_dump = ns1.dump() - ns1_state = "/tmp/ns1.state" - with open(ns1_state, "w+") as f: - f.write(ns1_dump) - - print("Loading state from file") - ns2 = from_state_file(ns1_state) - print("NS1 == NS2 ?=> {}".format( - ns1.network_state == ns2.network_state)) - - def test_output(network_config): - (version, config) = load_config(network_config) - ns1 = NetworkState(version=version, config=config) - ns1.parse_config() - random.shuffle(config) - ns2 = NetworkState(version=version, config=config) - ns2.parse_config() - print("NS1 == NS2 ?=> {}".format( - ns1.network_state 
== ns2.network_state)) - eni_1 = net.render_interfaces(ns1.network_state) - eni_2 = net.render_interfaces(ns2.network_state) - print(eni_1) - print(eni_2) - print("eni_1 == eni_2 ?=> {}".format( - eni_1 == eni_2)) - - y = util.read_conf(sys.argv[1]) - network_config = y.get('network') - test_parse(network_config) - test_dump_and_load(network_config) - test_output(network_config) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 52a9f543..70373b43 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -61,7 +61,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): mstr += "[source=%s]" % (self.source) return mstr - def get_data(self): + def get_data(self, skip_first_boot=False): found = None md = {} results = {} @@ -119,7 +119,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): # instance-id prev_iid = get_previous_iid(self.paths) cur_iid = md['instance-id'] - if prev_iid != cur_iid and self.dsmode == "local": + if prev_iid != cur_iid and \ + self.dsmode == "local" and not skip_first_boot: on_first_boot(results, distro=self.distro) # dsmode != self.dsmode here if: @@ -163,7 +164,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): def network_config(self): if self._network_config is None: if self.network_json is not None: - self._network_config = convert_network_data(self.network_json) + self._network_config = openstack.convert_net_json( + self.network_json) return self._network_config @@ -303,122 +305,3 @@ datasources = [ # Return a list of data sources that match this set of dependencies def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) - - -# Convert OpenStack ConfigDrive NetworkData json to network_config yaml -def convert_network_data(network_json=None): - """Return a dictionary of network_config by parsing provided - OpenStack ConfigDrive NetworkData json format - - OpenStack network_data.json provides a 3 element dictionary - - "links" (links are network devices, physical or virtual) - - "networks" (networks are ip network configurations for one or more - links) - - services (non-ip services, like dns) - - networks and links are combined via network items referencing specific - links via a 'link_id' which maps to a links 'id' field. - - To convert this format to network_config yaml, we first iterate over the - links and then walk the network list to determine if any of the networks - utilize the current link; if so we generate a subnet entry for the device - - We also need to map network_data.json fields to network_config fields. For - example, the network_data links 'id' field is equivalent to network_config - 'name' field for devices. We apply more of this mapping to the various - link types that we encounter. - - There are additional fields that are populated in the network_data.json - from OpenStack that are not relevant to network_config yaml, so we - enumerate a dictionary of valid keys for network_yaml and apply filtering - to drop these superflous keys from the network_config yaml. 
- """ - if network_json is None: - return None - - # dict of network_config key for filtering network_json - valid_keys = { - 'physical': [ - 'name', - 'type', - 'mac_address', - 'subnets', - 'params', - ], - 'subnet': [ - 'type', - 'address', - 'netmask', - 'broadcast', - 'metric', - 'gateway', - 'pointopoint', - 'mtu', - 'scope', - 'dns_nameservers', - 'dns_search', - 'routes', - ], - } - - links = network_json.get('links', []) - networks = network_json.get('networks', []) - services = network_json.get('services', []) - - config = [] - for link in links: - subnets = [] - cfg = {k: v for k, v in link.items() - if k in valid_keys['physical']} - cfg.update({'name': link['id']}) - for network in [net for net in networks - if net['link'] == link['id']]: - subnet = {k: v for k, v in network.items() - if k in valid_keys['subnet']} - if 'dhcp' in network['type']: - t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' - subnet.update({ - 'type': t, - }) - else: - subnet.update({ - 'type': 'static', - 'address': network.get('ip_address'), - }) - subnets.append(subnet) - cfg.update({'subnets': subnets}) - if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']: - cfg.update({ - 'type': 'physical', - 'mac_address': link['ethernet_mac_address']}) - elif link['type'] in ['bond']: - params = {} - for k, v in link.items(): - if k == 'bond_links': - continue - elif k.startswith('bond'): - params.update({k: v}) - cfg.update({ - 'bond_interfaces': copy.deepcopy(link['bond_links']), - 'params': params, - }) - elif link['type'] in ['vlan']: - cfg.update({ - 'name': "%s.%s" % (link['vlan_link'], - link['vlan_id']), - 'vlan_link': link['vlan_link'], - 'vlan_id': link['vlan_id'], - 'mac_address': link['vlan_mac_address'], - }) - else: - raise ValueError( - 'Unknown network_data link type: %s' % link['type']) - - config.append(cfg) - - for service in services: - cfg = service - cfg.update({'type': 'nameserver'}) - config.append(cfg) - - return {'version': 1, 'config': config} diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 1aa6bbae..475ccab3 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -474,6 +474,122 @@ class MetadataReader(BaseReader): retries=self.retries) +def convert_net_json(network_json): + """Return a dictionary of network_config by parsing provided + OpenStack ConfigDrive NetworkData json format + + OpenStack network_data.json provides a 3 element dictionary + - "links" (links are network devices, physical or virtual) + - "networks" (networks are ip network configurations for one or more + links) + - services (non-ip services, like dns) + + networks and links are combined via network items referencing specific + links via a 'link_id' which maps to a links 'id' field. + + To convert this format to network_config yaml, we first iterate over the + links and then walk the network list to determine if any of the networks + utilize the current link; if so we generate a subnet entry for the device + + We also need to map network_data.json fields to network_config fields. For + example, the network_data links 'id' field is equivalent to network_config + 'name' field for devices. We apply more of this mapping to the various + link types that we encounter. 
+ + There are additional fields that are populated in the network_data.json + from OpenStack that are not relevant to network_config yaml, so we + enumerate a dictionary of valid keys for network_yaml and apply filtering + to drop these superflous keys from the network_config yaml. + """ + + # Dict of network_config key for filtering network_json + valid_keys = { + 'physical': [ + 'name', + 'type', + 'mac_address', + 'subnets', + 'params', + ], + 'subnet': [ + 'type', + 'address', + 'netmask', + 'broadcast', + 'metric', + 'gateway', + 'pointopoint', + 'mtu', + 'scope', + 'dns_nameservers', + 'dns_search', + 'routes', + ], + } + + links = network_json.get('links', []) + networks = network_json.get('networks', []) + services = network_json.get('services', []) + + config = [] + for link in links: + subnets = [] + cfg = {k: v for k, v in link.items() + if k in valid_keys['physical']} + cfg.update({'name': link['id']}) + for network in [net for net in networks + if net['link'] == link['id']]: + subnet = {k: v for k, v in network.items() + if k in valid_keys['subnet']} + if 'dhcp' in network['type']: + t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' + subnet.update({ + 'type': t, + }) + else: + subnet.update({ + 'type': 'static', + 'address': network.get('ip_address'), + }) + subnets.append(subnet) + cfg.update({'subnets': subnets}) + if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']: + cfg.update({ + 'type': 'physical', + 'mac_address': link['ethernet_mac_address']}) + elif link['type'] in ['bond']: + params = {} + for k, v in link.items(): + if k == 'bond_links': + continue + elif k.startswith('bond'): + params.update({k: v}) + cfg.update({ + 'bond_interfaces': copy.deepcopy(link['bond_links']), + 'params': params, + }) + elif link['type'] in ['vlan']: + cfg.update({ + 'name': "%s.%s" % (link['vlan_link'], + link['vlan_id']), + 'vlan_link': link['vlan_link'], + 'vlan_id': link['vlan_id'], + 'mac_address': link['vlan_mac_address'], + }) + else: + raise ValueError( + 'Unknown network_data link type: %s' % link['type']) + + config.append(cfg) + + for service in services: + cfg = copy.deepcopy(service) + cfg.update({'type': 'nameserver'}) + config.append(cfg) + + return {'version': 1, 'config': config} + + def convert_vendordata_json(data, recurse=True): """ data: a loaded json *object* (strings, arrays, dicts). return something suitable for cloudinit vendordata_raw. 
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 89b15f54..06fba202 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -355,6 +355,14 @@ class TestConfigDriveDataSource(TestCase): self.assertEqual(myds.get_public_ssh_keys(), [OSTACK_META['public_keys']['mykey']]) + +class TestNetJson(TestCase): + def setUp(self): + super(TestNetJson, self).setUp() + self.tmp = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.tmp) + self.maxDiff = None + def test_network_data_is_found(self): """Verify that network_data is present in ds in config-drive-v2.""" populate_dir(self.tmp, CFG_DRIVE_FILES_V2) @@ -365,31 +373,94 @@ class TestConfigDriveDataSource(TestCase): """Verify that network_data is converted and present on ds object.""" populate_dir(self.tmp, CFG_DRIVE_FILES_V2) myds = cfg_ds_from_dir(self.tmp) - network_config = ds.convert_network_data(NETWORK_DATA) + network_config = openstack.convert_net_json(NETWORK_DATA) self.assertEqual(myds.network_config, network_config) + def test_network_config_conversions(self): + """Tests a bunch of input network json and checks the expected conversions.""" + in_datas = [ + NETWORK_DATA, + { + 'services': [{'type': 'dns', 'address': '172.19.0.12'}], + 'networks': [ + {'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4', + 'type': 'ipv4', 'netmask': '255.255.252.0', + 'link': 'tap1a81968a-79', + 'routes': [ + { + 'netmask': '0.0.0.0', + 'network': '0.0.0.0', + 'gateway': '172.19.3.254' + }, + ], + 'ip_address': '172.19.1.34', + 'id': 'network0', + }], + 'links': [ + {'type': 'bridge', + 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f', + 'ethernet_mac_address': 'fa:16:3e:ed:9a:59', + 'id': 'tap1a81968a-79', 'mtu': None}] + }, + ] + out_datas = [ + { + 'version': 1, + 'config': [ + { + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical', + 'mac_address': 'fa:16:3e:69:b0:58', + 'name': 'tap2ecc7709-b3', + }, + { + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical', + 'mac_address': 'fa:16:3e:d4:57:ad', + 'name': 'tap2f88d109-5b', + }, + { + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical', + 'mac_address': 'fa:16:3e:05:30:fe', + 'name': 'tap1a5382f8-04', + }, + { + 'type': 'nameserver', + 'address': '199.204.44.24', + }, + { + 'type': 'nameserver', + 'address': '199.204.47.54', + } + ], + + }, + { + 'version': 1, + 'config': [ + { + 'name': 'tap1a81968a-79', + + } + ], + }, + ] + for in_data, out_data in zip(in_datas, out_datas): + self.assertEqual(openstack.convert_net_json(in_data), + out_data) + def cfg_ds_from_dir(seed_d): - found = ds.read_config_drive(seed_d) cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None, helpers.Paths({})) - populate_ds_from_read_config(cfg_ds, seed_d, found) + cfg_ds.seed_dir = seed_d + if not cfg_ds.get_data(skip_first_boot=True): + raise RuntimeError("Data source did not extract itself from" + " seed directory %s" % seed_d) return cfg_ds -def populate_ds_from_read_config(cfg_ds, source, results): - """Patch the DataSourceConfigDrive from the results of - read_config_drive_dir hopefully in line with what it would have - if cfg_ds.get_data had been successfully called""" - cfg_ds.source = source - cfg_ds.metadata = results.get('metadata') - cfg_ds.ec2_metadata = results.get('ec2-metadata') - cfg_ds.userdata_raw = results.get('userdata') - cfg_ds.version = results.get('version') - cfg_ds.network_json = results.get('networkdata') - cfg_ds._network_config = 
ds.convert_network_data(cfg_ds.network_json) - - def populate_dir(seed_dir, files): for (name, content) in files.items(): path = os.path.join(seed_dir, name) @@ -400,7 +471,6 @@ def populate_dir(seed_dir, files): mode = "w" else: mode = "wb" - with open(path, mode) as fp: fp.write(content) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 09235c4d..a0cdc493 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -121,6 +121,7 @@ class TestNetConfigParsing(TestCase): self.assertEqual(found, self.simple_cfg) + def _gzip_data(data): with io.BytesIO() as iobuf: gzfp = gzip.GzipFile(mode="wb", fileobj=iobuf) -- cgit v1.2.3 From 2fb5af62229b8975910bf0ef63731047bd8d7e63 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 19 May 2016 15:33:15 -0700 Subject: Fix up tests and flake8 warnings --- cloudinit/net/__init__.py | 2 +- cloudinit/net/eni.py | 16 ++++----- cloudinit/sources/DataSourceConfigDrive.py | 1 - cloudinit/stages.py | 2 +- tests/unittests/helpers.py | 1 + tests/unittests/test__init__.py | 1 - tests/unittests/test_datasource/test_cloudsigma.py | 1 - .../unittests/test_datasource/test_configdrive.py | 41 ++++++++++++---------- tests/unittests/test_datasource/test_nocloud.py | 2 ++ tests/unittests/test_datasource/test_smartos.py | 2 +- tests/unittests/test_net.py | 6 ++-- tests/unittests/test_rh_subscription.py | 5 +-- tox.ini | 4 +-- 13 files changed, 41 insertions(+), 43 deletions(-) (limited to 'cloudinit/sources/DataSourceConfigDrive.py') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index ad44911b..ba0e39ae 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -49,8 +49,8 @@ DEFAULT_PRIMARY_INTERFACE = 'eth0' # whole module can be easily extracted and placed into other # code-bases (curtin for example). + def write_file(path, content): - """Simple writing a file helper.""" base_path = os.path.dirname(path) if not os.path.isdir(base_path): os.makedirs(base_path) diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index adb31c22..18bae97a 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -258,7 +258,7 @@ class Renderer(object): return content def _render_route(self, route, indent=""): - """ When rendering routes for an iface, in some cases applying a route + """When rendering routes for an iface, in some cases applying a route may result in the route command returning non-zero which produces some confusing output for users manually using ifup/ifdown[1]. 
To that end, we will optionally include an '|| true' postfix to each @@ -302,7 +302,7 @@ class Renderer(object): return content def _render_interfaces(self, network_state): - ''' Given state, emit etc/network/interfaces content ''' + '''Given state, emit etc/network/interfaces content''' content = "" interfaces = network_state.get('interfaces') @@ -336,8 +336,8 @@ class Renderer(object): iface['control'] = subnet.get('control', 'auto') if iface['mode'].endswith('6'): iface['inet'] += '6' - elif iface['mode'] == 'static' \ - and ":" in subnet['address']: + elif (iface['mode'] == 'static' + and ":" in subnet['address']): iface['inet'] += '6' if iface['mode'].startswith('dhcp'): iface['mode'] = 'dhcp' @@ -359,10 +359,10 @@ class Renderer(object): content = content.replace('mac_address', 'hwaddress') return content - def render_network_state(self, - target, network_state, eni="etc/network/interfaces", - links_prefix=LINKS_FNAME_PREFIX, - netrules='etc/udev/rules.d/70-persistent-net.rules'): + def render_network_state( + self, target, network_state, + eni="etc/network/interfaces", links_prefix=LINKS_FNAME_PREFIX, + netrules='etc/udev/rules.d/70-persistent-net.rules'): fpeni = os.path.join(target, eni) net.write_file(fpeni, self._render_interfaces(network_state)) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 70373b43..4478c4e2 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -18,7 +18,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import copy import os from cloudinit import log as logging diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 5dd31539..b837009a 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -44,8 +44,8 @@ from cloudinit import helpers from cloudinit import importer from cloudinit import log as logging from cloudinit import net -from cloudinit.reporting import events from cloudinit.net import cmdline +from cloudinit.reporting import events from cloudinit import sources from cloudinit import type_utils from cloudinit import util diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 7b4d44e8..8d46a8bf 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -45,6 +45,7 @@ else: if _PY_MINOR == 4 and _PY_MICRO < 3: FIX_HTTPRETTY = True + # Makes the old path start # with new base instead of whatever # it previously had diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index a9b35afe..0154784a 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -1,7 +1,6 @@ import os import shutil import tempfile -import unittest2 from cloudinit import handlers from cloudinit import helpers diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index 7950fc52..2a42ce0c 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -6,7 +6,6 @@ from cloudinit.cs_utils import Cepko from cloudinit.sources import DataSourceCloudSigma from .. 
import helpers as test_helpers -from ..helpers import SkipTest SERVER_CONTEXT = { "cpu": 1000, diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 1db50798..5395e544 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -368,30 +368,32 @@ class TestNetJson(TestCase): self.assertEqual(myds.network_config, network_config) def test_network_config_conversions(self): - """Tests a bunch of input network json and checks the expected conversions.""" + """Tests a bunch of input network json and checks the + expected conversions.""" in_datas = [ NETWORK_DATA, { 'services': [{'type': 'dns', 'address': '172.19.0.12'}], - 'networks': [ - {'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4', - 'type': 'ipv4', 'netmask': '255.255.252.0', - 'link': 'tap1a81968a-79', - 'routes': [ - { - 'netmask': '0.0.0.0', - 'network': '0.0.0.0', - 'gateway': '172.19.3.254' - }, - ], - 'ip_address': '172.19.1.34', - 'id': 'network0', + 'networks': [{ + 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4', + 'type': 'ipv4', + 'netmask': '255.255.252.0', + 'link': 'tap1a81968a-79', + 'routes': [{ + 'netmask': '0.0.0.0', + 'network': '0.0.0.0', + 'gateway': '172.19.3.254', + }], + 'ip_address': '172.19.1.34', + 'id': 'network0', + }], + 'links': [{ + 'type': 'bridge', + 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f', + 'ethernet_mac_address': 'fa:16:3e:ed:9a:59', + 'id': 'tap1a81968a-79', + 'mtu': None, }], - 'links': [ - {'type': 'bridge', - 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f', - 'ethernet_mac_address': 'fa:16:3e:ed:9a:59', - 'id': 'tap1a81968a-79', 'mtu': None}] }, ] out_datas = [ @@ -440,6 +442,7 @@ class TestNetJson(TestCase): 'address': '172.19.1.34', 'netmask': '255.255.252.0', 'type': 'static', + 'ipv4': True, 'routes': [{ 'gateway': '172.19.3.254', 'netmask': '0.0.0.0', diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 077603b4..b0fa1130 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -7,6 +7,8 @@ import os import shutil import tempfile +import yaml + class TestNoCloudDataSource(TestCase): diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 2f159ac4..28f56039 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -42,7 +42,7 @@ from cloudinit import helpers as c_helpers from cloudinit.util import b64e from .. 
import helpers -from ..helpers import mock, SkipTest +from ..helpers import mock MOCK_RETURNS = { 'hostname': 'test-host', diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index faf0f0fb..7998111a 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,13 +1,11 @@ -from cloudinit import net -from cloudinit import util - from cloudinit import net from cloudinit.net import cmdline from cloudinit.net import eni from cloudinit.net import network_state +from cloudinit import util -from .helpers import TestCase from .helpers import mock +from .helpers import TestCase import base64 import copy diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py index e4dcc58b..891dbe77 100644 --- a/tests/unittests/test_rh_subscription.py +++ b/tests/unittests/test_rh_subscription.py @@ -10,13 +10,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . - import logging -import mock -import unittest - from cloudinit.config import cc_rh_subscription +from cloudinit import util from .helpers import TestCase, mock diff --git a/tox.ini b/tox.ini index b92ebac1..7802a291 100644 --- a/tox.ini +++ b/tox.ini @@ -1,11 +1,11 @@ [tox] -envlist = py27,py3,flake8 +envlist = py26,py27,py3,flake8 recreate = True usedevelop = True [testenv] -commands = python -m nose {posargs:tests} +commands = nosetests {posargs:tests} deps = -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt setenv = -- cgit v1.2.3 From 7f2e99f5345c227d07849da68acdf8562b44c3e1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 25 May 2016 17:05:09 -0400 Subject: commit to push for fear of loss. == background == DataSource Mode (dsmode) is present in many datasources in cloud-init. dsmode was originally added to cloud-init to specify when this datasource should be 'realized'. cloud-init has 4 stages of boot. a.) cloud-init --local . network is guaranteed not present. b.) cloud-init (--network). network is guaranteed present. c.) cloud-config d.) cloud-init final 'init_modules' [1] are run "as early as possible". And as such, are executed in either 'a' or 'b' based on the datasource. However, executing them means that user-data has been fully consumed. User-data and vendor-data may have '#include http://...' which then rely on the network being present. boothooks are an example of the things run in init_modules. The 'dsmode' was a way for a user to indicate that init_modules should run at 'a' (dsmode=local) or 'b' (dsmode=net) directly. Things were further confused when a datasource could provide networking configuration. Then, we needed to apply the networking config at 'a' but if the user had provided boothooks that expected networking, then the init_modules would need to be executed at 'b'. The config drive datasource hacked its way through this and applies networking if *it* detects it is a new instance. == Suggested Change == The plan is to 1. incorporate 'dsmode' into DataSource superclass 2. make all existing datasources default to network 3. apply any networking configuration from a datasource on first boot only apply_networking will always rename network devices when it runs. for bug 1579130. 4. run init_modules at cloud-init (network) time frame unless datasource is 'local'. 5. Datasources can provide a 'first_boot' method that will be called when a new instance_id is found. This will allow the config drive's write_files to be applied once. Over all, this will very much simplify things. 
We'll no longer have 2 sources like DataSourceNoCloud and DataSourceNoCloudNet, but would just have one source with a dsmode. == Concerns == Some things have odd reliance on dsmode. For example, OpenNebula's get_hostname uses it to determine if it should do a lookup of an ip address. == Bugs to fix here == http://pad.lv/1577982 ConfigDrive: cloud-init fails to configure network from network_data.json http://pad.lv/1579130 need to support systemd.link renaming of devices in container http://pad.lv/1577844 Drop unnecessary blocking of all net udev rules --- bin/cloud-init | 20 ++++--- cloudinit/distros/__init__.py | 2 + cloudinit/helpers.py | 23 ++++---- cloudinit/net/__init__.py | 45 +++++++++++++++ cloudinit/sources/DataSourceCloudSigma.py | 18 +----- cloudinit/sources/DataSourceConfigDrive.py | 90 ++++++++---------------------- cloudinit/sources/DataSourceNoCloud.py | 78 +++++++++++--------------- cloudinit/sources/DataSourceOpenNebula.py | 39 ++----------- cloudinit/sources/DataSourceOpenStack.py | 9 +-- cloudinit/sources/__init__.py | 33 +++++++++++ cloudinit/stages.py | 66 ++++++++++++++++------ tox.ini | 3 +- 12 files changed, 223 insertions(+), 203 deletions(-) (limited to 'cloudinit/sources/DataSourceConfigDrive.py') diff --git a/bin/cloud-init b/bin/cloud-init index 5857af32..482b8402 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -236,6 +236,7 @@ def main_init(name, args): else: LOG.debug("Execution continuing, no previous run detected that" " would allow us to stop early.") + else: existing = "check" if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False): @@ -265,17 +266,20 @@ def main_init(name, args): else: return (None, ["No instance datasource found."]) - if args.local: - if not init.ds_restored: - # if local mode and the datasource was not restored from cache - # (this is not first boot) then apply networking. - init.apply_network_config() - else: - LOG.debug("skipping networking config from restored datasource.") - # Stage 6 iid = init.instancify() LOG.debug("%s will now be targeting instance id: %s", name, iid) + + if init.is_new_instance(): + # on new instance, apply network config. if not in local mode, + # then we just bring up new networking as the OS has already + # brought up the configured networking. 
+ init.apply_network_config(bringup=not args.local) + + if args.local and init.datasource.dsmode != sources.DSMODE_LOCAL: + return (init.datasource, []) + + # update fully realizes user-data (pulling in #include if necessary) init.update() # Stage 7 try: diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 0f222c8c..3bfbc484 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -128,6 +128,8 @@ class Distro(object): mirror_info=arch_info) def apply_network(self, settings, bring_up=True): + # this applies network where 'settings' is interfaces(5) style + # it is obsolete compared to apply_network_config # Write it out dev_names = self._write_network(settings) # Now try to bring them up diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 09d75e65..abfb0cbb 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -328,6 +328,7 @@ class Paths(object): self.cfgs = path_cfgs # Populate all the initial paths self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud') + self.run_dir = path_cfgs.get('run_dir', '/run/cloud-init') self.instance_link = os.path.join(self.cloud_dir, 'instance') self.boot_finished = os.path.join(self.instance_link, "boot-finished") self.upstart_conf_d = path_cfgs.get('upstart_dir') @@ -349,26 +350,19 @@ class Paths(object): "data": "data", "vendordata_raw": "vendor-data.txt", "vendordata": "vendor-data.txt.i", + "instance_id": "instance-id", } # Set when a datasource becomes active self.datasource = ds # get_ipath_cur: get the current instance path for an item def get_ipath_cur(self, name=None): - ipath = self.instance_link - add_on = self.lookups.get(name) - if add_on: - ipath = os.path.join(ipath, add_on) - return ipath + return self._get_path(self.instance_link, name) # get_cpath : get the "clouddir" (/var/lib/cloud/) # for a name in dirmap def get_cpath(self, name=None): - cpath = self.cloud_dir - add_on = self.lookups.get(name) - if add_on: - cpath = os.path.join(cpath, add_on) - return cpath + return self._get_path(self.cloud_dir, name) # _get_ipath : get the instance path for a name in pathmap # (/var/lib/cloud/instances//) @@ -397,6 +391,15 @@ class Paths(object): else: return ipath + def _get_path(self, base, name=None): + add_on = self.lookups.get(name) + if not add_on: + return base + return os.path.join(base, add_on) + + def get_runpath(self, name=None): + return self._get_path(self.run_dir, name) + # This config parser will not throw when sections don't exist # and you are setting values on those sections which is useful diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 91e36aca..40d330b5 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -768,4 +768,49 @@ def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs) +def convert_eni_data(eni_data): + # return a network config representation of what is in eni_data + ifaces = {} + parse_deb_config_data(ifaces, eni_data, src_dir=None, src_path=None) + return _ifaces_to_net_config_data(ifaces) + + +def _ifaces_to_net_config_data(ifaces): + """Return network config that represents the ifaces data provided. 
+ ifaces = parse_deb_config("/etc/network/interfaces") + config = ifaces_to_net_config_data(ifaces) + state = parse_net_config_data(config).""" + devs = {} + for name, data in ifaces.items(): + # devname is 'eth0' for name='eth0:1' + devname = name.partition(":")[0] + if devname not in devs: + devs[devname] = {'type': 'physical', 'name': devname, + 'subnets': []} + # this isnt strictly correct, but some might specify + # hwaddress on a nic for matching / declaring name. + if 'hwaddress' in data: + devs[devname]['mac_address'] = data['hwaddress'] + subnet = {'_orig_eni_name': name, 'type': data['method']} + if data.get('auto'): + subnet['control'] = 'auto' + else: + subnet['control'] = 'manual' + + if data.get('method') == 'static': + subnet['address'] = data['address'] + + if 'gateway' in data: + subnet['gateway'] = data['gateway'] + + if 'dns' in data: + for n in ('nameservers', 'search'): + if n in data['dns'] and data['dns'][n]: + subnet['dns_' + n] = data['dns'][n] + devs[devname]['subnets'].append(subnet) + + return {'version': 1, + 'config': [devs[d] for d in sorted(devs)]} + + # vi: ts=4 expandtab syntax=python diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index 33fe78b9..07e8ae11 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -27,8 +27,6 @@ from cloudinit import util LOG = logging.getLogger(__name__) -VALID_DSMODES = ("local", "net", "disabled") - class DataSourceCloudSigma(sources.DataSource): """ @@ -38,7 +36,6 @@ class DataSourceCloudSigma(sources.DataSource): http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html """ def __init__(self, sys_cfg, distro, paths): - self.dsmode = 'local' self.cepko = Cepko() self.ssh_public_key = '' sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -84,11 +81,9 @@ class DataSourceCloudSigma(sources.DataSource): LOG.debug("CloudSigma: Unable to read from serial port") return False - dsmode = server_meta.get('cloudinit-dsmode', self.dsmode) - if dsmode not in VALID_DSMODES: - LOG.warn("Invalid dsmode %s, assuming default of 'net'", dsmode) - dsmode = 'net' - if dsmode == "disabled" or dsmode != self.dsmode: + self.dsmode = self._determine_dsmode( + [server_meta.get('cloudinit-dsmode')]) + if dsmode == sources.DSMODE_DISABLED: return False base64_fields = server_meta.get('base64_fields', '').split(',') @@ -120,17 +115,10 @@ class DataSourceCloudSigma(sources.DataSource): return self.metadata['uuid'] -class DataSourceCloudSigmaNet(DataSourceCloudSigma): - def __init__(self, sys_cfg, distro, paths): - DataSourceCloudSigma.__init__(self, sys_cfg, distro, paths) - self.dsmode = 'net' - - # Used to match classes to dependencies. Since this datasource uses the serial # port network is not really required, so it's okay to load without it, too. 
datasources = [ (DataSourceCloudSigma, (sources.DEP_FILESYSTEM)), - (DataSourceCloudSigmaNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 52a9f543..20df5fcd 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -22,6 +22,7 @@ import copy import os from cloudinit import log as logging +from cloudinit import net from cloudinit import sources from cloudinit import util @@ -35,7 +36,6 @@ DEFAULT_MODE = 'pass' DEFAULT_METADATA = { "instance-id": DEFAULT_IID, } -VALID_DSMODES = ("local", "net", "pass", "disabled") FS_TYPES = ('vfat', 'iso9660') LABEL_TYPES = ('config-2',) POSSIBLE_MOUNTS = ('sr', 'cd') @@ -47,12 +47,12 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): def __init__(self, sys_cfg, distro, paths): super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths) self.source = None - self.dsmode = 'local' self.seed_dir = os.path.join(paths.seed_dir, 'config_drive') self.version = None self.ec2_metadata = None self._network_config = None self.network_json = None + self.network_eni = None self.files = {} def __str__(self): @@ -98,38 +98,22 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): md = results.get('metadata', {}) md = util.mergemanydict([md, DEFAULT_METADATA]) - user_dsmode = results.get('dsmode', None) - if user_dsmode not in VALID_DSMODES + (None,): - LOG.warn("User specified invalid mode: %s", user_dsmode) - user_dsmode = None - dsmode = get_ds_mode(cfgdrv_ver=results['version'], - ds_cfg=self.ds_cfg.get('dsmode'), - user=user_dsmode) + self.dsmode = self._determine_dsmode( + [results.get('dsmode'), self.ds_cfg.get('dsmode'), + sources.DSMODE_PASS if results['version'] == 1 else None]) - if dsmode == "disabled": - # most likely user specified + if self.dsmode == sources.DSMODE_DISABLED: return False - # TODO(smoser): fix this, its dirty. - # we want to do some things (writing files and network config) - # only on first boot, and even then, we want to do so in the - # local datasource (so they happen earlier) even if the configured - # dsmode is 'net' or 'pass'. To do this, we check the previous - # instance-id + # This is legacy and sneaky. If dsmode is 'pass' then write + # 'injected files' and apply legacy ENI network format. 
prev_iid = get_previous_iid(self.paths) cur_iid = md['instance-id'] - if prev_iid != cur_iid and self.dsmode == "local": + if prev_iid != cur_iid and self.dsmode == sources.DSMODE_PASS: on_first_boot(results, distro=self.distro) - - # dsmode != self.dsmode here if: - # * dsmode = "pass", pass means it should only copy files and then - # pass to another datasource - # * dsmode = "net" and self.dsmode = "local" - # so that user boothooks would be applied with network, the - # local datasource just gets out of the way, and lets the net claim - if dsmode != self.dsmode: - LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode) + LOG.debug("%s: not claiming datasource, dsmode=%s", self, + self.dsmode) return False self.source = found @@ -147,12 +131,11 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): LOG.warn("Invalid content in vendor-data: %s", e) self.vendordata_raw = None - try: - self.network_json = results.get('networkdata') - except ValueError as e: - LOG.warn("Invalid content in network-data: %s", e) - self.network_json = None - + # network_config is an /etc/network/interfaces formated file and is + # obsolete compared to networkdata (from network_data.json) but both + # might be present. + self.network_eni = results.get("network_config") + self.network_json = results.get('networkdata') return True def check_instance_id(self, sys_cfg): @@ -164,40 +147,11 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): if self._network_config is None: if self.network_json is not None: self._network_config = convert_network_data(self.network_json) + elif self.network_eni is not None: + self._network_config = net.convert_eni_data(self.network_eni) return self._network_config -class DataSourceConfigDriveNet(DataSourceConfigDrive): - def __init__(self, sys_cfg, distro, paths): - DataSourceConfigDrive.__init__(self, sys_cfg, distro, paths) - self.dsmode = 'net' - - -def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None): - """Determine what mode should be used. - valid values are 'pass', 'disabled', 'local', 'net' - """ - # user passed data trumps everything - if user is not None: - return user - - if ds_cfg is not None: - return ds_cfg - - # at config-drive version 1, the default behavior was pass. That - # meant to not use use it as primary data source, but expect a ec2 metadata - # source. for version 2, we default to 'net', which means - # the DataSourceConfigDriveNet, would be used. - # - # this could change in the future. If there was definitive metadata - # that indicated presense of an openstack metadata service, then - # we could change to 'pass' by default also. 
The motivation for that - # would be 'cloud-init query' as the web service could be more dynamic - if cfgdrv_ver == 1: - return "pass" - return "net" - - def read_config_drive(source_dir): reader = openstack.ConfigDriveReader(source_dir) finders = [ @@ -231,9 +185,12 @@ def on_first_boot(data, distro=None): % (type(data))) net_conf = data.get("network_config", '') if net_conf and distro: - LOG.debug("Updating network interfaces from config drive") + LOG.warn("Updating network interfaces from config drive") distro.apply_network(net_conf) - files = data.get('files', {}) + write_injected_files(data.get('files')) + + +def write_injected_files(files): if files: LOG.debug("Writing %s injected files", len(files)) for (filename, content) in files.items(): @@ -296,7 +253,6 @@ def find_candidate_devs(probe_optical=True): # Used to match classes to dependencies datasources = [ (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )), - (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 48c61a90..7e30118c 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -24,6 +24,7 @@ import errno import os from cloudinit import log as logging +from cloudinit import net from cloudinit import sources from cloudinit import util @@ -35,7 +36,6 @@ class DataSourceNoCloud(sources.DataSource): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.dsmode = 'local' self.seed = None - self.cmdline_id = "ds=nocloud" self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'), os.path.join(paths.seed_dir, 'nocloud-net')] self.seed_dir = None @@ -58,7 +58,7 @@ class DataSourceNoCloud(sources.DataSource): try: # Parse the kernel command line, getting data passed in md = {} - if parse_cmdline_data(self.cmdline_id, md): + if load_cmdline_data(md): found.append("cmdline") mydata = _merge_new_seed(mydata, {'meta-data': md}) except Exception: @@ -123,12 +123,6 @@ class DataSourceNoCloud(sources.DataSource): mydata = _merge_new_seed(mydata, seeded) - # For seed from a device, the default mode is 'net'. - # that is more likely to be what is desired. If they want - # dsmode of local, then they must specify that. 
- if 'dsmode' not in mydata['meta-data']: - mydata['meta-data']['dsmode'] = "net" - LOG.debug("Using data from %s", dev) found.append(dev) break @@ -144,7 +138,6 @@ class DataSourceNoCloud(sources.DataSource): if len(found) == 0: return False - seeded_network = None # The special argument "seedfrom" indicates we should # attempt to seed the userdata / metadata from its value # its primarily value is in allowing the user to type less @@ -160,10 +153,6 @@ class DataSourceNoCloud(sources.DataSource): LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False - if (mydata['meta-data'].get('network-interfaces') or - mydata.get('network-config')): - seeded_network = self.dsmode - # This could throw errors, but the user told us to do it # so if errors are raised, let them raise (md_seed, ud) = util.read_seeded(seedfrom, timeout=None) @@ -179,35 +168,21 @@ class DataSourceNoCloud(sources.DataSource): mydata['meta-data'] = util.mergemanydict([mydata['meta-data'], defaults]) - netdata = {'format': None, 'data': None} - if mydata['meta-data'].get('network-interfaces'): - netdata['format'] = 'interfaces' - netdata['data'] = mydata['meta-data']['network-interfaces'] - elif mydata.get('network-config'): - netdata['format'] = 'network-config' - netdata['data'] = mydata['network-config'] - - # if this is the local datasource or 'seedfrom' was used - # and the source of the seed was self.dsmode. - # Then see if there is network config to apply. - # note this is obsolete network-interfaces style seeding. - if self.dsmode in ("local", seeded_network): - if mydata['meta-data'].get('network-interfaces'): - LOG.debug("Updating network interfaces from %s", self) - self.distro.apply_network( - mydata['meta-data']['network-interfaces']) - - if mydata['meta-data']['dsmode'] == self.dsmode: - self.seed = ",".join(found) - self.metadata = mydata['meta-data'] - self.userdata_raw = mydata['user-data'] - self.vendordata_raw = mydata['vendor-data'] - self._network_config = mydata['network-config'] - return True + self.dsmode = self._determine_dsmode( + [mydata['meta-data'].get('dsmode')]) - LOG.debug("%s: not claiming datasource, dsmode=%s", self, - mydata['meta-data']['dsmode']) - return False + if self.dsmode == sources.DSMODE_DISABLED: + LOG.debug("%s: not claiming datasource, dsmode=%s", self, + self.dsmode) + return False + + self.seed = ",".join(found) + self.metadata = mydata['meta-data'] + self.userdata_raw = mydata['user-data'] + self.vendordata_raw = mydata['vendor-data'] + self._network_config = mydata['network-config'] + self._network_eni = mydata['meta-data'].get('network-interfaces') + return True def check_instance_id(self, sys_cfg): # quickly (local check only) if self.instance_id is still valid @@ -227,6 +202,9 @@ class DataSourceNoCloud(sources.DataSource): @property def network_config(self): + if self._network_config is None: + if self.network_eni is not None: + self._network_config = net.convert_eni_data(self.network_eni) return self._network_config @@ -254,8 +232,22 @@ def _quick_read_instance_id(cmdline_id, dirs=None): return None +def load_cmdline_data(fill, cmdline=None): + pairs = [("ds=nocloud", sources.DSMODE_LOCAL), + ("ds=nocloud-net", sources.DSMODE_NETWORK)] + for idstr, dsmode in pairs: + if parse_cmdline_data(idstr, fill, cmdline): + # if dsmode was explicitly in the commanad line, then + # prefer it to the dsmode based on the command line id + if 'dsmode' not in fill: + fill['dsmode'] = dsmode + return True + return False + + # Returns true or false indicating if cmdline 
indicated -# that this module should be used +# that this module should be used. Updates dictionary 'fill' +# with data that was found. # Example cmdline: # root=LABEL=uec-rootfs ro ds=nocloud def parse_cmdline_data(ds_id, fill, cmdline=None): @@ -319,9 +311,7 @@ def _merge_new_seed(cur, seeded): class DataSourceNoCloudNet(DataSourceNoCloud): def __init__(self, sys_cfg, distro, paths): DataSourceNoCloud.__init__(self, sys_cfg, distro, paths) - self.cmdline_id = "ds=nocloud-net" self.supported_seed_starts = ("http://", "https://", "ftp://") - self.dsmode = "net" # Used to match classes to dependencies diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 681f3a96..15819a4f 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -37,16 +37,13 @@ from cloudinit import util LOG = logging.getLogger(__name__) DEFAULT_IID = "iid-dsopennebula" -DEFAULT_MODE = 'net' DEFAULT_PARSEUSER = 'nobody' CONTEXT_DISK_FILES = ["context.sh"] -VALID_DSMODES = ("local", "net", "disabled") class DataSourceOpenNebula(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) - self.dsmode = 'local' self.seed = None self.seed_dir = os.path.join(paths.seed_dir, 'opennebula') @@ -93,52 +90,27 @@ class DataSourceOpenNebula(sources.DataSource): md = util.mergemanydict([md, defaults]) # check for valid user specified dsmode - user_dsmode = results['metadata'].get('DSMODE', None) - if user_dsmode not in VALID_DSMODES + (None,): - LOG.warn("user specified invalid mode: %s", user_dsmode) - user_dsmode = None - - # decide dsmode - if user_dsmode: - dsmode = user_dsmode - elif self.ds_cfg.get('dsmode'): - dsmode = self.ds_cfg.get('dsmode') - else: - dsmode = DEFAULT_MODE - - if dsmode == "disabled": - # most likely user specified - return False - - # apply static network configuration only in 'local' dsmode - if ('network-interfaces' in results and self.dsmode == "local"): - LOG.debug("Updating network interfaces from %s", self) - self.distro.apply_network(results['network-interfaces']) + self.dsmode = self._determine_dsmode( + [results.get('DSMODE'), self.ds_cfg.get('dsmode')]) - if dsmode != self.dsmode: - LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode) + if self.dsmode == sources.DSMODE_DISABLED: return False self.seed = seed + self.network_eni = results.get("network_config") self.metadata = md self.userdata_raw = results.get('userdata') return True def get_hostname(self, fqdn=False, resolve_ip=None): if resolve_ip is None: - if self.dsmode == 'net': + if self.dsmode == sources.DSMODE_NET: resolve_ip = True else: resolve_ip = False return sources.DataSource.get_hostname(self, fqdn, resolve_ip) -class DataSourceOpenNebulaNet(DataSourceOpenNebula): - def __init__(self, sys_cfg, distro, paths): - DataSourceOpenNebula.__init__(self, sys_cfg, distro, paths) - self.dsmode = 'net' - - class NonContextDiskDir(Exception): pass @@ -446,7 +418,6 @@ def read_context_disk_dir(source_dir, asuser=None): # Used to match classes to dependencies datasources = [ (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )), - (DataSourceOpenNebulaNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index dfd96035..c06d17f3 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -33,13 +33,11 @@ DEFAULT_IID = "iid-dsopenstack" 
DEFAULT_METADATA = { "instance-id": DEFAULT_IID, } -VALID_DSMODES = ("net", "disabled") class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): def __init__(self, sys_cfg, distro, paths): super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) - self.dsmode = 'net' self.metadata_address = None self.ssl_details = util.fetch_ssl_details(self.paths) self.version = None @@ -125,11 +123,8 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.metadata_address) return False - user_dsmode = results.get('dsmode', None) - if user_dsmode not in VALID_DSMODES + (None,): - LOG.warn("User specified invalid mode: %s", user_dsmode) - user_dsmode = None - if user_dsmode == 'disabled': + self.dsmode = self._determine_dsmode([results.get('dsmode')]) + if self.dsmode == sources.DSMODE_DISABLED: return False md = results.get('metadata', {}) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 43e4fd57..e0171e8c 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -34,6 +34,13 @@ from cloudinit import util from cloudinit.filters import launch_index from cloudinit.reporting import events +DSMODE_DISABLED = "disabled" +DSMODE_LOCAL = "net" +DSMODE_NETWORK = "local" +DSMODE_PASS = "pass" + +VALID_DSMODES = [DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK] + DEP_FILESYSTEM = "FILESYSTEM" DEP_NETWORK = "NETWORK" DS_PREFIX = 'DataSource' @@ -57,6 +64,7 @@ class DataSource(object): self.userdata_raw = None self.vendordata = None self.vendordata_raw = None + self.dsmode = DSMODE_NETWORK # find the datasource config name. # remove 'DataSource' from classname on front, and remove 'Net' on end. @@ -223,10 +231,35 @@ class DataSource(object): # quickly (local check only) if self.instance_id is still return False + @staticmethod + def _determine_dsmode(candidates, default=None, valid=None): + # return the first candidate that is non None, warn if not valid + if default is None: + default = DSMODE_NETWORK + + if valid is None: + valid = VALID_DSMODES + + for candidate in candidates: + if candidate is None: + continue + if candidate in valid: + return candidate + else: + LOG.warn("invalid dsmode '%s', using default=%s", + candidate, default) + return default + + return default + @property def network_config(self): return None + @property + def first_instance_boot(self): + return + def normalize_pubkey_data(pubkey_data): keys = [] diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 62d066de..53ebcb45 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -67,6 +67,7 @@ class Init(object): # Changed only when a fetch occurs self.datasource = NULL_DATA_SOURCE self.ds_restored = False + self._previous_iid = None if reporter is None: reporter = events.ReportEventStack( @@ -213,6 +214,31 @@ class Init(object): cfg_list = self.cfg.get('datasource_list') or [] return (cfg_list, pkg_list) + def _restore_from_checked_cache(self, existing): + if existing not in ("check", "trust"): + raise ValueError("Unexpected value for existing: %s" % existing) + + ds = self._restore_from_cache() + if not ds: + return (None, "no cache found") + + run_iid_fn = self.paths.get_runpath('instance-id') + if os.path.exists(run_iid_fn): + run_iid = util.load_file(run_iid_fn).strip() + else: + run_iid = None + + if run_iid == ds.get_instance_id: + return (ds, "restored from cache with run check: %s" % ds) + elif existing == "trust": + return (ds, "restored from cache: %s" % ds) + else: + if (hasattr(ds, 'check_instance_id') and + 
ds.check_instance_id(self.cfg)): + return (ds, "restored from checked cache: %s" % ds) + else: + return (None, "cache invalid in datasource: %s" % ds) + def _get_data_source(self, existing): if self.datasource is not NULL_DATA_SOURCE: return self.datasource @@ -221,19 +247,9 @@ class Init(object): name="check-cache", description="attempting to read from cache [%s]" % existing, parent=self.reporter) as myrep: - ds = self._restore_from_cache() - if ds and existing == "trust": - myrep.description = "restored from cache: %s" % ds - elif ds and existing == "check": - if (hasattr(ds, 'check_instance_id') and - ds.check_instance_id(self.cfg)): - myrep.description = "restored from checked cache: %s" % ds - else: - myrep.description = "cache invalid in datasource: %s" % ds - ds = None - else: - myrep.description = "no cache found" + ds, desc = self._restore_from_checked_cache(existing) + myrep.description = desc self.ds_restored = bool(ds) LOG.debug(myrep.description) @@ -301,15 +317,15 @@ class Init(object): # What the instance id was and is... iid = self.datasource.get_instance_id() - previous_iid = None iid_fn = os.path.join(dp, 'instance-id') try: previous_iid = util.load_file(iid_fn).strip() except Exception: - pass + previous_iid = None if not previous_iid: previous_iid = iid util.write_file(iid_fn, "%s\n" % iid) + util.write_file(self.paths.get_runpath('instance-id'), "%s\n" % iid) util.write_file(os.path.join(dp, 'previous-instance-id'), "%s\n" % (previous_iid)) # Ensure needed components are regenerated @@ -318,6 +334,21 @@ class Init(object): self._reset() return iid + def previous_iid(self): + if self._previous_iid is not None: + return self._previous_iid + + dp = self.paths.get_cpath('data') + iid_fn = os.path.join(dp, 'instance-id') + try: + self._previous_iid = util.load_file(iid_fn).strip() + except Exception: + pass + return self._previous_iid + + def is_new_instance(self): + return self.datasource.get_instance_id() == self.previous_iid() + def fetch(self, existing="check"): return self._get_data_source(existing=existing) @@ -593,15 +624,16 @@ class Init(object): return (ncfg, loc) return (net.generate_fallback_config(), "fallback") - def apply_network_config(self): + def apply_network_config(self, bringup): netcfg, src = self._find_networking_config() if netcfg is None: LOG.info("network config is disabled by %s", src) return - LOG.info("Applying network configuration from %s: %s", src, netcfg) + LOG.info("Applying network configuration from %s bringup=%s: %s", + src, bringup, netcfg) try: - return self.distro.apply_network_config(netcfg) + return self.distro.apply_network_config(netcfg, bringup=bringup) except NotImplementedError: LOG.warn("distro '%s' does not implement apply_network_config. " "networking may not be configured properly." 
diff --git a/tox.ini b/tox.ini
index dafaaf6d..18d059df 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,7 @@
 [tox]
 envlist = py27,py3,flake8
-recreate = True
+recreate = False
+skip_install = True
 
 [testenv]
 commands = python -m nose {posargs:tests}
--
cgit v1.2.3


From 399eb29662ee67f7744d3a482f63e8af377e75db Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 26 May 2016 11:22:39 -0400
Subject: config drive: log where network config came from

---
 cloudinit/sources/DataSourceConfigDrive.py | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'cloudinit/sources/DataSourceConfigDrive.py')

diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 20df5fcd..2d13a32f 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -146,9 +146,13 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
     def network_config(self):
         if self._network_config is None:
             if self.network_json is not None:
+                LOG.debug("network config provided via network_json")
                 self._network_config = convert_network_data(self.network_json)
             elif self.network_eni is not None:
                 self._network_config = net.convert_eni_data(self.network_eni)
+                LOG.debug("network config provided via converted eni data")
+            else:
+                LOG.debug("no network configuration available")
         return self._network_config
 
--
cgit v1.2.3


From d709524941ba2b4e06940a9eb0861f0819d5560f Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 2 Jun 2016 23:03:38 -0400
Subject: re-add the 'Net' classes for datasources

When the .pkl file is loaded, the module that it is loaded from
must have the same symbol.  Ie, if booted once and got
DataSourceConfigDriveNet then upgraded and rebooted, then next boot
would show
  Can't get attribute 'DataSourceConfigDriveNet'
---
 cloudinit/sources/DataSourceCloudSigma.py  |  3 +++
 cloudinit/sources/DataSourceConfigDrive.py | 25 ++++++++++++++-----------
 cloudinit/sources/DataSourceOpenNebula.py  |  3 +++
 3 files changed, 20 insertions(+), 11 deletions(-)

(limited to 'cloudinit/sources/DataSourceConfigDrive.py')

diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 07e8ae11..d1f806d6 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -115,6 +115,9 @@ class DataSourceCloudSigma(sources.DataSource):
         return self.metadata['uuid']
 
 
+# Legacy: Must be present in case we load an old pkl object
+DataSourceCloudSigmaNet = DataSourceCloudSigma
+
 # Used to match classes to dependencies. Since this datasource uses the serial
 # port network is not really required, so it's okay to load without it, too.
 datasources = [
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 2d13a32f..61d967d9 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -254,17 +254,6 @@ def find_candidate_devs(probe_optical=True):
     return devices
 
 
-# Used to match classes to dependencies
-datasources = [
-    (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
-    return sources.list_from_depends(depends, datasources)
-
-
 # Convert OpenStack ConfigDrive NetworkData json to network_config yaml
 def convert_network_data(network_json=None):
     """Return a dictionary of network_config by parsing provided
        OpenStack ConfigDrive NetworkData json format
@@ -382,3 +371,17 @@ def convert_network_data(network_json=None):
         config.append(cfg)
 
     return {'version': 1, 'config': config}
+
+
+# Legacy: Must be present in case we load an old pkl object
+DataSourceConfigDriveNet = DataSourceConfigDrive
+
+# Used to match classes to dependencies
+datasources = [
+    (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 15819a4f..8f85b115 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -415,6 +415,9 @@ def read_context_disk_dir(source_dir, asuser=None):
     return results
 
 
+# Legacy: Must be present in case we load an old pkl object
+DataSourceOpenNebulaNet = DataSourceOpenNebula
+
 # Used to match classes to dependencies
 datasources = [
     (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
--
cgit v1.2.3


From 6bd7fbc35ac8726a8a0f422cd802d290c236bf3b Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 2 Jun 2016 23:03:38 -0400
Subject: ConfigDrive: do not use 'id' on a link for the device name

'id' on a link in the openstack spec should be "Generic, generated ID".
current implementation was to use the host's name for the host side nic.
Which provided names like 'tap-adfasdffd'.

We do not want to name devices like that as its quite unexpected
and non user friendly.  So here we use the system name for any nic
that is present, but then require that the nics found also be present
at the time of rendering.

The end result is that if the system boots with net.ifnames=0 then
it will get 'eth0' like names.  and if it boots without net.ifnames
then it will get enp0s1 like names.
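(Editor's sketch, not part of the patch.) The renaming described above depends on mapping MAC addresses to the interface names the running system chose. Roughly, the lookup performed by the get_interfaces_by_mac() helper in the diff below can be done from sysfs like this (the path assumes a Linux /sys layout):

    # map mac -> interface name by reading /sys/class/net/<dev>/address,
    # similar in spirit to cloudinit.net.get_interfaces_by_mac()
    import os

    def interfaces_by_mac(sys_class_net="/sys/class/net"):
        ret = {}
        for name in os.listdir(sys_class_net):
            addr_file = os.path.join(sys_class_net, name, "address")
            try:
                with open(addr_file) as f:
                    mac = f.read().strip()
            except IOError:
                continue
            if mac:  # devices like tun0 may have no usable mac
                ret[mac] = name
        return ret

    # a converted config entry that lacks a 'name' can then be filled in:
    #   entry['name'] = interfaces_by_mac()[entry['mac_address']]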
---
 cloudinit/net/__init__.py                          | 15 +++++---
 cloudinit/sources/DataSourceConfigDrive.py         | 29 ++++++++++++---
 .../unittests/test_datasource/test_configdrive.py  | 41 ++++++++++++++++++++--
 3 files changed, 74 insertions(+), 11 deletions(-)

(limited to 'cloudinit/sources/DataSourceConfigDrive.py')

diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 05152ead..f47053b2 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -970,9 +970,16 @@ def get_interface_mac(ifname):
     return read_sys_net(ifname, "address", enoent=False)
 
 
-def get_ifname_mac_pairs():
-    """Build a list of tuples (ifname, mac)"""
-    return [(ifname, get_interface_mac(ifname)) for ifname in get_devicelist()]
-
+def get_interfaces_by_mac(devs=None):
+    """Build a dictionary of tuples {mac: name}"""
+    if devs is None:
+        devs = get_devicelist()
+    ret = {}
+    for name in devs:
+        mac = get_interface_mac(name)
+        # some devices may not have a mac (tun0)
+        if mac:
+            ret[mac] = name
+    return ret
 
 # vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 61d967d9..3cc9155d 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -255,7 +255,7 @@ def find_candidate_devs(probe_optical=True):
 
 
 # Convert OpenStack ConfigDrive NetworkData json to network_config yaml
-def convert_network_data(network_json=None):
+def convert_network_data(network_json=None, known_macs=None):
     """Return a dictionary of network_config by parsing provided
        OpenStack ConfigDrive NetworkData json format
 
@@ -319,9 +319,15 @@ def convert_network_data(network_json=None):
         subnets = []
         cfg = {k: v for k, v in link.items()
                if k in valid_keys['physical']}
-        cfg.update({'name': link['id']})
-        for network in [net for net in networks
-                        if net['link'] == link['id']]:
+        # 'name' is not in openstack spec yet, but we will support it if it is
+        # present. The 'id' in the spec is currently implemented as the host
+        # nic's name, meaning something like 'tap-adfasdffd'. We do not want
+        # to name guest devices with such ugly names.
+        if 'name' in link:
+            cfg['name'] = link['name']
+
+        for network in [n for n in networks
+                        if n['link'] == link['id']]:
             subnet = {k: v for k, v in network.items()
                       if k in valid_keys['subnet']}
             if 'dhcp' in network['type']:
@@ -365,6 +371,21 @@ def convert_network_data(network_json=None):
 
         config.append(cfg)
 
+    need_names = [d for d in config
+                  if d.get('type') == 'physical' and 'name' not in d]
+
+    if need_names:
+        if known_macs is None:
+            known_macs = net.get_interfaces_by_mac()
+
+        for d in need_names:
+            mac = d.get('mac_address')
+            if not mac:
+                raise ValueError("No mac_address or name entry for %s" % d)
+            if mac not in known_macs:
+                raise ValueError("Unable to find a system nic for %s" % d)
+            d['name'] = known_macs[mac]
+
     for service in services:
         cfg = service
         cfg.update({'type': 'nameserver'})
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 195b8207..1364b39d 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -73,7 +73,7 @@ NETWORK_DATA = {
      'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
     {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc',
      'ethernet_mac_address': 'fa:16:3e:05:30:fe',
-     'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04'}
+     'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'}
     ],
     'networks': [
     {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp',
@@ -88,6 +88,10 @@ NETWORK_DATA = {
     ]
 }
 
+KNOWN_MACS = {
+    'fa:16:3e:69:b0:58': 'enp0s1',
+    'fa:16:3e:d4:57:ad': 'enp0s2'}
+
 CFG_DRIVE_FILES_V2 = {
     'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
     'ec2/2009-04-04/user-data': USER_DATA,
@@ -365,10 +369,40 @@ class TestConfigDriveDataSource(TestCase):
         """Verify that network_data is converted and present on ds object."""
         populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
         myds = cfg_ds_from_dir(self.tmp)
-        network_config = ds.convert_network_data(NETWORK_DATA)
+        network_config = ds.convert_network_data(NETWORK_DATA,
+                                                 known_macs=KNOWN_MACS)
         self.assertEqual(myds.network_config, network_config)
 
 
+class TestConvertNetworkData(TestCase):
+    def _getnames_in_config(self, ncfg):
+        return set([n['name'] for n in ncfg['config']
+                    if n['type'] == 'physical'])
+
+    def test_conversion_fills_names(self):
+        ncfg = ds.convert_network_data(NETWORK_DATA, known_macs=KNOWN_MACS)
+        expected = set(['nic0', 'enp0s1', 'enp0s2'])
+        found = self._getnames_in_config(ncfg)
+        self.assertEqual(found, expected)
+
+    @mock.patch('cloudinit.net.get_interfaces_by_mac')
+    def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac):
+        macs = KNOWN_MACS.copy()
+        macs.update({'fa:16:3e:05:30:fe': 'foonic1',
+                     'fa:16:3e:69:b0:58': 'ens1'})
+        get_interfaces_by_mac.return_value = macs
+
+        ncfg = ds.convert_network_data(NETWORK_DATA)
+        expected = set(['nic0', 'ens1', 'enp0s2'])
+        found = self._getnames_in_config(ncfg)
+        self.assertEqual(found, expected)
+
+    def test_convert_raises_value_error_on_missing_name(self):
+        macs = {'aa:aa:aa:aa:aa:00': 'ens1'}
+        self.assertRaises(ValueError, ds.convert_network_data,
+                          NETWORK_DATA, known_macs=macs)
+
+
 def cfg_ds_from_dir(seed_d):
     found = ds.read_config_drive(seed_d)
     cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None,
@@ -387,7 +421,8 @@ def populate_ds_from_read_config(cfg_ds, source, results):
     cfg_ds.userdata_raw = results.get('userdata')
     cfg_ds.version = results.get('version')
     cfg_ds.network_json = results.get('networkdata')
-    cfg_ds._network_config = ds.convert_network_data(cfg_ds.network_json)
+    cfg_ds._network_config = ds.convert_network_data(
+        cfg_ds.network_json, known_macs=KNOWN_MACS)
 
 
 def populate_dir(seed_dir, files):
--
cgit v1.2.3


From 42a7d2b6d44be5fd6e41734902e08897b709015d Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 3 Jun 2016 15:06:55 -0400
Subject: config drive conversion: recognize 'bridge' as a physical type, fix mtu

the network json in openstack provides a type of 'bridge' when
the underlying (host) type is a bridge.  Silly, but we need to consider
that a physical device as it will be for us.

also, the 'mtu' will appear on the link, not on the route
---
 cloudinit/sources/DataSourceConfigDrive.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit/sources/DataSourceConfigDrive.py')

diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 3cc9155d..c87f57fd 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -293,6 +293,7 @@ def convert_network_data(network_json=None, known_macs=None):
             'mac_address',
             'subnets',
             'params',
+            'mtu',
         ],
         'subnet': [
             'type',
@@ -302,7 +303,6 @@ def convert_network_data(network_json=None, known_macs=None):
             'metric',
             'gateway',
             'pointopoint',
-            'mtu',
             'scope',
             'dns_nameservers',
             'dns_search',
@@ -342,7 +342,7 @@ def convert_network_data(network_json=None, known_macs=None):
             })
             subnets.append(subnet)
         cfg.update({'subnets': subnets})
-        if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']:
+        if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
             cfg.update({
                 'type': 'physical',
                 'mac_address': link['ethernet_mac_address']})
--
cgit v1.2.3


From 6ada153ebfd9483d76f904dafaa1fc80f61c9205 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 10 Jun 2016 14:16:51 -0700
Subject: Just mock 'on_first_boot' vs special argument

---
 cloudinit/sources/DataSourceConfigDrive.py          |  5 ++---
 tests/unittests/test_datasource/test_configdrive.py | 11 +++++++----
 2 files changed, 9 insertions(+), 7 deletions(-)

(limited to 'cloudinit/sources/DataSourceConfigDrive.py')

diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index cea08de2..3130e618 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -62,7 +62,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
             mstr += "[source=%s]" % (self.source)
         return mstr
 
-    def get_data(self, skip_first_boot=False):
+    def get_data(self):
         found = None
         md = {}
         results = {}
@@ -111,8 +111,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
             # 'injected files' and apply legacy ENI network format.
             prev_iid = get_previous_iid(self.paths)
             cur_iid = md['instance-id']
-            if (prev_iid != cur_iid and self.dsmode == sources.DSMODE_PASS
-                    and not skip_first_boot):
+            if prev_iid != cur_iid and self.dsmode == sources.DSMODE_PASS:
                 on_first_boot(results, distro=self.distro)
                 LOG.debug("%s: not claiming datasource, dsmode=%s", self,
                           self.dsmode)
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index b898e2bd..18551b92 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -370,7 +370,8 @@ class TestConfigDriveDataSource(TestCase):
             util.find_devs_with = orig_find_devs_with
             util.is_partition = orig_is_partition
 
-    def test_pubkeys_v2(self):
+    @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot')
+    def test_pubkeys_v2(self, on_first_boot):
         """Verify that public-keys work in config-drive-v2."""
         populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
         myds = cfg_ds_from_dir(self.tmp)
@@ -385,13 +386,15 @@ class TestNetJson(TestCase):
         self.addCleanup(shutil.rmtree, self.tmp)
         self.maxDiff = None
 
-    def test_network_data_is_found(self):
+    @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot')
+    def test_network_data_is_found(self, on_first_boot):
         """Verify that network_data is present in ds in config-drive-v2."""
         populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
         myds = cfg_ds_from_dir(self.tmp)
         self.assertIsNotNone(myds.network_json)
 
-    def test_network_config_is_converted(self):
+    @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot')
+    def test_network_config_is_converted(self, on_first_boot):
         """Verify that network_data is converted and present on ds object."""
         populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
         myds = cfg_ds_from_dir(self.tmp)
@@ -558,7 +561,7 @@ def cfg_ds_from_dir(seed_d):
                                       helpers.Paths({}))
     cfg_ds.seed_dir = seed_d
     cfg_ds.known_macs = KNOWN_MACS.copy()
-    if not cfg_ds.get_data(skip_first_boot=True):
+    if not cfg_ds.get_data():
         raise RuntimeError("Data source did not extract itself from"
                            " seed directory %s" % seed_d)
     return cfg_ds
--
cgit v1.2.3
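(Editor's sketch, not part of the patch.) For reference, the testing pattern adopted in this last commit, patching out a side-effecting helper with mock.patch rather than threading a test-only flag such as skip_first_boot through get_data(), looks like the following in isolation; the helper and function below are stand-ins, not cloud-init code:

    # self-contained illustration of the mock.patch decorator pattern used
    # in test_configdrive.py above
    import unittest
    from unittest import mock


    def on_first_boot(results):
        # stand-in for a helper with side effects we do not want in unit tests
        raise RuntimeError("should not run during tests")


    def get_data():
        results = {}
        on_first_boot(results)
        return True


    class TestGetData(unittest.TestCase):

        @mock.patch('%s.on_first_boot' % __name__)
        def test_get_data_with_first_boot_mocked(self, m_on_first_boot):
            # the decorator swaps in a MagicMock for the duration of the test
            # and passes it to the test method as an extra argument
            self.assertTrue(get_data())
            self.assertEqual(1, m_on_first_boot.call_count)


    if __name__ == '__main__':
        unittest.main()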