From 6a8aa46863f1a4a5f3c0d37d34fd02d57790be01 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 3 Aug 2016 14:55:18 -0400 Subject: Generate a dummy bond name for OpenStack The OpenStack network_data.json does not provide a name for bond links. This change makes it so a dummy one is generated and used instead to satisfy cloud-init which does require one. In order to write the correct link (underlying 'link' names) for the bonds, we maintain a list of info by ids so we can easily get the right device name. Also: * add a vlan test case that similarly references an id rather than name. * make bond interfaces auto LP: #1605749 --- cloudinit/net/eni.py | 2 +- cloudinit/sources/helpers/openstack.py | 58 ++++++++++-- .../unittests/test_datasource/test_configdrive.py | 105 +++++++++++++++++++++ 3 files changed, 156 insertions(+), 9 deletions(-) diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index eff5b924..cd533ddb 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -399,7 +399,7 @@ class Renderer(renderer.Renderer): else: # ifenslave docs say to auto the slave devices lines = [] - if 'bond-master' in iface: + if 'bond-master' in iface or 'bond-slaves' in iface: lines.append("auto {name}".format(**iface)) lines.append("iface {name} {inet} {mode}".format(**iface)) lines.extend(_iface_add_attrs(iface, index=0)) diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 84322e0e..a5a2a1d6 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -539,6 +539,10 @@ def convert_net_json(network_json=None, known_macs=None): networks = network_json.get('networks', []) services = network_json.get('services', []) + link_updates = [] + link_id_info = {} + bond_name_fmt = "bond%d" + bond_number = 0 config = [] for link in links: subnets = [] @@ -551,6 +555,13 @@ def convert_net_json(network_json=None, known_macs=None): if 'name' in link: cfg['name'] = link['name'] + if 
link.get('ethernet_mac_address'): + link_id_info[link['id']] = link.get('ethernet_mac_address') + + curinfo = {'name': cfg.get('name'), + 'mac': link.get('ethernet_mac_address'), + 'id': link['id'], 'type': link['type']} + for network in [n for n in networks if n['link'] == link['id']]: subnet = dict((k, v) for k, v in network.items() @@ -582,31 +593,56 @@ def convert_net_json(network_json=None, known_macs=None): continue elif k.startswith('bond'): params.update({k: v}) - cfg.update({ - 'bond_interfaces': copy.deepcopy(link['bond_links']), - 'params': params, - }) + + # openstack does not provide a name for the bond. + # they do provide an 'id', but that is possibly non-sensical. + # so we just create our own name. + link_name = bond_name_fmt % bond_number + bond_number += 1 + + # bond_links reference links by their id, but we need to add + # to the network config by their nic name. + # store that in bond_links_needed, and update these later. + link_updates.append( + (cfg, 'bond_interfaces', '%s', + copy.deepcopy(link['bond_links'])) + ) + cfg.update({'params': params, 'name': link_name}) + + curinfo['name'] = link_name elif link['type'] in ['vlan']: + name = "%s.%s" % (link['vlan_link'], link['vlan_id']) cfg.update({ - 'name': "%s.%s" % (link['vlan_link'], - link['vlan_id']), - 'vlan_link': link['vlan_link'], + 'name': name, 'vlan_id': link['vlan_id'], 'mac_address': link['vlan_mac_address'], }) + link_updates.append((cfg, 'vlan_link', '%s', link['vlan_link'])) + link_updates.append((cfg, 'name', "%%s.%s" % link['vlan_id'], + link['vlan_link'])) + curinfo.update({'mac': link['vlan_mac_address'], + 'name': name}) else: raise ValueError( 'Unknown network_data link type: %s' % link['type']) config.append(cfg) + link_id_info[curinfo['id']] = curinfo need_names = [d for d in config if d.get('type') == 'physical' and 'name' not in d] - if need_names: + if need_names or link_updates: if known_macs is None: known_macs = net.get_interfaces_by_mac() + # go through and fill 
out the link_id_info with names + for link_id, info in link_id_info.items(): + if info.get('name'): + continue + if info.get('mac') in known_macs: + info['name'] = known_macs[info['mac']] + for d in need_names: mac = d.get('mac_address') if not mac: @@ -615,6 +651,12 @@ def convert_net_json(network_json=None, known_macs=None): raise ValueError("Unable to find a system nic for %s" % d) d['name'] = known_macs[mac] + for cfg, key, fmt, target in link_updates: + if isinstance(target, (list, tuple)): + cfg[key] = [fmt % link_id_info[l]['name'] for l in target] + else: + cfg[key] = fmt % link_id_info[target]['name'] + for service in services: cfg = service cfg.update({'type': 'nameserver'}) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index d0269943..98ff97a7 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -137,12 +137,71 @@ NETWORK_DATA_3 = { ] } +NETWORK_DATA_BOND = { + "services": [ + {"type": "dns", "address": "1.1.1.191"}, + {"type": "dns", "address": "1.1.1.4"}, + ], + "networks": [ + {"id": "network2-ipv4", "ip_address": "2.2.2.13", + "link": "vlan2", "netmask": "255.255.255.248", + "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117", + "type": "ipv4", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "2.2.2.9"}]}, + {"id": "network3-ipv4", "ip_address": "10.0.1.5", + "link": "vlan3", "netmask": "255.255.255.248", + "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", + "type": "ipv4", + "routes": [{"netmask": "255.255.255.255", + "network": "192.168.1.0", "gateway": "10.0.1.1"}]} + ], + "links": [ + {"ethernet_mac_address": "0c:c4:7a:34:6e:3c", + "id": "eth0", "mtu": 1500, "type": "phy"}, + {"ethernet_mac_address": "0c:c4:7a:34:6e:3d", + "id": "eth1", "mtu": 1500, "type": "phy"}, + {"bond_links": ["eth0", "eth1"], + "bond_miimon": 100, "bond_mode": "4", + "bond_xmit_hash_policy": "layer3+4", 
+ "ethernet_mac_address": "0c:c4:7a:34:6e:3c", + "id": "bond0", "type": "bond"}, + {"ethernet_mac_address": "fa:16:3e:b3:72:30", + "id": "vlan2", "type": "vlan", "vlan_id": 602, + "vlan_link": "bond0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, + {"ethernet_mac_address": "fa:16:3e:66:ab:a6", + "id": "vlan3", "type": "vlan", "vlan_id": 612, "vlan_link": "bond0", + "vlan_mac_address": "fa:16:3e:66:ab:a6"} + ] +} + +NETWORK_DATA_VLAN = { + "services": [{"type": "dns", "address": "1.1.1.191"}], + "networks": [ + {"id": "network1-ipv4", "ip_address": "10.0.1.5", + "link": "vlan1", "netmask": "255.255.255.248", + "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", + "type": "ipv4", + "routes": [{"netmask": "255.255.255.255", + "network": "192.168.1.0", "gateway": "10.0.1.1"}]} + ], + "links": [ + {"ethernet_mac_address": "fa:16:3e:69:b0:58", + "id": "eth0", "mtu": 1500, "type": "phy"}, + {"ethernet_mac_address": "fa:16:3e:b3:72:30", + "id": "vlan1", "type": "vlan", "vlan_id": 602, + "vlan_link": "eth0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, + ] +} + KNOWN_MACS = { 'fa:16:3e:69:b0:58': 'enp0s1', 'fa:16:3e:d4:57:ad': 'enp0s2', 'fa:16:3e:dd:50:9a': 'foo1', 'fa:16:3e:a8:14:69': 'foo2', 'fa:16:3e:ed:9a:59': 'foo3', + '0c:c4:7a:34:6e:3d': 'oeth1', + '0c:c4:7a:34:6e:3c': 'oeth0', } CFG_DRIVE_FILES_V2 = { @@ -599,6 +658,52 @@ class TestConvertNetworkData(TestCase): physicals.add(i['name']) self.assertEqual(physicals, set(('foo1', 'foo2'))) + def test_bond_conversion(self): + # light testing of bond conversion and eni rendering of bond + ncfg = openstack.convert_net_json(NETWORK_DATA_BOND, + known_macs=KNOWN_MACS) + eni_renderer = eni.Renderer() + eni_renderer.render_network_state( + self.tmp, network_state.parse_net_config_data(ncfg)) + with open(os.path.join(self.tmp, "etc", + "network", "interfaces"), 'r') as f: + eni_rendering = f.read() + + # Verify there are expected interfaces in the net config. 
+ interfaces = sorted( + [i['name'] for i in ncfg['config'] + if i['type'] in ('vlan', 'bond', 'physical')]) + self.assertEqual( + sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]), + interfaces) + + words = eni_rendering.split() + # 'eth0' and 'eth1' are the ids. because their mac adresses + # map to other names, we should not see them in the ENI + self.assertNotIn('eth0', words) + self.assertNotIn('eth1', words) + + # oeth0 and oeth1 are the interface names for eni. + # bond0 will be generated for the bond. Each should be auto. + self.assertIn("auto oeth0", eni_rendering) + self.assertIn("auto oeth1", eni_rendering) + self.assertIn("auto bond0", eni_rendering) + + def test_vlan(self): + # light testing of vlan config conversion and eni rendering + ncfg = openstack.convert_net_json(NETWORK_DATA_VLAN, + known_macs=KNOWN_MACS) + eni_renderer = eni.Renderer() + eni_renderer.render_network_state( + self.tmp, network_state.parse_net_config_data(ncfg)) + with open(os.path.join(self.tmp, "etc", + "network", "interfaces"), 'r') as f: + eni_rendering = f.read() + + self.assertIn("iface enp0s1", eni_rendering) + self.assertIn("address 10.0.1.5", eni_rendering) + self.assertIn("auto enp0s1.602", eni_rendering) + def cfg_ds_from_dir(seed_d): cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None, -- cgit v1.2.3 From 3f1373a9785de5c5843c060835e444046ab50756 Mon Sep 17 00:00:00 2001 From: Matthew Thode Date: Thu, 18 Aug 2016 16:27:27 -0500 Subject: add install option for openrc Adds an install option for for OpenRC init scripts. I've also restricted installing tests more correctly. Also, don't hardcode the path to ip (/bin/ip on gentoo). 
--- cloudinit/sources/DataSourceOpenNebula.py | 2 +- setup.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 7b3a76b9..635a836c 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -407,7 +407,7 @@ def read_context_disk_dir(source_dir, asuser=None): # http://opennebula.org/documentation:rel3.8:cong#network_configuration for k in context: if re.match(r'^ETH\d+_IP$', k): - (out, _) = util.subp(['/sbin/ip', 'link']) + (out, _) = util.subp(['ip', 'link']) net = OpenNebulaNetwork(out, context) results['network-interfaces'] = net.gen_conf() break diff --git a/setup.py b/setup.py index bbadd7bf..8ff667d5 100755 --- a/setup.py +++ b/setup.py @@ -74,6 +74,7 @@ INITSYS_FILES = { 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)], 'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)], 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], + 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)], 'systemd': [f for f in (glob('systemd/*.service') + glob('systemd/*.target')) if is_f(f)], 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)], @@ -83,6 +84,7 @@ INITSYS_ROOTS = { 'sysvinit': '/etc/rc.d/init.d', 'sysvinit_freebsd': '/usr/local/etc/rc.d', 'sysvinit_deb': '/etc/init.d', + 'sysvinit_openrc': '/etc/init.d', 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'), 'systemd.generators': pkg_config_read('systemd', 'systemdsystemgeneratordir'), -- cgit v1.2.3 From 6fce6db9a5fd75e2c398de0e0b4cd72a9f05a7a6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 26 Jul 2016 14:12:18 -0700 Subject: Add a module that can configure spacewalk. 
Spacewalk is used by some peopel to manage connections into redhat package management systems and kickstart and various other tasks, so having a system be able to do the needed tasks on first boot to integrate with that system would be very useful (to some). See: https://fedorahosted.org/spacewalk/ --- cloudinit/config/cc_spacewalk.py | 85 ++++++++++++++++++++++ .../test_handler/test_handler_spacewalk.py | 42 +++++++++++ 2 files changed, 127 insertions(+) create mode 100644 cloudinit/config/cc_spacewalk.py create mode 100644 tests/unittests/test_handler/test_handler_spacewalk.py diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py new file mode 100644 index 00000000..f3c1a664 --- /dev/null +++ b/cloudinit/config/cc_spacewalk.py @@ -0,0 +1,85 @@ +# vi: ts=4 expandtab +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +""" +**Summary:** helper to setup https://fedorahosted.org/spacewalk/ + +**Description:** This module will enable for configuring the needed +actions to setup spacewalk on redhat based systems. 
+ +It can be configured with the following option structure:: + + spacewalk: + server: spacewalk api server (required) +""" + +from cloudinit import util + + +distros = ['redhat', 'fedora'] +required_packages = ['rhn-setup'] +def_ca_cert_path = "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT" + + +def is_registered(): + # Check to see if already registered and don't bother; this is + # apparently done by trying to sync and if that fails then we + # assume we aren't registered; which is sorta ghetto... + already_registered = False + try: + util.subp(['rhn-profile-sync', '--verbose'], capture=False) + already_registered = True + except util.ProcessExecutionError as e: + if e.exit_code != 1: + raise + return already_registered + + +def do_register(server, profile_name, + ca_cert_path=def_ca_cert_path, + proxy=None, log=None, + activation_key=None): + if log is not None: + log.info("Registering using `rhnreg_ks` profile '%s'" + " into server '%s'", profile_name, server) + cmd = ['rhnreg_ks'] + cmd.extend(['--serverUrl', 'https://%s/XMLRPC' % server]) + cmd.extend(['--profilename', str(profile_name)]) + if proxy: + cmd.extend(["--proxy", str(proxy)]) + if ca_cert_path: + cmd.extend(['--sslCACert', str(ca_cert_path)]) + if activation_key: + cmd.extend(['--activationkey', str(activation_key)]) + util.subp(cmd, capture=False) + + +def handle(name, cfg, cloud, log, _args): + if 'spacewalk' not in cfg: + log.debug(("Skipping module named %s," + " no 'spacewalk' key in configuration"), name) + return + cfg = cfg['spacewalk'] + spacewalk_server = cfg.get('server') + if spacewalk_server: + # Need to have this installed before further things will work. 
+ cloud.distro.install_packages(required_packages) + if not is_registered(): + do_register(spacewalk_server, + cloud.datasource.get_hostname(fqdn=True), + proxy=cfg.get("proxy"), log=log, + activation_key=cfg.get('activation_key')) + else: + log.debug("Skipping module named %s, 'spacewalk/server' key" + " was not found in configuration", name) diff --git a/tests/unittests/test_handler/test_handler_spacewalk.py b/tests/unittests/test_handler/test_handler_spacewalk.py new file mode 100644 index 00000000..44f95e4c --- /dev/null +++ b/tests/unittests/test_handler/test_handler_spacewalk.py @@ -0,0 +1,42 @@ +from cloudinit.config import cc_spacewalk +from cloudinit import util + +from .. import helpers + +import logging + +try: + from unittest import mock +except ImportError: + import mock + +LOG = logging.getLogger(__name__) + + +class TestSpacewalk(helpers.TestCase): + space_cfg = { + 'spacewalk': { + 'server': 'localhost', + 'profile_name': 'test', + } + } + + @mock.patch("cloudinit.config.cc_spacewalk.util.subp") + def test_not_is_registered(self, mock_util_subp): + mock_util_subp.side_effect = util.ProcessExecutionError(exit_code=1) + self.assertFalse(cc_spacewalk.is_registered()) + + @mock.patch("cloudinit.config.cc_spacewalk.util.subp") + def test_is_registered(self, mock_util_subp): + mock_util_subp.side_effect = None + self.assertTrue(cc_spacewalk.is_registered()) + + @mock.patch("cloudinit.config.cc_spacewalk.util.subp") + def test_do_register(self, mock_util_subp): + cc_spacewalk.do_register(**self.space_cfg['spacewalk']) + mock_util_subp.assert_called_with([ + 'rhnreg_ks', + '--serverUrl', 'https://localhost/XMLRPC', + '--profilename', 'test', + '--sslCACert', cc_spacewalk.def_ca_cert_path, + ], capture=False) -- cgit v1.2.3 From 817df42d3853106396f2c505fd26a695b663d7ea Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 25 Aug 2016 13:10:45 -0700 Subject: python2.6: fix dict comprehension usage in _lsb_release. 
This syntax doesn't work in python 2.6 --- cloudinit/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index 9c89de61..7c37eb8f 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -113,7 +113,7 @@ def _lsb_release(target=None): except ProcessExecutionError as err: LOG.warn("Unable to get lsb_release --all: %s", err) - data = {v: "UNAVAILABLE" for v in fmap.values()} + data = dict((v, "UNAVAILABLE") for v in fmap.values()) return data -- cgit v1.2.3 From 763f403c7b848b31780ef869fb7728b0d5e571a2 Mon Sep 17 00:00:00 2001 From: Christian Ehrhardt Date: Fri, 26 Aug 2016 11:26:13 +0200 Subject: apt-config: allow both old and new format to be present. This allows both v1/2 and v3 formats to exist in config. If both are present, then prefer v3. If values are not the same then a ValueError is raised. LP: #1616831 --- cloudinit/config/cc_apt_configure.py | 61 ++++++++------- .../test_handler/test_handler_apt_source_v1.py | 87 ++++++++++++++++++++-- 2 files changed, 117 insertions(+), 31 deletions(-) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 609dbb51..42c56418 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -464,13 +464,19 @@ def convert_mirror(oldcfg, aptcfg): def convert_v2_to_v3_apt_format(oldcfg): """convert old to new keys and adapt restructured mirror spec""" - oldkeys = ['apt_sources', 'apt_mirror', 'apt_mirror_search', - 'apt_mirror_search_dns', 'apt_proxy', 'apt_http_proxy', - 'apt_ftp_proxy', 'apt_https_proxy', - 'apt_preserve_sources_list', 'apt_custom_sources_list', - 'add_apt_repo_match'] + mapoldkeys = {'apt_sources': 'sources', + 'apt_mirror': None, + 'apt_mirror_search': None, + 'apt_mirror_search_dns': None, + 'apt_proxy': 'proxy', + 'apt_http_proxy': 'http_proxy', + 'apt_ftp_proxy': 'ftp_proxy', + 'apt_https_proxy': 'https_proxy', + 'apt_preserve_sources_list': 'preserve_sources_list', + 
'apt_custom_sources_list': 'sources_list', + 'add_apt_repo_match': 'add_apt_repo_match'} needtoconvert = [] - for oldkey in oldkeys: + for oldkey in mapoldkeys: if oldcfg.get(oldkey, None) is not None: needtoconvert.append(oldkey) @@ -480,32 +486,37 @@ def convert_v2_to_v3_apt_format(oldcfg): LOG.debug("apt config: convert V2 to V3 format for keys '%s'", ", ".join(needtoconvert)) - if oldcfg.get('apt', None) is not None: - msg = ("Error in apt configuration: " - "old and new format of apt features are mutually exclusive " - "('apt':'%s' vs '%s' key)" % (oldcfg.get('apt', None), - ", ".join(needtoconvert))) - LOG.error(msg) - raise ValueError(msg) + # if old AND new config are provided, prefer the new one (LP #1616831) + newaptcfg = oldcfg.get('apt', None) + if newaptcfg is not None: + LOG.debug("apt config: V1/2 and V3 format specified, preferring V3") + for oldkey in needtoconvert: + newkey = mapoldkeys[oldkey] + verify = oldcfg[oldkey] # drop, but keep a ref for verification + del oldcfg[oldkey] + if newkey is None or newaptcfg.get(newkey, None) is None: + # no simple mapping or no collision on this particular key + continue + if verify != newaptcfg[newkey]: + raise ValueError("Old and New apt format defined with unequal " + "values %s vs %s @ %s" % (verify, + newaptcfg[newkey], + oldkey)) + # return conf after clearing conflicting V1/2 keys + return oldcfg # create new format from old keys aptcfg = {} - # renames / moves under the apt key - convert_key(oldcfg, aptcfg, 'add_apt_repo_match', 'add_apt_repo_match') - convert_key(oldcfg, aptcfg, 'apt_proxy', 'proxy') - convert_key(oldcfg, aptcfg, 'apt_http_proxy', 'http_proxy') - convert_key(oldcfg, aptcfg, 'apt_https_proxy', 'https_proxy') - convert_key(oldcfg, aptcfg, 'apt_ftp_proxy', 'ftp_proxy') - convert_key(oldcfg, aptcfg, 'apt_custom_sources_list', 'sources_list') - convert_key(oldcfg, aptcfg, 'apt_preserve_sources_list', - 'preserve_sources_list') - # dict format not changed since v2, just renamed and moved - 
convert_key(oldcfg, aptcfg, 'apt_sources', 'sources') + # simple renames / moves under the apt key + for oldkey in mapoldkeys: + if mapoldkeys[oldkey] is not None: + convert_key(oldcfg, aptcfg, oldkey, mapoldkeys[oldkey]) + # mirrors changed in a more complex way convert_mirror(oldcfg, aptcfg) - for oldkey in oldkeys: + for oldkey in mapoldkeys: if oldcfg.get(oldkey, None) is not None: raise ValueError("old apt key '%s' left after conversion" % oldkey) diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py index d96779c5..ddff4341 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py @@ -528,6 +528,7 @@ class TestAptSourceConfig(TestCase): 'filename': self.aptlistfile2} cfg3 = {'source': 'deb $MIRROR $RELEASE universe', 'filename': self.aptlistfile3} + cfg = {'apt_sources': [cfg1, cfg2, cfg3]} checkcfg = {self.aptlistfile: {'filename': self.aptlistfile, 'source': 'deb $MIRROR $RELEASE ' 'multiverse'}, @@ -537,15 +538,89 @@ class TestAptSourceConfig(TestCase): 'source': 'deb $MIRROR $RELEASE ' 'universe'}} - newcfg = cc_apt_configure.convert_v1_to_v2_apt_format([cfg1, cfg2, - cfg3]) - self.assertEqual(newcfg, checkcfg) + newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg) + self.assertEqual(newcfg['apt']['sources'], checkcfg) - newcfg2 = cc_apt_configure.convert_v1_to_v2_apt_format(newcfg) - self.assertEqual(newcfg2, checkcfg) + # convert again, should stay the same + newcfg2 = cc_apt_configure.convert_to_v3_apt_format(newcfg) + self.assertEqual(newcfg2['apt']['sources'], checkcfg) + # should work without raising an exception + cc_apt_configure.convert_to_v3_apt_format({}) + + with self.assertRaises(ValueError): + cc_apt_configure.convert_to_v3_apt_format({'apt_sources': 5}) + + def test_convert_to_new_format_collision(self): + """Test the conversion of old to new format with collisions + That matches 
e.g. the MAAS case specifying old and new config""" + cfg_1_and_3 = {'apt': {'proxy': 'http://192.168.122.1:8000/'}, + 'apt_proxy': 'http://192.168.122.1:8000/'} + cfg_3_only = {'apt': {'proxy': 'http://192.168.122.1:8000/'}} + cfgconflict = {'apt': {'proxy': 'http://192.168.122.1:8000/'}, + 'apt_proxy': 'ftp://192.168.122.1:8000/'} + + # collision (equal) + newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3) + self.assertEqual(newcfg, cfg_3_only) + # collision (equal, so ok to remove) + newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only) + self.assertEqual(newcfg, cfg_3_only) + # collision (unequal) + with self.assertRaises(ValueError): + cc_apt_configure.convert_to_v3_apt_format(cfgconflict) + + def test_convert_to_new_format_dict_collision(self): + cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse', + 'filename': self.aptlistfile} + cfg2 = {'source': 'deb $MIRROR $RELEASE main', + 'filename': self.aptlistfile2} + cfg3 = {'source': 'deb $MIRROR $RELEASE universe', + 'filename': self.aptlistfile3} + fullv3 = {self.aptlistfile: {'filename': self.aptlistfile, + 'source': 'deb $MIRROR $RELEASE ' + 'multiverse'}, + self.aptlistfile2: {'filename': self.aptlistfile2, + 'source': 'deb $MIRROR $RELEASE main'}, + self.aptlistfile3: {'filename': self.aptlistfile3, + 'source': 'deb $MIRROR $RELEASE ' + 'universe'}} + cfg_3_only = {'apt': {'sources': fullv3}} + cfg_1_and_3 = {'apt_sources': [cfg1, cfg2, cfg3]} + cfg_1_and_3.update(cfg_3_only) + + # collision (equal, so ok to remove) + newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3) + self.assertEqual(newcfg, cfg_3_only) + # no old spec (same result) + newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only) + self.assertEqual(newcfg, cfg_3_only) + + diff = {self.aptlistfile: {'filename': self.aptlistfile, + 'source': 'deb $MIRROR $RELEASE ' + 'DIFFERENTVERSE'}, + self.aptlistfile2: {'filename': self.aptlistfile2, + 'source': 'deb $MIRROR $RELEASE main'}, + self.aptlistfile3: 
{'filename': self.aptlistfile3, + 'source': 'deb $MIRROR $RELEASE ' + 'universe'}} + cfg_3_only = {'apt': {'sources': diff}} + cfg_1_and_3_different = {'apt_sources': [cfg1, cfg2, cfg3]} + cfg_1_and_3_different.update(cfg_3_only) + + # collision (unequal by dict having a different entry) + with self.assertRaises(ValueError): + cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_different) + + missing = {self.aptlistfile: {'filename': self.aptlistfile, + 'source': 'deb $MIRROR $RELEASE ' + 'multiverse'}} + cfg_3_only = {'apt': {'sources': missing}} + cfg_1_and_3_missing = {'apt_sources': [cfg1, cfg2, cfg3]} + cfg_1_and_3_missing.update(cfg_3_only) + # collision (unequal by dict missing an entry) with self.assertRaises(ValueError): - cc_apt_configure.convert_v1_to_v2_apt_format(5) + cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_missing) # vi: ts=4 expandtab -- cgit v1.2.3