Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/config/cc_apt_configure.py        | 61
-rw-r--r--  cloudinit/config/cc_spacewalk.py            | 85
-rw-r--r--  cloudinit/net/eni.py                        |  2
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py   |  2
-rw-r--r--  cloudinit/sources/helpers/openstack.py      | 58
-rw-r--r--  cloudinit/util.py                           |  2
6 files changed, 174 insertions(+), 36 deletions(-)
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 609dbb51..42c56418 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -464,13 +464,19 @@ def convert_mirror(oldcfg, aptcfg):
 def convert_v2_to_v3_apt_format(oldcfg):
     """convert old to new keys and adapt restructured mirror spec"""
-    oldkeys = ['apt_sources', 'apt_mirror', 'apt_mirror_search',
-               'apt_mirror_search_dns', 'apt_proxy', 'apt_http_proxy',
-               'apt_ftp_proxy', 'apt_https_proxy',
-               'apt_preserve_sources_list', 'apt_custom_sources_list',
-               'add_apt_repo_match']
+    mapoldkeys = {'apt_sources': 'sources',
+                  'apt_mirror': None,
+                  'apt_mirror_search': None,
+                  'apt_mirror_search_dns': None,
+                  'apt_proxy': 'proxy',
+                  'apt_http_proxy': 'http_proxy',
+                  'apt_ftp_proxy': 'ftp_proxy',
+                  'apt_https_proxy': 'https_proxy',
+                  'apt_preserve_sources_list': 'preserve_sources_list',
+                  'apt_custom_sources_list': 'sources_list',
+                  'add_apt_repo_match': 'add_apt_repo_match'}
     needtoconvert = []
-    for oldkey in oldkeys:
+    for oldkey in mapoldkeys:
         if oldcfg.get(oldkey, None) is not None:
             needtoconvert.append(oldkey)
@@ -480,32 +486,37 @@ def convert_v2_to_v3_apt_format(oldcfg):
     LOG.debug("apt config: convert V2 to V3 format for keys '%s'",
               ", ".join(needtoconvert))
-    if oldcfg.get('apt', None) is not None:
-        msg = ("Error in apt configuration: "
-               "old and new format of apt features are mutually exclusive "
-               "('apt':'%s' vs '%s' key)" % (oldcfg.get('apt', None),
-                                             ", ".join(needtoconvert)))
-        LOG.error(msg)
-        raise ValueError(msg)
+    # if old AND new config are provided, prefer the new one (LP #1616831)
+    newaptcfg = oldcfg.get('apt', None)
+    if newaptcfg is not None:
+        LOG.debug("apt config: V1/2 and V3 format specified, preferring V3")
+        for oldkey in needtoconvert:
+            newkey = mapoldkeys[oldkey]
+            verify = oldcfg[oldkey]  # drop, but keep a ref for verification
+            del oldcfg[oldkey]
+            if newkey is None or newaptcfg.get(newkey, None) is None:
+                # no simple mapping or no collision on this particular key
+                continue
+            if verify != newaptcfg[newkey]:
+                raise ValueError("Old and New apt format defined with unequal "
+                                 "values %s vs %s @ %s" % (verify,
+                                                           newaptcfg[newkey],
+                                                           oldkey))
+        # return conf after clearing conflicting V1/2 keys
+        return oldcfg
     # create new format from old keys
     aptcfg = {}
-    # renames / moves under the apt key
-    convert_key(oldcfg, aptcfg, 'add_apt_repo_match', 'add_apt_repo_match')
-    convert_key(oldcfg, aptcfg, 'apt_proxy', 'proxy')
-    convert_key(oldcfg, aptcfg, 'apt_http_proxy', 'http_proxy')
-    convert_key(oldcfg, aptcfg, 'apt_https_proxy', 'https_proxy')
-    convert_key(oldcfg, aptcfg, 'apt_ftp_proxy', 'ftp_proxy')
-    convert_key(oldcfg, aptcfg, 'apt_custom_sources_list', 'sources_list')
-    convert_key(oldcfg, aptcfg, 'apt_preserve_sources_list',
-                'preserve_sources_list')
-    # dict format not changed since v2, just renamed and moved
-    convert_key(oldcfg, aptcfg, 'apt_sources', 'sources')
+    # simple renames / moves under the apt key
+    for oldkey in mapoldkeys:
+        if mapoldkeys[oldkey] is not None:
+            convert_key(oldcfg, aptcfg, oldkey, mapoldkeys[oldkey])
+    # mirrors changed in a more complex way
     convert_mirror(oldcfg, aptcfg)
-    for oldkey in oldkeys:
+    for oldkey in mapoldkeys:
         if oldcfg.get(oldkey, None) is not None:
             raise ValueError("old apt key '%s' left after conversion" % oldkey)
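
Note: a minimal usage sketch of the conversion behaviour above. Only the function name comes from this diff; the configuration keys and values below are invented for illustration.

    from cloudinit.config.cc_apt_configure import convert_v2_to_v3_apt_format

    # Old (V1/V2) apt_* keys provided alongside a new-style 'apt' dict.
    cfg = {
        'apt_proxy': 'http://proxy.example.com:3128',
        'apt_preserve_sources_list': True,
        'apt': {'proxy': 'http://proxy.example.com:3128'},
    }

    # With the change above this no longer raises: the old keys are checked
    # against any overlapping new values, then dropped, and the config is
    # returned carrying only the V3 'apt' dict.
    cfg = convert_v2_to_v3_apt_format(cfg)
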
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
new file mode 100644
index 00000000..f3c1a664
--- /dev/null
+++ b/cloudinit/config/cc_spacewalk.py
@@ -0,0 +1,85 @@
+# vi: ts=4 expandtab
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+**Summary:** helper to set up https://fedorahosted.org/spacewalk/
+
+**Description:** This module performs the actions needed to set up
+Spacewalk on Red Hat based systems.
+
+It can be configured with the following option structure::
+
+    spacewalk:
+        server: spacewalk api server (required)
+"""
+
+from cloudinit import util
+
+
+distros = ['redhat', 'fedora']
+required_packages = ['rhn-setup']
+def_ca_cert_path = "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT"
+
+
+def is_registered():
+    # Check to see if already registered and don't bother; this is
+    # apparently done by trying to sync and if that fails then we
+    # assume we aren't registered; which is sorta ghetto...
+    already_registered = False
+    try:
+        util.subp(['rhn-profile-sync', '--verbose'], capture=False)
+        already_registered = True
+    except util.ProcessExecutionError as e:
+        if e.exit_code != 1:
+            raise
+    return already_registered
+
+
+def do_register(server, profile_name,
+                ca_cert_path=def_ca_cert_path,
+                proxy=None, log=None,
+                activation_key=None):
+    if log is not None:
+        log.info("Registering using `rhnreg_ks` profile '%s'"
+                 " into server '%s'", profile_name, server)
+    cmd = ['rhnreg_ks']
+    cmd.extend(['--serverUrl', 'https://%s/XMLRPC' % server])
+    cmd.extend(['--profilename', str(profile_name)])
+    if proxy:
+        cmd.extend(["--proxy", str(proxy)])
+    if ca_cert_path:
+        cmd.extend(['--sslCACert', str(ca_cert_path)])
+    if activation_key:
+        cmd.extend(['--activationkey', str(activation_key)])
+    util.subp(cmd, capture=False)
+
+
+def handle(name, cfg, cloud, log, _args):
+    if 'spacewalk' not in cfg:
+        log.debug(("Skipping module named %s,"
+                   " no 'spacewalk' key in configuration"), name)
+        return
+    cfg = cfg['spacewalk']
+    spacewalk_server = cfg.get('server')
+    if spacewalk_server:
+        # Need to have this installed before further things will work.
+        cloud.distro.install_packages(required_packages)
+        if not is_registered():
+            do_register(spacewalk_server,
+                        cloud.datasource.get_hostname(fqdn=True),
+                        proxy=cfg.get("proxy"), log=log,
+                        activation_key=cfg.get('activation_key'))
+    else:
+        log.debug("Skipping module named %s, 'spacewalk/server' key"
+                  " was not found in configuration", name)
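
Note: a rough sketch of driving the helpers above directly (normally handle() does this); the server name, profile name and activation key are invented. The module itself is configured through a 'spacewalk' cloud-config section whose required 'server' key names the Spacewalk API server.

    from cloudinit.config import cc_spacewalk

    if not cc_spacewalk.is_registered():
        cc_spacewalk.do_register(
            'spacewalk.example.com',             # server
            'node01.example.com',                # profile_name
            activation_key='1-0123456789abcdef')

    # Roughly equivalent to running:
    #   rhnreg_ks --serverUrl https://spacewalk.example.com/XMLRPC \
    #       --profilename node01.example.com \
    #       --sslCACert /usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT \
    #       --activationkey 1-0123456789abcdef
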
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index eff5b924..cd533ddb 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -399,7 +399,7 @@ class Renderer(renderer.Renderer):
         else:
             # ifenslave docs say to auto the slave devices
             lines = []
-            if 'bond-master' in iface:
+            if 'bond-master' in iface or 'bond-slaves' in iface:
                 lines.append("auto {name}".format(**iface))
             lines.append("iface {name} {inet} {mode}".format(**iface))
             lines.extend(_iface_add_attrs(iface, index=0))
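
Note: a standalone sketch of what the changed branch now emits for a bond master (an interface dict carrying 'bond-slaves' but no subnets). The dict values are invented and _iface_add_attrs() is left out.

    iface = {'name': 'bond0', 'inet': 'inet', 'mode': 'manual',
             'bond-slaves': 'eth1 eth2'}
    lines = []
    if 'bond-master' in iface or 'bond-slaves' in iface:
        lines.append("auto {name}".format(**iface))
    lines.append("iface {name} {inet} {mode}".format(**iface))
    # lines -> ['auto bond0', 'iface bond0 inet manual']
    # Previously only the slaves (which carry 'bond-master') got the 'auto' line.
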
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 7b3a76b9..635a836c 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -407,7 +407,7 @@ def read_context_disk_dir(source_dir, asuser=None):
     # http://opennebula.org/documentation:rel3.8:cong#network_configuration
     for k in context:
         if re.match(r'^ETH\d+_IP$', k):
-            (out, _) = util.subp(['/sbin/ip', 'link'])
+            (out, _) = util.subp(['ip', 'link'])
             net = OpenNebulaNetwork(out, context)
             results['network-interfaces'] = net.gen_conf()
             break
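
Note: dropping the hard-coded /sbin prefix lets util.subp() resolve 'ip' through PATH, so the call works wherever the binary is installed; a trivial sketch:

    from cloudinit import util

    (out, _err) = util.subp(['ip', 'link'])  # the same call the datasource now makes
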
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 84322e0e..a5a2a1d6 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -539,6 +539,10 @@ def convert_net_json(network_json=None, known_macs=None):
     networks = network_json.get('networks', [])
     services = network_json.get('services', [])
+    link_updates = []
+    link_id_info = {}
+    bond_name_fmt = "bond%d"
+    bond_number = 0
     config = []
     for link in links:
         subnets = []
@@ -551,6 +555,13 @@ def convert_net_json(network_json=None, known_macs=None):
         if 'name' in link:
             cfg['name'] = link['name']
+        if link.get('ethernet_mac_address'):
+            link_id_info[link['id']] = link.get('ethernet_mac_address')
+
+        curinfo = {'name': cfg.get('name'),
+                   'mac': link.get('ethernet_mac_address'),
+                   'id': link['id'], 'type': link['type']}
+
         for network in [n for n in networks
                         if n['link'] == link['id']]:
             subnet = dict((k, v) for k, v in network.items()
@@ -582,31 +593,56 @@ def convert_net_json(network_json=None, known_macs=None):
                     continue
                 elif k.startswith('bond'):
                     params.update({k: v})
-            cfg.update({
-                'bond_interfaces': copy.deepcopy(link['bond_links']),
-                'params': params,
-            })
+
+            # openstack does not provide a name for the bond.
+            # they do provide an 'id', but that is possibly non-sensical.
+            # so we just create our own name.
+            link_name = bond_name_fmt % bond_number
+            bond_number += 1
+
+            # bond_links reference links by their id, but we need to add
+            # to the network config by their nic name.
+            # store that in bond_links_needed, and update these later.
+            link_updates.append(
+                (cfg, 'bond_interfaces', '%s',
+                 copy.deepcopy(link['bond_links']))
+            )
+            cfg.update({'params': params, 'name': link_name})
+
+            curinfo['name'] = link_name
         elif link['type'] in ['vlan']:
+            name = "%s.%s" % (link['vlan_link'], link['vlan_id'])
             cfg.update({
-                'name': "%s.%s" % (link['vlan_link'],
-                                   link['vlan_id']),
-                'vlan_link': link['vlan_link'],
+                'name': name,
                 'vlan_id': link['vlan_id'],
                 'mac_address': link['vlan_mac_address'],
             })
+            link_updates.append((cfg, 'vlan_link', '%s', link['vlan_link']))
+            link_updates.append((cfg, 'name', "%%s.%s" % link['vlan_id'],
+                                 link['vlan_link']))
+            curinfo.update({'mac': link['vlan_mac_address'],
+                            'name': name})
         else:
             raise ValueError(
                 'Unknown network_data link type: %s' % link['type'])
         config.append(cfg)
+        link_id_info[curinfo['id']] = curinfo
     need_names = [d for d in config
                   if d.get('type') == 'physical' and 'name' not in d]
-    if need_names:
+    if need_names or link_updates:
         if known_macs is None:
             known_macs = net.get_interfaces_by_mac()
+        # go through and fill out the link_id_info with names
+        for link_id, info in link_id_info.items():
+            if info.get('name'):
+                continue
+            if info.get('mac') in known_macs:
+                info['name'] = known_macs[info['mac']]
+
         for d in need_names:
             mac = d.get('mac_address')
             if not mac:
@@ -615,6 +651,12 @@ def convert_net_json(network_json=None, known_macs=None):
                 raise ValueError("Unable to find a system nic for %s" % d)
             d['name'] = known_macs[mac]
+        for cfg, key, fmt, target in link_updates:
+            if isinstance(target, (list, tuple)):
+                cfg[key] = [fmt % link_id_info[l]['name'] for l in target]
+            else:
+                cfg[key] = fmt % link_id_info[target]['name']
+
     for service in services:
         cfg = service
         cfg.update({'type': 'nameserver'})
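
Note: a self-contained sketch (link id, MAC and nic name invented) of how the deferred link_updates entries get resolved once nic names are known, mirroring the loop added above:

    link_id_info = {
        'tap-abc123': {'id': 'tap-abc123', 'mac': 'fa:16:3e:aa:bb:cc',
                       'name': 'ens3', 'type': 'phy'},
    }
    cfg = {'type': 'vlan', 'vlan_id': 101}
    link_updates = [
        (cfg, 'vlan_link', '%s', 'tap-abc123'),
        (cfg, 'name', '%%s.%s' % 101, 'tap-abc123'),
    ]

    for cfg, key, fmt, target in link_updates:
        if isinstance(target, (list, tuple)):
            cfg[key] = [fmt % link_id_info[l]['name'] for l in target]
        else:
            cfg[key] = fmt % link_id_info[target]['name']

    # cfg -> {'type': 'vlan', 'vlan_id': 101,
    #         'vlan_link': 'ens3', 'name': 'ens3.101'}
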
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 9c89de61..7c37eb8f 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -113,7 +113,7 @@ def _lsb_release(target=None):
     except ProcessExecutionError as err:
         LOG.warn("Unable to get lsb_release --all: %s", err)
-        data = {v: "UNAVAILABLE" for v in fmap.values()}
+        data = dict((v, "UNAVAILABLE") for v in fmap.values())
     return data
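
Note: both forms build the same mapping; the dict((k, v) ...) spelling also runs on interpreters without dict comprehensions, which is presumably the motivation here (the diff does not say). Values below are an invented subset:

    fmap = {'Codename': 'codename', 'Release': 'release'}
    data = dict((v, "UNAVAILABLE") for v in fmap.values())
    # data == {'codename': 'UNAVAILABLE', 'release': 'UNAVAILABLE'}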