-rw-r--r--   ChangeLog                                               2
-rwxr-xr-x   bin/cloud-init                                         20
-rw-r--r--   cloudinit/distros/__init__.py                          11
-rw-r--r--   cloudinit/distros/debian.py                            12
-rw-r--r--   cloudinit/net/__init__.py                             751
-rw-r--r--   cloudinit/net/network_state.py                        446
-rw-r--r--   cloudinit/net/udev.py                                  54
-rw-r--r--   cloudinit/sources/DataSourceAzure.py                    2
-rw-r--r--   cloudinit/sources/DataSourceConfigDrive.py            135
-rw-r--r--   cloudinit/sources/DataSourceNoCloud.py                 89
-rw-r--r--   cloudinit/sources/DataSourceOpenStack.py                2
-rw-r--r--   cloudinit/sources/__init__.py                           6
-rw-r--r--   cloudinit/sources/helpers/openstack.py                  9
-rw-r--r--   cloudinit/stages.py                                    98
-rw-r--r--   cloudinit/util.py                                      21
-rwxr-xr-x   setup.py                                                3
-rwxr-xr-x   systemd/cloud-init-generator                            3
-rw-r--r--   systemd/cloud-init-local.service                        2
-rw-r--r--   tests/unittests/test_datasource/test_configdrive.py    51
-rw-r--r--   tests/unittests/test_distros/test_netconfig.py          5
-rw-r--r--   tests/unittests/test_net.py                           127
-rw-r--r--   udev/79-cloud-init-net-wait.rules                      10
-rwxr-xr-x   udev/cloud-init-wait                                   68
23 files changed, 1846 insertions, 81 deletions
diff --git a/ChangeLog b/ChangeLog
index b08665b0..6d6da417 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -94,6 +94,8 @@
described in documentation [Chris Cosby]
- quickly check to see if the previous instance id is still valid to
avoid dependency on network metadata service on every boot (LP: #1553815)
+ - support network configuration in cloud-init --local, including
+ device naming via systemd.link.
0.7.6:
- open 0.7.6
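
The feature described above consumes the curtin-style version-1 network config (a dict with 'version' and 'config' keys). As a point of reference, a minimal sketch of what cloudinit.net.parse_net_config_data() accepts once this lands is shown below; the MAC is the placeholder from the handle_physical() docstring further down, and the other values are illustrative only, not part of this patch.

    # Minimal sketch of a version-1 network config dict (illustrative values).
    example_netcfg = {
        'version': 1,
        'config': [
            {'type': 'physical',
             'name': 'eth0',
             'mac_address': 'c0:d6:9f:2c:e8:80',
             'subnets': [{'type': 'dhcp4'}]},
            {'type': 'nameserver',
             'address': ['10.0.2.3'],
             'search': ['example.com']},
        ],
    }
    # ns = cloudinit.net.parse_net_config_data(example_netcfg)
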
diff --git a/bin/cloud-init b/bin/cloud-init
index 11cc0237..341359e3 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -263,6 +263,10 @@ def main_init(name, args):
return (None, [])
else:
return (None, ["No instance datasource found."])
+
+ if args.local:
+ init.apply_network_config()
+
# Stage 6
iid = init.instancify()
LOG.debug("%s will now be targeting instance id: %s", name, iid)
@@ -325,7 +329,7 @@ def main_modules(action_name, args):
init.read_cfg(extract_fns(args))
# Stage 2
try:
- init.fetch()
+ init.fetch(existing="trust")
except sources.DataSourceNotFoundException:
# There was no datasource found, theres nothing to do
msg = ('Can not apply stage %s, no datasource found! Likely bad '
@@ -379,7 +383,7 @@ def main_single(name, args):
init.read_cfg(extract_fns(args))
# Stage 2
try:
- init.fetch()
+ init.fetch(existing="trust")
except sources.DataSourceNotFoundException:
# There was no datasource found,
# that might be bad (or ok) depending on
@@ -432,20 +436,24 @@ def main_single(name, args):
return 0
-def atomic_write_json(path, data):
+def atomic_write_file(path, content, mode='w'):
tf = None
try:
tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(path),
- delete=False)
- tf.write(util.encode_text(json.dumps(data, indent=1) + "\n"))
+ delete=False, mode=mode)
+ tf.write(content)
tf.close()
os.rename(tf.name, path)
except Exception as e:
if tf is not None:
- util.del_file(tf.name)
+ os.unlink(tf.name)
raise e
+def atomic_write_json(path, data):
+ return atomic_write_file(path, json.dumps(data, indent=1) + "\n")
+
+
def status_wrapper(name, args, data_d=None, link_d=None):
if data_d is None:
data_d = os.path.normpath("/var/lib/cloud/data")
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index e8220985..418421b9 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -75,6 +75,9 @@ class Distro(object):
# to write this blob out in a distro format
raise NotImplementedError()
+ def _write_network_config(self, settings):
+ raise NotImplementedError()
+
def _find_tz_file(self, tz):
tz_file = os.path.join(self.tz_zone_dir, str(tz))
if not os.path.isfile(tz_file):
@@ -132,6 +135,14 @@ class Distro(object):
return self._bring_up_interfaces(dev_names)
return False
+ def apply_network_config(self, netconfig, bring_up=False):
+ # Write it out
+ dev_names = self._write_network_config(netconfig)
+ # Now try to bring them up
+ if bring_up:
+ return self._bring_up_interfaces(dev_names)
+ return False
+
@abc.abstractmethod
def apply_locale(self, locale, out_fn=None):
raise NotImplementedError()
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index db5890b1..b14fa3e2 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -26,6 +26,7 @@ from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import net
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -45,7 +46,8 @@ APT_GET_WRAPPER = {
class Distro(distros.Distro):
hostname_conf_fn = "/etc/hostname"
locale_conf_fn = "/etc/default/locale"
- network_conf_fn = "/etc/network/interfaces"
+ network_conf_fn = "/etc/network/interfaces.d/50-cloud-init.cfg"
+ links_prefix = "/etc/systemd/network/50-cloud-init-"
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -76,6 +78,14 @@ class Distro(distros.Distro):
util.write_file(self.network_conf_fn, settings)
return ['all']
+ def _write_network_config(self, netconfig):
+ ns = net.parse_net_config_data(netconfig)
+ net.render_network_state(target="/", network_state=ns,
+ eni=self.network_conf_fn,
+ links_prefix=self.links_prefix)
+ util.del_file("/etc/network/interfaces.d/eth0.cfg")
+ return []
+
def _bring_up_interfaces(self, device_names):
use_all = False
for d in device_names:
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
new file mode 100644
index 00000000..40929c6e
--- /dev/null
+++ b/cloudinit/net/__init__.py
@@ -0,0 +1,751 @@
+# Copyright (C) 2013-2014 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Blake Rouse <blake.rouse@canonical.com>
+#
+# Curtin is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
+
+import base64
+import errno
+import glob
+import gzip
+import io
+import os
+import re
+import shlex
+
+from cloudinit import log as logging
+from cloudinit import util
+from .udev import generate_udev_rule
+from . import network_state
+
+LOG = logging.getLogger(__name__)
+
+SYS_CLASS_NET = "/sys/class/net/"
+LINKS_FNAME_PREFIX = "etc/systemd/network/50-cloud-init-"
+
+NET_CONFIG_OPTIONS = [
+ "address", "netmask", "broadcast", "network", "metric", "gateway",
+ "pointtopoint", "media", "mtu", "hostname", "leasehours", "leasetime",
+ "vendor", "client", "bootfile", "server", "hwaddr", "provider", "frame",
+ "netnum", "endpoint", "local", "ttl",
+ ]
+
+NET_CONFIG_COMMANDS = [
+ "pre-up", "up", "post-up", "down", "pre-down", "post-down",
+ ]
+
+NET_CONFIG_BRIDGE_OPTIONS = [
+ "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcinit",
+ "bridge_hello", "bridge_maxage", "bridge_maxwait", "bridge_stp",
+ ]
+
+DEFAULT_PRIMARY_INTERFACE = 'eth0'
+
+
+def sys_dev_path(devname, path=""):
+ return SYS_CLASS_NET + devname + "/" + path
+
+
+def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None):
+ try:
+ contents = ""
+ with open(sys_dev_path(devname, path), "r") as fp:
+ contents = fp.read().strip()
+ if translate is None:
+ return contents
+
+ try:
+ return translate.get(contents)
+ except KeyError:
+ LOG.debug("found unexpected value '%s' in '%s/%s'", contents,
+ devname, path)
+ if keyerror is not None:
+ return keyerror
+ raise
+ except OSError as e:
+ if e.errno == errno.ENOENT and enoent is not None:
+ return enoent
+ raise
+
+
+def is_up(devname):
+ # The linux kernel says to consider devices in 'unknown'
+ # operstate as up for the purposes of network configuration. See
+ # Documentation/networking/operstates.txt in the kernel source.
+ translate = {'up': True, 'unknown': True, 'down': False}
+ return read_sys_net(devname, "operstate", enoent=False, keyerror=False,
+ translate=translate)
+
+
+def is_wireless(devname):
+ return os.path.exists(sys_dev_path(devname, "wireless"))
+
+
+def is_connected(devname):
+ # is_connected isn't really as simple as that. 2 is
+ # 'physically connected'. 3 is 'not connected'. but a wlan interface will
+ # always show 3.
+ try:
+ iflink = read_sys_net(devname, "iflink", enoent=False)
+ if iflink == "2":
+ return True
+ if not is_wireless(devname):
+ return False
+ LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname)
+
+ return read_sys_net(devname, "carrier", enoent=False, keyerror=False,
+ translate={'0': False, '1': True})
+
+ except IOError as e:
+ if e.errno == errno.EINVAL:
+ return False
+ raise
+
+
+def is_physical(devname):
+ return os.path.exists(sys_dev_path(devname, "device"))
+
+
+def is_present(devname):
+ return os.path.exists(sys_dev_path(devname))
+
+
+def get_devicelist():
+ return os.listdir(SYS_CLASS_NET)
+
+
+class ParserError(Exception):
+ """Raised when parser has issue parsing the interfaces file."""
+
+
+def parse_deb_config_data(ifaces, contents, src_dir, src_path):
+ """Parses the file contents, placing result into ifaces.
+
+ '_source_path' is added to every dictionary entry to define which file
+ the configuration information came from.
+
+ :param ifaces: interface dictionary
+ :param contents: contents of interfaces file
+ :param src_dir: directory the interfaces file was located in
+ :param src_path: path of the file that `contents` was read from
+ """
+ currif = None
+ for line in contents.splitlines():
+ line = line.strip()
+ if line.startswith('#'):
+ continue
+ split = line.split(' ')
+ option = split[0]
+ if option == "source-directory":
+ parsed_src_dir = split[1]
+ if not parsed_src_dir.startswith("/"):
+ parsed_src_dir = os.path.join(src_dir, parsed_src_dir)
+ for expanded_path in glob.glob(parsed_src_dir):
+ dir_contents = os.listdir(expanded_path)
+ dir_contents = [
+ os.path.join(expanded_path, path)
+ for path in dir_contents
+ if (os.path.isfile(os.path.join(expanded_path, path)) and
+ re.match("^[a-zA-Z0-9_-]+$", path) is not None)
+ ]
+ for entry in dir_contents:
+ with open(entry, "r") as fp:
+ src_data = fp.read().strip()
+ abs_entry = os.path.abspath(entry)
+ parse_deb_config_data(
+ ifaces, src_data,
+ os.path.dirname(abs_entry), abs_entry)
+ elif option == "source":
+ new_src_path = split[1]
+ if not new_src_path.startswith("/"):
+ new_src_path = os.path.join(src_dir, new_src_path)
+ for expanded_path in glob.glob(new_src_path):
+ with open(expanded_path, "r") as fp:
+ src_data = fp.read().strip()
+ abs_path = os.path.abspath(expanded_path)
+ parse_deb_config_data(
+ ifaces, src_data,
+ os.path.dirname(abs_path), abs_path)
+ elif option == "auto":
+ for iface in split[1:]:
+ if iface not in ifaces:
+ ifaces[iface] = {
+ # Include the source path this interface was found in.
+ "_source_path": src_path
+ }
+ ifaces[iface]['auto'] = True
+ elif option == "iface":
+ iface, family, method = split[1:4]
+ if iface not in ifaces:
+ ifaces[iface] = {
+ # Include the source path this interface was found in.
+ "_source_path": src_path
+ }
+ elif 'family' in ifaces[iface]:
+ raise ParserError(
+ "Interface %s can only be defined once. "
+ "Re-defined in '%s'." % (iface, src_path))
+ ifaces[iface]['family'] = family
+ ifaces[iface]['method'] = method
+ currif = iface
+ elif option == "hwaddress":
+ ifaces[currif]['hwaddress'] = split[1]
+ elif option in NET_CONFIG_OPTIONS:
+ ifaces[currif][option] = split[1]
+ elif option in NET_CONFIG_COMMANDS:
+ if option not in ifaces[currif]:
+ ifaces[currif][option] = []
+ ifaces[currif][option].append(' '.join(split[1:]))
+ elif option.startswith('dns-'):
+ if 'dns' not in ifaces[currif]:
+ ifaces[currif]['dns'] = {}
+ if option == 'dns-search':
+ ifaces[currif]['dns']['search'] = []
+ for domain in split[1:]:
+ ifaces[currif]['dns']['search'].append(domain)
+ elif option == 'dns-nameservers':
+ ifaces[currif]['dns']['nameservers'] = []
+ for server in split[1:]:
+ ifaces[currif]['dns']['nameservers'].append(server)
+ elif option.startswith('bridge_'):
+ if 'bridge' not in ifaces[currif]:
+ ifaces[currif]['bridge'] = {}
+ if option in NET_CONFIG_BRIDGE_OPTIONS:
+ bridge_option = option.replace('bridge_', '', 1)
+ ifaces[currif]['bridge'][bridge_option] = split[1]
+ elif option == "bridge_ports":
+ ifaces[currif]['bridge']['ports'] = []
+ for iface in split[1:]:
+ ifaces[currif]['bridge']['ports'].append(iface)
+ elif option == "bridge_hw" and split[1].lower() == "mac":
+ ifaces[currif]['bridge']['mac'] = split[2]
+ elif option == "bridge_pathcost":
+ if 'pathcost' not in ifaces[currif]['bridge']:
+ ifaces[currif]['bridge']['pathcost'] = {}
+ ifaces[currif]['bridge']['pathcost'][split[1]] = split[2]
+ elif option == "bridge_portprio":
+ if 'portprio' not in ifaces[currif]['bridge']:
+ ifaces[currif]['bridge']['portprio'] = {}
+ ifaces[currif]['bridge']['portprio'][split[1]] = split[2]
+ elif option.startswith('bond-'):
+ if 'bond' not in ifaces[currif]:
+ ifaces[currif]['bond'] = {}
+ bond_option = option.replace('bond-', '', 1)
+ ifaces[currif]['bond'][bond_option] = split[1]
+ for iface in ifaces.keys():
+ if 'auto' not in ifaces[iface]:
+ ifaces[iface]['auto'] = False
+
+
+def parse_deb_config(path):
+ """Parses a debian network configuration file."""
+ ifaces = {}
+ with open(path, "r") as fp:
+ contents = fp.read().strip()
+ abs_path = os.path.abspath(path)
+ parse_deb_config_data(
+ ifaces, contents,
+ os.path.dirname(abs_path), abs_path)
+ return ifaces
+
+
+def parse_net_config_data(net_config):
+ """Parses the config, returns NetworkState dictionary
+
+ :param net_config: curtin network config dict
+ """
+ state = None
+ if 'version' in net_config and 'config' in net_config:
+ ns = network_state.NetworkState(version=net_config.get('version'),
+ config=net_config.get('config'))
+ ns.parse_config()
+ state = ns.network_state
+
+ return state
+
+
+def parse_net_config(path):
+ """Parses a curtin network configuration file and
+ return network state"""
+ ns = None
+ net_config = util.read_conf(path)
+ if 'network' in net_config:
+ ns = parse_net_config_data(net_config.get('network'))
+
+ return ns
+
+
+def _load_shell_content(content, add_empty=False, empty_val=None):
+ """Given shell like syntax (key=value\nkey2=value2\n) in content
+ return the data in dictionary form. If 'add_empty' is True
+ then add entries in to the returned dictionary for 'VAR='
+ variables. Set their value to empty_val."""
+ data = {}
+ for line in shlex.split(content):
+ key, value = line.split("=", 1)
+ if not value:
+ value = empty_val
+ if add_empty or value:
+ data[key] = value
+
+ return data
+
+
+def _klibc_to_config_entry(content, mac_addrs=None):
+ """Convert a klibc writtent shell content file to a 'config' entry
+ When ip= is seen on the kernel command line in debian initramfs
+ and networking is brought up, ipconfig will populate
+ /run/net-<name>.cfg.
+
+ The files are shell style syntax, and examples are in the tests
+ provided here. There is no good documentation on this unfortunately.
+
+ DEVICE=<name> is expected/required and PROTO should indicate if
+ this is 'static' or 'dhcp'.
+ """
+
+ if mac_addrs is None:
+ mac_addrs = {}
+
+ data = _load_shell_content(content)
+ try:
+ name = data['DEVICE']
+ except KeyError:
+ raise ValueError("no 'DEVICE' entry in data")
+
+ # ipconfig on precise does not write PROTO
+ proto = data.get('PROTO')
+ if not proto:
+ if data.get('filename'):
+ proto = 'dhcp'
+ else:
+ proto = 'static'
+
+ if proto not in ('static', 'dhcp'):
+ raise ValueError("Unexpected value for PROTO: %s" % proto)
+
+ iface = {
+ 'type': 'physical',
+ 'name': name,
+ 'subnets': [],
+ }
+
+ if name in mac_addrs:
+ iface['mac_address'] = mac_addrs[name]
+
+ # originally believed there might be IPV6* values
+ for v, pre in (('ipv4', 'IPV4'),):
+ # if no IPV4ADDR or IPV6ADDR, then go on.
+ if pre + "ADDR" not in data:
+ continue
+ subnet = {'type': proto}
+
+ # these fields go right on the subnet
+ for key in ('NETMASK', 'BROADCAST', 'GATEWAY'):
+ if pre + key in data:
+ subnet[key.lower()] = data[pre + key]
+
+ dns = []
+ # handle IPV4DNS0 or IPV6DNS0
+ for nskey in ('DNS0', 'DNS1'):
+ ns = data.get(pre + nskey)
+ # verify it has something other than 0.0.0.0 (or ipv6)
+ if ns and len(ns.strip(":.0")):
+ dns.append(data[pre + nskey])
+ if dns:
+ subnet['dns_nameservers'] = dns
+ # add search to both ipv4 and ipv6, as it has no namespace
+ search = data.get('DOMAINSEARCH')
+ if search:
+ if ',' in search:
+ subnet['dns_search'] = search.split(",")
+ else:
+ subnet['dns_search'] = search.split()
+
+ iface['subnets'].append(subnet)
+
+ return name, iface
+
+
+def config_from_klibc_net_cfg(files=None, mac_addrs=None):
+ if files is None:
+ files = glob.glob('/run/net*.conf')
+
+ entries = []
+ names = {}
+ for cfg_file in files:
+ name, entry = _klibc_to_config_entry(util.load_file(cfg_file),
+ mac_addrs=mac_addrs)
+ if name in names:
+ raise ValueError(
+ "device '%s' defined multiple times: %s and %s" % (
+ name, names[name], cfg_file))
+
+ names[name] = cfg_file
+ entries.append(entry)
+ return {'config': entries, 'version': 1}
+
+
+def render_persistent_net(network_state):
+ ''' Given state, emit udev rules to map
+ mac to ifname
+ '''
+ content = ""
+ interfaces = network_state.get('interfaces')
+ for iface in interfaces.values():
+ # for physical interfaces write out a persist net udev rule
+ if iface['type'] == 'physical' and \
+ 'name' in iface and iface.get('mac_address'):
+ content += generate_udev_rule(iface['name'],
+ iface['mac_address'])
+
+ return content
+
+
+# TODO: switch valid_map based on mode inet/inet6
+def iface_add_subnet(iface, subnet):
+ content = ""
+ valid_map = [
+ 'address',
+ 'netmask',
+ 'broadcast',
+ 'metric',
+ 'gateway',
+ 'pointopoint',
+ 'mtu',
+ 'scope',
+ 'dns_search',
+ 'dns_nameservers',
+ ]
+ for key, value in subnet.items():
+ if value and key in valid_map:
+ if type(value) == list:
+ value = " ".join(value)
+ if '_' in key:
+ key = key.replace('_', '-')
+ content += " {} {}\n".format(key, value)
+
+ return content
+
+
+# TODO: switch to valid_map for attrs
+def iface_add_attrs(iface):
+ content = ""
+ ignore_map = [
+ 'type',
+ 'name',
+ 'inet',
+ 'mode',
+ 'index',
+ 'subnets',
+ ]
+ if iface['type'] not in ['bond', 'bridge', 'vlan']:
+ ignore_map.append('mac_address')
+
+ for key, value in iface.items():
+ if value and key not in ignore_map:
+ if type(value) == list:
+ value = " ".join(value)
+ content += " {} {}\n".format(key, value)
+
+ return content
+
+
+def render_route(route, indent=""):
+ """ When rendering routes for an iface, in some cases applying a route
+ may result in the route command returning non-zero which produces
+ some confusing output for users manually using ifup/ifdown[1]. To
+ that end, we will optionally include an '|| true' postfix to each
+ route line allowing users to work with ifup/ifdown without using
+ --force option.
+
+ We may at some point not want to emit this additional postfix, and
+ add a 'strict' flag to this function. When called with strict=True,
+ then we will not append the postfix.
+
+ 1. http://askubuntu.com/questions/168033/
+ how-to-set-static-routes-in-ubuntu-server
+ """
+ content = ""
+ up = indent + "post-up route add"
+ down = indent + "pre-down route del"
+ eol = " || true\n"
+ mapping = {
+ 'network': '-net',
+ 'netmask': 'netmask',
+ 'gateway': 'gw',
+ 'metric': 'metric',
+ }
+ if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
+ default_gw = " default gw %s" % route['gateway']
+ content += up + default_gw + eol
+ content += down + default_gw + eol
+ elif route['network'] == '::' and route['netmask'] == 0:
+ # ipv6!
+ default_gw = " -A inet6 default gw %s" % route['gateway']
+ content += up + default_gw + eol
+ content += down + default_gw + eol
+ else:
+ route_line = ""
+ for k in ['network', 'netmask', 'gateway', 'metric']:
+ if k in route:
+ route_line += " %s %s" % (mapping[k], route[k])
+ content += up + route_line + eol
+ content += down + route_line + eol
+
+ return content
+
+
+def render_interfaces(network_state):
+ ''' Given state, emit etc/network/interfaces content '''
+
+ content = ""
+ interfaces = network_state.get('interfaces')
+ ''' Apply a sort order to ensure that we write out
+ the physical interfaces first; this is critical for
+ bonding
+ '''
+ order = {
+ 'physical': 0,
+ 'bond': 1,
+ 'bridge': 2,
+ 'vlan': 3,
+ }
+ content += "auto lo\niface lo inet loopback\n"
+ for dnskey, value in network_state.get('dns', {}).items():
+ if len(value):
+ content += " dns-{} {}\n".format(dnskey, " ".join(value))
+
+ content += "\n"
+ for iface in sorted(interfaces.values(),
+ key=lambda k: (order[k['type']], k['name'])):
+ content += "auto {name}\n".format(**iface)
+
+ subnets = iface.get('subnets', {})
+ if subnets:
+ for index, subnet in zip(range(0, len(subnets)), subnets):
+ iface['index'] = index
+ iface['mode'] = subnet['type']
+ if iface['mode'].endswith('6'):
+ iface['inet'] += '6'
+ elif iface['mode'] == 'static' and ":" in subnet['address']:
+ iface['inet'] += '6'
+ if iface['mode'].startswith('dhcp'):
+ iface['mode'] = 'dhcp'
+
+ if index == 0:
+ content += "iface {name} {inet} {mode}\n".format(**iface)
+ else:
+ content += "auto {name}:{index}\n".format(**iface)
+ content += \
+ "iface {name}:{index} {inet} {mode}\n".format(**iface)
+
+ content += iface_add_subnet(iface, subnet)
+ content += iface_add_attrs(iface)
+ for route in subnet.get('routes', []):
+ content += render_route(route, indent=" ")
+ content += "\n"
+ else:
+ content += "iface {name} {inet} {mode}\n".format(**iface)
+ content += iface_add_attrs(iface)
+ content += "\n"
+
+ for route in network_state.get('routes'):
+ content += render_route(route)
+
+ # global replacements until v2 format
+ content = content.replace('mac_address', 'hwaddress ether')
+ return content
+
+
+def render_network_state(target, network_state, eni="etc/network/interfaces",
+ links_prefix=LINKS_FNAME_PREFIX,
+ netrules='etc/udev/rules.d/70-persistent-net.rules'):
+
+ fpeni = os.path.sep.join((target, eni,))
+ util.ensure_dir(os.path.dirname(fpeni))
+ with open(fpeni, 'w+') as f:
+ f.write(render_interfaces(network_state))
+
+ if netrules:
+ netrules = os.path.sep.join((target, netrules,))
+ util.ensure_dir(os.path.dirname(netrules))
+ with open(netrules, 'w+') as f:
+ f.write(render_persistent_net(network_state))
+
+ if links_prefix:
+ render_systemd_links(target, network_state, links_prefix)
+
+
+def render_systemd_links(target, network_state,
+ links_prefix=LINKS_FNAME_PREFIX):
+ fp_prefix = os.path.sep.join((target, links_prefix))
+ for f in glob.glob(fp_prefix + "*"):
+ os.unlink(f)
+
+ interfaces = network_state.get('interfaces')
+ for iface in interfaces.values():
+ if (iface['type'] == 'physical' and 'name' in iface and
+ iface.get('mac_address')):
+ fname = fp_prefix + iface['name'] + ".link"
+ with open(fname, "w") as fp:
+ fp.write("\n".join([
+ "[Match]",
+ "MACAddress=" + iface['mac_address'],
+ "",
+ "[Link]",
+ "Name=" + iface['name'],
+ ""
+ ]))
+
+
+def is_disabled_cfg(cfg):
+ if not cfg or not isinstance(cfg, dict):
+ return False
+ return cfg.get('config') == "disabled"
+
+
+def sys_netdev_info(name, field):
+ if not os.path.exists(os.path.join(SYS_CLASS_NET, name)):
+ raise OSError("%s: interface does not exist in %s" %
+ (name, SYS_CLASS_NET))
+
+ fname = os.path.join(SYS_CLASS_NET, name, field)
+ if not os.path.exists(fname):
+ raise OSError("%s: could not find sysfs entry: %s" % (name, fname))
+ data = util.load_file(fname)
+ if data[-1] == '\n':
+ data = data[:-1]
+ return data
+
+
+def generate_fallback_config():
+ """Determine which attached net dev is most likely to have a connection and
+ generate network state to run dhcp on that interface"""
+ # by default use eth0 as primary interface
+ nconf = {'config': [], 'version': 1}
+
+ # get list of interfaces that could have connections
+ invalid_interfaces = set(['lo'])
+ potential_interfaces = set(get_devicelist())
+ potential_interfaces = potential_interfaces.difference(invalid_interfaces)
+ # sort into interfaces with carrier, interfaces which could have carrier,
+ # and ignore interfaces that are definitely disconnected
+ connected = []
+ possibly_connected = []
+ for interface in potential_interfaces:
+ try:
+ carrier = int(sys_netdev_info(interface, 'carrier'))
+ if carrier:
+ connected.append(interface)
+ continue
+ except OSError:
+ pass
+ # check if nic is dormant or down, as this may make a nic appear to
+ # not have a carrier even though it could acquire one when brought
+ # online by dhclient
+ try:
+ dormant = int(sys_netdev_info(interface, 'dormant'))
+ if dormant:
+ possibly_connected.append(interface)
+ continue
+ except OSError:
+ pass
+ try:
+ operstate = sys_netdev_info(interface, 'operstate')
+ if operstate in ['dormant', 'down', 'lowerlayerdown', 'unknown']:
+ possibly_connected.append(interface)
+ continue
+ except OSError:
+ pass
+
+ # don't bother with interfaces that might not be connected if there are
+ # some that definitely are
+ if connected:
+ potential_interfaces = connected
+ else:
+ potential_interfaces = possibly_connected
+ # if there are no interfaces, give up
+ if not potential_interfaces:
+ return
+ # if eth0 exists use it above anything else, otherwise get the interface
+ # that looks 'first'
+ if DEFAULT_PRIMARY_INTERFACE in potential_interfaces:
+ name = DEFAULT_PRIMARY_INTERFACE
+ else:
+ name = sorted(potential_interfaces)[0]
+
+ mac = sys_netdev_info(name, 'address')
+ target_name = name
+
+ nconf['config'].append(
+ {'type': 'physical', 'name': target_name,
+ 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
+ return nconf
+
+
+def _decomp_gzip(blob, strict=True):
+ # decompress blob. raise exception if not compressed unless strict=False.
+ with io.BytesIO(blob) as iobuf:
+ gzfp = None
+ try:
+ gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf)
+ return gzfp.read()
+ except IOError:
+ if strict:
+ raise
+ return blob
+ finally:
+ if gzfp:
+ gzfp.close()
+
+
+def _b64dgz(b64str, gzipped="try"):
+ # decode a base64 string. If gzipped is true, transparently uncompress
+ # if gzipped is 'try', then try gunzip, returning the original on fail.
+ try:
+ blob = base64.b64decode(b64str)
+ except TypeError:
+ raise ValueError("Invalid base64 text: %s" % b64str)
+
+ if not gzipped:
+ return blob
+
+ return _decomp_gzip(blob, strict=gzipped != "try")
+
+
+def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):
+ if cmdline is None:
+ cmdline = util.get_cmdline()
+
+ if 'network-config=' in cmdline:
+ data64 = None
+ for tok in cmdline.split():
+ if tok.startswith("network-config="):
+ data64 = tok.split("=", 1)[1]
+ if data64:
+ return util.load_yaml(_b64dgz(data64))
+
+ if 'ip=' not in cmdline:
+ return None
+
+ if mac_addrs is None:
+ mac_addrs = {k: sys_netdev_info(k, 'address')
+ for k in get_devicelist()}
+
+ return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs)
+
+
+# vi: ts=4 expandtab syntax=python
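
Two helpers in this new module are easiest to read alongside a worked example. The sketch below is illustrative only (the sample MAC, addresses, and file content are made up, loosely following the _klibc_to_config_entry docstring), assuming the module above is importable as cloudinit.net.

    # What _klibc_to_config_entry() makes of an initramfs-written
    # /run/net-eth0.conf (content is a made-up example):
    from cloudinit.net import _klibc_to_config_entry, render_route

    sample = (
        "DEVICE=eth0\n"
        "PROTO=dhcp\n"
        "IPV4ADDR=10.0.2.15\n"
        "IPV4NETMASK=255.255.255.0\n"
        "IPV4BROADCAST=10.0.2.255\n"
        "IPV4GATEWAY=10.0.2.2\n"
        "IPV4DNS0=10.0.2.3\n"
        "IPV4DNS1=0.0.0.0\n"
    )
    name, entry = _klibc_to_config_entry(
        sample, mac_addrs={'eth0': '52:54:00:12:34:56'})
    # name  -> 'eth0'
    # entry -> {'type': 'physical', 'name': 'eth0',
    #           'mac_address': '52:54:00:12:34:56',
    #           'subnets': [{'type': 'dhcp', 'netmask': '255.255.255.0',
    #                        'broadcast': '10.0.2.255', 'gateway': '10.0.2.2',
    #                        'dns_nameservers': ['10.0.2.3']}]}

    # And the '|| true'-guarded lines render_route() emits for a default route:
    print(render_route({'network': '0.0.0.0', 'netmask': '0.0.0.0',
                        'gateway': '10.0.2.2'}))
    # post-up route add default gw 10.0.2.2 || true
    # pre-down route del default gw 10.0.2.2 || true
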
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
new file mode 100644
index 00000000..e32d2cdf
--- /dev/null
+++ b/cloudinit/net/network_state.py
@@ -0,0 +1,446 @@
+# Copyright (C) 2013-2014 Canonical Ltd.
+#
+# Author: Ryan Harper <ryan.harper@canonical.com>
+#
+# Curtin is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.util import yaml_dumps as dump_config
+
+LOG = logging.getLogger(__name__)
+
+NETWORK_STATE_VERSION = 1
+NETWORK_STATE_REQUIRED_KEYS = {
+ 1: ['version', 'config', 'network_state'],
+}
+
+
+def from_state_file(state_file):
+ network_state = None
+ state = util.read_conf(state_file)
+ network_state = NetworkState()
+ network_state.load(state)
+
+ return network_state
+
+
+class NetworkState:
+ def __init__(self, version=NETWORK_STATE_VERSION, config=None):
+ self.version = version
+ self.config = config
+ self.network_state = {
+ 'interfaces': {},
+ 'routes': [],
+ 'dns': {
+ 'nameservers': [],
+ 'search': [],
+ }
+ }
+ self.command_handlers = self.get_command_handlers()
+
+ def get_command_handlers(self):
+ METHOD_PREFIX = 'handle_'
+ methods = filter(lambda x: callable(getattr(self, x)) and
+ x.startswith(METHOD_PREFIX), dir(self))
+ handlers = {}
+ for m in methods:
+ key = m.replace(METHOD_PREFIX, '')
+ handlers[key] = getattr(self, m)
+
+ return handlers
+
+ def dump(self):
+ state = {
+ 'version': self.version,
+ 'config': self.config,
+ 'network_state': self.network_state,
+ }
+ return dump_config(state)
+
+ def load(self, state):
+ if 'version' not in state:
+ LOG.error('Invalid state, missing version field')
+ raise Exception('Invalid state, missing version field')
+
+ required_keys = NETWORK_STATE_REQUIRED_KEYS[state['version']]
+ if not self.valid_command(state, required_keys):
+ msg = 'Invalid state, missing keys: {}'.format(required_keys)
+ LOG.error(msg)
+ raise Exception(msg)
+
+ # v1 - direct attr mapping, except version
+ for key in [k for k in required_keys if k not in ['version']]:
+ setattr(self, key, state[key])
+ self.command_handlers = self.get_command_handlers()
+
+ def dump_network_state(self):
+ return dump_config(self.network_state)
+
+ def parse_config(self):
+ # rebuild network state
+ for command in self.config:
+ handler = self.command_handlers.get(command['type'])
+ handler(command)
+
+ def valid_command(self, command, required_keys):
+ if not required_keys:
+ return False
+
+ found_keys = [key for key in command.keys() if key in required_keys]
+ return len(found_keys) == len(required_keys)
+
+ def handle_physical(self, command):
+ '''
+ command = {
+ 'type': 'physical',
+ 'mac_address': 'c0:d6:9f:2c:e8:80',
+ 'name': 'eth0',
+ 'subnets': [
+ {'type': 'dhcp4'}
+ ]
+ }
+ '''
+ required_keys = [
+ 'name',
+ ]
+ if not self.valid_command(command, required_keys):
+ LOG.warn('Skipping Invalid command: {}'.format(command))
+ LOG.debug(self.dump_network_state())
+ return
+
+ interfaces = self.network_state.get('interfaces')
+ iface = interfaces.get(command['name'], {})
+ for param, val in command.get('params', {}).items():
+ iface.update({param: val})
+
+ # convert subnet ipv6 netmask to cidr as needed
+ subnets = command.get('subnets')
+ if subnets:
+ for subnet in subnets:
+ if subnet['type'] == 'static':
+ if 'netmask' in subnet and ':' in subnet['address']:
+ subnet['netmask'] = mask2cidr(subnet['netmask'])
+ for route in subnet.get('routes', []):
+ if 'netmask' in route:
+ route['netmask'] = mask2cidr(route['netmask'])
+ iface.update({
+ 'name': command.get('name'),
+ 'type': command.get('type'),
+ 'mac_address': command.get('mac_address'),
+ 'inet': 'inet',
+ 'mode': 'manual',
+ 'mtu': command.get('mtu'),
+ 'address': None,
+ 'gateway': None,
+ 'subnets': subnets,
+ })
+ self.network_state['interfaces'].update({command.get('name'): iface})
+ self.dump_network_state()
+
+ def handle_vlan(self, command):
+ '''
+ auto eth0.222
+ iface eth0.222 inet static
+ address 10.10.10.1
+ netmask 255.255.255.0
+ hwaddress ether BC:76:4E:06:96:B3
+ vlan-raw-device eth0
+ '''
+ required_keys = [
+ 'name',
+ 'vlan_link',
+ 'vlan_id',
+ ]
+ if not self.valid_command(command, required_keys):
+ print('Skipping Invalid command: {}'.format(command))
+ print(self.dump_network_state())
+ return
+
+ interfaces = self.network_state.get('interfaces')
+ self.handle_physical(command)
+ iface = interfaces.get(command.get('name'), {})
+ iface['vlan-raw-device'] = command.get('vlan_link')
+ iface['vlan_id'] = command.get('vlan_id')
+ interfaces.update({iface['name']: iface})
+
+ def handle_bond(self, command):
+ '''
+ #/etc/network/interfaces
+ auto eth0
+ iface eth0 inet manual
+ bond-master bond0
+ bond-mode 802.3ad
+
+ auto eth1
+ iface eth1 inet manual
+ bond-master bond0
+ bond-mode 802.3ad
+
+ auto bond0
+ iface bond0 inet static
+ address 192.168.0.10
+ gateway 192.168.0.1
+ netmask 255.255.255.0
+ bond-slaves none
+ bond-mode 802.3ad
+ bond-miimon 100
+ bond-downdelay 200
+ bond-updelay 200
+ bond-lacp-rate 4
+ '''
+ required_keys = [
+ 'name',
+ 'bond_interfaces',
+ 'params',
+ ]
+ if not self.valid_command(command, required_keys):
+ print('Skipping Invalid command: {}'.format(command))
+ print(self.dump_network_state())
+ return
+
+ self.handle_physical(command)
+ interfaces = self.network_state.get('interfaces')
+ iface = interfaces.get(command.get('name'), {})
+ for param, val in command.get('params').items():
+ iface.update({param: val})
+ iface.update({'bond-slaves': 'none'})
+ self.network_state['interfaces'].update({iface['name']: iface})
+
+ # handle bond slaves
+ for ifname in command.get('bond_interfaces'):
+ if ifname not in interfaces:
+ cmd = {
+ 'name': ifname,
+ 'type': 'bond',
+ }
+ # inject placeholder
+ self.handle_physical(cmd)
+
+ interfaces = self.network_state.get('interfaces')
+ bond_if = interfaces.get(ifname)
+ bond_if['bond-master'] = command.get('name')
+ # copy in bond config into slave
+ for param, val in command.get('params').items():
+ bond_if.update({param: val})
+ self.network_state['interfaces'].update({ifname: bond_if})
+
+ def handle_bridge(self, command):
+ '''
+ auto br0
+ iface br0 inet static
+ address 10.10.10.1
+ netmask 255.255.255.0
+ bridge_ports eth0 eth1
+ bridge_stp off
+ bridge_fd 0
+ bridge_maxwait 0
+
+ bridge_params = [
+ "bridge_ports",
+ "bridge_ageing",
+ "bridge_bridgeprio",
+ "bridge_fd",
+ "bridge_gcint",
+ "bridge_hello",
+ "bridge_hw",
+ "bridge_maxage",
+ "bridge_maxwait",
+ "bridge_pathcost",
+ "bridge_portprio",
+ "bridge_stp",
+ "bridge_waitport",
+ ]
+ '''
+ required_keys = [
+ 'name',
+ 'bridge_interfaces',
+ 'params',
+ ]
+ if not self.valid_command(command, required_keys):
+ print('Skipping Invalid command: {}'.format(command))
+ print(self.dump_network_state())
+ return
+
+ # find one of the bridge port ifaces to get mac_addr
+ # handle bridge_slaves
+ interfaces = self.network_state.get('interfaces')
+ for ifname in command.get('bridge_interfaces'):
+ if ifname in interfaces:
+ continue
+
+ cmd = {
+ 'name': ifname,
+ }
+ # inject placeholder
+ self.handle_physical(cmd)
+
+ interfaces = self.network_state.get('interfaces')
+ self.handle_physical(command)
+ iface = interfaces.get(command.get('name'), {})
+ iface['bridge_ports'] = command['bridge_interfaces']
+ for param, val in command.get('params').items():
+ iface.update({param: val})
+
+ interfaces.update({iface['name']: iface})
+
+ def handle_nameserver(self, command):
+ required_keys = [
+ 'address',
+ ]
+ if not self.valid_command(command, required_keys):
+ print('Skipping Invalid command: {}'.format(command))
+ print(self.dump_network_state())
+ return
+
+ dns = self.network_state.get('dns')
+ if 'address' in command:
+ addrs = command['address']
+ if not type(addrs) == list:
+ addrs = [addrs]
+ for addr in addrs:
+ dns['nameservers'].append(addr)
+ if 'search' in command:
+ paths = command['search']
+ if not isinstance(paths, list):
+ paths = [paths]
+ for path in paths:
+ dns['search'].append(path)
+
+ def handle_route(self, command):
+ required_keys = [
+ 'destination',
+ ]
+ if not self.valid_command(command, required_keys):
+ print('Skipping Invalid command: {}'.format(command))
+ print(self.dump_network_state())
+ return
+
+ routes = self.network_state.get('routes')
+ network, cidr = command['destination'].split("/")
+ netmask = cidr2mask(int(cidr))
+ route = {
+ 'network': network,
+ 'netmask': netmask,
+ 'gateway': command.get('gateway'),
+ 'metric': command.get('metric'),
+ }
+ routes.append(route)
+
+
+def cidr2mask(cidr):
+ mask = [0, 0, 0, 0]
+ for i in list(range(0, cidr)):
+ idx = int(i / 8)
+ mask[idx] = mask[idx] + (1 << (7 - i % 8))
+ return ".".join([str(x) for x in mask])
+
+
+def ipv4mask2cidr(mask):
+ if '.' not in mask:
+ return mask
+ return sum([bin(int(x)).count('1') for x in mask.split('.')])
+
+
+def ipv6mask2cidr(mask):
+ if ':' not in mask:
+ return mask
+
+ bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00,
+ 0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc,
+ 0xfffe, 0xffff]
+ cidr = 0
+ for word in mask.split(':'):
+ if not word or int(word, 16) == 0:
+ break
+ cidr += bitCount.index(int(word, 16))
+
+ return cidr
+
+
+def mask2cidr(mask):
+ if ':' in mask:
+ return ipv6mask2cidr(mask)
+ elif '.' in mask:
+ return ipv4mask2cidr(mask)
+ else:
+ return mask
+
+
+if __name__ == '__main__':
+ import sys
+ import random
+ from cloudinit import net
+
+ def load_config(nc):
+ version = nc.get('version')
+ config = nc.get('config')
+ return (version, config)
+
+ def test_parse(network_config):
+ (version, config) = load_config(network_config)
+ ns1 = NetworkState(version=version, config=config)
+ ns1.parse_config()
+ random.shuffle(config)
+ ns2 = NetworkState(version=version, config=config)
+ ns2.parse_config()
+ print("----NS1-----")
+ print(ns1.dump_network_state())
+ print()
+ print("----NS2-----")
+ print(ns2.dump_network_state())
+ print("NS1 == NS2 ?=> {}".format(
+ ns1.network_state == ns2.network_state))
+ eni = net.render_interfaces(ns2.network_state)
+ print(eni)
+ udev_rules = net.render_persistent_net(ns2.network_state)
+ print(udev_rules)
+
+ def test_dump_and_load(network_config):
+ print("Loading network_config into NetworkState")
+ (version, config) = load_config(network_config)
+ ns1 = NetworkState(version=version, config=config)
+ ns1.parse_config()
+ print("Dumping state to file")
+ ns1_dump = ns1.dump()
+ ns1_state = "/tmp/ns1.state"
+ with open(ns1_state, "w+") as f:
+ f.write(ns1_dump)
+
+ print("Loading state from file")
+ ns2 = from_state_file(ns1_state)
+ print("NS1 == NS2 ?=> {}".format(
+ ns1.network_state == ns2.network_state))
+
+ def test_output(network_config):
+ (version, config) = load_config(network_config)
+ ns1 = NetworkState(version=version, config=config)
+ ns1.parse_config()
+ random.shuffle(config)
+ ns2 = NetworkState(version=version, config=config)
+ ns2.parse_config()
+ print("NS1 == NS2 ?=> {}".format(
+ ns1.network_state == ns2.network_state))
+ eni_1 = net.render_interfaces(ns1.network_state)
+ eni_2 = net.render_interfaces(ns2.network_state)
+ print(eni_1)
+ print(eni_2)
+ print("eni_1 == eni_2 ?=> {}".format(
+ eni_1 == eni_2))
+
+ y = util.read_conf(sys.argv[1])
+ network_config = y.get('network')
+ test_parse(network_config)
+ test_dump_and_load(network_config)
+ test_output(network_config)
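
The mask/CIDR helpers at the bottom of this module behave as the usual textbook conversions; a quick sanity sketch (standard example values, nothing patch-specific), assuming the module imports as cloudinit.net.network_state:

    from cloudinit.net.network_state import cidr2mask, mask2cidr

    assert cidr2mask(24) == "255.255.255.0"
    assert cidr2mask(22) == "255.255.252.0"
    assert mask2cidr("255.255.255.0") == 24           # IPv4 dotted quad
    assert mask2cidr("ffff:ffff:ffff:ffff::") == 64   # IPv6 netmask
    assert mask2cidr("24") == "24"                    # already CIDR, passed through
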
diff --git a/cloudinit/net/udev.py b/cloudinit/net/udev.py
new file mode 100644
index 00000000..6435ace0
--- /dev/null
+++ b/cloudinit/net/udev.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2015 Canonical Ltd.
+#
+# Author: Ryan Harper <ryan.harper@canonical.com>
+#
+# Curtin is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
+
+
+def compose_udev_equality(key, value):
+ """Return a udev comparison clause, like `ACTION=="add"`."""
+ assert key == key.upper()
+ return '%s=="%s"' % (key, value)
+
+
+def compose_udev_attr_equality(attribute, value):
+ """Return a udev attribute comparison clause, like `ATTR{type}=="1"`."""
+ assert attribute == attribute.lower()
+ return 'ATTR{%s}=="%s"' % (attribute, value)
+
+
+def compose_udev_setting(key, value):
+ """Return a udev assignment clause, like `NAME="eth0"`."""
+ assert key == key.upper()
+ return '%s="%s"' % (key, value)
+
+
+def generate_udev_rule(interface, mac):
+ """Return a udev rule to set the name of network interface with `mac`.
+
+ The rule ends up as a single line looking something like:
+
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*",
+ ATTR{address}=="ff:ee:dd:cc:bb:aa", NAME="eth0"
+ """
+ rule = ', '.join([
+ compose_udev_equality('SUBSYSTEM', 'net'),
+ compose_udev_equality('ACTION', 'add'),
+ compose_udev_equality('DRIVERS', '?*'),
+ compose_udev_attr_equality('address', mac),
+ compose_udev_setting('NAME', interface),
+ ])
+ return '%s\n' % rule
+
+# vi: ts=4 expandtab syntax=python
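
Putting the three compose helpers together, generate_udev_rule() produces the one-line rule the docstring describes; a small usage sketch with the docstring's placeholder MAC:

    from cloudinit.net.udev import generate_udev_rule

    print(generate_udev_rule('eth0', 'ff:ee:dd:cc:bb:aa'))
    # SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="ff:ee:dd:cc:bb:aa", NAME="eth0"
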
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 832b3063..698f4cac 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -254,7 +254,7 @@ class DataSourceAzureNet(sources.DataSource):
def get_config_obj(self):
return self.cfg
- def check_instance_id(self):
+ def check_instance_id(self, sys_cfg):
# quickly (local check only) if self.instance_id is still valid
return sources.instance_id_matches_system_uuid(self.get_instance_id())
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 6fc9e05b..3fa62ef3 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import copy
import os
from cloudinit import log as logging
@@ -50,6 +51,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
self.version = None
self.ec2_metadata = None
+ self._network_config = None
+ self.network_json = None
self.files = {}
def __str__(self):
@@ -144,12 +147,25 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
LOG.warn("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
+ try:
+ self.network_json = results.get('networkdata')
+ except ValueError as e:
+ LOG.warn("Invalid content in network-data: %s", e)
+ self.network_json = None
+
return True
def check_instance_id(self):
# quickly (local check only) if self.instance_id is still valid
return sources.instance_id_matches_system_uuid(self.get_instance_id())
+ @property
+ def network_config(self):
+ if self._network_config is None:
+ if self.network_json is not None:
+ self._network_config = convert_network_data(self.network_json)
+ return self._network_config
+
class DataSourceConfigDriveNet(DataSourceConfigDrive):
def __init__(self, sys_cfg, distro, paths):
@@ -287,3 +303,122 @@ datasources = [
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
+
+# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
+def convert_network_data(network_json=None):
+ """Return a dictionary of network_config by parsing provided
+ OpenStack ConfigDrive NetworkData json format
+
+ OpenStack network_data.json provides a 3-element dictionary
+ - "links" (links are network devices, physical or virtual)
+ - "networks" (networks are ip network configurations for one or more
+ links)
+ - services (non-ip services, like dns)
+
+ networks and links are combined via network items referencing specific
+ links via a 'link_id' which maps to a link's 'id' field.
+
+ To convert this format to network_config yaml, we first iterate over the
+ links and then walk the network list to determine if any of the networks
+ utilize the current link; if so we generate a subnet entry for the device
+
+ We also need to map network_data.json fields to network_config fields. For
+ example, the network_data links 'id' field is equivalent to network_config
+ 'name' field for devices. We apply more of this mapping to the various
+ link types that we encounter.
+
+ There are additional fields that are populated in the network_data.json
+ from OpenStack that are not relevant to network_config yaml, so we
+ enumerate a dictionary of valid keys for network_yaml and apply filtering
+ to drop these superfluous keys from the network_config yaml.
+ """
+ if network_json is None:
+ return None
+
+ # dict of network_config key for filtering network_json
+ valid_keys = {
+ 'physical': [
+ 'name',
+ 'type',
+ 'mac_address',
+ 'subnets',
+ 'params',
+ ],
+ 'subnet': [
+ 'type',
+ 'address',
+ 'netmask',
+ 'broadcast',
+ 'metric',
+ 'gateway',
+ 'pointopoint',
+ 'mtu',
+ 'scope',
+ 'dns_nameservers',
+ 'dns_search',
+ 'routes',
+ ],
+ }
+
+ links = network_json.get('links', [])
+ networks = network_json.get('networks', [])
+ services = network_json.get('services', [])
+
+ config = []
+ for link in links:
+ subnets = []
+ cfg = {k: v for k, v in link.items()
+ if k in valid_keys['physical']}
+ cfg.update({'name': link['id']})
+ for network in [net for net in networks
+ if net['link'] == link['id']]:
+ subnet = {k: v for k, v in network.items()
+ if k in valid_keys['subnet']}
+ if 'dhcp' in network['type']:
+ t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
+ subnet.update({
+ 'type': t,
+ })
+ else:
+ subnet.update({
+ 'type': 'static',
+ 'address': network.get('ip_address'),
+ })
+ subnets.append(subnet)
+ cfg.update({'subnets': subnets})
+ if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']:
+ cfg.update({
+ 'type': 'physical',
+ 'mac_address': link['ethernet_mac_address']})
+ elif link['type'] in ['bond']:
+ params = {}
+ for k, v in link.items():
+ if k == 'bond_links':
+ continue
+ elif k.startswith('bond'):
+ params.update({k: v})
+ cfg.update({
+ 'bond_interfaces': copy.deepcopy(link['bond_links']),
+ 'params': params,
+ })
+ elif link['type'] in ['vlan']:
+ cfg.update({
+ 'name': "%s.%s" % (link['vlan_link'],
+ link['vlan_id']),
+ 'vlan_link': link['vlan_link'],
+ 'vlan_id': link['vlan_id'],
+ 'mac_address': link['vlan_mac_address'],
+ })
+ else:
+ raise ValueError(
+ 'Unknown network_data link type: %s' % link['type'])
+
+ config.append(cfg)
+
+ for service in services:
+ cfg = service
+ cfg.update({'type': 'nameserver'})
+ config.append(cfg)
+
+ return {'version': 1, 'config': config}
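
To make the mapping in convert_network_data() concrete, here is a hedged sketch on a minimal, made-up network_data.json (one physical link with a DHCP network plus a DNS service); the field values are illustrative only, not taken from any real metadata.

    from cloudinit.sources.DataSourceConfigDrive import convert_network_data

    network_json = {
        'links': [{'id': 'tap-0', 'type': 'phy',
                   'ethernet_mac_address': 'fa:16:3e:aa:bb:cc'}],
        'networks': [{'id': 'network0', 'link': 'tap-0',
                      'type': 'ipv4_dhcp'}],
        'services': [{'type': 'dns', 'address': '10.0.2.3'}],
    }
    print(convert_network_data(network_json))
    # {'version': 1, 'config': [
    #     {'name': 'tap-0', 'type': 'physical',
    #      'mac_address': 'fa:16:3e:aa:bb:cc',
    #      'subnets': [{'type': 'dhcp4'}]},
    #     {'type': 'nameserver', 'address': '10.0.2.3'}]}
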
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index d07e6f84..c2fba4d2 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -36,7 +36,9 @@ class DataSourceNoCloud(sources.DataSource):
self.dsmode = 'local'
self.seed = None
self.cmdline_id = "ds=nocloud"
- self.seed_dir = os.path.join(paths.seed_dir, 'nocloud')
+ self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
+ os.path.join(paths.seed_dir, 'nocloud-net')]
+ self.seed_dir = None
self.supported_seed_starts = ("/", "file://")
def __str__(self):
@@ -50,31 +52,32 @@ class DataSourceNoCloud(sources.DataSource):
}
found = []
- mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}
+ mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
+ 'network-config': {}}
try:
# Parse the kernel command line, getting data passed in
md = {}
if parse_cmdline_data(self.cmdline_id, md):
found.append("cmdline")
- mydata['meta-data'].update(md)
+ mydata = _merge_new_seed(mydata, {'meta-data': md})
except:
util.logexc(LOG, "Unable to parse command line data")
return False
# Check to see if the seed dir has data.
pp2d_kwargs = {'required': ['user-data', 'meta-data'],
- 'optional': ['vendor-data']}
-
- try:
- seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs)
- found.append(self.seed_dir)
- LOG.debug("Using seeded data from %s", self.seed_dir)
- except ValueError as e:
- pass
-
- if self.seed_dir in found:
- mydata = _merge_new_seed(mydata, seeded)
+ 'optional': ['vendor-data', 'network-config']}
+
+ for path in self.seed_dirs:
+ try:
+ seeded = util.pathprefix2dict(path, **pp2d_kwargs)
+ found.append(path)
+ LOG.debug("Using seeded data from %s", path)
+ mydata = _merge_new_seed(mydata, seeded)
+ break
+ except ValueError as e:
+ pass
# If the datasource config had a 'seedfrom' entry, then that takes
# precedence over a 'seedfrom' that was found in a filesystem
@@ -141,8 +144,7 @@ class DataSourceNoCloud(sources.DataSource):
if len(found) == 0:
return False
- seeded_interfaces = None
-
+ seeded_network = None
# The special argument "seedfrom" indicates we should
# attempt to seed the userdata / metadata from its value
# its primarily value is in allowing the user to type less
@@ -158,8 +160,9 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
- if 'network-interfaces' in mydata['meta-data']:
- seeded_interfaces = self.dsmode
+ if (mydata['meta-data'].get('network-interfaces') or
+ mydata.get('network-config')):
+ seeded_network = self.dsmode
# This could throw errors, but the user told us to do it
# so if errors are raised, let them raise
@@ -176,28 +179,37 @@ class DataSourceNoCloud(sources.DataSource):
mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
defaults])
- # Update the network-interfaces if metadata had 'network-interfaces'
- # entry and this is the local datasource, or 'seedfrom' was used
- # and the source of the seed was self.dsmode
- # ('local' for NoCloud, 'net' for NoCloudNet')
- if ('network-interfaces' in mydata['meta-data'] and
- (self.dsmode in ("local", seeded_interfaces))):
- LOG.debug("Updating network interfaces from %s", self)
- self.distro.apply_network(
- mydata['meta-data']['network-interfaces'])
+ netdata = {'format': None, 'data': None}
+ if mydata['meta-data'].get('network-interfaces'):
+ netdata['format'] = 'interfaces'
+ netdata['data'] = mydata['meta-data']['network-interfaces']
+ elif mydata.get('network-config'):
+ netdata['format'] = 'network-config'
+ netdata['data'] = mydata['network-config']
+
+ # if this is the local datasource or 'seedfrom' was used
+ # and the source of the seed was self.dsmode.
+ # Then see if there is network config to apply.
+ # note this is obsolete network-interfaces style seeding.
+ if self.dsmode in ("local", seeded_network):
+ if mydata['meta-data'].get('network-interfaces'):
+ LOG.debug("Updating network interfaces from %s", self)
+ self.distro.apply_network(
+ mydata['meta-data']['network-interfaces'])
if mydata['meta-data']['dsmode'] == self.dsmode:
self.seed = ",".join(found)
self.metadata = mydata['meta-data']
self.userdata_raw = mydata['user-data']
self.vendordata_raw = mydata['vendor-data']
+ self._network_config = mydata['network-config']
return True
LOG.debug("%s: not claiming datasource, dsmode=%s", self,
mydata['meta-data']['dsmode'])
return False
- def check_instance_id(self):
+ def check_instance_id(self, sys_cfg):
# quickly (local check only) if self.instance_id is still valid
# we check kernel command line or files.
current = self.get_instance_id()
@@ -205,11 +217,15 @@ class DataSourceNoCloud(sources.DataSource):
return None
quick_id = _quick_read_instance_id(cmdline_id=self.cmdline_id,
- dirs=[self.seed_dir])
+ dirs=self.seed_dirs)
if not quick_id:
return None
return quick_id == current
+ @property
+ def network_config(self):
+ return self._network_config
+
def _quick_read_instance_id(cmdline_id, dirs=None):
if dirs is None:
@@ -279,9 +295,17 @@ def parse_cmdline_data(ds_id, fill, cmdline=None):
def _merge_new_seed(cur, seeded):
ret = cur.copy()
- ret['meta-data'] = util.mergemanydict([cur['meta-data'],
- util.load_yaml(seeded['meta-data'])])
- ret['user-data'] = seeded['user-data']
+
+ newmd = seeded.get('meta-data', {})
+ if not isinstance(seeded['meta-data'], dict):
+ newmd = util.load_yaml(seeded['meta-data'])
+ ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])
+
+ if seeded.get('network-config'):
+ ret['network-config'] = util.load_yaml(seeded['network-config'])
+
+ if 'user-data' in seeded:
+ ret['user-data'] = seeded['user-data']
if 'vendor-data' in seeded:
ret['vendor-data'] = seeded['vendor-data']
return ret
@@ -292,7 +316,6 @@ class DataSourceNoCloudNet(DataSourceNoCloud):
DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
self.cmdline_id = "ds=nocloud-net"
self.supported_seed_starts = ("http://", "https://", "ftp://")
- self.seed_dir = os.path.join(paths.seed_dir, 'nocloud-net')
self.dsmode = "net"
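
With this change the seed may also carry a 'network-config' file next to user-data and meta-data, in either nocloud seed directory. A small sketch of writing such a seed follows; the path and values are illustrative, and the YAML content mirrors the version-1 format shown earlier.

    import os
    import yaml

    seed = '/var/lib/cloud/seed/nocloud'   # or .../nocloud-net
    if not os.path.isdir(seed):
        os.makedirs(seed)
    with open(os.path.join(seed, 'meta-data'), 'w') as f:
        f.write('instance-id: iid-local01\n')
    with open(os.path.join(seed, 'user-data'), 'w') as f:
        f.write('#cloud-config\n{}\n')
    with open(os.path.join(seed, 'network-config'), 'w') as f:
        yaml.safe_dump({'version': 1, 'config': [
            {'type': 'physical', 'name': 'eth0',
             'subnets': [{'type': 'dhcp'}]}]}, f)
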
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 79bb9d63..f7f4590b 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -150,7 +150,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
return True
- def check_instance_id(self):
+ def check_instance_id(self, sys_cfg):
# quickly (local check only) if self.instance_id is still valid
return sources.instance_id_matches_system_uuid(self.get_instance_id())
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 28540a7b..82cd3553 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -217,10 +217,14 @@ class DataSource(object):
def get_package_mirror_info(self):
return self.distro.get_package_mirror_info(data_source=self)
- def check_instance_id(self):
+ def check_instance_id(self, sys_cfg):
# quickly (local check only) if self.instance_id is still
return False
+ @property
+ def network_config(self):
+ return None
+
def normalize_pubkey_data(pubkey_data):
keys = []
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index bd93d22f..1aa6bbae 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -51,11 +51,13 @@ OS_LATEST = 'latest'
OS_FOLSOM = '2012-08-10'
OS_GRIZZLY = '2013-04-04'
OS_HAVANA = '2013-10-17'
+OS_LIBERTY = '2015-10-15'
# keep this in chronological order. new supported versions go at the end.
OS_VERSIONS = (
OS_FOLSOM,
OS_GRIZZLY,
OS_HAVANA,
+ OS_LIBERTY,
)
@@ -229,6 +231,11 @@ class BaseReader(object):
False,
load_json_anytype,
)
+ files['networkdata'] = (
+ self._path_join("openstack", version, 'network_data.json'),
+ False,
+ load_json_anytype,
+ )
return files
results = {
@@ -334,7 +341,7 @@ class ConfigDriveReader(BaseReader):
path = self._path_join(self.base_path, 'openstack')
found = [d for d in os.listdir(path)
if os.path.isdir(os.path.join(path))]
- self._versions = found
+ self._versions = sorted(found)
return self._versions
def _read_ec2_metadata(self):
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index edad6450..143a4fc9 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -43,6 +43,7 @@ from cloudinit import distros
from cloudinit import helpers
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import sources
from cloudinit import type_utils
from cloudinit import util
@@ -193,40 +194,12 @@ class Init(object):
# We try to restore from a current link and static path
# by using the instance link, if purge_cache was called
# the file wont exist.
- pickled_fn = self.paths.get_ipath_cur('obj_pkl')
- pickle_contents = None
- try:
- pickle_contents = util.load_file(pickled_fn, decode=False)
- except Exception as e:
- if os.path.isfile(pickled_fn):
- LOG.warn("failed loading pickle in %s: %s" % (pickled_fn, e))
- pass
-
- # This is expected so just return nothing
- # successfully loaded...
- if not pickle_contents:
- return None
- try:
- return pickle.loads(pickle_contents)
- except Exception:
- util.logexc(LOG, "Failed loading pickled blob from %s", pickled_fn)
- return None
+ return _pkl_load(self.paths.get_ipath_cur('obj_pkl'))
def _write_to_cache(self):
if self.datasource is NULL_DATA_SOURCE:
return False
- pickled_fn = self.paths.get_ipath_cur("obj_pkl")
- try:
- pk_contents = pickle.dumps(self.datasource)
- except Exception:
- util.logexc(LOG, "Failed pickling datasource %s", self.datasource)
- return False
- try:
- util.write_file(pickled_fn, pk_contents, omode="wb", mode=0o400)
- except Exception:
- util.logexc(LOG, "Failed pickling datasource to %s", pickled_fn)
- return False
- return True
+ return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))
def _get_datasources(self):
# Any config provided???
@@ -250,7 +223,8 @@ class Init(object):
if ds and existing == "trust":
myrep.description = "restored from cache: %s" % ds
elif ds and existing == "check":
- if hasattr(ds, 'check_instance_id') and ds.check_instance_id():
+ if (hasattr(ds, 'check_instance_id') and
+ ds.check_instance_id(self.cfg)):
myrep.description = "restored from checked cache: %s" % ds
else:
myrep.description = "cache invalid in datasource: %s" % ds
@@ -595,6 +569,35 @@ class Init(object):
# Run the handlers
self._do_handlers(user_data_msg, c_handlers_list, frequency)
+ def _find_networking_config(self):
+ disable_file = os.path.join(
+ self.paths.get_cpath('data'), 'upgraded-network')
+ if os.path.exists(disable_file):
+ return (None, disable_file)
+
+ cmdline_cfg = ('cmdline', net.read_kernel_cmdline_config())
+ dscfg = ('ds', None)
+ if self.datasource and hasattr(self.datasource, 'network_config'):
+ dscfg = ('ds', self.datasource.network_config)
+ sys_cfg = ('system_cfg', self.cfg.get('network'))
+
+ for loc, ncfg in (cmdline_cfg, dscfg, sys_cfg):
+ if net.is_disabled_cfg(ncfg):
+ LOG.debug("network config disabled by %s", loc)
+ return (None, loc)
+ if ncfg:
+ return (ncfg, loc)
+ return (net.generate_fallback_config(), "fallback")
+
+ def apply_network_config(self):
+ netcfg, src = self._find_networking_config()
+ if netcfg is None:
+ LOG.info("network config is disabled by %s", src)
+ return
+
+ LOG.info("Applying network configuration from %s: %s", src, netcfg)
+ return self.distro.apply_network_config(netcfg)
+
class Modules(object):
def __init__(self, init, cfg_files=None, reporter=None):
@@ -796,3 +799,36 @@ def fetch_base_config():
base_cfgs.append(default_cfg)
return util.mergemanydict(base_cfgs)
+
+
+def _pkl_store(obj, fname):
+ try:
+ pk_contents = pickle.dumps(obj)
+ except Exception:
+ util.logexc(LOG, "Failed pickling datasource %s", obj)
+ return False
+ try:
+ util.write_file(fname, pk_contents, omode="wb", mode=0o400)
+ except Exception:
+ util.logexc(LOG, "Failed pickling datasource to %s", fname)
+ return False
+ return True
+
+
+def _pkl_load(fname):
+ pickle_contents = None
+ try:
+ pickle_contents = util.load_file(fname, decode=False)
+ except Exception as e:
+ if os.path.isfile(fname):
+ LOG.warn("failed loading pickle in %s: %s" % (fname, e))
+ pass
+
+ # This is allowed so just return nothing successfully loaded...
+ if not pickle_contents:
+ return None
+ try:
+ return pickle.loads(pickle_contents)
+ except Exception:
+ util.logexc(LOG, "Failed loading pickled blob from %s", fname)
+ return None
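
_find_networking_config() consults, in order, the kernel command line, the datasource, and system configuration, honoring an explicit disable at any of those levels, and only then falls back to net.generate_fallback_config(). A simplified standalone restatement of that precedence (illustrative only; the disable marker shown is an assumption about what net.is_disabled_cfg() treats as disabled):

    # simplified, illustrative restatement of the precedence above
    def pick_network_config(cmdline_cfg, ds_cfg, sys_cfg, fallback):
        for src, cfg in (('cmdline', cmdline_cfg), ('ds', ds_cfg),
                         ('system_cfg', sys_cfg)):
            if isinstance(cfg, dict) and cfg.get('config') == 'disabled':
                # assumed disable form; the real check is net.is_disabled_cfg()
                return (None, src)
            if cfg:
                return (cfg, src)
        return (fallback, 'fallback')
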
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 20916e53..0d21e11b 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -80,6 +80,8 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
['running-in-container'],
['lxc-is-container'])
+PROC_CMDLINE = None
+
def decode_binary(blob, encoding='utf-8'):
# Converts a binary type into a text type using given encoding.
@@ -1191,12 +1193,27 @@ def load_file(fname, read_cb=None, quiet=False, decode=True):
def get_cmdline():
if 'DEBUG_PROC_CMDLINE' in os.environ:
- cmdline = os.environ["DEBUG_PROC_CMDLINE"]
+ return os.environ["DEBUG_PROC_CMDLINE"]
+
+ global PROC_CMDLINE
+ if PROC_CMDLINE is not None:
+ return PROC_CMDLINE
+
+ if is_container():
+ try:
+ contents = load_file("/proc/1/cmdline")
+ # replace nulls with space and drop trailing null
+ cmdline = contents.replace("\x00", " ")[:-1]
+ except Exception as e:
+ LOG.warn("failed reading /proc/1/cmdline: %s", e)
+ cmdline = ""
else:
try:
cmdline = load_file("/proc/cmdline").strip()
except:
cmdline = ""
+
+ PROC_CMDLINE = cmdline
return cmdline
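
get_cmdline() now memoizes its result in PROC_CMDLINE and, inside a container, rebuilds the command line from /proc/1/cmdline; the DEBUG_PROC_CMDLINE override bypasses the cache entirely. Illustrative use, e.g. from a test:

    # illustrative: override the kernel command line without touching /proc
    import os
    from cloudinit import util

    os.environ['DEBUG_PROC_CMDLINE'] = 'ro console=ttyS0 root=/dev/vda1'
    assert 'console=ttyS0' in util.get_cmdline()
    del os.environ['DEBUG_PROC_CMDLINE']  # later calls read (and cache) the real value
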
@@ -1569,7 +1586,7 @@ def uptime():
try:
if os.path.exists("/proc/uptime"):
method = '/proc/uptime'
- contents = load_file("/proc/uptime").strip()
+ contents = load_file("/proc/uptime")
if contents:
uptime_str = contents.split()[0]
else:
diff --git a/setup.py b/setup.py
index 0b261dfe..f86727b2 100755
--- a/setup.py
+++ b/setup.py
@@ -183,7 +183,8 @@ else:
[f for f in glob('doc/examples/*') if is_f(f)]),
(USR + '/share/doc/cloud-init/examples/seed',
[f for f in glob('doc/examples/seed/*') if is_f(f)]),
- (LIB + '/udev/rules.d', ['udev/66-azure-ephemeral.rules']),
+ (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]),
+ (LIB + '/udev', ['udev/cloud-init-wait']),
]
# Use a subclass for install that handles
# adding on the right init system configuration files
diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator
index 2d319695..ae286d58 100755
--- a/systemd/cloud-init-generator
+++ b/systemd/cloud-init-generator
@@ -107,6 +107,9 @@ main() {
"ln $CLOUD_SYSTEM_TARGET $link_path"
fi
fi
+ # this touches /run/cloud-init/enabled, which is read by
+ # udev/cloud-init-wait. If not present, it will exit quickly.
+ touch "$LOG_D/$ENABLE"
elif [ "$result" = "$DISABLE" ]; then
if [ -f "$link_path" ]; then
if rm -f "$link_path"; then
diff --git a/systemd/cloud-init-local.service b/systemd/cloud-init-local.service
index 475a2e11..b19eeaee 100644
--- a/systemd/cloud-init-local.service
+++ b/systemd/cloud-init-local.service
@@ -2,6 +2,7 @@
Description=Initial cloud-init job (pre-networking)
DefaultDependencies=no
Wants=local-fs.target
+Wants=network-pre.target
After=local-fs.target
Conflicts=shutdown.target
Before=network-pre.target
@@ -10,6 +11,7 @@ Before=shutdown.target
[Service]
Type=oneshot
ExecStart=/usr/bin/cloud-init init --local
+ExecStart=/bin/touch /run/cloud-init/network-config-ready
RemainAfterExit=yes
TimeoutSec=0
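
Together with the existing Before=network-pre.target, the added Wants=network-pre.target pulls that target into the boot transaction, and because Type=oneshot runs ExecStart= lines sequentially, /run/cloud-init/network-config-ready is only touched after `cloud-init init --local` has finished (i.e. after apply_network_config() has run); that file is what udev/cloud-init-wait polls for below.
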
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index bfd787d1..89b15f54 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -59,6 +59,34 @@ OSTACK_META = {
CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
+NETWORK_DATA = {
+ 'services': [
+ {'type': 'dns', 'address': '199.204.44.24'},
+ {'type': 'dns', 'address': '199.204.47.54'}
+ ],
+ 'links': [
+ {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd',
+ 'ethernet_mac_address': 'fa:16:3e:69:b0:58',
+ 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'},
+ {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33',
+ 'ethernet_mac_address': 'fa:16:3e:d4:57:ad',
+ 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
+ {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc',
+ 'ethernet_mac_address': 'fa:16:3e:05:30:fe',
+ 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04'}
+ ],
+ 'networks': [
+ {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp',
+ 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235',
+ 'id': 'network0'},
+ {'link': 'tap2f88d109-5b', 'type': 'ipv4_dhcp',
+ 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54',
+ 'id': 'network1'},
+ {'link': 'tap1a5382f8-04', 'type': 'ipv4_dhcp',
+ 'network_id': 'dab2ba57-cae2-4311-a5ed-010b263891f5',
+ 'id': 'network2'}
+ ]
+}
CFG_DRIVE_FILES_V2 = {
'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
@@ -70,7 +98,11 @@ CFG_DRIVE_FILES_V2 = {
'openstack/content/0000': CONTENT_0,
'openstack/content/0001': CONTENT_1,
'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/latest/user_data': USER_DATA}
+ 'openstack/latest/user_data': USER_DATA,
+ 'openstack/latest/network_data.json': json.dumps(NETWORK_DATA),
+ 'openstack/2015-10-15/meta_data.json': json.dumps(OSTACK_META),
+ 'openstack/2015-10-15/user_data': USER_DATA,
+ 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)}
class TestConfigDriveDataSource(TestCase):
@@ -225,6 +257,7 @@ class TestConfigDriveDataSource(TestCase):
self.assertEqual(USER_DATA, found['userdata'])
self.assertEqual(expected_md, found['metadata'])
+ self.assertEqual(NETWORK_DATA, found['networkdata'])
self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0)
self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1)
@@ -250,6 +283,7 @@ class TestConfigDriveDataSource(TestCase):
data = copy(CFG_DRIVE_FILES_V2)
data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}"
+ data["openstack/2015-10-15/meta_data.json"] = "non-json garbage {}"
data["openstack/latest/meta_data.json"] = "non-json garbage {}"
populate_dir(self.tmp, data)
@@ -321,6 +355,19 @@ class TestConfigDriveDataSource(TestCase):
self.assertEqual(myds.get_public_ssh_keys(),
[OSTACK_META['public_keys']['mykey']])
+ def test_network_data_is_found(self):
+ """Verify that network_data is present in ds in config-drive-v2."""
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ myds = cfg_ds_from_dir(self.tmp)
+ self.assertEqual(myds.network_json, NETWORK_DATA)
+
+ def test_network_config_is_converted(self):
+ """Verify that network_data is converted and present on ds object."""
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ myds = cfg_ds_from_dir(self.tmp)
+ network_config = ds.convert_network_data(NETWORK_DATA)
+ self.assertEqual(myds.network_config, network_config)
+
def cfg_ds_from_dir(seed_d):
found = ds.read_config_drive(seed_d)
@@ -339,6 +386,8 @@ def populate_ds_from_read_config(cfg_ds, source, results):
cfg_ds.ec2_metadata = results.get('ec2-metadata')
cfg_ds.userdata_raw = results.get('userdata')
cfg_ds.version = results.get('version')
+ cfg_ds.network_json = results.get('networkdata')
+ cfg_ds._network_config = ds.convert_network_data(cfg_ds.network_json)
def populate_dir(seed_dir, files):
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index 6d30c5b8..2c2a424d 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -109,8 +109,9 @@ class TestNetCfgDistro(TestCase):
ub_distro.apply_network(BASE_NET_CFG, False)
self.assertEquals(len(write_bufs), 1)
- self.assertIn('/etc/network/interfaces', write_bufs)
- write_buf = write_bufs['/etc/network/interfaces']
+ eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg'
+ self.assertIn(eni_name, write_bufs)
+ write_buf = write_bufs[eni_name]
self.assertEquals(str(write_buf).strip(), BASE_NET_CFG.strip())
self.assertEquals(write_buf.mode, 0o644)
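
The expectation moves from rewriting /etc/network/interfaces to writing a fragment at /etc/network/interfaces.d/50-cloud-init.cfg. For that fragment to take effect, the main interfaces file is assumed to source the directory, e.g. the stock Ubuntu stanza (shown only as an illustration, not written by this change):

    # /etc/network/interfaces (illustrative; stock Ubuntu layout assumed)
    auto lo
    iface lo inet loopback

    source /etc/network/interfaces.d/*.cfg
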
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
new file mode 100644
index 00000000..dfb31710
--- /dev/null
+++ b/tests/unittests/test_net.py
@@ -0,0 +1,127 @@
+from cloudinit import util
+from cloudinit import net
+from .helpers import TestCase
+
+import base64
+import copy
+import io
+import gzip
+import json
+import os
+
+DHCP_CONTENT_1 = """
+DEVICE='eth0'
+PROTO='dhcp'
+IPV4ADDR='192.168.122.89'
+IPV4BROADCAST='192.168.122.255'
+IPV4NETMASK='255.255.255.0'
+IPV4GATEWAY='192.168.122.1'
+IPV4DNS0='192.168.122.1'
+IPV4DNS1='0.0.0.0'
+HOSTNAME='foohost'
+DNSDOMAIN=''
+NISDOMAIN=''
+ROOTSERVER='192.168.122.1'
+ROOTPATH=''
+filename=''
+UPTIME='21'
+DHCPLEASETIME='3600'
+DOMAINSEARCH='foo.com'
+"""
+
+DHCP_EXPECTED_1 = {
+ 'name': 'eth0',
+ 'type': 'physical',
+ 'subnets': [{'broadcast': '192.168.122.255',
+ 'gateway': '192.168.122.1',
+ 'dns_search': ['foo.com'],
+ 'type': 'dhcp',
+ 'netmask': '255.255.255.0',
+ 'dns_nameservers': ['192.168.122.1']}],
+}
+
+
+STATIC_CONTENT_1 = """
+DEVICE='eth1'
+PROTO='static'
+IPV4ADDR='10.0.0.2'
+IPV4BROADCAST='10.0.0.255'
+IPV4NETMASK='255.255.255.0'
+IPV4GATEWAY='10.0.0.1'
+IPV4DNS0='10.0.1.1'
+IPV4DNS1='0.0.0.0'
+HOSTNAME='foohost'
+UPTIME='21'
+DHCPLEASETIME='3600'
+DOMAINSEARCH='foo.com'
+"""
+
+STATIC_EXPECTED_1 = {
+ 'name': 'eth1',
+ 'type': 'physical',
+ 'subnets': [{'broadcast': '10.0.0.255', 'gateway': '10.0.0.1',
+ 'dns_search': ['foo.com'], 'type': 'static',
+ 'netmask': '255.255.255.0',
+ 'dns_nameservers': ['10.0.1.1']}],
+}
+
+
+class TestNetConfigParsing(TestCase):
+ simple_cfg = {
+ 'config': [{"type": "physical", "name": "eth0",
+ "mac_address": "c0:d6:9f:2c:e8:80",
+ "subnets": [{"type": "dhcp"}]}]}
+
+ def test_klibc_convert_dhcp(self):
+ found = net._klibc_to_config_entry(DHCP_CONTENT_1)
+ self.assertEqual(found, ('eth0', DHCP_EXPECTED_1))
+
+ def test_klibc_convert_static(self):
+ found = net._klibc_to_config_entry(STATIC_CONTENT_1)
+ self.assertEqual(found, ('eth1', STATIC_EXPECTED_1))
+
+ def test_config_from_klibc_net_cfg(self):
+ files = []
+ pairs = (('net-eth0.cfg', DHCP_CONTENT_1),
+ ('net-eth1.cfg', STATIC_CONTENT_1))
+
+ macs = {'eth1': 'b8:ae:ed:75:ff:2b',
+ 'eth0': 'b8:ae:ed:75:ff:2a'}
+
+ dhcp = copy.deepcopy(DHCP_EXPECTED_1)
+ dhcp['mac_address'] = macs['eth0']
+
+ static = copy.deepcopy(STATIC_EXPECTED_1)
+ static['mac_address'] = macs['eth1']
+
+ expected = {'version': 1, 'config': [dhcp, static]}
+ with util.tempdir() as tmpd:
+ for fname, content in pairs:
+ fp = os.path.join(tmpd, fname)
+ files.append(fp)
+ util.write_file(fp, content)
+
+ found = net.config_from_klibc_net_cfg(files=files, mac_addrs=macs)
+ self.assertEqual(found, expected)
+
+ def test_cmdline_with_b64(self):
+ data = base64.b64encode(json.dumps(self.simple_cfg).encode())
+ encoded_text = data.decode()
+ cmdline = 'ro network-config=' + encoded_text + ' root=foo'
+ found = net.read_kernel_cmdline_config(cmdline=cmdline)
+ self.assertEqual(found, self.simple_cfg)
+
+ def test_cmdline_with_b64_gz(self):
+ data = _gzip_data(json.dumps(self.simple_cfg).encode())
+ encoded_text = base64.b64encode(data).decode()
+ cmdline = 'ro network-config=' + encoded_text + ' root=foo'
+ found = net.read_kernel_cmdline_config(cmdline=cmdline)
+ self.assertEqual(found, self.simple_cfg)
+
+
+def _gzip_data(data):
+ with io.BytesIO() as iobuf:
+ gzfp = gzip.GzipFile(mode="wb", fileobj=iobuf)
+ gzfp.write(data)
+ gzfp.close()
+ return iobuf.getvalue()
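
The cmdline tests above decode a network-config= parameter that is JSON, optionally gzip-compressed, then base64-encoded. Building such a parameter by hand (an illustrative sketch mirroring test_cmdline_with_b64_gz):

    # illustrative: construct the network-config= kernel parameter
    import base64
    import gzip
    import io
    import json

    cfg = {'config': [{'type': 'physical', 'name': 'eth0',
                       'subnets': [{'type': 'dhcp'}]}]}

    buf = io.BytesIO()
    with gzip.GzipFile(mode='wb', fileobj=buf) as gz:
        gz.write(json.dumps(cfg).encode())
    param = 'network-config=' + base64.b64encode(buf.getvalue()).decode()
    # e.g. append to the kernel command line: "ro " + param + " root=/dev/vda1"
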
diff --git a/udev/79-cloud-init-net-wait.rules b/udev/79-cloud-init-net-wait.rules
new file mode 100644
index 00000000..8344222a
--- /dev/null
+++ b/udev/79-cloud-init-net-wait.rules
@@ -0,0 +1,10 @@
+# cloud-init cold/hot-plug blocking mechanism
+# this file blocks further processing of network events
+# until cloud-init local has had a chance to read and apply network config
+SUBSYSTEM!="net", GOTO="cloudinit_naming_end"
+ACTION!="add", GOTO="cloudinit_naming_end"
+
+IMPORT{program}="/lib/udev/cloud-init-wait"
+
+LABEL="cloudinit_naming_end"
+# vi: ts=4 expandtab syntax=udevrules
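
Because IMPORT{program} executes /lib/udev/cloud-init-wait synchronously, processing of each network "add" uevent pauses until the helper returns, so rules that run later in the udev sequence only fire once cloud-init-local has written its network configuration. The helper itself is installed by the setup.py change above.
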
diff --git a/udev/cloud-init-wait b/udev/cloud-init-wait
new file mode 100755
index 00000000..7d53dee4
--- /dev/null
+++ b/udev/cloud-init-wait
@@ -0,0 +1,68 @@
+#!/bin/sh
+
+CI_NET_READY="/run/cloud-init/network-config-ready"
+LOG="/run/cloud-init/${0##*/}.log"
+LOG_INIT=0
+DEBUG=0
+
+block_until_ready() {
+ local fname="$1"
+ local naplen="$2" max="$3" n=0
+ while ! [ -f "$fname" ]; do
+ n=$(($n+1))
+ [ "$n" -ge "$max" ] && return 1
+ sleep $naplen
+ done
+}
+
+log() {
+ [ -n "${LOG}" ] || return
+ [ "${DEBUG:-0}" = "0" ] && return
+
+ if [ $LOG_INIT = 0 ]; then
+ if [ -d "${LOG%/*}" ] || mkdir -p "${LOG%/*}"; then
+ LOG_INIT=1
+ else
+ echo "${0##*/}: WARN: failed to create log dir ${LOG%/*}" 1>&2
+ return
+ fi
+ elif [ "$LOG_INIT" = "-1" ]; then
+ return
+ fi
+ local info="$$ $INTERFACE"
+ if [ "$DEBUG" -gt 1 ]; then
+ local up idle
+ read up idle < /proc/uptime
+ info="$$ $INTERFACE $up"
+ fi
+ echo "[$info]" "$@" >> "$LOG"
+}
+
+main() {
+ local name="" readyfile="$CI_NET_READY"
+ local info="INTERFACE=${INTERFACE} ID_NET_NAME=${ID_NET_NAME}"
+ info="$info ID_NET_NAME_PATH=${ID_NET_NAME_PATH}"
+ info="$info MAC_ADDRESS=${MAC_ADDRESS}"
+ log "$info"
+
+ ## Check whether cloud-init is enabled (/run/cloud-init/enabled exists).
+ ## If cloud-init is disabled we do not want to do anything.
+ if [ ! -f "/run/cloud-init/enabled" ]; then
+ log "cloud-init disabled"
+ return 0
+ fi
+
+ if [ "${INTERFACE#lo}" != "$INTERFACE" ]; then
+ return 0
+ fi
+
+ block_until_ready "$readyfile" .1 600 ||
+ { log "failed waiting for ready on $INTERFACE"; return 1; }
+
+ log "net config ready"
+}
+
+main "$@"
+exit
+
+# vi: ts=4 expandtab
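
With a nap length of .1 seconds and a limit of 600 iterations, block_until_ready gives cloud-init-local roughly 60 seconds to create /run/cloud-init/network-config-ready before the helper gives up and returns failure.
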