author     Joshua Harlow <harlowja@gmail.com>  2016-06-10 14:22:17 -0700
committer  Joshua Harlow <harlowja@gmail.com>  2016-06-10 14:22:17 -0700
commit     a8de234ec60c76b7b788513e8111be04ad9205fb (patch)
tree       28f16138e108fab6b66a748d9ef697d732c4107a
parent     a3720feb537cb5189bccf4889b601704a4cdf1da (diff)
parent     6ada153ebfd9483d76f904dafaa1fc80f61c9205 (diff)
Refactor a large part of the networking code.
Splits off distro-specific code into separate files so that other kinds of networking configuration can be written for the various distros that cloud-init supports. It also isolates some of the cloudinit.net code so that it can more easily be used on its own (and incorporated into other projects such as curtin). Along the way it adds tests so that the networking code, and the format conversions it performs, can be tested (to some level) going forward.
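
For orientation, a minimal sketch of how the pieces fit together after this refactor (not part of the commit; the sample config and target path are hypothetical, and the calls mirror the debian.py, cloudinit/net/network_state.py and cloudinit/net/eni.py hunks below):

# Minimal sketch of the post-refactor flow; the config and target path are
# illustrative only and not taken from this commit.
from cloudinit.net import eni, network_state

# A hypothetical version-1 network config, as a datasource might supply it.
net_config = {
    'version': 1,
    'config': [
        {'type': 'physical', 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]},
    ],
}

# Parsing now lives in cloudinit.net.network_state (see the hunk below).
ns = network_state.parse_net_config_data(net_config)

# Rendering is done by a format-specific Renderer object held by the distro
# (mirroring the new debian.py code), instead of module-level functions in
# cloudinit.net.
renderer = eni.Renderer()
renderer.render_network_state(
    target="/tmp/eni-example",      # hypothetical scratch directory
    network_state=ns,
    eni="etc/network/interfaces",
    netrules=None)

The same Renderer pattern is what allows other distros to plug in their own output formats later, which is the point of the split described above.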
-rw-r--r--  cloudinit/cs_utils.py                                    3
-rw-r--r--  cloudinit/distros/debian.py                             10
-rw-r--r--  cloudinit/net/__init__.py                              683
-rw-r--r--  cloudinit/net/cmdline.py                               203
-rw-r--r--  cloudinit/net/eni.py                                   457
-rw-r--r--  cloudinit/net/network_state.py                         281
-rw-r--r--  cloudinit/serial.py                                     50
-rw-r--r--  cloudinit/sources/DataSourceAzure.py                     2
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py             152
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py                   4
-rw-r--r--  cloudinit/sources/helpers/openstack.py                 145
-rw-r--r--  cloudinit/stages.py                                      3
-rw-r--r--  cloudinit/util.py                                       16
-rwxr-xr-x  packages/bddeb                                           1
-rw-r--r--  requirements.txt                                         8
-rwxr-xr-x  setup.py                                                 1
-rw-r--r--  test-requirements.txt                                    1
-rw-r--r--  tests/unittests/helpers.py                              85
-rw-r--r--  tests/unittests/test__init__.py                         17
-rw-r--r--  tests/unittests/test_cli.py                              7
-rw-r--r--  tests/unittests/test_cs_util.py                         27
-rw-r--r--  tests/unittests/test_datasource/test_azure.py           12
-rw-r--r--  tests/unittests/test_datasource/test_azure_helper.py    12
-rw-r--r--  tests/unittests/test_datasource/test_cloudsigma.py       2
-rw-r--r--  tests/unittests/test_datasource/test_cloudstack.py      11
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py    170
-rw-r--r--  tests/unittests/test_datasource/test_nocloud.py         15
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py          6
-rw-r--r--  tests/unittests/test_net.py                             95
-rw-r--r--  tests/unittests/test_reporting.py                        4
-rw-r--r--  tests/unittests/test_rh_subscription.py                 22
-rw-r--r--  tox.ini                                                 17
32 files changed, 1328 insertions(+), 1194 deletions(-)
diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py
index 83ac1a0e..412431f2 100644
--- a/cloudinit/cs_utils.py
+++ b/cloudinit/cs_utils.py
@@ -33,7 +33,8 @@ API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
import json
import platform
-import serial
+from cloudinit import serial
+
# these high timeouts are necessary as read may read a lot of data.
READ_TIMEOUT = 60
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 32bef1cd..e71aaa97 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -26,6 +26,7 @@ from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import net
+from cloudinit.net import eni
from cloudinit import util
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -56,6 +57,7 @@ class Distro(distros.Distro):
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
self.osfamily = 'debian'
+ self._net_renderer = eni.Renderer()
def apply_locale(self, locale, out_fn=None):
if not out_fn:
@@ -80,10 +82,10 @@ class Distro(distros.Distro):
def _write_network_config(self, netconfig):
ns = net.parse_net_config_data(netconfig)
- net.render_network_state(target="/", network_state=ns,
- eni=self.network_conf_fn,
- links_prefix=self.links_prefix,
- netrules=None)
+ self._net_renderer.render_network_state(
+ target="/", network_state=ns,
+ eni=self.network_conf_fn, links_prefix=self.links_prefix,
+ netrules=None)
_maybe_remove_legacy_eth0()
return []
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 49e9d5c2..f5668fff 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -16,42 +16,17 @@
# You should have received a copy of the GNU Affero General Public License
# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-import base64
import errno
-import glob
-import gzip
-import io
+import logging
import os
import re
-import shlex
-from cloudinit import log as logging
-from cloudinit.net import network_state
-from cloudinit.net.udev import generate_udev_rule
from cloudinit import util
LOG = logging.getLogger(__name__)
-
SYS_CLASS_NET = "/sys/class/net/"
-LINKS_FNAME_PREFIX = "etc/systemd/network/50-cloud-init-"
-
-NET_CONFIG_OPTIONS = [
- "address", "netmask", "broadcast", "network", "metric", "gateway",
- "pointtopoint", "media", "mtu", "hostname", "leasehours", "leasetime",
- "vendor", "client", "bootfile", "server", "hwaddr", "provider", "frame",
- "netnum", "endpoint", "local", "ttl",
-]
-
-NET_CONFIG_COMMANDS = [
- "pre-up", "up", "post-up", "down", "pre-down", "post-down",
-]
-
-NET_CONFIG_BRIDGE_OPTIONS = [
- "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcinit",
- "bridge_hello", "bridge_maxage", "bridge_maxwait", "bridge_stp",
-]
-
DEFAULT_PRIMARY_INTERFACE = 'eth0'
+LINKS_FNAME_PREFIX = "etc/systemd/network/50-cloud-init-"
def sys_dev_path(devname, path=""):
@@ -60,23 +35,22 @@ def sys_dev_path(devname, path=""):
def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None):
try:
- contents = ""
- with open(sys_dev_path(devname, path), "r") as fp:
- contents = fp.read().strip()
- if translate is None:
- return contents
-
- try:
- return translate.get(contents)
- except KeyError:
- LOG.debug("found unexpected value '%s' in '%s/%s'", contents,
- devname, path)
- if keyerror is not None:
- return keyerror
- raise
- except OSError as e:
- if e.errno == errno.ENOENT and enoent is not None:
- return enoent
+ contents = util.load_file(sys_dev_path(devname, path))
+ except (OSError, IOError) as e:
+ if getattr(e, 'errno', None) == errno.ENOENT:
+ if enoent is not None:
+ return enoent
+ raise
+ contents = contents.strip()
+ if translate is None:
+ return contents
+ try:
+ return translate.get(contents)
+ except KeyError:
+ LOG.debug("found unexpected value '%s' in '%s/%s'", contents,
+ devname, path)
+ if keyerror is not None:
+ return keyerror
raise
@@ -127,509 +101,7 @@ def get_devicelist():
class ParserError(Exception):
- """Raised when parser has issue parsing the interfaces file."""
-
-
-def parse_deb_config_data(ifaces, contents, src_dir, src_path):
- """Parses the file contents, placing result into ifaces.
-
- '_source_path' is added to every dictionary entry to define which file
- the configration information came from.
-
- :param ifaces: interface dictionary
- :param contents: contents of interfaces file
- :param src_dir: directory interfaces file was located
- :param src_path: file path the `contents` was read
- """
- currif = None
- for line in contents.splitlines():
- line = line.strip()
- if line.startswith('#'):
- continue
- split = line.split(' ')
- option = split[0]
- if option == "source-directory":
- parsed_src_dir = split[1]
- if not parsed_src_dir.startswith("/"):
- parsed_src_dir = os.path.join(src_dir, parsed_src_dir)
- for expanded_path in glob.glob(parsed_src_dir):
- dir_contents = os.listdir(expanded_path)
- dir_contents = [
- os.path.join(expanded_path, path)
- for path in dir_contents
- if (os.path.isfile(os.path.join(expanded_path, path)) and
- re.match("^[a-zA-Z0-9_-]+$", path) is not None)
- ]
- for entry in dir_contents:
- with open(entry, "r") as fp:
- src_data = fp.read().strip()
- abs_entry = os.path.abspath(entry)
- parse_deb_config_data(
- ifaces, src_data,
- os.path.dirname(abs_entry), abs_entry)
- elif option == "source":
- new_src_path = split[1]
- if not new_src_path.startswith("/"):
- new_src_path = os.path.join(src_dir, new_src_path)
- for expanded_path in glob.glob(new_src_path):
- with open(expanded_path, "r") as fp:
- src_data = fp.read().strip()
- abs_path = os.path.abspath(expanded_path)
- parse_deb_config_data(
- ifaces, src_data,
- os.path.dirname(abs_path), abs_path)
- elif option == "auto":
- for iface in split[1:]:
- if iface not in ifaces:
- ifaces[iface] = {
- # Include the source path this interface was found in.
- "_source_path": src_path
- }
- ifaces[iface]['auto'] = True
- elif option == "iface":
- iface, family, method = split[1:4]
- if iface not in ifaces:
- ifaces[iface] = {
- # Include the source path this interface was found in.
- "_source_path": src_path
- }
- elif 'family' in ifaces[iface]:
- raise ParserError(
- "Interface %s can only be defined once. "
- "Re-defined in '%s'." % (iface, src_path))
- ifaces[iface]['family'] = family
- ifaces[iface]['method'] = method
- currif = iface
- elif option == "hwaddress":
- if split[1] == "ether":
- val = split[2]
- else:
- val = split[1]
- ifaces[currif]['hwaddress'] = val
- elif option in NET_CONFIG_OPTIONS:
- ifaces[currif][option] = split[1]
- elif option in NET_CONFIG_COMMANDS:
- if option not in ifaces[currif]:
- ifaces[currif][option] = []
- ifaces[currif][option].append(' '.join(split[1:]))
- elif option.startswith('dns-'):
- if 'dns' not in ifaces[currif]:
- ifaces[currif]['dns'] = {}
- if option == 'dns-search':
- ifaces[currif]['dns']['search'] = []
- for domain in split[1:]:
- ifaces[currif]['dns']['search'].append(domain)
- elif option == 'dns-nameservers':
- ifaces[currif]['dns']['nameservers'] = []
- for server in split[1:]:
- ifaces[currif]['dns']['nameservers'].append(server)
- elif option.startswith('bridge_'):
- if 'bridge' not in ifaces[currif]:
- ifaces[currif]['bridge'] = {}
- if option in NET_CONFIG_BRIDGE_OPTIONS:
- bridge_option = option.replace('bridge_', '', 1)
- ifaces[currif]['bridge'][bridge_option] = split[1]
- elif option == "bridge_ports":
- ifaces[currif]['bridge']['ports'] = []
- for iface in split[1:]:
- ifaces[currif]['bridge']['ports'].append(iface)
- elif option == "bridge_hw" and split[1].lower() == "mac":
- ifaces[currif]['bridge']['mac'] = split[2]
- elif option == "bridge_pathcost":
- if 'pathcost' not in ifaces[currif]['bridge']:
- ifaces[currif]['bridge']['pathcost'] = {}
- ifaces[currif]['bridge']['pathcost'][split[1]] = split[2]
- elif option == "bridge_portprio":
- if 'portprio' not in ifaces[currif]['bridge']:
- ifaces[currif]['bridge']['portprio'] = {}
- ifaces[currif]['bridge']['portprio'][split[1]] = split[2]
- elif option.startswith('bond-'):
- if 'bond' not in ifaces[currif]:
- ifaces[currif]['bond'] = {}
- bond_option = option.replace('bond-', '', 1)
- ifaces[currif]['bond'][bond_option] = split[1]
- for iface in ifaces.keys():
- if 'auto' not in ifaces[iface]:
- ifaces[iface]['auto'] = False
-
-
-def parse_deb_config(path):
- """Parses a debian network configuration file."""
- ifaces = {}
- with open(path, "r") as fp:
- contents = fp.read().strip()
- abs_path = os.path.abspath(path)
- parse_deb_config_data(
- ifaces, contents,
- os.path.dirname(abs_path), abs_path)
- return ifaces
-
-
-def parse_net_config_data(net_config):
- """Parses the config, returns NetworkState dictionary
-
- :param net_config: curtin network config dict
- """
- state = None
- if 'version' in net_config and 'config' in net_config:
- ns = network_state.NetworkState(version=net_config.get('version'),
- config=net_config.get('config'))
- ns.parse_config()
- state = ns.network_state
-
- return state
-
-
-def parse_net_config(path):
- """Parses a curtin network configuration file and
- return network state"""
- ns = None
- net_config = util.read_conf(path)
- if 'network' in net_config:
- ns = parse_net_config_data(net_config.get('network'))
-
- return ns
-
-
-def _load_shell_content(content, add_empty=False, empty_val=None):
- """Given shell like syntax (key=value\nkey2=value2\n) in content
- return the data in dictionary form. If 'add_empty' is True
- then add entries in to the returned dictionary for 'VAR='
- variables. Set their value to empty_val."""
- data = {}
- for line in shlex.split(content):
- key, value = line.split("=", 1)
- if not value:
- value = empty_val
- if add_empty or value:
- data[key] = value
-
- return data
-
-
-def _klibc_to_config_entry(content, mac_addrs=None):
- """Convert a klibc writtent shell content file to a 'config' entry
- When ip= is seen on the kernel command line in debian initramfs
- and networking is brought up, ipconfig will populate
- /run/net-<name>.cfg.
-
- The files are shell style syntax, and examples are in the tests
- provided here. There is no good documentation on this unfortunately.
-
- DEVICE=<name> is expected/required and PROTO should indicate if
- this is 'static' or 'dhcp'.
- """
-
- if mac_addrs is None:
- mac_addrs = {}
-
- data = _load_shell_content(content)
- try:
- name = data['DEVICE']
- except KeyError:
- raise ValueError("no 'DEVICE' entry in data")
-
- # ipconfig on precise does not write PROTO
- proto = data.get('PROTO')
- if not proto:
- if data.get('filename'):
- proto = 'dhcp'
- else:
- proto = 'static'
-
- if proto not in ('static', 'dhcp'):
- raise ValueError("Unexpected value for PROTO: %s" % proto)
-
- iface = {
- 'type': 'physical',
- 'name': name,
- 'subnets': [],
- }
-
- if name in mac_addrs:
- iface['mac_address'] = mac_addrs[name]
-
- # originally believed there might be IPV6* values
- for v, pre in (('ipv4', 'IPV4'),):
- # if no IPV4ADDR or IPV6ADDR, then go on.
- if pre + "ADDR" not in data:
- continue
- subnet = {'type': proto, 'control': 'manual'}
-
- # these fields go right on the subnet
- for key in ('NETMASK', 'BROADCAST', 'GATEWAY'):
- if pre + key in data:
- subnet[key.lower()] = data[pre + key]
-
- dns = []
- # handle IPV4DNS0 or IPV6DNS0
- for nskey in ('DNS0', 'DNS1'):
- ns = data.get(pre + nskey)
- # verify it has something other than 0.0.0.0 (or ipv6)
- if ns and len(ns.strip(":.0")):
- dns.append(data[pre + nskey])
- if dns:
- subnet['dns_nameservers'] = dns
- # add search to both ipv4 and ipv6, as it has no namespace
- search = data.get('DOMAINSEARCH')
- if search:
- if ',' in search:
- subnet['dns_search'] = search.split(",")
- else:
- subnet['dns_search'] = search.split()
-
- iface['subnets'].append(subnet)
-
- return name, iface
-
-
-def config_from_klibc_net_cfg(files=None, mac_addrs=None):
- if files is None:
- files = glob.glob('/run/net*.conf')
-
- entries = []
- names = {}
- for cfg_file in files:
- name, entry = _klibc_to_config_entry(util.load_file(cfg_file),
- mac_addrs=mac_addrs)
- if name in names:
- raise ValueError(
- "device '%s' defined multiple times: %s and %s" % (
- name, names[name], cfg_file))
-
- names[name] = cfg_file
- entries.append(entry)
- return {'config': entries, 'version': 1}
-
-
-def render_persistent_net(network_state):
- '''Given state, emit udev rules to map mac to ifname.'''
- content = ""
- interfaces = network_state.get('interfaces')
- for iface in interfaces.values():
- # for physical interfaces write out a persist net udev rule
- if iface['type'] == 'physical' and \
- 'name' in iface and iface.get('mac_address'):
- content += generate_udev_rule(iface['name'],
- iface['mac_address'])
-
- return content
-
-
-# TODO: switch valid_map based on mode inet/inet6
-def iface_add_subnet(iface, subnet):
- content = ""
- valid_map = [
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'mtu',
- 'scope',
- 'dns_search',
- 'dns_nameservers',
- ]
- for key, value in subnet.items():
- if value and key in valid_map:
- if type(value) == list:
- value = " ".join(value)
- if '_' in key:
- key = key.replace('_', '-')
- content += " {} {}\n".format(key, value)
-
- return content
-
-
-# TODO: switch to valid_map for attrs
-def iface_add_attrs(iface):
- content = ""
- ignore_map = [
- 'control',
- 'index',
- 'inet',
- 'mode',
- 'name',
- 'subnets',
- 'type',
- ]
- if iface['type'] not in ['bond', 'bridge', 'vlan']:
- ignore_map.append('mac_address')
-
- for key, value in iface.items():
- if value and key not in ignore_map:
- if type(value) == list:
- value = " ".join(value)
- content += " {} {}\n".format(key, value)
-
- return content
-
-
-def render_route(route, indent=""):
- """When rendering routes for an iface, in some cases applying a route
- may result in the route command returning non-zero which produces
- some confusing output for users manually using ifup/ifdown[1]. To
- that end, we will optionally include an '|| true' postfix to each
- route line allowing users to work with ifup/ifdown without using
- --force option.
-
- We may at somepoint not want to emit this additional postfix, and
- add a 'strict' flag to this function. When called with strict=True,
- then we will not append the postfix.
-
- 1. http://askubuntu.com/questions/168033/
- how-to-set-static-routes-in-ubuntu-server
- """
- content = ""
- up = indent + "post-up route add"
- down = indent + "pre-down route del"
- eol = " || true\n"
- mapping = {
- 'network': '-net',
- 'netmask': 'netmask',
- 'gateway': 'gw',
- 'metric': 'metric',
- }
- if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
- default_gw = " default gw %s" % route['gateway']
- content += up + default_gw + eol
- content += down + default_gw + eol
- elif route['network'] == '::' and route['netmask'] == 0:
- # ipv6!
- default_gw = " -A inet6 default gw %s" % route['gateway']
- content += up + default_gw + eol
- content += down + default_gw + eol
- else:
- route_line = ""
- for k in ['network', 'netmask', 'gateway', 'metric']:
- if k in route:
- route_line += " %s %s" % (mapping[k], route[k])
- content += up + route_line + eol
- content += down + route_line + eol
-
- return content
-
-
-def iface_start_entry(iface, index):
- fullname = iface['name']
- if index != 0:
- fullname += ":%s" % index
-
- control = iface['control']
- if control == "auto":
- cverb = "auto"
- elif control in ("hotplug",):
- cverb = "allow-" + control
- else:
- cverb = "# control-" + control
-
- subst = iface.copy()
- subst.update({'fullname': fullname, 'cverb': cverb})
-
- return ("{cverb} {fullname}\n"
- "iface {fullname} {inet} {mode}\n").format(**subst)
-
-
-def render_interfaces(network_state):
- '''Given state, emit etc/network/interfaces content.'''
-
- content = ""
- interfaces = network_state.get('interfaces')
- ''' Apply a sort order to ensure that we write out
- the physical interfaces first; this is critical for
- bonding
- '''
- order = {
- 'physical': 0,
- 'bond': 1,
- 'bridge': 2,
- 'vlan': 3,
- }
- content += "auto lo\niface lo inet loopback\n"
- for dnskey, value in network_state.get('dns', {}).items():
- if len(value):
- content += " dns-{} {}\n".format(dnskey, " ".join(value))
-
- for iface in sorted(interfaces.values(),
- key=lambda k: (order[k['type']], k['name'])):
-
- if content[-2:] != "\n\n":
- content += "\n"
- subnets = iface.get('subnets', {})
- if subnets:
- for index, subnet in zip(range(0, len(subnets)), subnets):
- if content[-2:] != "\n\n":
- content += "\n"
- iface['index'] = index
- iface['mode'] = subnet['type']
- iface['control'] = subnet.get('control', 'auto')
- if iface['mode'].endswith('6'):
- iface['inet'] += '6'
- elif iface['mode'] == 'static' and ":" in subnet['address']:
- iface['inet'] += '6'
- if iface['mode'].startswith('dhcp'):
- iface['mode'] = 'dhcp'
-
- content += iface_start_entry(iface, index)
- content += iface_add_subnet(iface, subnet)
- content += iface_add_attrs(iface)
- for route in subnet.get('routes', []):
- content += render_route(route, indent=" ")
- else:
- # ifenslave docs say to auto the slave devices
- if 'bond-master' in iface:
- content += "auto {name}\n".format(**iface)
- content += "iface {name} {inet} {mode}\n".format(**iface)
- content += iface_add_attrs(iface)
-
- for route in network_state.get('routes'):
- content += render_route(route)
-
- # global replacements until v2 format
- content = content.replace('mac_address', 'hwaddress')
- return content
-
-
-def render_network_state(target, network_state, eni="etc/network/interfaces",
- links_prefix=LINKS_FNAME_PREFIX,
- netrules='etc/udev/rules.d/70-persistent-net.rules'):
-
- fpeni = os.path.sep.join((target, eni,))
- util.ensure_dir(os.path.dirname(fpeni))
- with open(fpeni, 'w+') as f:
- f.write(render_interfaces(network_state))
-
- if netrules:
- netrules = os.path.sep.join((target, netrules,))
- util.ensure_dir(os.path.dirname(netrules))
- with open(netrules, 'w+') as f:
- f.write(render_persistent_net(network_state))
-
- if links_prefix:
- render_systemd_links(target, network_state, links_prefix)
-
-
-def render_systemd_links(target, network_state,
- links_prefix=LINKS_FNAME_PREFIX):
- fp_prefix = os.path.sep.join((target, links_prefix))
- for f in glob.glob(fp_prefix + "*"):
- os.unlink(f)
-
- interfaces = network_state.get('interfaces')
- for iface in interfaces.values():
- if (iface['type'] == 'physical' and 'name' in iface and
- iface.get('mac_address')):
- fname = fp_prefix + iface['name'] + ".link"
- with open(fname, "w") as fp:
- fp.write("\n".join([
- "[Match]",
- "MACAddress=" + iface['mac_address'],
- "",
- "[Link]",
- "Name=" + iface['name'],
- ""
- ]))
+ """Raised when a parser has issue parsing a file/content."""
def is_disabled_cfg(cfg):
@@ -642,7 +114,6 @@ def sys_netdev_info(name, field):
if not os.path.exists(os.path.join(SYS_CLASS_NET, name)):
raise OSError("%s: interface does not exist in %s" %
(name, SYS_CLASS_NET))
-
fname = os.path.join(SYS_CLASS_NET, name, field)
if not os.path.exists(fname):
raise OSError("%s: could not find sysfs entry: %s" % (name, fname))
@@ -722,108 +193,6 @@ def generate_fallback_config():
return nconf
-def _decomp_gzip(blob, strict=True):
- # decompress blob. raise exception if not compressed unless strict=False.
- with io.BytesIO(blob) as iobuf:
- gzfp = None
- try:
- gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf)
- return gzfp.read()
- except IOError:
- if strict:
- raise
- return blob
- finally:
- if gzfp:
- gzfp.close()
-
-
-def _b64dgz(b64str, gzipped="try"):
- # decode a base64 string. If gzipped is true, transparently uncompresss
- # if gzipped is 'try', then try gunzip, returning the original on fail.
- try:
- blob = base64.b64decode(b64str)
- except TypeError:
- raise ValueError("Invalid base64 text: %s" % b64str)
-
- if not gzipped:
- return blob
-
- return _decomp_gzip(blob, strict=gzipped != "try")
-
-
-def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):
- if cmdline is None:
- cmdline = util.get_cmdline()
-
- if 'network-config=' in cmdline:
- data64 = None
- for tok in cmdline.split():
- if tok.startswith("network-config="):
- data64 = tok.split("=", 1)[1]
- if data64:
- return util.load_yaml(_b64dgz(data64))
-
- if 'ip=' not in cmdline:
- return None
-
- if mac_addrs is None:
- mac_addrs = {k: sys_netdev_info(k, 'address')
- for k in get_devicelist()}
-
- return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs)
-
-
-def convert_eni_data(eni_data):
- # return a network config representation of what is in eni_data
- ifaces = {}
- parse_deb_config_data(ifaces, eni_data, src_dir=None, src_path=None)
- return _ifaces_to_net_config_data(ifaces)
-
-
-def _ifaces_to_net_config_data(ifaces):
- """Return network config that represents the ifaces data provided.
- ifaces = parse_deb_config("/etc/network/interfaces")
- config = ifaces_to_net_config_data(ifaces)
- state = parse_net_config_data(config)."""
- devs = {}
- for name, data in ifaces.items():
- # devname is 'eth0' for name='eth0:1'
- devname = name.partition(":")[0]
- if devname == "lo":
- # currently provding 'lo' in network config results in duplicate
- # entries. in rendered interfaces file. so skip it.
- continue
- if devname not in devs:
- devs[devname] = {'type': 'physical', 'name': devname,
- 'subnets': []}
- # this isnt strictly correct, but some might specify
- # hwaddress on a nic for matching / declaring name.
- if 'hwaddress' in data:
- devs[devname]['mac_address'] = data['hwaddress']
- subnet = {'_orig_eni_name': name, 'type': data['method']}
- if data.get('auto'):
- subnet['control'] = 'auto'
- else:
- subnet['control'] = 'manual'
-
- if data.get('method') == 'static':
- subnet['address'] = data['address']
-
- for copy_key in ('netmask', 'gateway', 'broadcast'):
- if copy_key in data:
- subnet[copy_key] = data[copy_key]
-
- if 'dns' in data:
- for n in ('nameservers', 'search'):
- if n in data['dns'] and data['dns'][n]:
- subnet['dns_' + n] = data['dns'][n]
- devs[devname]['subnets'].append(subnet)
-
- return {'version': 1,
- 'config': [devs[d] for d in sorted(devs)]}
-
-
def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
"""read the network config and rename devices accordingly.
if strict_present is false, then do not raise exception if no devices
@@ -839,7 +208,7 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
continue
renames.append([mac, name])
- return rename_interfaces(renames)
+ return _rename_interfaces(renames)
def _get_current_rename_info(check_downable=True):
@@ -867,8 +236,8 @@ def _get_current_rename_info(check_downable=True):
return bymac
-def rename_interfaces(renames, strict_present=True, strict_busy=True,
- current_info=None):
+def _rename_interfaces(renames, strict_present=True, strict_busy=True,
+ current_info=None):
if current_info is None:
current_info = _get_current_rename_info()
@@ -979,7 +348,13 @@ def get_interface_mac(ifname):
def get_interfaces_by_mac(devs=None):
"""Build a dictionary of tuples {mac: name}"""
if devs is None:
- devs = get_devicelist()
+ try:
+ devs = get_devicelist()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ devs = []
+ else:
+ raise
ret = {}
for name in devs:
mac = get_interface_mac(name)
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
new file mode 100644
index 00000000..822a020b
--- /dev/null
+++ b/cloudinit/net/cmdline.py
@@ -0,0 +1,203 @@
+# Copyright (C) 2013-2014 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Blake Rouse <blake.rouse@canonical.com>
+#
+# Curtin is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
+# more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
+
+import base64
+import glob
+import gzip
+import io
+import shlex
+import sys
+
+import six
+
+from . import get_devicelist
+from . import sys_netdev_info
+
+from cloudinit import util
+
+PY26 = sys.version_info[0:2] == (2, 6)
+
+
+def _shlex_split(blob):
+ if PY26 and isinstance(blob, six.text_type):
+ # Older versions don't support unicode input
+ blob = blob.encode("utf8")
+ return shlex.split(blob)
+
+
+def _load_shell_content(content, add_empty=False, empty_val=None):
+ """Given shell like syntax (key=value\nkey2=value2\n) in content
+ return the data in dictionary form. If 'add_empty' is True
+ then add entries in to the returned dictionary for 'VAR='
+ variables. Set their value to empty_val."""
+ data = {}
+ for line in _shlex_split(content):
+ key, value = line.split("=", 1)
+ if not value:
+ value = empty_val
+ if add_empty or value:
+ data[key] = value
+
+ return data
+
+
+def _klibc_to_config_entry(content, mac_addrs=None):
+ """Convert a klibc writtent shell content file to a 'config' entry
+ When ip= is seen on the kernel command line in debian initramfs
+ and networking is brought up, ipconfig will populate
+ /run/net-<name>.cfg.
+
+ The files are shell style syntax, and examples are in the tests
+ provided here. There is no good documentation on this unfortunately.
+
+ DEVICE=<name> is expected/required and PROTO should indicate if
+ this is 'static' or 'dhcp'.
+ """
+
+ if mac_addrs is None:
+ mac_addrs = {}
+
+ data = _load_shell_content(content)
+ try:
+ name = data['DEVICE']
+ except KeyError:
+ raise ValueError("no 'DEVICE' entry in data")
+
+ # ipconfig on precise does not write PROTO
+ proto = data.get('PROTO')
+ if not proto:
+ if data.get('filename'):
+ proto = 'dhcp'
+ else:
+ proto = 'static'
+
+ if proto not in ('static', 'dhcp'):
+ raise ValueError("Unexpected value for PROTO: %s" % proto)
+
+ iface = {
+ 'type': 'physical',
+ 'name': name,
+ 'subnets': [],
+ }
+
+ if name in mac_addrs:
+ iface['mac_address'] = mac_addrs[name]
+
+ # originally believed there might be IPV6* values
+ for v, pre in (('ipv4', 'IPV4'),):
+ # if no IPV4ADDR or IPV6ADDR, then go on.
+ if pre + "ADDR" not in data:
+ continue
+ subnet = {'type': proto, 'control': 'manual'}
+
+ # these fields go right on the subnet
+ for key in ('NETMASK', 'BROADCAST', 'GATEWAY'):
+ if pre + key in data:
+ subnet[key.lower()] = data[pre + key]
+
+ dns = []
+ # handle IPV4DNS0 or IPV6DNS0
+ for nskey in ('DNS0', 'DNS1'):
+ ns = data.get(pre + nskey)
+ # verify it has something other than 0.0.0.0 (or ipv6)
+ if ns and len(ns.strip(":.0")):
+ dns.append(data[pre + nskey])
+ if dns:
+ subnet['dns_nameservers'] = dns
+ # add search to both ipv4 and ipv6, as it has no namespace
+ search = data.get('DOMAINSEARCH')
+ if search:
+ if ',' in search:
+ subnet['dns_search'] = search.split(",")
+ else:
+ subnet['dns_search'] = search.split()
+
+ iface['subnets'].append(subnet)
+
+ return name, iface
+
+
+def config_from_klibc_net_cfg(files=None, mac_addrs=None):
+ if files is None:
+ files = glob.glob('/run/net*.conf')
+
+ entries = []
+ names = {}
+ for cfg_file in files:
+ name, entry = _klibc_to_config_entry(util.load_file(cfg_file),
+ mac_addrs=mac_addrs)
+ if name in names:
+ raise ValueError(
+ "device '%s' defined multiple times: %s and %s" % (
+ name, names[name], cfg_file))
+
+ names[name] = cfg_file
+ entries.append(entry)
+ return {'config': entries, 'version': 1}
+
+
+def _decomp_gzip(blob, strict=True):
+ # decompress blob. raise exception if not compressed unless strict=False.
+ with io.BytesIO(blob) as iobuf:
+ gzfp = None
+ try:
+ gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf)
+ return gzfp.read()
+ except IOError:
+ if strict:
+ raise
+ return blob
+ finally:
+ if gzfp:
+ gzfp.close()
+
+
+def _b64dgz(b64str, gzipped="try"):
+ # decode a base64 string. If gzipped is true, transparently uncompresss
+ # if gzipped is 'try', then try gunzip, returning the original on fail.
+ try:
+ blob = base64.b64decode(b64str)
+ except TypeError:
+ raise ValueError("Invalid base64 text: %s" % b64str)
+
+ if not gzipped:
+ return blob
+
+ return _decomp_gzip(blob, strict=gzipped != "try")
+
+
+def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):
+ if cmdline is None:
+ cmdline = util.get_cmdline()
+
+ if 'network-config=' in cmdline:
+ data64 = None
+ for tok in cmdline.split():
+ if tok.startswith("network-config="):
+ data64 = tok.split("=", 1)[1]
+ if data64:
+ return util.load_yaml(_b64dgz(data64))
+
+ if 'ip=' not in cmdline:
+ return None
+
+ if mac_addrs is None:
+ mac_addrs = dict((k, sys_netdev_info(k, 'address'))
+ for k in get_devicelist())
+
+ return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs)
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
new file mode 100644
index 00000000..a695f5ed
--- /dev/null
+++ b/cloudinit/net/eni.py
@@ -0,0 +1,457 @@
+# vi: ts=4 expandtab
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import glob
+import os
+import re
+
+from . import LINKS_FNAME_PREFIX
+from . import ParserError
+
+from .udev import generate_udev_rule
+
+from cloudinit import util
+
+
+NET_CONFIG_COMMANDS = [
+ "pre-up", "up", "post-up", "down", "pre-down", "post-down",
+]
+
+NET_CONFIG_BRIDGE_OPTIONS = [
+ "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcinit",
+ "bridge_hello", "bridge_maxage", "bridge_maxwait", "bridge_stp",
+]
+
+NET_CONFIG_OPTIONS = [
+ "address", "netmask", "broadcast", "network", "metric", "gateway",
+ "pointtopoint", "media", "mtu", "hostname", "leasehours", "leasetime",
+ "vendor", "client", "bootfile", "server", "hwaddr", "provider", "frame",
+ "netnum", "endpoint", "local", "ttl",
+]
+
+
+# TODO: switch valid_map based on mode inet/inet6
+def _iface_add_subnet(iface, subnet):
+ content = ""
+ valid_map = [
+ 'address',
+ 'netmask',
+ 'broadcast',
+ 'metric',
+ 'gateway',
+ 'pointopoint',
+ 'mtu',
+ 'scope',
+ 'dns_search',
+ 'dns_nameservers',
+ ]
+ for key, value in subnet.items():
+ if value and key in valid_map:
+ if type(value) == list:
+ value = " ".join(value)
+ if '_' in key:
+ key = key.replace('_', '-')
+ content += " {} {}\n".format(key, value)
+
+ return content
+
+
+# TODO: switch to valid_map for attrs
+
+def _iface_add_attrs(iface):
+ content = ""
+ ignore_map = [
+ 'control',
+ 'index',
+ 'inet',
+ 'mode',
+ 'name',
+ 'subnets',
+ 'type',
+ ]
+ if iface['type'] not in ['bond', 'bridge', 'vlan']:
+ ignore_map.append('mac_address')
+
+ for key, value in iface.items():
+ if value and key not in ignore_map:
+ if type(value) == list:
+ value = " ".join(value)
+ content += " {} {}\n".format(key, value)
+
+ return content
+
+
+def _iface_start_entry(iface, index):
+ fullname = iface['name']
+ if index != 0:
+ fullname += ":%s" % index
+
+ control = iface['control']
+ if control == "auto":
+ cverb = "auto"
+ elif control in ("hotplug",):
+ cverb = "allow-" + control
+ else:
+ cverb = "# control-" + control
+
+ subst = iface.copy()
+ subst.update({'fullname': fullname, 'cverb': cverb})
+
+ return ("{cverb} {fullname}\n"
+ "iface {fullname} {inet} {mode}\n").format(**subst)
+
+
+def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
+ """Parses the file contents, placing result into ifaces.
+
+ '_source_path' is added to every dictionary entry to define which file
+ the configration information came from.
+
+ :param ifaces: interface dictionary
+ :param contents: contents of interfaces file
+ :param src_dir: directory interfaces file was located
+ :param src_path: file path the `contents` was read
+ """
+ currif = None
+ for line in contents.splitlines():
+ line = line.strip()
+ if line.startswith('#'):
+ continue
+ split = line.split(' ')
+ option = split[0]
+ if option == "source-directory":
+ parsed_src_dir = split[1]
+ if not parsed_src_dir.startswith("/"):
+ parsed_src_dir = os.path.join(src_dir, parsed_src_dir)
+ for expanded_path in glob.glob(parsed_src_dir):
+ dir_contents = os.listdir(expanded_path)
+ dir_contents = [
+ os.path.join(expanded_path, path)
+ for path in dir_contents
+ if (os.path.isfile(os.path.join(expanded_path, path)) and
+ re.match("^[a-zA-Z0-9_-]+$", path) is not None)
+ ]
+ for entry in dir_contents:
+ with open(entry, "r") as fp:
+ src_data = fp.read().strip()
+ abs_entry = os.path.abspath(entry)
+ _parse_deb_config_data(
+ ifaces, src_data,
+ os.path.dirname(abs_entry), abs_entry)
+ elif option == "source":
+ new_src_path = split[1]
+ if not new_src_path.startswith("/"):
+ new_src_path = os.path.join(src_dir, new_src_path)
+ for expanded_path in glob.glob(new_src_path):
+ with open(expanded_path, "r") as fp:
+ src_data = fp.read().strip()
+ abs_path = os.path.abspath(expanded_path)
+ _parse_deb_config_data(
+ ifaces, src_data,
+ os.path.dirname(abs_path), abs_path)
+ elif option == "auto":
+ for iface in split[1:]:
+ if iface not in ifaces:
+ ifaces[iface] = {
+ # Include the source path this interface was found in.
+ "_source_path": src_path
+ }
+ ifaces[iface]['auto'] = True
+ elif option == "iface":
+ iface, family, method = split[1:4]
+ if iface not in ifaces:
+ ifaces[iface] = {
+ # Include the source path this interface was found in.
+ "_source_path": src_path
+ }
+ elif 'family' in ifaces[iface]:
+ raise ParserError(
+ "Interface %s can only be defined once. "
+ "Re-defined in '%s'." % (iface, src_path))
+ ifaces[iface]['family'] = family
+ ifaces[iface]['method'] = method
+ currif = iface
+ elif option == "hwaddress":
+ if split[1] == "ether":
+ val = split[2]
+ else:
+ val = split[1]
+ ifaces[currif]['hwaddress'] = val
+ elif option in NET_CONFIG_OPTIONS:
+ ifaces[currif][option] = split[1]
+ elif option in NET_CONFIG_COMMANDS:
+ if option not in ifaces[currif]:
+ ifaces[currif][option] = []
+ ifaces[currif][option].append(' '.join(split[1:]))
+ elif option.startswith('dns-'):
+ if 'dns' not in ifaces[currif]:
+ ifaces[currif]['dns'] = {}
+ if option == 'dns-search':
+ ifaces[currif]['dns']['search'] = []
+ for domain in split[1:]:
+ ifaces[currif]['dns']['search'].append(domain)
+ elif option == 'dns-nameservers':
+ ifaces[currif]['dns']['nameservers'] = []
+ for server in split[1:]:
+ ifaces[currif]['dns']['nameservers'].append(server)
+ elif option.startswith('bridge_'):
+ if 'bridge' not in ifaces[currif]:
+ ifaces[currif]['bridge'] = {}
+ if option in NET_CONFIG_BRIDGE_OPTIONS:
+ bridge_option = option.replace('bridge_', '', 1)
+ ifaces[currif]['bridge'][bridge_option] = split[1]
+ elif option == "bridge_ports":
+ ifaces[currif]['bridge']['ports'] = []
+ for iface in split[1:]:
+ ifaces[currif]['bridge']['ports'].append(iface)
+ elif option == "bridge_hw" and split[1].lower() == "mac":
+ ifaces[currif]['bridge']['mac'] = split[2]
+ elif option == "bridge_pathcost":
+ if 'pathcost' not in ifaces[currif]['bridge']:
+ ifaces[currif]['bridge']['pathcost'] = {}
+ ifaces[currif]['bridge']['pathcost'][split[1]] = split[2]
+ elif option == "bridge_portprio":
+ if 'portprio' not in ifaces[currif]['bridge']:
+ ifaces[currif]['bridge']['portprio'] = {}
+ ifaces[currif]['bridge']['portprio'][split[1]] = split[2]
+ elif option.startswith('bond-'):
+ if 'bond' not in ifaces[currif]:
+ ifaces[currif]['bond'] = {}
+ bond_option = option.replace('bond-', '', 1)
+ ifaces[currif]['bond'][bond_option] = split[1]
+ for iface in ifaces.keys():
+ if 'auto' not in ifaces[iface]:
+ ifaces[iface]['auto'] = False
+
+
+def parse_deb_config(path):
+ """Parses a debian network configuration file."""
+ ifaces = {}
+ with open(path, "r") as fp:
+ contents = fp.read().strip()
+ abs_path = os.path.abspath(path)
+ _parse_deb_config_data(
+ ifaces, contents,
+ os.path.dirname(abs_path), abs_path)
+ return ifaces
+
+
+def convert_eni_data(eni_data):
+ # return a network config representation of what is in eni_data
+ ifaces = {}
+ _parse_deb_config_data(ifaces, eni_data, src_dir=None, src_path=None)
+ return _ifaces_to_net_config_data(ifaces)
+
+
+def _ifaces_to_net_config_data(ifaces):
+ """Return network config that represents the ifaces data provided.
+ ifaces = parse_deb_config("/etc/network/interfaces")
+ config = ifaces_to_net_config_data(ifaces)
+ state = parse_net_config_data(config)."""
+ devs = {}
+ for name, data in ifaces.items():
+ # devname is 'eth0' for name='eth0:1'
+ devname = name.partition(":")[0]
+ if devname == "lo":
+ # currently provding 'lo' in network config results in duplicate
+ # entries. in rendered interfaces file. so skip it.
+ continue
+ if devname not in devs:
+ devs[devname] = {'type': 'physical', 'name': devname,
+ 'subnets': []}
+ # this isnt strictly correct, but some might specify
+ # hwaddress on a nic for matching / declaring name.
+ if 'hwaddress' in data:
+ devs[devname]['mac_address'] = data['hwaddress']
+ subnet = {'_orig_eni_name': name, 'type': data['method']}
+ if data.get('auto'):
+ subnet['control'] = 'auto'
+ else:
+ subnet['control'] = 'manual'
+
+ if data.get('method') == 'static':
+ subnet['address'] = data['address']
+
+ for copy_key in ('netmask', 'gateway', 'broadcast'):
+ if copy_key in data:
+ subnet[copy_key] = data[copy_key]
+
+ if 'dns' in data:
+ for n in ('nameservers', 'search'):
+ if n in data['dns'] and data['dns'][n]:
+ subnet['dns_' + n] = data['dns'][n]
+ devs[devname]['subnets'].append(subnet)
+
+ return {'version': 1,
+ 'config': [devs[d] for d in sorted(devs)]}
+
+
+class Renderer(object):
+ """Renders network information in a /etc/network/interfaces format."""
+
+ def _render_persistent_net(self, network_state):
+ """Given state, emit udev rules to map mac to ifname."""
+ content = ""
+ interfaces = network_state.get('interfaces')
+ for iface in interfaces.values():
+ # for physical interfaces write out a persist net udev rule
+ if iface['type'] == 'physical' and \
+ 'name' in iface and iface.get('mac_address'):
+ content += generate_udev_rule(iface['name'],
+ iface['mac_address'])
+
+ return content
+
+ def _render_route(self, route, indent=""):
+ """When rendering routes for an iface, in some cases applying a route
+ may result in the route command returning non-zero which produces
+ some confusing output for users manually using ifup/ifdown[1]. To
+ that end, we will optionally include an '|| true' postfix to each
+ route line allowing users to work with ifup/ifdown without using
+ --force option.
+
+ We may at somepoint not want to emit this additional postfix, and
+ add a 'strict' flag to this function. When called with strict=True,
+ then we will not append the postfix.
+
+ 1. http://askubuntu.com/questions/168033/
+ how-to-set-static-routes-in-ubuntu-server
+ """
+ content = ""
+ up = indent + "post-up route add"
+ down = indent + "pre-down route del"
+ eol = " || true\n"
+ mapping = {
+ 'network': '-net',
+ 'netmask': 'netmask',
+ 'gateway': 'gw',
+ 'metric': 'metric',
+ }
+ if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
+ default_gw = " default gw %s" % route['gateway']
+ content += up + default_gw + eol
+ content += down + default_gw + eol
+ elif route['network'] == '::' and route['netmask'] == 0:
+ # ipv6!
+ default_gw = " -A inet6 default gw %s" % route['gateway']
+ content += up + default_gw + eol
+ content += down + default_gw + eol
+ else:
+ route_line = ""
+ for k in ['network', 'netmask', 'gateway', 'metric']:
+ if k in route:
+ route_line += " %s %s" % (mapping[k], route[k])
+ content += up + route_line + eol
+ content += down + route_line + eol
+ return content
+
+ def _render_interfaces(self, network_state):
+ '''Given state, emit etc/network/interfaces content.'''
+
+ content = ""
+ interfaces = network_state.get('interfaces')
+ ''' Apply a sort order to ensure that we write out
+ the physical interfaces first; this is critical for
+ bonding
+ '''
+ order = {
+ 'physical': 0,
+ 'bond': 1,
+ 'bridge': 2,
+ 'vlan': 3,
+ }
+ content += "auto lo\niface lo inet loopback\n"
+ for dnskey, value in network_state.get('dns', {}).items():
+ if len(value):
+ content += " dns-{} {}\n".format(dnskey, " ".join(value))
+
+ for iface in sorted(interfaces.values(),
+ key=lambda k: (order[k['type']], k['name'])):
+
+ if content[-2:] != "\n\n":
+ content += "\n"
+ subnets = iface.get('subnets', {})
+ if subnets:
+ for index, subnet in zip(range(0, len(subnets)), subnets):
+ if content[-2:] != "\n\n":
+ content += "\n"
+ iface['index'] = index
+ iface['mode'] = subnet['type']
+ iface['control'] = subnet.get('control', 'auto')
+ if iface['mode'].endswith('6'):
+ iface['inet'] += '6'
+ elif (iface['mode'] == 'static'
+ and ":" in subnet['address']):
+ iface['inet'] += '6'
+ if iface['mode'].startswith('dhcp'):
+ iface['mode'] = 'dhcp'
+
+ content += _iface_start_entry(iface, index)
+ content += _iface_add_subnet(iface, subnet)
+ content += _iface_add_attrs(iface)
+ for route in subnet.get('routes', []):
+ content += self._render_route(route, indent=" ")
+ else:
+ # ifenslave docs say to auto the slave devices
+ if 'bond-master' in iface:
+ content += "auto {name}\n".format(**iface)
+ content += "iface {name} {inet} {mode}\n".format(**iface)
+ content += _iface_add_attrs(iface)
+
+ for route in network_state.get('routes'):
+ content += self._render_route(route)
+
+ # global replacements until v2 format
+ content = content.replace('mac_address', 'hwaddress')
+ return content
+
+ def render_network_state(
+ self, target, network_state, eni="etc/network/interfaces",
+ links_prefix=LINKS_FNAME_PREFIX,
+ netrules='etc/udev/rules.d/70-persistent-net.rules',
+ writer=None):
+
+ fpeni = os.path.sep.join((target, eni,))
+ util.ensure_dir(os.path.dirname(fpeni))
+ util.write_file(fpeni, self._render_interfaces(network_state))
+
+ if netrules:
+ netrules = os.path.sep.join((target, netrules,))
+ util.ensure_dir(os.path.dirname(netrules))
+ util.write_file(netrules,
+ self._render_persistent_net(network_state))
+
+ if links_prefix:
+ self._render_systemd_links(target, network_state,
+ links_prefix=links_prefix)
+
+ def _render_systemd_links(self, target, network_state,
+ links_prefix=LINKS_FNAME_PREFIX):
+ fp_prefix = os.path.sep.join((target, links_prefix))
+ for f in glob.glob(fp_prefix + "*"):
+ os.unlink(f)
+ interfaces = network_state.get('interfaces')
+ for iface in interfaces.values():
+ if (iface['type'] == 'physical' and 'name' in iface and
+ iface.get('mac_address')):
+ fname = fp_prefix + iface['name'] + ".link"
+ content = "\n".join([
+ "[Match]",
+ "MACAddress=" + iface['mac_address'],
+ "",
+ "[Link]",
+ "Name=" + iface['name'],
+ ""
+ ])
+ util.write_file(fname, content)
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 4c726ab4..a8be5e26 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -15,9 +15,13 @@
# You should have received a copy of the GNU Affero General Public License
# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit import log as logging
+import copy
+import functools
+import logging
+
+import six
+
from cloudinit import util
-from cloudinit.util import yaml_dumps as dump_config
LOG = logging.getLogger(__name__)
@@ -27,39 +31,104 @@ NETWORK_STATE_REQUIRED_KEYS = {
}
+def parse_net_config_data(net_config, skip_broken=True):
+ """Parses the config, returns NetworkState object
+
+ :param net_config: curtin network config dict
+ """
+ state = None
+ if 'version' in net_config and 'config' in net_config:
+ ns = NetworkState(version=net_config.get('version'),
+ config=net_config.get('config'))
+ ns.parse_config(skip_broken=skip_broken)
+ state = ns.network_state
+ return state
+
+
+def parse_net_config(path, skip_broken=True):
+ """Parses a curtin network configuration file and
+ return network state"""
+ ns = None
+ net_config = util.read_conf(path)
+ if 'network' in net_config:
+ ns = parse_net_config_data(net_config.get('network'),
+ skip_broken=skip_broken)
+ return ns
+
+
def from_state_file(state_file):
network_state = None
state = util.read_conf(state_file)
network_state = NetworkState()
network_state.load(state)
-
return network_state
+def diff_keys(expected, actual):
+ missing = set(expected)
+ for key in actual:
+ missing.discard(key)
+ return missing
+
+
+class InvalidCommand(Exception):
+ pass
+
+
+def ensure_command_keys(required_keys):
+
+ def wrapper(func):
+
+ @functools.wraps(func)
+ def decorator(self, command, *args, **kwargs):
+ if required_keys:
+ missing_keys = diff_keys(required_keys, command)
+ if missing_keys:
+ raise InvalidCommand("Command missing %s of required"
+ " keys %s" % (missing_keys,
+ required_keys))
+ return func(self, command, *args, **kwargs)
+
+ return decorator
+
+ return wrapper
+
+
+class CommandHandlerMeta(type):
+ """Metaclass that dynamically creates a 'command_handlers' attribute.
+
+ This will scan the to-be-created class for methods that start with
+ 'handle_' and on finding those will populate a class attribute mapping
+ so that those methods can be quickly located and called.
+ """
+ def __new__(cls, name, parents, dct):
+ command_handlers = {}
+ for attr_name, attr in dct.items():
+ if callable(attr) and attr_name.startswith('handle_'):
+ handles_what = attr_name[len('handle_'):]
+ if handles_what:
+ command_handlers[handles_what] = attr
+ dct['command_handlers'] = command_handlers
+ return super(CommandHandlerMeta, cls).__new__(cls, name,
+ parents, dct)
+
+
+@six.add_metaclass(CommandHandlerMeta)
class NetworkState(object):
+
+ initial_network_state = {
+ 'interfaces': {},
+ 'routes': [],
+ 'dns': {
+ 'nameservers': [],
+ 'search': [],
+ }
+ }
+
def __init__(self, version=NETWORK_STATE_VERSION, config=None):
self.version = version
self.config = config
- self.network_state = {
- 'interfaces': {},
- 'routes': [],
- 'dns': {
- 'nameservers': [],
- 'search': [],
- }
- }
- self.command_handlers = self.get_command_handlers()
-
- def get_command_handlers(self):
- METHOD_PREFIX = 'handle_'
- methods = filter(lambda x: callable(getattr(self, x)) and
- x.startswith(METHOD_PREFIX), dir(self))
- handlers = {}
- for m in methods:
- key = m.replace(METHOD_PREFIX, '')
- handlers[key] = getattr(self, m)
-
- return handlers
+ self.network_state = copy.deepcopy(self.initial_network_state)
def dump(self):
state = {
@@ -67,7 +136,7 @@ class NetworkState(object):
'config': self.config,
'network_state': self.network_state,
}
- return dump_config(state)
+ return util.yaml_dumps(state)
def load(self, state):
if 'version' not in state:
@@ -75,32 +144,39 @@ class NetworkState(object):
raise Exception('Invalid state, missing version field')
required_keys = NETWORK_STATE_REQUIRED_KEYS[state['version']]
- if not self.valid_command(state, required_keys):
- msg = 'Invalid state, missing keys: {}'.format(required_keys)
+ missing_keys = diff_keys(required_keys, state)
+ if missing_keys:
+ msg = 'Invalid state, missing keys: %s' % (missing_keys)
LOG.error(msg)
- raise Exception(msg)
+ raise ValueError(msg)
# v1 - direct attr mapping, except version
for key in [k for k in required_keys if k not in ['version']]:
setattr(self, key, state[key])
- self.command_handlers = self.get_command_handlers()
def dump_network_state(self):
- return dump_config(self.network_state)
+ return util.yaml_dumps(self.network_state)
- def parse_config(self):
+ def parse_config(self, skip_broken=True):
# rebuild network state
for command in self.config:
- handler = self.command_handlers.get(command['type'])
- handler(command)
-
- def valid_command(self, command, required_keys):
- if not required_keys:
- return False
-
- found_keys = [key for key in command.keys() if key in required_keys]
- return len(found_keys) == len(required_keys)
-
+ command_type = command['type']
+ try:
+ handler = self.command_handlers[command_type]
+ except KeyError:
+ raise RuntimeError("No handler found for"
+ " command '%s'" % command_type)
+ try:
+ handler(self, command)
+ except InvalidCommand:
+ if not skip_broken:
+ raise
+ else:
+ LOG.warn("Skipping invalid command: %s", command,
+ exc_info=True)
+ LOG.debug(self.dump_network_state())
+
+ @ensure_command_keys(['name'])
def handle_physical(self, command):
'''
command = {
@@ -112,13 +188,6 @@ class NetworkState(object):
]
}
'''
- required_keys = [
- 'name',
- ]
- if not self.valid_command(command, required_keys):
- LOG.warn('Skipping Invalid command: {}'.format(command))
- LOG.debug(self.dump_network_state())
- return
interfaces = self.network_state.get('interfaces')
iface = interfaces.get(command['name'], {})
@@ -149,6 +218,7 @@ class NetworkState(object):
self.network_state['interfaces'].update({command.get('name'): iface})
self.dump_network_state()
+ @ensure_command_keys(['name', 'vlan_id', 'vlan_link'])
def handle_vlan(self, command):
'''
auto eth0.222
@@ -158,16 +228,6 @@ class NetworkState(object):
hwaddress ether BC:76:4E:06:96:B3
vlan-raw-device eth0
'''
- required_keys = [
- 'name',
- 'vlan_link',
- 'vlan_id',
- ]
- if not self.valid_command(command, required_keys):
- print('Skipping Invalid command: {}'.format(command))
- print(self.dump_network_state())
- return
-
interfaces = self.network_state.get('interfaces')
self.handle_physical(command)
iface = interfaces.get(command.get('name'), {})
@@ -175,6 +235,7 @@ class NetworkState(object):
iface['vlan_id'] = command.get('vlan_id')
interfaces.update({iface['name']: iface})
+ @ensure_command_keys(['name', 'bond_interfaces', 'params'])
def handle_bond(self, command):
'''
#/etc/network/interfaces
@@ -200,15 +261,6 @@ class NetworkState(object):
bond-updelay 200
bond-lacp-rate 4
'''
- required_keys = [
- 'name',
- 'bond_interfaces',
- 'params',
- ]
- if not self.valid_command(command, required_keys):
- print('Skipping Invalid command: {}'.format(command))
- print(self.dump_network_state())
- return
self.handle_physical(command)
interfaces = self.network_state.get('interfaces')
@@ -236,6 +288,7 @@ class NetworkState(object):
bond_if.update({param: val})
self.network_state['interfaces'].update({ifname: bond_if})
+ @ensure_command_keys(['name', 'bridge_interfaces', 'params'])
def handle_bridge(self, command):
'''
auto br0
@@ -263,15 +316,6 @@ class NetworkState(object):
"bridge_waitport",
]
'''
- required_keys = [
- 'name',
- 'bridge_interfaces',
- 'params',
- ]
- if not self.valid_command(command, required_keys):
- print('Skipping Invalid command: {}'.format(command))
- print(self.dump_network_state())
- return
# find one of the bridge port ifaces to get mac_addr
# handle bridge_slaves
@@ -295,15 +339,8 @@ class NetworkState(object):
interfaces.update({iface['name']: iface})
+ @ensure_command_keys(['address'])
def handle_nameserver(self, command):
- required_keys = [
- 'address',
- ]
- if not self.valid_command(command, required_keys):
- print('Skipping Invalid command: {}'.format(command))
- print(self.dump_network_state())
- return
-
dns = self.network_state.get('dns')
if 'address' in command:
addrs = command['address']
@@ -318,15 +355,8 @@ class NetworkState(object):
for path in paths:
dns['search'].append(path)
+ @ensure_command_keys(['destination'])
def handle_route(self, command):
- required_keys = [
- 'destination',
- ]
- if not self.valid_command(command, required_keys):
- print('Skipping Invalid command: {}'.format(command))
- print(self.dump_network_state())
- return
-
routes = self.network_state.get('routes')
network, cidr = command['destination'].split("/")
netmask = cidr2mask(int(cidr))
@@ -376,72 +406,3 @@ def mask2cidr(mask):
return ipv4mask2cidr(mask)
else:
return mask
-
-
-if __name__ == '__main__':
- import random
- import sys
-
- from cloudinit import net
-
- def load_config(nc):
- version = nc.get('version')
- config = nc.get('config')
- return (version, config)
-
- def test_parse(network_config):
- (version, config) = load_config(network_config)
- ns1 = NetworkState(version=version, config=config)
- ns1.parse_config()
- random.shuffle(config)
- ns2 = NetworkState(version=version, config=config)
- ns2.parse_config()
- print("----NS1-----")
- print(ns1.dump_network_state())
- print()
- print("----NS2-----")
- print(ns2.dump_network_state())
- print("NS1 == NS2 ?=> {}".format(
- ns1.network_state == ns2.network_state))
- eni = net.render_interfaces(ns2.network_state)
- print(eni)
- udev_rules = net.render_persistent_net(ns2.network_state)
- print(udev_rules)
-
- def test_dump_and_load(network_config):
- print("Loading network_config into NetworkState")
- (version, config) = load_config(network_config)
- ns1 = NetworkState(version=version, config=config)
- ns1.parse_config()
- print("Dumping state to file")
- ns1_dump = ns1.dump()
- ns1_state = "/tmp/ns1.state"
- with open(ns1_state, "w+") as f:
- f.write(ns1_dump)
-
- print("Loading state from file")
- ns2 = from_state_file(ns1_state)
- print("NS1 == NS2 ?=> {}".format(
- ns1.network_state == ns2.network_state))
-
- def test_output(network_config):
- (version, config) = load_config(network_config)
- ns1 = NetworkState(version=version, config=config)
- ns1.parse_config()
- random.shuffle(config)
- ns2 = NetworkState(version=version, config=config)
- ns2.parse_config()
- print("NS1 == NS2 ?=> {}".format(
- ns1.network_state == ns2.network_state))
- eni_1 = net.render_interfaces(ns1.network_state)
- eni_2 = net.render_interfaces(ns2.network_state)
- print(eni_1)
- print(eni_2)
- print("eni_1 == eni_2 ?=> {}".format(
- eni_1 == eni_2))
-
- y = util.read_conf(sys.argv[1])
- network_config = y.get('network')
- test_parse(network_config)
- test_dump_and_load(network_config)
- test_output(network_config)
diff --git a/cloudinit/serial.py b/cloudinit/serial.py
new file mode 100644
index 00000000..af45c13e
--- /dev/null
+++ b/cloudinit/serial.py
@@ -0,0 +1,50 @@
+# vi: ts=4 expandtab
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+from __future__ import absolute_import
+
+try:
+ from serial import Serial
+except ImportError:
+ # For older versions of python (i.e. 2.6) pyserial may not be installed
+ # or may not work, so provide a dummy/fake Serial class that raises an
+ # error when actually used...
+ class Serial(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ @staticmethod
+ def isOpen():
+ return False
+
+ @staticmethod
+ def write(data):
+ raise IOError("Unable to perform serial `write` operation,"
+ " pyserial not installed.")
+
+ @staticmethod
+ def readline():
+ raise IOError("Unable to perform serial `readline` operation,"
+ " pyserial not installed.")
+
+ @staticmethod
+ def flush():
+ raise IOError("Unable to perform serial `flush` operation,"
+ " pyserial not installed.")
+
+ @staticmethod
+ def read(size=1):
+ raise IOError("Unable to perform serial `read` operation,"
+ " pyserial not installed.")
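Callers now import this wrapper instead of pyserial directly, so the import itself never fails; when pyserial is missing, errors are deferred until serial I/O is actually attempted. A short illustration (the device path is made up):

    from cloudinit import serial

    ser = serial.Serial('/dev/ttyS1', timeout=10)  # illustrative device path
    if not ser.isOpen():
        # With the fallback class this branch is always taken; any
        # write/read/readline/flush call would raise IOError instead.
        print("serial port unavailable (pyserial missing or port closed)")
    else:
        ser.write(b'ping\n')
        print(ser.readline())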
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 2d046600..8c7e8673 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -423,7 +423,7 @@ def write_files(datadir, files, dirmode=None):
elem.text = DEF_PASSWD_REDACTION
return ET.tostring(root)
except Exception:
- LOG.critical("failed to redact userpassword in {}".format(fname))
+ LOG.critical("failed to redact userpassword in %s", fname)
return cnt
if not datadir:
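The logging call above switches from str.format() to logging's deferred %-style interpolation, so the message is only formatted when the record is actually handled. For comparison:

    import logging

    LOG = logging.getLogger(__name__)
    fname = "example.xml"  # illustrative value

    # Eager: the string is built before logging even decides to emit it.
    LOG.critical("failed to redact userpassword in {}".format(fname))

    # Deferred: logging interpolates %s only when the record is emitted.
    LOG.critical("failed to redact userpassword in %s", fname)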
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index c87f57fd..3130e618 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -18,14 +18,14 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import copy
import os
from cloudinit import log as logging
-from cloudinit import net
from cloudinit import sources
from cloudinit import util
+from cloudinit.net import eni
+
from cloudinit.sources.helpers import openstack
LOG = logging.getLogger(__name__)
@@ -53,6 +53,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
self._network_config = None
self.network_json = None
self.network_eni = None
+ self.known_macs = None
self.files = {}
def __str__(self):
@@ -147,9 +148,10 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if self._network_config is None:
if self.network_json is not None:
LOG.debug("network config provided via network_json")
- self._network_config = convert_network_data(self.network_json)
+ self._network_config = openstack.convert_net_json(
+ self.network_json, known_macs=self.known_macs)
elif self.network_eni is not None:
- self._network_config = net.convert_eni_data(self.network_eni)
+ self._network_config = eni.convert_eni_data(self.network_eni)
LOG.debug("network config provided via converted eni data")
else:
LOG.debug("no network configuration available")
@@ -254,152 +256,12 @@ def find_candidate_devs(probe_optical=True):
return devices
-# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
-def convert_network_data(network_json=None, known_macs=None):
- """Return a dictionary of network_config by parsing provided
- OpenStack ConfigDrive NetworkData json format
-
- OpenStack network_data.json provides a 3 element dictionary
- - "links" (links are network devices, physical or virtual)
- - "networks" (networks are ip network configurations for one or more
- links)
- - services (non-ip services, like dns)
-
- networks and links are combined via network items referencing specific
- links via a 'link_id' which maps to a link's 'id' field.
-
- To convert this format to network_config yaml, we first iterate over the
- links and then walk the network list to determine if any of the networks
- utilize the current link; if so we generate a subnet entry for the device
-
- We also need to map network_data.json fields to network_config fields. For
- example, the network_data links 'id' field is equivalent to network_config
- 'name' field for devices. We apply more of this mapping to the various
- link types that we encounter.
-
- There are additional fields that are populated in the network_data.json
- from OpenStack that are not relevant to network_config yaml, so we
- enumerate a dictionary of valid keys for network_yaml and apply filtering
- to drop these superfluous keys from the network_config yaml.
- """
- if network_json is None:
- return None
-
- # dict of network_config key for filtering network_json
- valid_keys = {
- 'physical': [
- 'name',
- 'type',
- 'mac_address',
- 'subnets',
- 'params',
- 'mtu',
- ],
- 'subnet': [
- 'type',
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'scope',
- 'dns_nameservers',
- 'dns_search',
- 'routes',
- ],
- }
-
- links = network_json.get('links', [])
- networks = network_json.get('networks', [])
- services = network_json.get('services', [])
-
- config = []
- for link in links:
- subnets = []
- cfg = {k: v for k, v in link.items()
- if k in valid_keys['physical']}
- # 'name' is not in openstack spec yet, but we will support it if it is
- # present. The 'id' in the spec is currently implemented as the host
- # nic's name, meaning something like 'tap-adfasdffd'. We do not want
- # to name guest devices with such ugly names.
- if 'name' in link:
- cfg['name'] = link['name']
-
- for network in [n for n in networks
- if n['link'] == link['id']]:
- subnet = {k: v for k, v in network.items()
- if k in valid_keys['subnet']}
- if 'dhcp' in network['type']:
- t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
- subnet.update({
- 'type': t,
- })
- else:
- subnet.update({
- 'type': 'static',
- 'address': network.get('ip_address'),
- })
- subnets.append(subnet)
- cfg.update({'subnets': subnets})
- if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
- cfg.update({
- 'type': 'physical',
- 'mac_address': link['ethernet_mac_address']})
- elif link['type'] in ['bond']:
- params = {}
- for k, v in link.items():
- if k == 'bond_links':
- continue
- elif k.startswith('bond'):
- params.update({k: v})
- cfg.update({
- 'bond_interfaces': copy.deepcopy(link['bond_links']),
- 'params': params,
- })
- elif link['type'] in ['vlan']:
- cfg.update({
- 'name': "%s.%s" % (link['vlan_link'],
- link['vlan_id']),
- 'vlan_link': link['vlan_link'],
- 'vlan_id': link['vlan_id'],
- 'mac_address': link['vlan_mac_address'],
- })
- else:
- raise ValueError(
- 'Unknown network_data link type: %s' % link['type'])
-
- config.append(cfg)
-
- need_names = [d for d in config
- if d.get('type') == 'physical' and 'name' not in d]
-
- if need_names:
- if known_macs is None:
- known_macs = net.get_interfaces_by_mac()
-
- for d in need_names:
- mac = d.get('mac_address')
- if not mac:
- raise ValueError("No mac_address or name entry for %s" % d)
- if mac not in known_macs:
- raise ValueError("Unable to find a system nic for %s" % d)
- d['name'] = known_macs[mac]
-
- for service in services:
- cfg = service
- cfg.update({'type': 'nameserver'})
- config.append(cfg)
-
- return {'version': 1, 'config': config}
-
-
# Legacy: Must be present in case we load an old pkl object
DataSourceConfigDriveNet = DataSourceConfigDrive
# Used to match classes to dependencies
datasources = [
- (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
+ (DataSourceConfigDrive, (sources.DEP_FILESYSTEM,)),
]
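With the helper relocated, the datasource's network_config property prefers OpenStack network_data JSON (converted through openstack.convert_net_json with the newly cached known_macs) and only then falls back to converting raw ENI content. The ENI path, sketched under the assumption that convert_eni_data takes interfaces(5)-style text (its return structure is defined in cloudinit/net/eni.py, not in this excerpt):

    from cloudinit.net import eni

    # Legacy /etc/network/interfaces content (illustrative).
    ENI_TEXT = "auto eth0\niface eth0 inet dhcp\n"

    # Convert it into the internal network config dict that the rest of
    # cloud-init consumes.
    net_cfg = eni.convert_eni_data(ENI_TEXT)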
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 0e03b04f..08bc132b 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -40,13 +40,11 @@ import random
import re
import socket
-import serial
-
from cloudinit import log as logging
+from cloudinit import serial
from cloudinit import sources
from cloudinit import util
-
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 3ccb11d3..494335b3 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -28,6 +28,7 @@ import six
from cloudinit import ec2_utils
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
@@ -478,6 +479,150 @@ class MetadataReader(BaseReader):
retries=self.retries)
+# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
+def convert_net_json(network_json=None, known_macs=None):
+ """Return a dictionary of network_config by parsing provided
+ OpenStack ConfigDrive NetworkData json format
+
+ OpenStack network_data.json provides a 3 element dictionary
+ - "links" (links are network devices, physical or virtual)
+ - "networks" (networks are ip network configurations for one or more
+ links)
+ - services (non-ip services, like dns)
+
+ networks and links are combined via network items referencing specific
+ links via a 'link_id' which maps to a link's 'id' field.
+
+ To convert this format to network_config yaml, we first iterate over the
+ links and then walk the network list to determine if any of the networks
+ utilize the current link; if so we generate a subnet entry for the device
+
+ We also need to map network_data.json fields to network_config fields. For
+ example, the network_data links 'id' field is equivalent to network_config
+ 'name' field for devices. We apply more of this mapping to the various
+ link types that we encounter.
+
+ There are additional fields that are populated in the network_data.json
+ from OpenStack that are not relevant to network_config yaml, so we
+ enumerate a dictionary of valid keys for network_yaml and apply filtering
+ to drop these superfluous keys from the network_config yaml.
+ """
+ if network_json is None:
+ return None
+
+ # dict of network_config key for filtering network_json
+ valid_keys = {
+ 'physical': [
+ 'name',
+ 'type',
+ 'mac_address',
+ 'subnets',
+ 'params',
+ 'mtu',
+ ],
+ 'subnet': [
+ 'type',
+ 'address',
+ 'netmask',
+ 'broadcast',
+ 'metric',
+ 'gateway',
+ 'pointopoint',
+ 'scope',
+ 'dns_nameservers',
+ 'dns_search',
+ 'routes',
+ ],
+ }
+
+ links = network_json.get('links', [])
+ networks = network_json.get('networks', [])
+ services = network_json.get('services', [])
+
+ config = []
+ for link in links:
+ subnets = []
+ cfg = {k: v for k, v in link.items()
+ if k in valid_keys['physical']}
+ # 'name' is not in openstack spec yet, but we will support it if it is
+ # present. The 'id' in the spec is currently implemented as the host
+ # nic's name, meaning something like 'tap-adfasdffd'. We do not want
+ # to name guest devices with such ugly names.
+ if 'name' in link:
+ cfg['name'] = link['name']
+
+ for network in [n for n in networks
+ if n['link'] == link['id']]:
+ subnet = {k: v for k, v in network.items()
+ if k in valid_keys['subnet']}
+ if 'dhcp' in network['type']:
+ t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
+ subnet.update({
+ 'type': t,
+ })
+ else:
+ subnet.update({
+ 'type': 'static',
+ 'address': network.get('ip_address'),
+ })
+ if network['type'] == 'ipv4':
+ subnet['ipv4'] = True
+ if network['type'] == 'ipv6':
+ subnet['ipv6'] = True
+ subnets.append(subnet)
+ cfg.update({'subnets': subnets})
+ if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
+ cfg.update({
+ 'type': 'physical',
+ 'mac_address': link['ethernet_mac_address']})
+ elif link['type'] in ['bond']:
+ params = {}
+ for k, v in link.items():
+ if k == 'bond_links':
+ continue
+ elif k.startswith('bond'):
+ params.update({k: v})
+ cfg.update({
+ 'bond_interfaces': copy.deepcopy(link['bond_links']),
+ 'params': params,
+ })
+ elif link['type'] in ['vlan']:
+ cfg.update({
+ 'name': "%s.%s" % (link['vlan_link'],
+ link['vlan_id']),
+ 'vlan_link': link['vlan_link'],
+ 'vlan_id': link['vlan_id'],
+ 'mac_address': link['vlan_mac_address'],
+ })
+ else:
+ raise ValueError(
+ 'Unknown network_data link type: %s' % link['type'])
+
+ config.append(cfg)
+
+ need_names = [d for d in config
+ if d.get('type') == 'physical' and 'name' not in d]
+
+ if need_names:
+ if known_macs is None:
+ known_macs = net.get_interfaces_by_mac()
+
+ for d in need_names:
+ mac = d.get('mac_address')
+ if not mac:
+ raise ValueError("No mac_address or name entry for %s" % d)
+ if mac not in known_macs:
+ raise ValueError("Unable to find a system nic for %s" % d)
+ d['name'] = known_macs[mac]
+
+ for service in services:
+ cfg = service
+ cfg.update({'type': 'nameserver'})
+ config.append(cfg)
+
+ return {'version': 1, 'config': config}
+
+
def convert_vendordata_json(data, recurse=True):
"""data: a loaded json *object* (strings, arrays, dicts).
return something suitable for cloudinit vendordata_raw.
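convert_net_json can be exercised directly with a small network_data.json-shaped dict; the values below mirror the bridge-link fixture used by the config-drive tests later in this patch, and known_macs is passed explicitly so no host interfaces are probed:

    from cloudinit.sources.helpers import openstack

    network_json = {
        'links': [{'id': 'tap1a81968a-79', 'type': 'bridge', 'mtu': None,
                   'ethernet_mac_address': 'fa:16:3e:ed:9a:59'}],
        'networks': [{'id': 'network0', 'link': 'tap1a81968a-79',
                      'type': 'ipv4', 'ip_address': '172.19.1.34',
                      'netmask': '255.255.252.0', 'routes': []}],
        'services': [{'type': 'dns', 'address': '172.19.0.12'}],
    }

    # Mapping the link's MAC to a local name avoids probing the running
    # system via net.get_interfaces_by_mac().
    cfg = openstack.convert_net_json(
        network_json, known_macs={'fa:16:3e:ed:9a:59': 'foo3'})

    assert cfg['version'] == 1
    assert cfg['config'][0]['name'] == 'foo3'
    assert cfg['config'][-1] == {'type': 'nameserver',
                                 'address': '172.19.0.12'}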
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 4ba95318..47deac6e 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -44,6 +44,7 @@ from cloudinit import helpers
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import net
+from cloudinit.net import cmdline
from cloudinit.reporting import events
from cloudinit import sources
from cloudinit import type_utils
@@ -612,7 +613,7 @@ class Init(object):
if os.path.exists(disable_file):
return (None, disable_file)
- cmdline_cfg = ('cmdline', net.read_kernel_cmdline_config())
+ cmdline_cfg = ('cmdline', cmdline.read_kernel_cmdline_config())
dscfg = ('ds', None)
if self.datasource and hasattr(self.datasource, 'network_config'):
dscfg = ('ds', self.datasource.network_config)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index d6b80dbe..8873264d 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -171,7 +171,8 @@ class ProcessExecutionError(IOError):
def __init__(self, stdout=None, stderr=None,
exit_code=None, cmd=None,
- description=None, reason=None):
+ description=None, reason=None,
+ errno=None):
if not cmd:
self.cmd = '-'
else:
@@ -202,6 +203,7 @@ class ProcessExecutionError(IOError):
else:
self.reason = '-'
+ self.errno = errno
message = self.MESSAGE_TMPL % {
'description': self.description,
'cmd': self.cmd,
@@ -1157,7 +1159,14 @@ def find_devs_with(criteria=None, oformat='device',
options.append(path)
cmd = blk_id_cmd + options
# See man blkid for why 2 is added
- (out, _err) = subp(cmd, rcs=[0, 2])
+ try:
+ (out, _err) = subp(cmd, rcs=[0, 2])
+ except ProcessExecutionError as e:
+ if e.errno == errno.ENOENT:
+ # blkid not found...
+ out = ""
+ else:
+ raise
entries = []
for line in out.splitlines():
line = line.strip()
@@ -1706,7 +1715,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
sp = subprocess.Popen(args, **kws)
(out, err) = sp.communicate(data)
except OSError as e:
- raise ProcessExecutionError(cmd=args, reason=e)
+ raise ProcessExecutionError(cmd=args, reason=e,
+ errno=e.errno)
rc = sp.returncode
if rc not in rcs:
raise ProcessExecutionError(stdout=out, stderr=err,
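The errno attribute added above lets callers distinguish a missing executable from a real failure, which is exactly what find_devs_with now does for blkid. The same pattern works for any optional external tool:

    import errno

    from cloudinit import util

    try:
        out, _err = util.subp(['blkid', '-odevice'], rcs=[0, 2])
    except util.ProcessExecutionError as e:
        if e.errno == errno.ENOENT:
            # The tool is not installed; treat that as "nothing found".
            out = ""
        else:
            raise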
diff --git a/packages/bddeb b/packages/bddeb
index f8de7b42..3c77ce1d 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -42,6 +42,7 @@ STD_NAMED_PACKAGES = [
'setuptools',
'flake8',
'hacking',
+ 'unittest2',
]
NONSTD_NAMED_PACKAGES = {
'argparse': ('python-argparse', None),
diff --git a/requirements.txt b/requirements.txt
index 19c88857..cc1dc05f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,8 +11,12 @@ PrettyTable
oauthlib
# This one is currently used only by the CloudSigma and SmartOS datasources.
-# If these datasources are removed, this is no longer needed
-pyserial
+# If these datasources are removed, this is no longer needed.
+#
+# This will not work in py2.6 so it is only optionally installed on
+# python 2.7 and later.
+#
+# pyserial
# This is only needed for places where we need to support configs in a manner
# that the built-in config parser is not sufficient (ie
diff --git a/setup.py b/setup.py
index 57d946ca..8c8f2402 100755
--- a/setup.py
+++ b/setup.py
@@ -196,7 +196,6 @@ requirements = read_requires()
if sys.version_info < (3,):
requirements.append('cheetah')
-
setuptools.setup(
name='cloud-init',
version=get_version(),
diff --git a/test-requirements.txt b/test-requirements.txt
index 651af11b..6bf38940 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,6 +2,7 @@
httpretty>=0.7.1
mock
nose
+unittest2
# Only needed if you want to know the test times
# nose-timer
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 50b2bd72..8d46a8bf 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -7,13 +7,11 @@ import sys
import tempfile
import unittest
+import mock
import six
+import unittest2
try:
- from unittest import mock
-except ImportError:
- import mock
-try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
@@ -21,6 +19,9 @@ except ImportError:
from cloudinit import helpers as ch
from cloudinit import util
+# Used for skipping tests
+SkipTest = unittest2.SkipTest
+
# Used for detecting different python versions
PY2 = False
PY26 = False
@@ -44,78 +45,6 @@ else:
if _PY_MINOR == 4 and _PY_MICRO < 3:
FIX_HTTPRETTY = True
-if PY26:
- # For now add these on, taken from python 2.7 + slightly adjusted. Drop
- # all this once Python 2.6 is dropped as a minimum requirement.
- class TestCase(unittest.TestCase):
- def setUp(self):
- super(TestCase, self).setUp()
- self.__all_cleanups = ExitStack()
-
- def tearDown(self):
- self.__all_cleanups.close()
- unittest.TestCase.tearDown(self)
-
- def addCleanup(self, function, *args, **kws):
- self.__all_cleanups.callback(function, *args, **kws)
-
- def assertIs(self, expr1, expr2, msg=None):
- if expr1 is not expr2:
- standardMsg = '%r is not %r' % (expr1, expr2)
- self.fail(self._formatMessage(msg, standardMsg))
-
- def assertIn(self, member, container, msg=None):
- if member not in container:
- standardMsg = '%r not found in %r' % (member, container)
- self.fail(self._formatMessage(msg, standardMsg))
-
- def assertNotIn(self, member, container, msg=None):
- if member in container:
- standardMsg = '%r unexpectedly found in %r'
- standardMsg = standardMsg % (member, container)
- self.fail(self._formatMessage(msg, standardMsg))
-
- def assertIsNone(self, value, msg=None):
- if value is not None:
- standardMsg = '%r is not None'
- standardMsg = standardMsg % (value)
- self.fail(self._formatMessage(msg, standardMsg))
-
- def assertIsInstance(self, obj, cls, msg=None):
- """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
- default message."""
- if not isinstance(obj, cls):
- standardMsg = '%s is not an instance of %r' % (repr(obj), cls)
- self.fail(self._formatMessage(msg, standardMsg))
-
- def assertDictContainsSubset(self, expected, actual, msg=None):
- missing = []
- mismatched = []
- for k, v in expected.items():
- if k not in actual:
- missing.append(k)
- elif actual[k] != v:
- mismatched.append('%r, expected: %r, actual: %r'
- % (k, v, actual[k]))
-
- if len(missing) == 0 and len(mismatched) == 0:
- return
-
- standardMsg = ''
- if missing:
- standardMsg = 'Missing: %r' % ','.join(m for m in missing)
- if mismatched:
- if standardMsg:
- standardMsg += '; '
- standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
-
- self.fail(self._formatMessage(msg, standardMsg))
-
-
-else:
- class TestCase(unittest.TestCase):
- pass
-
# Makes the old path start
# with new base instead of whatever
@@ -151,6 +80,10 @@ def retarget_many_wrapper(new_base, am, old_func):
return wrapper
+class TestCase(unittest2.TestCase):
+ pass
+
+
class ResourceUsingTestCase(TestCase):
def setUp(self):
super(ResourceUsingTestCase, self).setUp()
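The test helpers now standardize on unittest2, so individual tests skip by raising the re-exported SkipTest rather than being decorated away, as the cs_util and Azure tests below do. A minimal sketch in the same style (class name is illustrative, and the module is assumed to live under tests/unittests/ so the relative import resolves):

    from . import helpers as test_helpers


    class TestSomething(test_helpers.TestCase):
        def setUp(self):
            super(TestSomething, self).setUp()
            if test_helpers.PY26:
                # Reported by the runner as a skip, not a failure.
                raise test_helpers.SkipTest("not supported on python 2.6")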
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index 153f1658..0154784a 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -1,16 +1,6 @@
import os
import shutil
import tempfile
-import unittest
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
from cloudinit import handlers
from cloudinit import helpers
@@ -18,7 +8,7 @@ from cloudinit import settings
from cloudinit import url_helper
from cloudinit import util
-from .helpers import TestCase
+from .helpers import TestCase, ExitStack, mock
class FakeModule(handlers.Handler):
@@ -99,9 +89,10 @@ class TestWalkerHandleHandler(TestCase):
self.assertEqual(self.data['handlercount'], 0)
-class TestHandlerHandlePart(unittest.TestCase):
+class TestHandlerHandlePart(TestCase):
def setUp(self):
+ super(TestHandlerHandlePart, self).setUp()
self.data = "fake data"
self.ctype = "fake ctype"
self.filename = "fake filename"
@@ -177,7 +168,7 @@ class TestHandlerHandlePart(unittest.TestCase):
self.data, self.ctype, self.filename, self.payload)
-class TestCmdlineUrl(unittest.TestCase):
+class TestCmdlineUrl(TestCase):
def test_invalid_content(self):
url = "http://example.com/foo"
key = "mykey"
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index f537bd83..163ce64c 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -4,12 +4,7 @@ import six
import sys
from . import helpers as test_helpers
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
+mock = test_helpers.mock
BIN_CLOUDINIT = "bin/cloud-init"
diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py
index d7273035..56c9ce9e 100644
--- a/tests/unittests/test_cs_util.py
+++ b/tests/unittests/test_cs_util.py
@@ -1,21 +1,9 @@
from __future__ import print_function
-import sys
-import unittest
+from . import helpers as test_helpers
from cloudinit.cs_utils import Cepko
-try:
- skip = unittest.skip
-except AttributeError:
- # Python 2.6. Doesn't have to be high fidelity.
- def skip(reason):
- def decorator(func):
- def wrapper(*args, **kws):
- print(reason, file=sys.stderr)
- return wrapper
- return decorator
-
SERVER_CONTEXT = {
"cpu": 1000,
@@ -43,18 +31,9 @@ class CepkoMock(Cepko):
# 2015-01-22 BAW: This test is completely useless because it only ever tests
# the CepkoMock object. Even in its original form, I don't think it ever
# touched the underlying Cepko class methods.
-@skip('This test is completely useless')
-class CepkoResultTests(unittest.TestCase):
+class CepkoResultTests(test_helpers.TestCase):
def setUp(self):
- pass
- # self.mocked = self.mocker.replace("cloudinit.cs_utils.Cepko",
- # spec=CepkoMock,
- # count=False,
- # passthrough=False)
- # self.mocked()
- # self.mocker.result(CepkoMock())
- # self.mocker.replay()
- # self.c = Cepko()
+ raise test_helpers.SkipTest('This test is completely useless')
def test_getitem(self):
result = self.c.all()
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 5f3eb31f..e90e903c 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -1,16 +1,8 @@
from cloudinit import helpers
from cloudinit.util import b64e, decode_binary, load_file
from cloudinit.sources import DataSourceAzure
-from ..helpers import TestCase, populate_dir
-try:
- from unittest import mock
-except ImportError:
- import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
+from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest
import crypt
import os
@@ -83,6 +75,8 @@ class TestAzureDataSource(TestCase):
def setUp(self):
super(TestAzureDataSource, self).setUp()
+ if PY26:
+ raise SkipTest("Does not work on python 2.6")
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 70e8f7f2..65202ff0 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -2,17 +2,7 @@ import os
from cloudinit.sources.helpers import azure as azure_helper
-from ..helpers import TestCase
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
+from ..helpers import ExitStack, mock, TestCase
GOAL_STATE_TEMPLATE = """\
diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py
index 772d189a..2a42ce0c 100644
--- a/tests/unittests/test_datasource/test_cloudsigma.py
+++ b/tests/unittests/test_datasource/test_cloudsigma.py
@@ -1,4 +1,5 @@
# coding: utf-8
+
import copy
from cloudinit.cs_utils import Cepko
@@ -6,7 +7,6 @@ from cloudinit.sources import DataSourceCloudSigma
from .. import helpers as test_helpers
-
SERVER_CONTEXT = {
"cpu": 1000,
"cpus_instead_of_cores": False,
diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py
index 974b3704..b1aab17b 100644
--- a/tests/unittests/test_datasource/test_cloudstack.py
+++ b/tests/unittests/test_datasource/test_cloudstack.py
@@ -1,16 +1,7 @@
from cloudinit import helpers
from cloudinit.sources.DataSourceCloudStack import DataSourceCloudStack
-from ..helpers import TestCase
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
+from ..helpers import TestCase, mock, ExitStack
class TestCloudStackPasswordFetching(TestCase):
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 83d75f9c..18551b92 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -5,23 +5,15 @@ import shutil
import six
import tempfile
-try:
- from unittest import mock
-except ImportError:
- import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
-
from cloudinit import helpers
-from cloudinit import net
+from cloudinit.net import eni
+from cloudinit.net import network_state
from cloudinit import settings
from cloudinit.sources import DataSourceConfigDrive as ds
from cloudinit.sources.helpers import openstack
from cloudinit import util
-from ..helpers import TestCase
+from ..helpers import TestCase, ExitStack, mock
PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
@@ -115,6 +107,7 @@ KNOWN_MACS = {
'fa:16:3e:d4:57:ad': 'enp0s2',
'fa:16:3e:dd:50:9a': 'foo1',
'fa:16:3e:a8:14:69': 'foo2',
+ 'fa:16:3e:ed:9a:59': 'foo3',
}
CFG_DRIVE_FILES_V2 = {
@@ -377,35 +370,150 @@ class TestConfigDriveDataSource(TestCase):
util.find_devs_with = orig_find_devs_with
util.is_partition = orig_is_partition
- def test_pubkeys_v2(self):
+ @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot')
+ def test_pubkeys_v2(self, on_first_boot):
"""Verify that public-keys work in config-drive-v2."""
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
myds = cfg_ds_from_dir(self.tmp)
self.assertEqual(myds.get_public_ssh_keys(),
[OSTACK_META['public_keys']['mykey']])
- def test_network_data_is_found(self):
+
+class TestNetJson(TestCase):
+ def setUp(self):
+ super(TestNetJson, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+ self.maxDiff = None
+
+ @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot')
+ def test_network_data_is_found(self, on_first_boot):
"""Verify that network_data is present in ds in config-drive-v2."""
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
myds = cfg_ds_from_dir(self.tmp)
- self.assertEqual(myds.network_json, NETWORK_DATA)
+ self.assertIsNotNone(myds.network_json)
- def test_network_config_is_converted(self):
+ @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot')
+ def test_network_config_is_converted(self, on_first_boot):
"""Verify that network_data is converted and present on ds object."""
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
myds = cfg_ds_from_dir(self.tmp)
- network_config = ds.convert_network_data(NETWORK_DATA,
- known_macs=KNOWN_MACS)
+ network_config = openstack.convert_net_json(NETWORK_DATA,
+ known_macs=KNOWN_MACS)
self.assertEqual(myds.network_config, network_config)
+ def test_network_config_conversions(self):
+ """Tests a bunch of input network json and checks the
+ expected conversions."""
+ in_datas = [
+ NETWORK_DATA,
+ {
+ 'services': [{'type': 'dns', 'address': '172.19.0.12'}],
+ 'networks': [{
+ 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4',
+ 'type': 'ipv4',
+ 'netmask': '255.255.252.0',
+ 'link': 'tap1a81968a-79',
+ 'routes': [{
+ 'netmask': '0.0.0.0',
+ 'network': '0.0.0.0',
+ 'gateway': '172.19.3.254',
+ }],
+ 'ip_address': '172.19.1.34',
+ 'id': 'network0',
+ }],
+ 'links': [{
+ 'type': 'bridge',
+ 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f',
+ 'ethernet_mac_address': 'fa:16:3e:ed:9a:59',
+ 'id': 'tap1a81968a-79',
+ 'mtu': None,
+ }],
+ },
+ ]
+ out_datas = [
+ {
+ 'version': 1,
+ 'config': [
+ {
+ 'subnets': [{'type': 'dhcp4'}],
+ 'type': 'physical',
+ 'mac_address': 'fa:16:3e:69:b0:58',
+ 'name': 'enp0s1',
+ 'mtu': None,
+ },
+ {
+ 'subnets': [{'type': 'dhcp4'}],
+ 'type': 'physical',
+ 'mac_address': 'fa:16:3e:d4:57:ad',
+ 'name': 'enp0s2',
+ 'mtu': None,
+ },
+ {
+ 'subnets': [{'type': 'dhcp4'}],
+ 'type': 'physical',
+ 'mac_address': 'fa:16:3e:05:30:fe',
+ 'name': 'nic0',
+ 'mtu': None,
+ },
+ {
+ 'type': 'nameserver',
+ 'address': '199.204.44.24',
+ },
+ {
+ 'type': 'nameserver',
+ 'address': '199.204.47.54',
+ }
+ ],
+
+ },
+ {
+ 'version': 1,
+ 'config': [
+ {
+ 'name': 'foo3',
+ 'mac_address': 'fa:16:3e:ed:9a:59',
+ 'mtu': None,
+ 'type': 'physical',
+ 'subnets': [
+ {
+ 'address': '172.19.1.34',
+ 'netmask': '255.255.252.0',
+ 'type': 'static',
+ 'ipv4': True,
+ 'routes': [{
+ 'gateway': '172.19.3.254',
+ 'netmask': '0.0.0.0',
+ 'network': '0.0.0.0',
+ }],
+ }
+ ]
+ },
+ {
+ 'type': 'nameserver',
+ 'address': '172.19.0.12',
+ }
+ ],
+ },
+ ]
+ for in_data, out_data in zip(in_datas, out_datas):
+ conv_data = openstack.convert_net_json(in_data,
+ known_macs=KNOWN_MACS)
+ self.assertEqual(out_data, conv_data)
+
class TestConvertNetworkData(TestCase):
+ def setUp(self):
+ super(TestConvertNetworkData, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
def _getnames_in_config(self, ncfg):
return set([n['name'] for n in ncfg['config']
if n['type'] == 'physical'])
def test_conversion_fills_names(self):
- ncfg = ds.convert_network_data(NETWORK_DATA, known_macs=KNOWN_MACS)
+ ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS)
expected = set(['nic0', 'enp0s1', 'enp0s2'])
found = self._getnames_in_config(ncfg)
self.assertEqual(found, expected)
@@ -417,18 +525,19 @@ class TestConvertNetworkData(TestCase):
'fa:16:3e:69:b0:58': 'ens1'})
get_interfaces_by_mac.return_value = macs
- ncfg = ds.convert_network_data(NETWORK_DATA)
+ ncfg = openstack.convert_net_json(NETWORK_DATA)
expected = set(['nic0', 'ens1', 'enp0s2'])
found = self._getnames_in_config(ncfg)
self.assertEqual(found, expected)
def test_convert_raises_value_error_on_missing_name(self):
macs = {'aa:aa:aa:aa:aa:00': 'ens1'}
- self.assertRaises(ValueError, ds.convert_network_data,
+ self.assertRaises(ValueError, openstack.convert_net_json,
NETWORK_DATA, known_macs=macs)
def test_conversion_with_route(self):
- ncfg = ds.convert_network_data(NETWORK_DATA_2, known_macs=KNOWN_MACS)
+ ncfg = openstack.convert_net_json(NETWORK_DATA_2,
+ known_macs=KNOWN_MACS)
# not the best test, but see that we get a route in the
# network config and that it gets rendered to an ENI file
routes = []
@@ -438,15 +547,23 @@ class TestConvertNetworkData(TestCase):
self.assertIn(
{'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'},
routes)
- eni = net.render_interfaces(net.parse_net_config_data(ncfg))
- self.assertIn("route add default gw 2.2.2.9", eni)
+ eni_renderer = eni.Renderer()
+ eni_renderer.render_network_state(
+ self.tmp, network_state.parse_net_config_data(ncfg))
+ with open(os.path.join(self.tmp, "etc",
+ "network", "interfaces"), 'r') as f:
+ eni_rendering = f.read()
+ self.assertIn("route add default gw 2.2.2.9", eni_rendering)
def cfg_ds_from_dir(seed_d):
- found = ds.read_config_drive(seed_d)
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None,
helpers.Paths({}))
- populate_ds_from_read_config(cfg_ds, seed_d, found)
+ cfg_ds.seed_dir = seed_d
+ cfg_ds.known_macs = KNOWN_MACS.copy()
+ if not cfg_ds.get_data():
+ raise RuntimeError("Data source did not extract itself from"
+ " seed directory %s" % seed_d)
return cfg_ds
@@ -460,7 +577,7 @@ def populate_ds_from_read_config(cfg_ds, source, results):
cfg_ds.userdata_raw = results.get('userdata')
cfg_ds.version = results.get('version')
cfg_ds.network_json = results.get('networkdata')
- cfg_ds._network_config = ds.convert_network_data(
+ cfg_ds._network_config = openstack.convert_net_json(
cfg_ds.network_json, known_macs=KNOWN_MACS)
@@ -474,7 +591,6 @@ def populate_dir(seed_dir, files):
mode = "w"
else:
mode = "wb"
-
with open(path, mode) as fp:
fp.write(content)
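The rendering assertions above no longer compare strings from net.render_interfaces; they drive the new eni.Renderer, which writes an etc/network/interfaces tree under a target directory. Outside the tests the same two-step flow applies, sketched here with a throwaway target:

    import os
    import tempfile

    from cloudinit.net import eni
    from cloudinit.net import network_state

    ncfg = {
        'version': 1,
        'config': [{'type': 'physical', 'name': 'eth0',
                    'mac_address': 'c0:d6:9f:2c:e8:80',
                    'subnets': [{'type': 'dhcp'}]}],
    }

    # Parse the version-1 config into a NetworkState, then render it.
    ns = network_state.parse_net_config_data(ncfg)
    target = tempfile.mkdtemp()  # stand-in for a filesystem root
    eni.Renderer().render_network_state(target, ns)

    with open(os.path.join(target, 'etc', 'network', 'interfaces')) as fh:
        print(fh.read())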
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 3c528c23..b0fa1130 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -1,22 +1,13 @@
from cloudinit import helpers
from cloudinit.sources import DataSourceNoCloud
from cloudinit import util
-from ..helpers import TestCase, populate_dir
+from ..helpers import TestCase, populate_dir, mock, ExitStack
import os
import shutil
import tempfile
-import unittest
-import yaml
-try:
- from unittest import mock
-except ImportError:
- import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
+import yaml
class TestNoCloudDataSource(TestCase):
@@ -139,7 +130,7 @@ class TestNoCloudDataSource(TestCase):
self.assertTrue(ret)
-class TestParseCommandLineData(unittest.TestCase):
+class TestParseCommandLineData(TestCase):
def test_parse_cmdline_data_valid(self):
ds_id = "ds=nocloud"
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index ec57cff1..9c6c8768 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -34,11 +34,12 @@ import stat
import tempfile
import uuid
-import serial
+from cloudinit import serial
+from cloudinit.sources import DataSourceSmartOS
+
import six
from cloudinit import helpers as c_helpers
-from cloudinit.sources import DataSourceSmartOS
from cloudinit.util import b64e
from ..helpers import mock, FilesystemMockingTestCase, TestCase
@@ -380,6 +381,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
def setUp(self):
super(TestJoyentMetadataClient, self).setUp()
+
self.serial = mock.MagicMock(spec=serial.Serial)
self.request_id = 0xabcdef12
self.metadata_value = 'value'
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 624a9aa8..7998111a 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -1,6 +1,10 @@
from cloudinit import net
+from cloudinit.net import cmdline
+from cloudinit.net import eni
+from cloudinit.net import network_state
from cloudinit import util
+from .helpers import mock
from .helpers import TestCase
import base64
@@ -9,6 +13,8 @@ import gzip
import io
import json
import os
+import shutil
+import tempfile
DHCP_CONTENT_1 = """
DEVICE='eth0'
@@ -69,21 +75,87 @@ STATIC_EXPECTED_1 = {
}
-class TestNetConfigParsing(TestCase):
+class TestEniNetRendering(TestCase):
+
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.sys_netdev_info")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_default_generation(self, mock_get_devicelist,
+ mock_sys_netdev_info,
+ mock_sys_dev_path):
+ mock_get_devicelist.return_value = ['eth1000', 'lo']
+
+ dev_characteristics = {
+ 'eth1000': {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "07-1C-C6-75-A4-BE",
+ }
+ }
+
+ def netdev_info(name, field):
+ return dev_characteristics[name][field]
+
+ mock_sys_netdev_info.side_effect = netdev_info
+
+ tmp_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, tmp_dir)
+
+ def sys_dev_path(devname, path=""):
+ return tmp_dir + devname + "/" + path
+
+ for dev in dev_characteristics:
+ os.makedirs(os.path.join(tmp_dir, dev))
+ with open(os.path.join(tmp_dir, dev, 'operstate'), 'w') as fh:
+ fh.write("down")
+
+ mock_sys_dev_path.side_effect = sys_dev_path
+
+ network_cfg = net.generate_fallback_config()
+ ns = network_state.parse_net_config_data(network_cfg,
+ skip_broken=False)
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ renderer = eni.Renderer()
+ renderer.render_network_state(render_dir, ns,
+ eni="interfaces",
+ links_prefix=None,
+ netrules=None)
+
+ self.assertTrue(os.path.exists(os.path.join(render_dir,
+ 'interfaces')))
+ with open(os.path.join(render_dir, 'interfaces')) as fh:
+ contents = fh.read()
+
+ expected = """
+auto lo
+iface lo inet loopback
+
+auto eth1000
+iface eth1000 inet dhcp
+"""
+ self.assertEqual(expected.lstrip(), contents.lstrip())
+
+
+class TestCmdlineConfigParsing(TestCase):
simple_cfg = {
'config': [{"type": "physical", "name": "eth0",
"mac_address": "c0:d6:9f:2c:e8:80",
"subnets": [{"type": "dhcp"}]}]}
- def test_klibc_convert_dhcp(self):
- found = net._klibc_to_config_entry(DHCP_CONTENT_1)
+ def test_cmdline_convert_dhcp(self):
+ found = cmdline._klibc_to_config_entry(DHCP_CONTENT_1)
self.assertEqual(found, ('eth0', DHCP_EXPECTED_1))
- def test_klibc_convert_static(self):
- found = net._klibc_to_config_entry(STATIC_CONTENT_1)
+ def test_cmdline_convert_static(self):
+ found = cmdline._klibc_to_config_entry(STATIC_CONTENT_1)
self.assertEqual(found, ('eth1', STATIC_EXPECTED_1))
- def test_config_from_klibc_net_cfg(self):
+ def test_config_from_cmdline_net_cfg(self):
files = []
pairs = (('net-eth0.cfg', DHCP_CONTENT_1),
('net-eth1.cfg', STATIC_CONTENT_1))
@@ -104,21 +176,22 @@ class TestNetConfigParsing(TestCase):
files.append(fp)
util.write_file(fp, content)
- found = net.config_from_klibc_net_cfg(files=files, mac_addrs=macs)
+ found = cmdline.config_from_klibc_net_cfg(files=files,
+ mac_addrs=macs)
self.assertEqual(found, expected)
def test_cmdline_with_b64(self):
data = base64.b64encode(json.dumps(self.simple_cfg).encode())
encoded_text = data.decode()
- cmdline = 'ro network-config=' + encoded_text + ' root=foo'
- found = net.read_kernel_cmdline_config(cmdline=cmdline)
+ raw_cmdline = 'ro network-config=' + encoded_text + ' root=foo'
+ found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
self.assertEqual(found, self.simple_cfg)
def test_cmdline_with_b64_gz(self):
data = _gzip_data(json.dumps(self.simple_cfg).encode())
encoded_text = base64.b64encode(data).decode()
- cmdline = 'ro network-config=' + encoded_text + ' root=foo'
- found = net.read_kernel_cmdline_config(cmdline=cmdline)
+ raw_cmdline = 'ro network-config=' + encoded_text + ' root=foo'
+ found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
self.assertEqual(found, self.simple_cfg)
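The last two tests pin down the kernel command line contract: network-config= carries base64-encoded JSON, optionally gzip-compressed before encoding. Building such a parameter by hand looks roughly like this (mirroring the tests' _gzip_data helper, which is outside this excerpt):

    import base64
    import gzip
    import io
    import json

    net_cfg = {'config': [{'type': 'physical', 'name': 'eth0',
                           'mac_address': 'c0:d6:9f:2c:e8:80',
                           'subnets': [{'type': 'dhcp'}]}]}
    raw = json.dumps(net_cfg).encode()

    # Plain base64 form.
    plain = 'network-config=' + base64.b64encode(raw).decode()

    # gzip-then-base64 form.
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='wb') as fp:
        fp.write(raw)
    gzipped = 'network-config=' + base64.b64encode(buf.getvalue()).decode()

    print(plain)
    print(gzipped)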
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index 5cad8406..20ca23df 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -7,7 +7,9 @@ from cloudinit import reporting
from cloudinit.reporting import events
from cloudinit.reporting import handlers
-from .helpers import (mock, TestCase)
+import mock
+
+from .helpers import TestCase
def _fake_registry():
diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py
index b84c807b..891dbe77 100644
--- a/tests/unittests/test_rh_subscription.py
+++ b/tests/unittests/test_rh_subscription.py
@@ -1,12 +1,24 @@
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+
from cloudinit.config import cc_rh_subscription
from cloudinit import util
-import logging
-import mock
-import unittest
+from .helpers import TestCase, mock
-class GoodTests(unittest.TestCase):
+class GoodTests(TestCase):
def setUp(self):
super(GoodTests, self).setUp()
self.name = "cc_rh_subscription"
@@ -93,7 +105,7 @@ class GoodTests(unittest.TestCase):
self.assertEqual(self.SM._sub_man_cli.call_count, 9)
-class TestBadInput(unittest.TestCase):
+class TestBadInput(TestCase):
name = "cc_rh_subscription"
cloud_init = None
log = logging.getLogger("bad_tests")
diff --git a/tox.ini b/tox.ini
index dafaaf6d..e7a6f22c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,10 +5,9 @@ recreate = True
[testenv]
commands = python -m nose {posargs:tests}
deps = -r{toxinidir}/test-requirements.txt
- -r{toxinidir}/requirements.txt
-
-[testenv:py3]
-basepython = python3
+ -r{toxinidir}/requirements.txt
+setenv =
+ LC_ALL = en_US.utf-8
[testenv:flake8]
basepython = python3
@@ -18,15 +17,11 @@ commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
setenv =
LC_ALL = en_US.utf-8
+[testenv:py3]
+basepython = python3
+
[testenv:py26]
commands = nosetests {posargs:tests}
-deps =
- contextlib2
- httpretty>=0.7.1
- mock
- nose
- pep8==1.5.7
- pyflakes
setenv =
LC_ALL = C