author     Scott Moser <smoser@brickies.net>   2017-04-03 11:52:06 -0400
committer  Scott Moser <smoser@brickies.net>   2017-04-03 11:52:06 -0400
commit     3b2e493b51153cd5bc1fa91e6ac52f59d41fe3fb (patch)
tree       678b118605245e5bb1b218565728270702aba5b4
parent     e018fa910fdf0dbf3aa22e59e935461babd205c4 (diff)
parent     61eb03fef92f435434d974fb46439189ef0b5f97 (diff)
merge from master at 0.7.9-90-g61eb03fe
-rw-r--r--  .gitignore | 1
-rw-r--r--  .pylintrc | 39
-rw-r--r--  cloudinit/cmd/main.py | 17
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 20
-rw-r--r--  cloudinit/config/cc_chef.py | 2
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 2
-rw-r--r--  cloudinit/config/cc_growpart.py | 11
-rw-r--r--  cloudinit/config/cc_resizefs.py | 21
-rwxr-xr-x  cloudinit/config/cc_set_passwords.py | 75
-rwxr-xr-x  cloudinit/distros/__init__.py | 13
-rw-r--r--  cloudinit/distros/debian.py | 29
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py | 11
-rw-r--r--  cloudinit/distros/rhel.py | 7
-rw-r--r-- [-rwxr-xr-x]  cloudinit/net/__init__.py | 83
-rw-r--r--  cloudinit/net/eni.py | 53
-rw-r--r--  cloudinit/net/netplan.py | 412
-rw-r--r--  cloudinit/net/network_state.py | 317
-rw-r--r--  cloudinit/net/renderer.py | 10
-rw-r--r--  cloudinit/net/renderers.py | 53
-rw-r--r--  cloudinit/net/sysconfig.py | 80
-rw-r--r--  cloudinit/settings.py | 1
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 3
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 78
-rw-r--r--  cloudinit/sources/DataSourceBigstep.py | 2
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 13
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 18
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 4
-rw-r--r--  cloudinit/sources/__init__.py | 4
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 1
-rw-r--r--  cloudinit/stages.py | 6
-rw-r--r--  cloudinit/url_helper.py | 2
-rw-r--r--  cloudinit/util.py | 81
-rw-r--r--  cloudinit/version.py | 7
-rw-r--r--  doc/examples/cloud-config.txt | 9
-rw-r--r--  doc/rtd/topics/capabilities.rst | 29
-rw-r--r--  doc/rtd/topics/datasources/configdrive.rst | 8
-rw-r--r--  doc/rtd/topics/tests.rst | 14
-rw-r--r--  systemd/cloud-init.service | 1
-rw-r--r--  tests/cloud_tests/configs/modules/set_password_list.yaml | 23
-rw-r--r--  tests/cloud_tests/configs/modules/set_password_list_string.yaml | 40
-rw-r--r--  tests/cloud_tests/configs/modules/timezone.yaml | 4
-rw-r--r--  tests/cloud_tests/testcases/__init__.py | 2
-rw-r--r--  tests/cloud_tests/testcases/base.py | 56
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password_list.py | 20
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password_list_string.py | 11
-rw-r--r--  tests/cloud_tests/testcases/modules/timezone.py | 2
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py | 7
-rw-r--r--  tests/unittests/test_datasource/test_digitalocean.py | 14
-rw-r--r--  tests/unittests/test_datasource/test_gce.py | 14
-rw-r--r--  tests/unittests/test_datasource/test_opennebula.py | 9
-rw-r--r--  tests/unittests/test_distros/test_netconfig.py | 354
-rw-r--r--  tests/unittests/test_distros/test_resolv.py | 2
-rw-r--r--  tests/unittests/test_handler/test_handler_disk_setup.py | 44
-rw-r--r--  tests/unittests/test_net.py | 633
-rw-r--r--  tests/unittests/test_version.py | 14
-rwxr-xr-x  tools/ds-identify | 78
-rwxr-xr-x  tools/net-convert.py | 84
-rw-r--r--  tox.ini | 17
58 files changed, 2691 insertions, 274 deletions
diff --git a/.gitignore b/.gitignore
index 865cac15..3946ec76 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,4 @@ dist
__pycache__
.tox
.coverage
+doc/rtd_html
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 00000000..b8cda03c
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,39 @@
+[MASTER]
+
+# --go-faster, use multiple processes to speed up Pylint
+jobs=4
+
+
+[MESSAGES CONTROL]
+
+# Errors only
+disable=C, F, I, R, W
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+output-format=colorized
+
+# Just the errors please, no full report
+reports=no
+
+
+[TYPECHECK]
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis. It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=six.moves,pkg_resources
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=types,http.client,command_handlers
+
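The new .pylintrc limits pylint to error-class checks (everything else is disabled), with colorized output and no summary report. A minimal sketch of a local run against the tree using pylint's Python entry point; the exact invocation the project uses lives in the tox.ini change at the end of this merge, so the paths and call below are an assumption:

    # Hypothetical local run; tox normally drives this.
    from pylint.lint import Run

    Run(['--rcfile=.pylintrc', 'cloudinit', 'tests', 'tools'])
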
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 6ff4e1c0..fd221323 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -680,6 +680,10 @@ def status_wrapper(name, args, data_d=None, link_d=None):
return len(v1[mode]['errors'])
+def main_features(name, args):
+ sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n')
+
+
def main(sysv_args=None):
if sysv_args is not None:
parser = argparse.ArgumentParser(prog=sysv_args[0])
@@ -770,6 +774,10 @@ def main(sysv_args=None):
' upon'))
parser_dhclient.set_defaults(action=('dhclient_hook', dhclient_hook))
+ parser_features = subparsers.add_parser('features',
+ help=('list defined features'))
+ parser_features.set_defaults(action=('features', main_features))
+
args = parser.parse_args(args=sysv_args)
try:
@@ -788,6 +796,7 @@ def main(sysv_args=None):
if name in ("modules", "init"):
functor = status_wrapper
+ rname = None
report_on = True
if name == "init":
if args.local:
@@ -802,10 +811,10 @@ def main(sysv_args=None):
rname, rdesc = ("single/%s" % args.name,
"running single module %s" % args.name)
report_on = args.report
-
- elif name == 'dhclient_hook':
- rname, rdesc = ("dhclient-hook",
- "running dhclient-hook module")
+ else:
+ rname = name
+ rdesc = "running 'cloud-init %s'" % name
+ report_on = False
args.reporter = events.ReportEventStack(
rname, rdesc, reporting_enabled=report_on)
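The new 'features' subcommand simply prints the sorted contents of version.FEATURES, giving scripts a way to probe what the installed cloud-init supports. For reference, the equivalent Python (the actual feature names depend on the installed cloudinit.version module and are not shown in this diff):

    # What `cloud-init features` does under the hood.
    import sys
    from cloudinit import version

    sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n')
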
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 7f09c919..06804e85 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -278,15 +278,29 @@ def handle(name, ocfg, cloud, log, _):
raise ValueError("Expected dictionary for 'apt' config, found %s",
type(cfg))
- LOG.debug("handling apt (module %s) with apt config '%s'", name, cfg)
+ apply_debconf_selections(cfg, target)
+ apply_apt(cfg, cloud, target)
+
+
+def apply_apt(cfg, cloud, target):
+ # cfg is the 'apt' top level dictionary already in 'v3' format.
+ if not cfg:
+ # no config was provided. If apt configuration does not seem
+ # necessary on this system, then return.
+ if util.system_is_snappy():
+ LOG.debug("Nothing to do: No apt config and running on snappy")
+ return
+ if not (util.which('apt-get') or util.which('apt')):
+ LOG.debug("Nothing to do: No apt config and no apt commands")
+ return
+
+ LOG.debug("handling apt config: %s", cfg)
release = util.lsb_release(target=target)['codename']
arch = util.get_architecture(target)
mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
LOG.debug("Apt Mirror info: %s", mirrors)
- apply_debconf_selections(cfg, target)
-
if util.is_false(cfg.get('preserve_sources_list', False)):
generate_sources_list(cfg, release, mirrors, cloud)
rename_apt_lists(mirrors, target)
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index f6564e5c..2be2532c 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -302,7 +302,7 @@ def install_chef(cloud, chef_cfg, log):
retries = max(0, util.get_cfg_option_int(chef_cfg,
"omnibus_url_retries",
default=OMNIBUS_URL_RETRIES))
- content = url_helper.readurl(url=url, retries=retries)
+ content = url_helper.readurl(url=url, retries=retries).contents
with util.tempdir() as tmpd:
# Use tmpdir over tmpfile to avoid 'text file busy' on execute
tmpf = "%s/chef-omnibus-install" % tmpd
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 38df13ab..f39f0815 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -201,7 +201,7 @@ def update_fs_setup_devices(disk_setup, tformer):
if part and 'partition' in definition:
definition['_partition'] = definition['partition']
- definition['partition'] = part
+ definition['partition'] = part
def value_splitter(values, start=None):
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 832bb3fd..089693e8 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -247,7 +247,16 @@ def devent2dev(devent):
result = util.get_mount_info(devent)
if not result:
raise ValueError("Could not determine device of '%s' % dev_ent")
- return result[0]
+ dev = result[0]
+
+ container = util.is_container()
+
+ # Ensure the path is a block device.
+ if (dev == "/dev/root" and not os.path.exists(dev) and not container):
+ dev = util.rootdev_from_cmdline(util.get_cmdline())
+ if dev is None:
+ raise ValueError("Unable to find device '/dev/root'")
+ return dev
def resize_devices(resizer, devices):
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index e028abf4..60e3ab53 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -71,25 +71,6 @@ RESIZE_FS_PREFIXES_CMDS = [
NOBLOCK = "noblock"
-def rootdev_from_cmdline(cmdline):
- found = None
- for tok in cmdline.split():
- if tok.startswith("root="):
- found = tok[5:]
- break
- if found is None:
- return None
-
- if found.startswith("/dev/"):
- return found
- if found.startswith("LABEL="):
- return "/dev/disk/by-label/" + found[len("LABEL="):]
- if found.startswith("UUID="):
- return "/dev/disk/by-uuid/" + found[len("UUID="):]
-
- return "/dev/" + found
-
-
def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
resize_root = args[0]
@@ -121,7 +102,7 @@ def handle(name, cfg, _cloud, log, args):
# Ensure the path is a block device.
if (devpth == "/dev/root" and not os.path.exists(devpth) and
not container):
- devpth = rootdev_from_cmdline(util.get_cmdline())
+ devpth = util.rootdev_from_cmdline(util.get_cmdline())
if devpth is None:
log.warn("Unable to find device '/dev/root'")
return
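The helper deleted above moves to cloudinit.util as rootdev_from_cmdline so cc_growpart and cc_resizefs can share it. Based on the removed implementation, the mapping works out as follows (kernel command lines and identifiers below are illustrative, assuming util.rootdev_from_cmdline keeps the same behaviour):

    from cloudinit import util

    util.rootdev_from_cmdline("ro quiet root=UUID=0dd6a7e1")    # /dev/disk/by-uuid/0dd6a7e1
    util.rootdev_from_cmdline("root=LABEL=cloudimg-rootfs ro")  # /dev/disk/by-label/cloudimg-rootfs
    util.rootdev_from_cmdline("root=/dev/vda1")                 # /dev/vda1 (passed through)
    util.rootdev_from_cmdline("root=vda1")                      # /dev/vda1 (prefixed)
    util.rootdev_from_cmdline("console=ttyS0")                  # None (no root= token)
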
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index cf1f59ec..eb0bdab0 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -23,7 +23,8 @@ If the ``list`` key is provided, a list of
``username:password`` pairs can be specified. The usernames specified
must already exist on the system, or have been created using the
``cc_users_groups`` module. A password can be randomly generated using
-``username:RANDOM`` or ``username:R``. Password ssh authentication can be
+``username:RANDOM`` or ``username:R``. A hashed password can be specified
+using ``username:$6$salt$hash``. Password ssh authentication can be
enabled, disabled, or left to system defaults using ``ssh_pwauth``.
.. note::
@@ -45,13 +46,25 @@ enabled, disabled, or left to system defaults using ``ssh_pwauth``.
expire: <true/false>
chpasswd:
+ list: |
+ user1:password1
+ user2:RANDOM
+ user3:password3
+ user4:R
+
+ ##
+ # or as yaml list
+ ##
+ chpasswd:
list:
- user1:password1
- - user2:Random
+ - user2:RANDOM
- user3:password3
- user4:R
+ - user4:$6$rL..$ej...
"""
+import re
import sys
from cloudinit.distros import ug_util
@@ -79,38 +92,66 @@ def handle(_name, cfg, cloud, log, args):
if 'chpasswd' in cfg:
chfg = cfg['chpasswd']
- plist = util.get_cfg_option_str(chfg, 'list', plist)
+ if 'list' in chfg and chfg['list']:
+ if isinstance(chfg['list'], list):
+ log.debug("Handling input for chpasswd as list.")
+ plist = util.get_cfg_option_list(chfg, 'list', plist)
+ else:
+ log.debug("Handling input for chpasswd as multiline string.")
+ plist = util.get_cfg_option_str(chfg, 'list', plist)
+ if plist:
+ plist = plist.splitlines()
+
expire = util.get_cfg_option_bool(chfg, 'expire', expire)
if not plist and password:
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
if user:
- plist = "%s:%s" % (user, password)
+ plist = ["%s:%s" % (user, password)]
else:
log.warn("No default or defined user to change password for.")
errors = []
if plist:
plist_in = []
+ hashed_plist_in = []
+ hashed_users = []
randlist = []
users = []
- for line in plist.splitlines():
+ prog = re.compile(r'\$[1,2a,2y,5,6](\$.+){2}')
+ for line in plist:
u, p = line.split(':', 1)
- if p == "R" or p == "RANDOM":
- p = rand_user_password()
- randlist.append("%s:%s" % (u, p))
- plist_in.append("%s:%s" % (u, p))
- users.append(u)
+ if prog.match(p) is not None and ":" not in p:
+ hashed_plist_in.append("%s:%s" % (u, p))
+ hashed_users.append(u)
+ else:
+ if p == "R" or p == "RANDOM":
+ p = rand_user_password()
+ randlist.append("%s:%s" % (u, p))
+ plist_in.append("%s:%s" % (u, p))
+ users.append(u)
ch_in = '\n'.join(plist_in) + '\n'
- try:
- log.debug("Changing password for %s:", users)
- util.subp(['chpasswd'], ch_in)
- except Exception as e:
- errors.append(e)
- util.logexc(log, "Failed to set passwords with chpasswd for %s",
- users)
+ if users:
+ try:
+ log.debug("Changing password for %s:", users)
+ util.subp(['chpasswd'], ch_in)
+ except Exception as e:
+ errors.append(e)
+ util.logexc(
+ log, "Failed to set passwords with chpasswd for %s", users)
+
+ hashed_ch_in = '\n'.join(hashed_plist_in) + '\n'
+ if hashed_users:
+ try:
+ log.debug("Setting hashed password for %s:", hashed_users)
+ util.subp(['chpasswd', '-e'], hashed_ch_in)
+ except Exception as e:
+ errors.append(e)
+ util.logexc(
+ log, "Failed to set hashed passwords with chpasswd for %s",
+ hashed_users)
if len(randlist):
blurb = ("Set the following 'random' passwords\n",
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index f3d395b9..803ac74e 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -22,6 +22,7 @@ from cloudinit import log as logging
from cloudinit import net
from cloudinit.net import eni
from cloudinit.net import network_state
+from cloudinit.net import renderers
from cloudinit import ssh_util
from cloudinit import type_utils
from cloudinit import util
@@ -50,6 +51,7 @@ class Distro(object):
hostname_conf_fn = "/etc/hostname"
tz_zone_dir = "/usr/share/zoneinfo"
init_cmd = ['service'] # systemctl, service etc
+ renderer_configs = {}
def __init__(self, name, cfg, paths):
self._paths = paths
@@ -69,6 +71,17 @@ class Distro(object):
def _write_network_config(self, settings):
raise NotImplementedError()
+ def _supported_write_network_config(self, network_config):
+ priority = util.get_cfg_by_path(
+ self._cfg, ('network', 'renderers'), None)
+
+ name, render_cls = renderers.select(priority=priority)
+ LOG.debug("Selected renderer '%s' from priority list: %s",
+ name, priority)
+ renderer = render_cls(config=self.renderer_configs.get(name))
+ renderer.render_network_config(network_config=network_config)
+ return []
+
def _find_tz_file(self, tz):
tz_file = os.path.join(self.tz_zone_dir, str(tz))
if not os.path.isfile(tz_file):
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 48ccec8c..3f0f9d53 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -13,8 +13,6 @@ import os
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
-from cloudinit.net import eni
-from cloudinit.net.network_state import parse_net_config_data
from cloudinit import util
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -38,11 +36,23 @@ ENI_HEADER = """# This file is generated from information provided by
# network: {config: disabled}
"""
+NETWORK_CONF_FN = "/etc/network/interfaces.d/50-cloud-init.cfg"
+
class Distro(distros.Distro):
hostname_conf_fn = "/etc/hostname"
locale_conf_fn = "/etc/default/locale"
- network_conf_fn = "/etc/network/interfaces.d/50-cloud-init.cfg"
+ network_conf_fn = {
+ "eni": "/etc/network/interfaces.d/50-cloud-init.cfg",
+ "netplan": "/etc/netplan/50-cloud-init.yaml"
+ }
+ renderer_configs = {
+ "eni": {"eni_path": network_conf_fn["eni"],
+ "eni_header": ENI_HEADER},
+ "netplan": {"netplan_path": network_conf_fn["netplan"],
+ "netplan_header": ENI_HEADER,
+ "postcmds": True}
+ }
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -51,12 +61,6 @@ class Distro(distros.Distro):
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
self.osfamily = 'debian'
- self._net_renderer = eni.Renderer({
- 'eni_path': self.network_conf_fn,
- 'eni_header': ENI_HEADER,
- 'links_path_prefix': None,
- 'netrules_path': None,
- })
def apply_locale(self, locale, out_fn=None):
if not out_fn:
@@ -76,14 +80,13 @@ class Distro(distros.Distro):
self.package_command('install', pkgs=pkglist)
def _write_network(self, settings):
- util.write_file(self.network_conf_fn, settings)
+ # this is a legacy method, it will always write eni
+ util.write_file(self.network_conf_fn["eni"], settings)
return ['all']
def _write_network_config(self, netconfig):
- ns = parse_net_config_data(netconfig)
- self._net_renderer.render_network_state("/", ns)
_maybe_remove_legacy_eth0()
- return []
+ return self._supported_write_network_config(netconfig)
def _bring_up_interfaces(self, device_names):
use_all = False
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index ff6ee307..d1f8a042 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -6,9 +6,11 @@
from six import StringIO
+from cloudinit.distros.parsers import chop_comment
+from cloudinit import log as logging
from cloudinit import util
-from cloudinit.distros.parsers import chop_comment
+LOG = logging.getLogger(__name__)
# See: man resolv.conf
@@ -79,9 +81,10 @@ class ResolvConf(object):
if len(new_ns) == len(current_ns):
return current_ns
if len(current_ns) >= 3:
- # Hard restriction on only 3 name servers
- raise ValueError(("Adding %r would go beyond the "
- "'3' maximum name servers") % (ns))
+ LOG.warn("ignoring nameserver %r: adding would "
+ "exceed the maximum of "
+ "'3' name servers (see resolv.conf(5))" % (ns))
+ return current_ns[:3]
self._remove_option('nameserver')
for n in new_ns:
self._contents.append(('option', ['nameserver', n, '']))
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 7498c63a..372c7d0f 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -11,8 +11,6 @@
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
-from cloudinit.net.network_state import parse_net_config_data
-from cloudinit.net import sysconfig
from cloudinit import util
from cloudinit.distros import net_util
@@ -49,16 +47,13 @@ class Distro(distros.Distro):
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
self.osfamily = 'redhat'
- self._net_renderer = sysconfig.Renderer()
cfg['ssh_svcname'] = 'sshd'
def install_packages(self, pkglist):
self.package_command('install', pkgs=pkglist)
def _write_network_config(self, netconfig):
- ns = parse_net_config_data(netconfig)
- self._net_renderer.render_network_state("/", ns)
- return []
+ return self._supported_write_network_config(netconfig)
def _write_network(self, settings):
# TODO(harlowja) fix this... since this is the ubuntu format
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index ea649cc2..346be5d3 100755..100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -82,6 +82,10 @@ def is_wireless(devname):
return os.path.exists(sys_dev_path(devname, "wireless"))
+def is_bridge(devname):
+ return os.path.exists(sys_dev_path(devname, "bridge"))
+
+
def is_connected(devname):
# is_connected isn't really as simple as that. 2 is
# 'physically connected'. 3 is 'not connected'. but a wlan interface will
@@ -132,7 +136,7 @@ def generate_fallback_config():
for interface in potential_interfaces:
if interface.startswith("veth"):
continue
- if os.path.exists(sys_dev_path(interface, "bridge")):
+ if is_bridge(interface):
# skip any bridges
continue
carrier = read_sys_net_int(interface, 'carrier')
@@ -187,7 +191,11 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
"""read the network config and rename devices accordingly.
if strict_present is false, then do not raise exception if no devices
match. if strict_busy is false, then do not raise exception if the
- device cannot be renamed because it is currently configured."""
+ device cannot be renamed because it is currently configured.
+
+ renames are only attempted for interfaces of type 'physical'. It is
+ expected that the network system will create other devices with the
+ correct name in place."""
renames = []
for ent in netcfg.get('config', {}):
if ent.get('type') != 'physical':
@@ -201,13 +209,35 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
return _rename_interfaces(renames)
+def interface_has_own_mac(ifname, strict=False):
+ """return True if the provided interface has its own address.
+
+ Based on addr_assign_type in /sys. Return true for any interface
+ that does not have a 'stolen' address. Examples of such devices
+ are bonds or vlans that inherit their mac from another device.
+ Possible values are:
+ 0: permanent address 2: stolen from another device
+ 1: randomly generated 3: set using dev_set_mac_address"""
+
+ assign_type = read_sys_net_int(ifname, "addr_assign_type")
+ if strict and assign_type is None:
+ raise ValueError("%s had no addr_assign_type.")
+ return assign_type in (0, 1, 3)
+
+
def _get_current_rename_info(check_downable=True):
- """Collect information necessary for rename_interfaces."""
- names = get_devicelist()
+ """Collect information necessary for rename_interfaces.
+
+ returns a dictionary by mac address like:
+ {mac:
+ {'name': name
+ 'up': boolean: is_up(name),
+ 'downable': None or boolean indicating that the
+ device has only automatically assigned ip addrs.}}
+ """
bymac = {}
- for n in names:
- bymac[get_interface_mac(n)] = {
- 'name': n, 'up': is_up(n), 'downable': None}
+ for mac, name in get_interfaces_by_mac().items():
+ bymac[mac] = {'name': name, 'up': is_up(name), 'downable': None}
if check_downable:
nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
@@ -346,22 +376,37 @@ def get_interface_mac(ifname):
return read_sys_net_safe(ifname, path)
-def get_interfaces_by_mac(devs=None):
- """Build a dictionary of tuples {mac: name}"""
- if devs is None:
- try:
- devs = get_devicelist()
- except OSError as e:
- if e.errno == errno.ENOENT:
- devs = []
- else:
- raise
+def get_interfaces_by_mac():
+ """Build a dictionary of tuples {mac: name}.
+
+ Bridges and any devices that have a 'stolen' mac are excluded."""
+ try:
+ devs = get_devicelist()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ devs = []
+ else:
+ raise
ret = {}
for name in devs:
+ if not interface_has_own_mac(name):
+ continue
+ if is_bridge(name):
+ continue
mac = get_interface_mac(name)
# some devices may not have a mac (tun0)
- if mac:
- ret[mac] = name
+ if not mac:
+ continue
+ if mac in ret:
+ raise RuntimeError(
+ "duplicate mac found! both '%s' and '%s' have mac '%s'" %
+ (name, ret[mac], mac))
+ ret[mac] = name
return ret
+
+class RendererNotFoundError(RuntimeError):
+ pass
+
+
# vi: ts=4 expandtab
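interface_has_own_mac keys off addr_assign_type in sysfs, so bonds, vlans and other devices with a 'stolen' MAC drop out of get_interfaces_by_mac (bridges are filtered separately). A rough standalone equivalent of the check, reading sysfs directly (illustrative only; cloud-init goes through read_sys_net_int):

    import os

    def has_own_mac(ifname):
        # addr_assign_type: 0 permanent, 1 random, 2 stolen, 3 set via dev_set_mac_address
        path = "/sys/class/net/%s/addr_assign_type" % ifname
        if not os.path.exists(path):
            return None
        with open(path) as fp:
            return int(fp.read().strip()) in (0, 1, 3)
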
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 5b249f1f..9819d4f5 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -8,6 +8,7 @@ import re
from . import ParserError
from . import renderer
+from .network_state import subnet_is_ipv6
from cloudinit import util
@@ -111,16 +112,6 @@ def _iface_start_entry(iface, index, render_hwaddress=False):
return lines
-def _subnet_is_ipv6(subnet):
- # 'static6' or 'dhcp6'
- if subnet['type'].endswith('6'):
- # This is a request for DHCPv6.
- return True
- elif subnet['type'] == 'static' and ":" in subnet['address']:
- return True
- return False
-
-
def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
"""Parses the file contents, placing result into ifaces.
@@ -273,8 +264,11 @@ def _ifaces_to_net_config_data(ifaces):
# devname is 'eth0' for name='eth0:1'
devname = name.partition(":")[0]
if devname not in devs:
- devs[devname] = {'type': 'physical', 'name': devname,
- 'subnets': []}
+ if devname == "lo":
+ dtype = "loopback"
+ else:
+ dtype = "physical"
+ devs[devname] = {'type': dtype, 'name': devname, 'subnets': []}
# this isnt strictly correct, but some might specify
# hwaddress on a nic for matching / declaring name.
if 'hwaddress' in data:
@@ -367,7 +361,7 @@ class Renderer(renderer.Renderer):
iface['mode'] = subnet['type']
iface['control'] = subnet.get('control', 'auto')
subnet_inet = 'inet'
- if _subnet_is_ipv6(subnet):
+ if subnet_is_ipv6(subnet):
subnet_inet += '6'
iface['inet'] = subnet_inet
if subnet['type'].startswith('dhcp'):
@@ -423,10 +417,11 @@ class Renderer(renderer.Renderer):
bonding
'''
order = {
- 'physical': 0,
- 'bond': 1,
- 'bridge': 2,
- 'vlan': 3,
+ 'loopback': 0,
+ 'physical': 1,
+ 'bond': 2,
+ 'bridge': 3,
+ 'vlan': 4,
}
sections = []
@@ -444,14 +439,14 @@ class Renderer(renderer.Renderer):
return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n"
- def render_network_state(self, target, network_state):
- fpeni = os.path.join(target, self.eni_path)
+ def render_network_state(self, network_state, target=None):
+ fpeni = util.target_path(target, self.eni_path)
util.ensure_dir(os.path.dirname(fpeni))
header = self.eni_header if self.eni_header else ""
util.write_file(fpeni, header + self._render_interfaces(network_state))
if self.netrules_path:
- netrules = os.path.join(target, self.netrules_path)
+ netrules = util.target_path(target, self.netrules_path)
util.ensure_dir(os.path.dirname(netrules))
util.write_file(netrules,
self._render_persistent_net(network_state))
@@ -461,7 +456,7 @@ class Renderer(renderer.Renderer):
links_prefix=self.links_path_prefix)
def _render_systemd_links(self, target, network_state, links_prefix):
- fp_prefix = os.path.join(target, links_prefix)
+ fp_prefix = util.target_path(target, links_prefix)
for f in glob.glob(fp_prefix + "*"):
os.unlink(f)
for iface in network_state.iter_interfaces():
@@ -482,7 +477,7 @@ class Renderer(renderer.Renderer):
def network_state_to_eni(network_state, header=None, render_hwaddress=False):
# render the provided network state, return a string of equivalent eni
eni_path = 'etc/network/interfaces'
- renderer = Renderer({
+ renderer = Renderer(config={
'eni_path': eni_path,
'eni_header': header,
'links_path_prefix': None,
@@ -496,4 +491,18 @@ def network_state_to_eni(network_state, header=None, render_hwaddress=False):
network_state, render_hwaddress=render_hwaddress)
return header + contents
+
+def available(target=None):
+ expected = ['ifquery', 'ifup', 'ifdown']
+ search = ['/sbin', '/usr/sbin']
+ for p in expected:
+ if not util.which(p, search=search, target=target):
+ return False
+ eni = util.target_path(target, 'etc/network/interfaces')
+ if not os.path.isfile(eni):
+ return False
+
+ return True
+
+
# vi: ts=4 expandtab
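render_network_state now takes the parsed state first with an optional target root, and eni.available() gates selection on ifup/ifdown/ifquery plus an existing /etc/network/interfaces. A small usage sketch against a scratch root (the v1 config dict and paths are illustrative):

    from cloudinit.net import eni
    from cloudinit.net.network_state import parse_net_config_data

    v1 = {'version': 1, 'config': [
        {'type': 'physical', 'name': 'eth0', 'mac_address': '52:54:00:12:34:00',
         'subnets': [{'type': 'dhcp'}]}]}

    renderer = eni.Renderer(config={'eni_path': 'etc/network/interfaces.d/50-cloud-init.cfg'})
    renderer.render_network_state(parse_net_config_data(v1), target='/tmp/scratch-root')
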
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
new file mode 100644
index 00000000..825fe831
--- /dev/null
+++ b/cloudinit/net/netplan.py
@@ -0,0 +1,412 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import os
+
+from . import renderer
+from .network_state import subnet_is_ipv6
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.net import SYS_CLASS_NET, get_devicelist
+
+KNOWN_SNAPD_CONFIG = b"""\
+# This is the initial network config.
+# It can be overwritten by cloud-init or console-conf.
+network:
+ version: 2
+ ethernets:
+ all-en:
+ match:
+ name: "en*"
+ dhcp4: true
+ all-eth:
+ match:
+ name: "eth*"
+ dhcp4: true
+"""
+
+LOG = logging.getLogger(__name__)
+NET_CONFIG_TO_V2 = {
+ 'bond': {'bond-ad-select': 'ad-select',
+ 'bond-arp-interval': 'arp-interval',
+ 'bond-arp-ip-target': 'arp-ip-target',
+ 'bond-arp-validate': 'arp-validate',
+ 'bond-downdelay': 'down-delay',
+ 'bond-fail-over-mac': 'fail-over-mac-policy',
+ 'bond-lacp-rate': 'lacp-rate',
+ 'bond-miimon': 'mii-monitor-interval',
+ 'bond-min-links': 'min-links',
+ 'bond-mode': 'mode',
+ 'bond-num-grat-arp': 'gratuitious-arp',
+ 'bond-primary-reselect': 'primary-reselect-policy',
+ 'bond-updelay': 'up-delay',
+ 'bond-xmit_hash_policy': 'transmit_hash_policy'},
+ 'bridge': {'bridge_ageing': 'ageing-time',
+ 'bridge_bridgeprio': 'priority',
+ 'bridge_fd': 'forward-delay',
+ 'bridge_gcint': None,
+ 'bridge_hello': 'hello-time',
+ 'bridge_maxage': 'max-age',
+ 'bridge_maxwait': None,
+ 'bridge_pathcost': 'path-cost',
+ 'bridge_portprio': None,
+ 'bridge_waitport': None}}
+
+
+def _get_params_dict_by_match(config, match):
+ return dict((key, value) for (key, value) in config.items()
+ if key.startswith(match))
+
+
+def _extract_addresses(config, entry):
+ """This method parse a cloudinit.net.network_state dictionary (config) and
+ maps netstate keys/values into a dictionary (entry) to represent
+ netplan yaml.
+
+ An example config dictionary might look like:
+
+ {'mac_address': '52:54:00:12:34:00',
+ 'name': 'interface0',
+ 'subnets': [
+ {'address': '192.168.1.2/24',
+ 'mtu': 1501,
+ 'type': 'static'},
+ {'address': '2001:4800:78ff:1b:be76:4eff:fe06:1000",
+ 'mtu': 1480,
+ 'netmask': 64,
+ 'type': 'static'}],
+ 'type: physical'
+ }
+
+ An entry dictionary looks like:
+
+ {'set-name': 'interface0',
+ 'match': {'macaddress': '52:54:00:12:34:00'},
+ 'mtu': 1501}
+
+ After modification returns
+
+ {'set-name': 'interface0',
+ 'match': {'macaddress': '52:54:00:12:34:00'},
+ 'mtu': 1501,
+ 'address': ['192.168.1.2/24', '2001:4800:78ff:1b:be76:4eff:fe06:1000"],
+ 'mtu6': 1480}
+
+ """
+
+ def _listify(obj, token=' '):
+ "Helper to convert strings to list of strings, handle single string"
+ if not obj or type(obj) not in [str]:
+ return obj
+ if token in obj:
+ return obj.split(token)
+ else:
+ return [obj, ]
+
+ addresses = []
+ routes = []
+ nameservers = []
+ searchdomains = []
+ subnets = config.get('subnets', [])
+ if subnets is None:
+ subnets = []
+ for subnet in subnets:
+ sn_type = subnet.get('type')
+ if sn_type.startswith('dhcp'):
+ if sn_type == 'dhcp':
+ sn_type += '4'
+ entry.update({sn_type: True})
+ elif sn_type in ['static']:
+ addr = "%s" % subnet.get('address')
+ if 'netmask' in subnet:
+ addr += "/%s" % subnet.get('netmask')
+ if 'gateway' in subnet and subnet.get('gateway'):
+ gateway = subnet.get('gateway')
+ if ":" in gateway:
+ entry.update({'gateway6': gateway})
+ else:
+ entry.update({'gateway4': gateway})
+ if 'dns_nameservers' in subnet:
+ nameservers += _listify(subnet.get('dns_nameservers', []))
+ if 'dns_search' in subnet:
+ searchdomains += _listify(subnet.get('dns_search', []))
+ if 'mtu' in subnet:
+ mtukey = 'mtu'
+ if subnet_is_ipv6(subnet):
+ mtukey += '6'
+ entry.update({mtukey: subnet.get('mtu')})
+ for route in subnet.get('routes', []):
+ to_net = "%s/%s" % (route.get('network'),
+ route.get('netmask'))
+ route = {
+ 'via': route.get('gateway'),
+ 'to': to_net,
+ }
+ if 'metric' in route:
+ route.update({'metric': route.get('metric', 100)})
+ routes.append(route)
+
+ addresses.append(addr)
+
+ if len(addresses) > 0:
+ entry.update({'addresses': addresses})
+ if len(routes) > 0:
+ entry.update({'routes': routes})
+ if len(nameservers) > 0:
+ ns = {'addresses': nameservers}
+ entry.update({'nameservers': ns})
+ if len(searchdomains) > 0:
+ ns = entry.get('nameservers', {})
+ ns.update({'search': searchdomains})
+ entry.update({'nameservers': ns})
+
+
+def _extract_bond_slaves_by_name(interfaces, entry, bond_master):
+ bond_slave_names = sorted([name for (name, cfg) in interfaces.items()
+ if cfg.get('bond-master', None) == bond_master])
+ if len(bond_slave_names) > 0:
+ entry.update({'interfaces': bond_slave_names})
+
+
+def _clean_default(target=None):
+ # clean out any known default files and derived files in target
+ # LP: #1675576
+ tpath = util.target_path(target, "etc/netplan/00-snapd-config.yaml")
+ if not os.path.isfile(tpath):
+ return
+ content = util.load_file(tpath, decode=False)
+ if content != KNOWN_SNAPD_CONFIG:
+ return
+
+ derived = [util.target_path(target, f) for f in (
+ 'run/systemd/network/10-netplan-all-en.network',
+ 'run/systemd/network/10-netplan-all-eth.network',
+ 'run/systemd/generator/netplan.stamp')]
+ existing = [f for f in derived if os.path.isfile(f)]
+ LOG.debug("removing known config '%s' and derived existing files: %s",
+ tpath, existing)
+
+ for f in [tpath] + existing:
+ os.unlink(f)
+
+
+class Renderer(renderer.Renderer):
+ """Renders network information in a /etc/netplan/network.yaml format."""
+
+ NETPLAN_GENERATE = ['netplan', 'generate']
+
+ def __init__(self, config=None):
+ if not config:
+ config = {}
+ self.netplan_path = config.get('netplan_path',
+ 'etc/netplan/50-cloud-init.yaml')
+ self.netplan_header = config.get('netplan_header', None)
+ self._postcmds = config.get('postcmds', False)
+ self.clean_default = config.get('clean_default', True)
+
+ def render_network_state(self, target, network_state):
+ # check network state for version
+ # if v2, then extract network_state.config
+ # else render_v2_from_state
+ fpnplan = os.path.join(target, self.netplan_path)
+ util.ensure_dir(os.path.dirname(fpnplan))
+ header = self.netplan_header if self.netplan_header else ""
+
+ # render from state
+ content = self._render_content(network_state)
+
+ if not header.endswith("\n"):
+ header += "\n"
+ util.write_file(fpnplan, header + content)
+
+ if self.clean_default:
+ _clean_default(target=target)
+ self._netplan_generate(run=self._postcmds)
+ self._net_setup_link(run=self._postcmds)
+
+ def _netplan_generate(self, run=False):
+ if not run:
+ LOG.debug("netplan generate postcmd disabled")
+ return
+ util.subp(self.NETPLAN_GENERATE, capture=True)
+
+ def _net_setup_link(self, run=False):
+ """To ensure device link properties are applied, we poke
+ udev to re-evaluate networkd .link files and call
+ the setup_link udev builtin command
+ """
+ if not run:
+ LOG.debug("netplan net_setup_link postcmd disabled")
+ return
+ setup_lnk = ['udevadm', 'test-builtin', 'net_setup_link']
+ for cmd in [setup_lnk + [SYS_CLASS_NET + iface]
+ for iface in get_devicelist() if
+ os.path.islink(SYS_CLASS_NET + iface)]:
+ util.subp(cmd, capture=True)
+
+ def _render_content(self, network_state):
+ ethernets = {}
+ wifis = {}
+ bridges = {}
+ bonds = {}
+ vlans = {}
+ content = []
+
+ interfaces = network_state._network_state.get('interfaces', [])
+
+ nameservers = network_state.dns_nameservers
+ searchdomains = network_state.dns_searchdomains
+
+ for config in network_state.iter_interfaces():
+ ifname = config.get('name')
+ # filter None entries up front so we can do simple if key in dict
+ ifcfg = dict((key, value) for (key, value) in config.items()
+ if value)
+
+ if_type = ifcfg.get('type')
+ if if_type == 'physical':
+ # required_keys = ['name', 'mac_address']
+ eth = {
+ 'set-name': ifname,
+ 'match': ifcfg.get('match', None),
+ }
+ if eth['match'] is None:
+ macaddr = ifcfg.get('mac_address', None)
+ if macaddr is not None:
+ eth['match'] = {'macaddress': macaddr.lower()}
+ else:
+ del eth['match']
+ del eth['set-name']
+ if 'mtu' in ifcfg:
+ eth['mtu'] = ifcfg.get('mtu')
+
+ _extract_addresses(ifcfg, eth)
+ ethernets.update({ifname: eth})
+
+ elif if_type == 'bond':
+ # required_keys = ['name', 'bond_interfaces']
+ bond = {}
+ bond_config = {}
+ # extract bond params and drop the bond_ prefix as it's
+ # redundent in v2 yaml format
+ v2_bond_map = NET_CONFIG_TO_V2.get('bond')
+ for match in ['bond_', 'bond-']:
+ bond_params = _get_params_dict_by_match(ifcfg, match)
+ for (param, value) in bond_params.items():
+ newname = v2_bond_map.get(param)
+ if newname is None:
+ continue
+ bond_config.update({newname: value})
+
+ if len(bond_config) > 0:
+ bond.update({'parameters': bond_config})
+ slave_interfaces = ifcfg.get('bond-slaves')
+ if slave_interfaces == 'none':
+ _extract_bond_slaves_by_name(interfaces, bond, ifname)
+ _extract_addresses(ifcfg, bond)
+ bonds.update({ifname: bond})
+
+ elif if_type == 'bridge':
+ # required_keys = ['name', 'bridge_ports']
+ ports = sorted(copy.copy(ifcfg.get('bridge_ports')))
+ bridge = {
+ 'interfaces': ports,
+ }
+ # extract bridge params and drop the bridge prefix as it's
+ # redundent in v2 yaml format
+ match_prefix = 'bridge_'
+ params = _get_params_dict_by_match(ifcfg, match_prefix)
+ br_config = {}
+
+ # v2 yaml uses different names for the keys
+ # and at least one value format change
+ v2_bridge_map = NET_CONFIG_TO_V2.get('bridge')
+ for (param, value) in params.items():
+ newname = v2_bridge_map.get(param)
+ if newname is None:
+ continue
+ br_config.update({newname: value})
+ if newname == 'path-cost':
+ # <interface> <cost> -> <interface>: int(<cost>)
+ newvalue = {}
+ for costval in value:
+ (port, cost) = costval.split()
+ newvalue[port] = int(cost)
+ br_config.update({newname: newvalue})
+ if len(br_config) > 0:
+ bridge.update({'parameters': br_config})
+ _extract_addresses(ifcfg, bridge)
+ bridges.update({ifname: bridge})
+
+ elif if_type == 'vlan':
+ # required_keys = ['name', 'vlan_id', 'vlan-raw-device']
+ vlan = {
+ 'id': ifcfg.get('vlan_id'),
+ 'link': ifcfg.get('vlan-raw-device')
+ }
+
+ _extract_addresses(ifcfg, vlan)
+ vlans.update({ifname: vlan})
+
+ # inject global nameserver values under each physical interface
+ if nameservers:
+ for _eth, cfg in ethernets.items():
+ nscfg = cfg.get('nameservers', {})
+ addresses = nscfg.get('addresses', [])
+ addresses += nameservers
+ nscfg.update({'addresses': addresses})
+ cfg.update({'nameservers': nscfg})
+
+ if searchdomains:
+ for _eth, cfg in ethernets.items():
+ nscfg = cfg.get('nameservers', {})
+ search = nscfg.get('search', [])
+ search += searchdomains
+ nscfg.update({'search': search})
+ cfg.update({'nameservers': nscfg})
+
+ # workaround yaml dictionary key sorting when dumping
+ def _render_section(name, section):
+ if section:
+ dump = util.yaml_dumps({name: section},
+ explicit_start=False,
+ explicit_end=False)
+ txt = util.indent(dump, ' ' * 4)
+ return [txt]
+ return []
+
+ content.append("network:\n version: 2\n")
+ content += _render_section('ethernets', ethernets)
+ content += _render_section('wifis', wifis)
+ content += _render_section('bonds', bonds)
+ content += _render_section('bridges', bridges)
+ content += _render_section('vlans', vlans)
+
+ return "".join(content)
+
+
+def available(target=None):
+ expected = ['netplan']
+ search = ['/usr/sbin', '/sbin']
+ for p in expected:
+ if not util.which(p, search=search, target=target):
+ return False
+ return True
+
+
+def network_state_to_netplan(network_state, header=None):
+ # render the provided network state, return a string of equivalent eni
+ netplan_path = 'etc/network/50-cloud-init.yaml'
+ renderer = Renderer({
+ 'netplan_path': netplan_path,
+ 'netplan_header': header,
+ })
+ if not header:
+ header = ""
+ if not header.endswith("\n"):
+ header += "\n"
+ contents = renderer._render_content(network_state)
+ return header + contents
+
+# vi: ts=4 expandtab
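For a quick look at what this renderer emits, network_state_to_netplan takes a parsed network state and returns the netplan v2 YAML as a string (no files written, no postcmds). A sketch with an illustrative v1 input:

    from cloudinit.net.netplan import network_state_to_netplan
    from cloudinit.net.network_state import parse_net_config_data

    v1 = {'version': 1, 'config': [
        {'type': 'physical', 'name': 'eth0', 'mac_address': '52:54:00:12:34:00',
         'subnets': [{'type': 'static', 'address': '192.168.1.2/24',
                      'gateway': '192.168.1.1'}]}]}

    print(network_state_to_netplan(parse_net_config_data(v1), header="# illustrative header\n"))
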
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 11ef585b..692b6007 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2013-2014 Canonical Ltd.
+# Copyright (C) 2017 Canonical Ltd.
#
# Author: Ryan Harper <ryan.harper@canonical.com>
#
@@ -18,6 +18,10 @@ NETWORK_STATE_VERSION = 1
NETWORK_STATE_REQUIRED_KEYS = {
1: ['version', 'config', 'network_state'],
}
+NETWORK_V2_KEY_FILTER = [
+ 'addresses', 'dhcp4', 'dhcp6', 'gateway4', 'gateway6', 'interfaces',
+ 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan'
+]
def parse_net_config_data(net_config, skip_broken=True):
@@ -26,11 +30,18 @@ def parse_net_config_data(net_config, skip_broken=True):
:param net_config: curtin network config dict
"""
state = None
- if 'version' in net_config and 'config' in net_config:
- nsi = NetworkStateInterpreter(version=net_config.get('version'),
- config=net_config.get('config'))
+ version = net_config.get('version')
+ config = net_config.get('config')
+ if version == 2:
+ # v2 does not have explicit 'config' key so we
+ # pass the whole net-config as-is
+ config = net_config
+
+ if version and config:
+ nsi = NetworkStateInterpreter(version=version, config=config)
nsi.parse_config(skip_broken=skip_broken)
- state = nsi.network_state
+ state = nsi.get_network_state()
+
return state
@@ -106,6 +117,7 @@ class NetworkState(object):
def __init__(self, network_state, version=NETWORK_STATE_VERSION):
self._network_state = copy.deepcopy(network_state)
self._version = version
+ self.use_ipv6 = network_state.get('use_ipv6', False)
@property
def version(self):
@@ -152,7 +164,8 @@ class NetworkStateInterpreter(object):
'dns': {
'nameservers': [],
'search': [],
- }
+ },
+ 'use_ipv6': False,
}
def __init__(self, version=NETWORK_STATE_VERSION, config=None):
@@ -165,6 +178,14 @@ class NetworkStateInterpreter(object):
def network_state(self):
return NetworkState(self._network_state, version=self._version)
+ @property
+ def use_ipv6(self):
+ return self._network_state.get('use_ipv6')
+
+ @use_ipv6.setter
+ def use_ipv6(self, val):
+ self._network_state.update({'use_ipv6': val})
+
def dump(self):
state = {
'version': self._version,
@@ -192,8 +213,22 @@ class NetworkStateInterpreter(object):
def dump_network_state(self):
return util.yaml_dumps(self._network_state)
+ def as_dict(self):
+ return {'version': self._version, 'config': self._config}
+
+ def get_network_state(self):
+ ns = self.network_state
+ return ns
+
def parse_config(self, skip_broken=True):
- # rebuild network state
+ if self._version == 1:
+ self.parse_config_v1(skip_broken=skip_broken)
+ self._parsed = True
+ elif self._version == 2:
+ self.parse_config_v2(skip_broken=skip_broken)
+ self._parsed = True
+
+ def parse_config_v1(self, skip_broken=True):
for command in self._config:
command_type = command['type']
try:
@@ -211,6 +246,30 @@ class NetworkStateInterpreter(object):
exc_info=True)
LOG.debug(self.dump_network_state())
+ def parse_config_v2(self, skip_broken=True):
+ for command_type, command in self._config.items():
+ if command_type == 'version':
+ continue
+ try:
+ handler = self.command_handlers[command_type]
+ except KeyError:
+ raise RuntimeError("No handler found for"
+ " command '%s'" % command_type)
+ try:
+ handler(self, command)
+ self._v2_common(command)
+ except InvalidCommand:
+ if not skip_broken:
+ raise
+ else:
+ LOG.warn("Skipping invalid command: %s", command,
+ exc_info=True)
+ LOG.debug(self.dump_network_state())
+
+ @ensure_command_keys(['name'])
+ def handle_loopback(self, command):
+ return self.handle_physical(command)
+
@ensure_command_keys(['name'])
def handle_physical(self, command):
'''
@@ -234,11 +293,16 @@ class NetworkStateInterpreter(object):
if subnets:
for subnet in subnets:
if subnet['type'] == 'static':
+ if ':' in subnet['address']:
+ self.use_ipv6 = True
if 'netmask' in subnet and ':' in subnet['address']:
subnet['netmask'] = mask2cidr(subnet['netmask'])
for route in subnet.get('routes', []):
if 'netmask' in route:
route['netmask'] = mask2cidr(route['netmask'])
+ elif subnet['type'].endswith('6'):
+ self.use_ipv6 = True
+
iface.update({
'name': command.get('name'),
'type': command.get('type'),
@@ -323,7 +387,7 @@ class NetworkStateInterpreter(object):
bond_if.update({param: val})
self._network_state['interfaces'].update({ifname: bond_if})
- @ensure_command_keys(['name', 'bridge_interfaces', 'params'])
+ @ensure_command_keys(['name', 'bridge_interfaces'])
def handle_bridge(self, command):
'''
auto br0
@@ -369,7 +433,7 @@ class NetworkStateInterpreter(object):
self.handle_physical(command)
iface = interfaces.get(command.get('name'), {})
iface['bridge_ports'] = command['bridge_interfaces']
- for param, val in command.get('params').items():
+ for param, val in command.get('params', {}).items():
iface.update({param: val})
interfaces.update({iface['name']: iface})
@@ -403,6 +467,241 @@ class NetworkStateInterpreter(object):
}
routes.append(route)
+ # V2 handlers
+ def handle_bonds(self, command):
+ '''
+ v2_command = {
+ bond0: {
+ 'interfaces': ['interface0', 'interface1'],
+ 'miimon': 100,
+ 'mode': '802.3ad',
+ 'xmit_hash_policy': 'layer3+4'},
+ bond1: {
+ 'bond-slaves': ['interface2', 'interface7'],
+ 'mode': 1
+ }
+ }
+
+ v1_command = {
+ 'type': 'bond'
+ 'name': 'bond0',
+ 'bond_interfaces': [interface0, interface1],
+ 'params': {
+ 'bond-mode': '802.3ad',
+ 'bond_miimon: 100,
+ 'bond_xmit_hash_policy': 'layer3+4',
+ }
+ }
+
+ '''
+ self._handle_bond_bridge(command, cmd_type='bond')
+
+ def handle_bridges(self, command):
+
+ '''
+ v2_command = {
+ br0: {
+ 'interfaces': ['interface0', 'interface1'],
+ 'fd': 0,
+ 'stp': 'off',
+ 'maxwait': 0,
+ }
+ }
+
+ v1_command = {
+ 'type': 'bridge'
+ 'name': 'br0',
+ 'bridge_interfaces': [interface0, interface1],
+ 'params': {
+ 'bridge_stp': 'off',
+ 'bridge_fd: 0,
+ 'bridge_maxwait': 0
+ }
+ }
+
+ '''
+ self._handle_bond_bridge(command, cmd_type='bridge')
+
+ def handle_ethernets(self, command):
+ '''
+ ethernets:
+ eno1:
+ match:
+ macaddress: 00:11:22:33:44:55
+ wakeonlan: true
+ dhcp4: true
+ dhcp6: false
+ addresses:
+ - 192.168.14.2/24
+ - 2001:1::1/64
+ gateway4: 192.168.14.1
+ gateway6: 2001:1::2
+ nameservers:
+ search: [foo.local, bar.local]
+ addresses: [8.8.8.8, 8.8.4.4]
+ lom:
+ match:
+ driver: ixgbe
+ set-name: lom1
+ dhcp6: true
+ switchports:
+ match:
+ name: enp2*
+ mtu: 1280
+
+ command = {
+ 'type': 'physical',
+ 'mac_address': 'c0:d6:9f:2c:e8:80',
+ 'name': 'eth0',
+ 'subnets': [
+ {'type': 'dhcp4'}
+ ]
+ }
+ '''
+ for eth, cfg in command.items():
+ phy_cmd = {
+ 'type': 'physical',
+ 'name': cfg.get('set-name', eth),
+ }
+ mac_address = cfg.get('match', {}).get('macaddress', None)
+ if not mac_address:
+ LOG.debug('NetworkState Version2: missing "macaddress" info '
+ 'in config entry: %s: %s', eth, str(cfg))
+
+ for key in ['mtu', 'match', 'wakeonlan']:
+ if key in cfg:
+ phy_cmd.update({key: cfg.get(key)})
+
+ subnets = self._v2_to_v1_ipcfg(cfg)
+ if len(subnets) > 0:
+ phy_cmd.update({'subnets': subnets})
+
+ LOG.debug('v2(ethernets) -> v1(physical):\n%s', phy_cmd)
+ self.handle_physical(phy_cmd)
+
+ def handle_vlans(self, command):
+ '''
+ v2_vlans = {
+ 'eth0.123': {
+ 'id': 123,
+ 'link': 'eth0',
+ 'dhcp4': True,
+ }
+ }
+
+ v1_command = {
+ 'type': 'vlan',
+ 'name': 'eth0.123',
+ 'vlan_link': 'eth0',
+ 'vlan_id': 123,
+ 'subnets': [{'type': 'dhcp4'}],
+ }
+ '''
+ for vlan, cfg in command.items():
+ vlan_cmd = {
+ 'type': 'vlan',
+ 'name': vlan,
+ 'vlan_id': cfg.get('id'),
+ 'vlan_link': cfg.get('link'),
+ }
+ subnets = self._v2_to_v1_ipcfg(cfg)
+ if len(subnets) > 0:
+ vlan_cmd.update({'subnets': subnets})
+ LOG.debug('v2(vlans) -> v1(vlan):\n%s', vlan_cmd)
+ self.handle_vlan(vlan_cmd)
+
+ def handle_wifis(self, command):
+ raise NotImplementedError("NetworkState V2: "
+ "Skipping wifi configuration")
+
+ def _v2_common(self, cfg):
+ LOG.debug('v2_common: handling config:\n%s', cfg)
+ if 'nameservers' in cfg:
+ search = cfg.get('nameservers').get('search', [])
+ dns = cfg.get('nameservers').get('addresses', [])
+ name_cmd = {'type': 'nameserver'}
+ if len(search) > 0:
+ name_cmd.update({'search': search})
+ if len(dns) > 0:
+ name_cmd.update({'addresses': dns})
+ LOG.debug('v2(nameserver) -> v1(nameserver):\n%s', name_cmd)
+ self.handle_nameserver(name_cmd)
+
+ def _handle_bond_bridge(self, command, cmd_type=None):
+ """Common handler for bond and bridge types"""
+ for item_name, item_cfg in command.items():
+ item_params = dict((key, value) for (key, value) in
+ item_cfg.items() if key not in
+ NETWORK_V2_KEY_FILTER)
+ v1_cmd = {
+ 'type': cmd_type,
+ 'name': item_name,
+ cmd_type + '_interfaces': item_cfg.get('interfaces'),
+ 'params': item_params,
+ }
+ subnets = self._v2_to_v1_ipcfg(item_cfg)
+ if len(subnets) > 0:
+ v1_cmd.update({'subnets': subnets})
+
+ LOG.debug('v2(%ss) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd)
+ self.handle_bridge(v1_cmd)
+
+ def _v2_to_v1_ipcfg(self, cfg):
+ """Common ipconfig extraction from v2 to v1 subnets array."""
+
+ subnets = []
+ if 'dhcp4' in cfg:
+ subnets.append({'type': 'dhcp4'})
+ if 'dhcp6' in cfg:
+ self.use_ipv6 = True
+ subnets.append({'type': 'dhcp6'})
+
+ gateway4 = None
+ gateway6 = None
+ for address in cfg.get('addresses', []):
+ subnet = {
+ 'type': 'static',
+ 'address': address,
+ }
+
+ routes = []
+ for route in cfg.get('routes', []):
+ route_addr = route.get('to')
+ if "/" in route_addr:
+ route_addr, route_cidr = route_addr.split("/")
+ route_netmask = cidr2mask(route_cidr)
+ subnet_route = {
+ 'address': route_addr,
+ 'netmask': route_netmask,
+ 'gateway': route.get('via')
+ }
+ routes.append(subnet_route)
+ if len(routes) > 0:
+ subnet.update({'routes': routes})
+
+ if ":" in address:
+ if 'gateway6' in cfg and gateway6 is None:
+ gateway6 = cfg.get('gateway6')
+ subnet.update({'gateway': gateway6})
+ else:
+ if 'gateway4' in cfg and gateway4 is None:
+ gateway4 = cfg.get('gateway4')
+ subnet.update({'gateway': gateway4})
+
+ subnets.append(subnet)
+ return subnets
+
+
+def subnet_is_ipv6(subnet):
+ """Common helper for checking network_state subnets for ipv6."""
+ # 'static6' or 'dhcp6'
+ if subnet['type'].endswith('6'):
+ # This is a request for DHCPv6.
+ return True
+ elif subnet['type'] == 'static' and ":" in subnet['address']:
+ return True
+ return False
+
def cidr2mask(cidr):
mask = [0, 0, 0, 0]
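parse_net_config_data now accepts version 2 documents directly: the whole dict is handed to the interpreter and each top-level section (ethernets, bonds, bridges, vlans, wifis) is translated back into v1 commands before rendering. A minimal sketch (MAC address and names are illustrative):

    from cloudinit.net.network_state import parse_net_config_data

    v2 = {'version': 2,
          'ethernets': {
              'eth0': {'match': {'macaddress': '52:54:00:12:34:00'},
                       'set-name': 'eth0',
                       'dhcp4': True}}}

    ns = parse_net_config_data(v2)
    for iface in ns.iter_interfaces():
        print(iface['name'], iface.get('subnets'))
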
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 3a192436..c68658dc 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -5,8 +5,10 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import abc
import six
+from .network_state import parse_net_config_data
from .udev import generate_udev_rule
@@ -36,4 +38,12 @@ class Renderer(object):
iface['mac_address']))
return content.getvalue()
+ @abc.abstractmethod
+ def render_network_state(self, network_state, target=None):
+ """Render network state."""
+
+ def render_network_config(self, network_config, target=None):
+ return self.render_network_state(
+ network_state=parse_net_config_data(network_config), target=target)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
new file mode 100644
index 00000000..5117b4a5
--- /dev/null
+++ b/cloudinit/net/renderers.py
@@ -0,0 +1,53 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from . import eni
+from . import netplan
+from . import RendererNotFoundError
+from . import sysconfig
+
+NAME_TO_RENDERER = {
+ "eni": eni,
+ "netplan": netplan,
+ "sysconfig": sysconfig,
+}
+
+DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan"]
+
+
+def search(priority=None, target=None, first=False):
+ if priority is None:
+ priority = DEFAULT_PRIORITY
+
+ available = NAME_TO_RENDERER
+
+ unknown = [i for i in priority if i not in available]
+ if unknown:
+ raise ValueError(
+ "Unknown renderers provided in priority list: %s" % unknown)
+
+ found = []
+ for name in priority:
+ render_mod = available[name]
+ if render_mod.available(target):
+ cur = (name, render_mod.Renderer)
+ if first:
+ return cur
+ found.append(cur)
+
+ return found
+
+
+def select(priority=None, target=None):
+ found = search(priority, target=target, first=True)
+ if not found:
+ if priority is None:
+ priority = DEFAULT_PRIORITY
+ tmsg = ""
+ if target and target != "/":
+ tmsg = " in target=%s" % target
+ raise RendererNotFoundError(
+ "No available network renderers found%s. Searched "
+ "through list: %s" % (tmsg, priority))
+ return found
+
+# vi: ts=4 expandtab
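Renderer selection walks a priority list (default: eni, sysconfig, netplan), keeping the first backend whose available() check passes; distros can override the list through the network/renderers config path wired up in distros/__init__.py and settings.py. A usage sketch mirroring _supported_write_network_config (the config dict and target are illustrative):

    from cloudinit.net import renderers

    net_config = {'version': 1, 'config': [
        {'type': 'physical', 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}]}

    name, render_cls = renderers.select(priority=['netplan', 'eni', 'sysconfig'])
    renderer = render_cls(config={})
    renderer.render_network_config(network_config=net_config, target='/tmp/scratch-root')
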
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 6e7739fb..504e4d02 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -9,6 +9,7 @@ from cloudinit.distros.parsers import resolv_conf
from cloudinit import util
from . import renderer
+from .network_state import subnet_is_ipv6
def _make_header(sep='#'):
@@ -87,7 +88,8 @@ class Route(ConfigMap):
def __init__(self, route_name, base_sysconf_dir):
super(Route, self).__init__()
self.last_idx = 1
- self.has_set_default = False
+ self.has_set_default_ipv4 = False
+ self.has_set_default_ipv6 = False
self._route_name = route_name
self._base_sysconf_dir = base_sysconf_dir
@@ -95,7 +97,8 @@ class Route(ConfigMap):
r = Route(self._route_name, self._base_sysconf_dir)
r._conf = self._conf.copy()
r.last_idx = self.last_idx
- r.has_set_default = self.has_set_default
+ r.has_set_default_ipv4 = self.has_set_default_ipv4
+ r.has_set_default_ipv6 = self.has_set_default_ipv6
return r
@property
@@ -119,10 +122,10 @@ class NetInterface(ConfigMap):
super(NetInterface, self).__init__()
self.children = []
self.routes = Route(iface_name, base_sysconf_dir)
- self._kind = kind
+ self.kind = kind
+
self._iface_name = iface_name
self._conf['DEVICE'] = iface_name
- self._conf['TYPE'] = self.iface_types[kind]
self._base_sysconf_dir = base_sysconf_dir
@property
@@ -140,6 +143,8 @@ class NetInterface(ConfigMap):
@kind.setter
def kind(self, kind):
+ if kind not in self.iface_types:
+ raise ValueError(kind)
self._kind = kind
self._conf['TYPE'] = self.iface_types[kind]
@@ -173,7 +178,7 @@ class Renderer(renderer.Renderer):
('BOOTPROTO', 'none'),
])
- # If these keys exist, then there values will be used to form
+ # If these keys exist, then their values will be used to form
# a BONDING_OPTS grouping; otherwise no grouping will be set.
bond_tpl_opts = tuple([
('bond_mode', "mode=%s"),
@@ -190,7 +195,7 @@ class Renderer(renderer.Renderer):
def __init__(self, config=None):
if not config:
config = {}
- self.sysconf_dir = config.get('sysconf_dir', 'etc/sysconfig/')
+ self.sysconf_dir = config.get('sysconf_dir', 'etc/sysconfig')
self.netrules_path = config.get(
'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
self.dns_path = config.get('dns_path', 'etc/resolv.conf')
@@ -199,6 +204,7 @@ class Renderer(renderer.Renderer):
def _render_iface_shared(cls, iface, iface_cfg):
for k, v in cls.iface_defaults:
iface_cfg[k] = v
+
for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]:
old_value = iface.get(old_key)
if old_value is not None:
@@ -215,7 +221,7 @@ class Renderer(renderer.Renderer):
iface_cfg['BOOTPROTO'] = 'dhcp'
elif subnet_type == 'static':
iface_cfg['BOOTPROTO'] = 'static'
- if subnet.get('ipv6'):
+ if subnet_is_ipv6(subnet):
iface_cfg['IPV6ADDR'] = subnet['address']
iface_cfg['IPV6INIT'] = True
else:
@@ -227,10 +233,20 @@ class Renderer(renderer.Renderer):
if 'netmask' in subnet:
iface_cfg['NETMASK'] = subnet['netmask']
for route in subnet.get('routes', []):
+ if subnet.get('ipv6'):
+ gw_cfg = 'IPV6_DEFAULTGW'
+ else:
+ gw_cfg = 'GATEWAY'
+
if _is_default_route(route):
- if route_cfg.has_set_default:
- raise ValueError("Duplicate declaration of default"
- " route found for interface '%s'"
+ if (
+ (subnet.get('ipv4') and
+ route_cfg.has_set_default_ipv4) or
+ (subnet.get('ipv6') and
+ route_cfg.has_set_default_ipv6)
+ ):
+ raise ValueError("Duplicate declaration of default "
+ "route found for interface '%s'"
% (iface_cfg.name))
# NOTE(harlowja): ipv6 and ipv4 default gateways
gw_key = 'GATEWAY0'
@@ -242,7 +258,7 @@ class Renderer(renderer.Renderer):
# also provided the default route?
iface_cfg['DEFROUTE'] = True
if 'gateway' in route:
- iface_cfg['GATEWAY'] = route['gateway']
+ iface_cfg[gw_cfg] = route['gateway']
route_cfg.has_set_default = True
else:
gw_key = 'GATEWAY%s' % route_cfg.last_idx
@@ -353,6 +369,8 @@ class Renderer(renderer.Renderer):
'''Given state, return /etc/sysconfig files + contents'''
iface_contents = {}
for iface in network_state.iter_interfaces():
+ if iface['type'] == "loopback":
+ continue
iface_name = iface['name']
iface_cfg = NetInterface(iface_name, base_sysconf_dir)
cls._render_iface_shared(iface, iface_cfg)
@@ -372,19 +390,45 @@ class Renderer(renderer.Renderer):
contents[iface_cfg.routes.path] = iface_cfg.routes.to_string()
return contents
- def render_network_state(self, target, network_state):
- base_sysconf_dir = os.path.join(target, self.sysconf_dir)
+ def render_network_state(self, network_state, target=None):
+ file_mode = 0o644
+ base_sysconf_dir = util.target_path(target, self.sysconf_dir)
for path, data in self._render_sysconfig(base_sysconf_dir,
network_state).items():
- util.write_file(path, data)
+ util.write_file(path, data, file_mode)
if self.dns_path:
- dns_path = os.path.join(target, self.dns_path)
+ dns_path = util.target_path(target, self.dns_path)
resolv_content = self._render_dns(network_state,
existing_dns_path=dns_path)
- util.write_file(dns_path, resolv_content)
+ util.write_file(dns_path, resolv_content, file_mode)
if self.netrules_path:
netrules_content = self._render_persistent_net(network_state)
- netrules_path = os.path.join(target, self.netrules_path)
- util.write_file(netrules_path, netrules_content)
+ netrules_path = util.target_path(target, self.netrules_path)
+ util.write_file(netrules_path, netrules_content, file_mode)
+
+ # always write /etc/sysconfig/network configuration
+ sysconfig_path = util.target_path(target, "etc/sysconfig/network")
+ netcfg = [_make_header(), 'NETWORKING=yes']
+ if network_state.use_ipv6:
+ netcfg.append('NETWORKING_IPV6=yes')
+ netcfg.append('IPV6_AUTOCONF=no')
+ util.write_file(sysconfig_path, "\n".join(netcfg) + "\n", file_mode)
+
+
+def available(target=None):
+ expected = ['ifup', 'ifdown']
+ search = ['/sbin', '/usr/sbin']
+ for p in expected:
+ if not util.which(p, search=search, target=target):
+ return False
+
+ expected_paths = [
+ 'etc/sysconfig/network-scripts/network-functions',
+ 'etc/sysconfig/network-scripts/ifdown-eth']
+ for p in expected_paths:
+ if not os.path.isfile(util.target_path(target, p)):
+ return False
+ return True
+
# vi: ts=4 expandtab
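For reference, a minimal sketch (not part of the patch) of how a caller would drive the sysconfig renderer after this change, given the new render_network_state(network_state, target=None) argument order; the config dict and target directory below are made-up examples:

    # Sketch: render a parsed v1 network config into a target root.
    from cloudinit.net import network_state, sysconfig

    cfg = {'version': 1,
           'config': [{'type': 'physical', 'name': 'eth0',
                       'subnets': [{'type': 'dhcp'}]}]}
    ns = network_state.parse_net_config_data(cfg)
    renderer = sysconfig.Renderer()
    # state first, optional target (chroot-style prefix) second
    renderer.render_network_state(ns, target='/tmp/render-root')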
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 692ff5e5..dbafead5 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -46,6 +46,7 @@ CFG_BUILTIN = {
'templates_dir': '/etc/cloud/templates/',
},
'distro': 'ubuntu',
+ 'network': {'renderers': None},
},
'vendor_data': {'enabled': True, 'prefix': []},
}
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index c2b0eac2..8528fa10 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -201,8 +201,7 @@ class DataSourceAltCloud(sources.DataSource):
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
return False
except OSError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
- _err.message)
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
return False
try:
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index c5af8b84..48a3e1df 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -111,50 +111,62 @@ class DataSourceAzureNet(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
- def get_metadata_from_agent(self):
- temp_hostname = self.metadata.get('local-hostname')
+ def bounce_network_with_azure_hostname(self):
+ # When using cloud-init to provision, we have to set the hostname from
+ # the metadata and "bounce" the network to force DDNS to update via
+ # dhclient
+ azure_hostname = self.metadata.get('local-hostname')
+ LOG.debug("Hostname in metadata is {}".format(azure_hostname))
hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
- agent_cmd = self.ds_cfg['agent_command']
- LOG.debug("Getting metadata via agent. hostname=%s cmd=%s",
- temp_hostname, agent_cmd)
- with temporary_hostname(temp_hostname, self.ds_cfg,
+
+ with temporary_hostname(azure_hostname, self.ds_cfg,
hostname_command=hostname_command) \
as previous_hostname:
if (previous_hostname is not None and
- util.is_true(self.ds_cfg.get('set_hostname'))):
+ util.is_true(self.ds_cfg.get('set_hostname'))):
cfg = self.ds_cfg['hostname_bounce']
+
+ # "Bouncing" the network
try:
- perform_hostname_bounce(hostname=temp_hostname,
+ perform_hostname_bounce(hostname=azure_hostname,
cfg=cfg,
prev_hostname=previous_hostname)
except Exception as e:
LOG.warn("Failed publishing hostname: %s", e)
util.logexc(LOG, "handling set_hostname failed")
- try:
- invoke_agent(agent_cmd)
- except util.ProcessExecutionError:
- # claim the datasource even if the command failed
- util.logexc(LOG, "agent command '%s' failed.",
- self.ds_cfg['agent_command'])
-
- ddir = self.ds_cfg['data_dir']
-
- fp_files = []
- key_value = None
- for pk in self.cfg.get('_pubkeys', []):
- if pk.get('value', None):
- key_value = pk['value']
- LOG.debug("ssh authentication: using value from fabric")
- else:
- bname = str(pk['fingerprint'] + ".crt")
- fp_files += [os.path.join(ddir, bname)]
- LOG.debug("ssh authentication: "
- "using fingerprint from fabirc")
-
- missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
- func=wait_for_files,
- args=(fp_files,))
+ def get_metadata_from_agent(self):
+ temp_hostname = self.metadata.get('local-hostname')
+ agent_cmd = self.ds_cfg['agent_command']
+ LOG.debug("Getting metadata via agent. hostname=%s cmd=%s",
+ temp_hostname, agent_cmd)
+
+ self.bounce_network_with_azure_hostname()
+
+ try:
+ invoke_agent(agent_cmd)
+ except util.ProcessExecutionError:
+ # claim the datasource even if the command failed
+ util.logexc(LOG, "agent command '%s' failed.",
+ self.ds_cfg['agent_command'])
+
+ ddir = self.ds_cfg['data_dir']
+
+ fp_files = []
+ key_value = None
+ for pk in self.cfg.get('_pubkeys', []):
+ if pk.get('value', None):
+ key_value = pk['value']
+ LOG.debug("ssh authentication: using value from fabric")
+ else:
+ bname = str(pk['fingerprint'] + ".crt")
+ fp_files += [os.path.join(ddir, bname)]
+ LOG.debug("ssh authentication: "
+ "using fingerprint from fabirc")
+
+ missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+ func=wait_for_files,
+ args=(fp_files,))
if len(missing):
LOG.warn("Did not find files, but going on: %s", missing)
@@ -220,6 +232,8 @@ class DataSourceAzureNet(sources.DataSource):
write_files(ddir, files, dirmode=0o700)
if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
+ self.bounce_network_with_azure_hostname()
+
metadata_func = partial(get_metadata_from_fabric,
fallback_lease_file=self.
dhclient_lease_file)
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index 5ffdcb25..d7fcd45a 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -27,7 +27,7 @@ class DataSourceBigstep(sources.DataSource):
if url is None:
return False
response = url_helper.readurl(url)
- decoded = json.loads(response.contents)
+ decoded = json.loads(response.contents.decode())
self.metadata = decoded["metadata"]
self.vendordata_raw = decoded["vendordata_raw"]
self.userdata_raw = decoded["userdata_raw"]
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 8a448dc9..46dd89e0 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -54,13 +54,16 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
found = None
md = {}
results = {}
- if os.path.isdir(self.seed_dir):
+ for sdir in (self.seed_dir, "/config-drive"):
+ if not os.path.isdir(sdir):
+ continue
try:
- results = read_config_drive(self.seed_dir)
- found = self.seed_dir
+ results = read_config_drive(sdir)
+ found = sdir
+ break
except openstack.NonReadable:
- util.logexc(LOG, "Failed reading config drive from %s",
- self.seed_dir)
+ util.logexc(LOG, "Failed reading config drive from %s", sdir)
+
if not found:
for dev in find_candidate_devs():
try:
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index b1a1c8f2..637c9505 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -62,6 +62,9 @@ class DataSourceGCE(sources.DataSource):
return public_key
def get_data(self):
+ if not platform_reports_gce():
+ return False
+
# url_map: (our-key, path, required, is_text)
url_map = [
('instance-id', ('instance/id',), True, True),
@@ -144,6 +147,21 @@ class DataSourceGCE(sources.DataSource):
return self.availability_zone.rsplit('-', 1)[0]
+def platform_reports_gce():
+ pname = util.read_dmi_data('system-product-name') or "N/A"
+ if pname == "Google Compute Engine":
+ return True
+
+ # system-product-name is not always guaranteed (LP: #1674861)
+ serial = util.read_dmi_data('system-serial-number') or "N/A"
+ if serial.startswith("GoogleCloud-"):
+ return True
+
+ LOG.debug("Not running on google cloud. product-name=%s serial=%s",
+ pname, serial)
+ return False
+
+
# Used to match classes to dependencies
datasources = [
(DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
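A rough illustration (not from the patch) of the DMI check that platform_reports_gce() performs; reading /sys/class/dmi/id directly is an assumption for this sketch, whereas cloud-init itself goes through util.read_dmi_data:

    def looks_like_gce():
        # Compare the DMI product name / serial against the values GCE exposes.
        def read_dmi(name):
            try:
                with open('/sys/class/dmi/id/' + name) as f:
                    return f.read().strip()
            except OSError:
                return ''
        product = read_dmi('product_name')
        serial = read_dmi('product_serial')
        return (product == 'Google Compute Engine' or
                serial.startswith('GoogleCloud-'))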
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 1f1baf46..cd75e6ea 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -286,12 +286,12 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
output = output[0:-1] # remove trailing null
# go through output. First _start_ is for 'preset', second for 'target'.
- # Add to target only things were changed and not in volitile
+ # Add to ret only things that were changed and not in excluded.
for line in output.split("\x00"):
try:
(key, val) = line.split("=", 1)
if target is preset:
- target[key] = val
+ preset[key] = val
elif (key not in excluded and
(key in keylist_in or preset.get(key) != val)):
ret[key] = val
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 3d01072f..5c99437e 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -50,7 +50,7 @@ class DataSource(object):
self.distro = distro
self.paths = paths
self.userdata = None
- self.metadata = None
+ self.metadata = {}
self.userdata_raw = None
self.vendordata = None
self.vendordata_raw = None
@@ -210,7 +210,7 @@ class DataSource(object):
else:
hostname = toks[0]
- if fqdn:
+ if fqdn and domain != defdomain:
return "%s.%s" % (hostname, domain)
else:
return hostname
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 096062d5..61cd36bd 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -52,6 +52,7 @@ OS_VERSIONS = (
PHYSICAL_TYPES = (
None,
'bridge',
+ 'dvs',
'ethernet',
'hw_veb',
'hyperv',
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 5bed9032..12165433 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -646,9 +646,13 @@ class Init(object):
src, bring_up, netcfg)
try:
return self.distro.apply_network_config(netcfg, bring_up=bring_up)
+ except net.RendererNotFoundError as e:
+ LOG.error("Unable to render networking. Network config is "
+ "likely broken: %s", e)
+ return
except NotImplementedError:
LOG.warn("distro '%s' does not implement apply_network_config. "
- "networking may not be configured properly." %
+ "networking may not be configured properly.",
self.distro)
return
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 312b0460..2f6a158e 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -45,7 +45,7 @@ try:
from distutils.version import LooseVersion
import pkg_resources
_REQ = pkg_resources.get_distribution('requests')
- _REQ_VER = LooseVersion(_REQ.version)
+ _REQ_VER = LooseVersion(_REQ.version) # pylint: disable=no-member
if _REQ_VER >= LooseVersion('0.8.8'):
SSL_ENABLED = True
if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'):
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 7196a7ca..17abdf81 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2099,21 +2099,36 @@ def get_mount_info(path, log=LOG):
return parse_mount(path)
-def which(program):
- # Return path of program for execution if found in path
- def is_exe(fpath):
- return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
-
- _fpath, _ = os.path.split(program)
- if _fpath:
- if is_exe(program):
+def is_exe(fpath):
+ # return boolean indicating if fpath exists and is executable.
+ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+
+def which(program, search=None, target=None):
+ target = target_path(target)
+
+ if os.path.sep in program:
+ # if program had a '/' in it, then do not search PATH
+ # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls
+ # so effectively we set cwd to / (or target)
+ if is_exe(target_path(target, program)):
return program
- else:
- for path in os.environ.get("PATH", "").split(os.pathsep):
- path = path.strip('"')
- exe_file = os.path.join(path, program)
- if is_exe(exe_file):
- return exe_file
+
+ if search is None:
+ paths = [p.strip('"') for p in
+ os.environ.get("PATH", "").split(os.pathsep)]
+ if target == "/":
+ search = paths
+ else:
+ search = [p for p in paths if p.startswith("/")]
+
+ # normalize path input
+ search = [os.path.abspath(p) for p in search]
+
+ for path in search:
+ ppath = os.path.sep.join((path, program))
+ if is_exe(target_path(target, ppath)):
+ return ppath
return None
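Hypothetical calls showing how the extended which() above behaves (the program name and paths are examples only):

    from cloudinit import util

    util.which('ifup')                                   # search $PATH, as before
    util.which('ifup', search=['/sbin', '/usr/sbin'])    # restrict the search list
    util.which('ifup', search=['/sbin'], target='/mnt')  # check inside a target root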
@@ -2358,4 +2373,42 @@ def system_is_snappy():
return True
return False
+
+def indent(text, prefix):
+ """replacement for indent from textwrap that is not available in 2.7."""
+ lines = []
+ for line in text.splitlines(True):
+ lines.append(prefix + line)
+ return ''.join(lines)
+
+
+def rootdev_from_cmdline(cmdline):
+ found = None
+ for tok in cmdline.split():
+ if tok.startswith("root="):
+ found = tok[5:]
+ break
+ if found is None:
+ return None
+
+ if found.startswith("/dev/"):
+ return found
+ if found.startswith("LABEL="):
+ return "/dev/disk/by-label/" + found[len("LABEL="):]
+ if found.startswith("UUID="):
+ return "/dev/disk/by-uuid/" + found[len("UUID="):]
+ if found.startswith("PARTUUID="):
+ disks_path = "/dev/disk/by-partuuid/" + found[len("PARTUUID="):]
+ if os.path.exists(disks_path):
+ return disks_path
+ results = find_devs_with(found)
+ if results:
+ return results[0]
+ # we know this doesn't exist, but for consistency return the path as
+ # it /would/ exist
+ return disks_path
+
+ return "/dev/" + found
+
+
# vi: ts=4 expandtab
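To make the new rootdev_from_cmdline() behaviour concrete, a few hypothetical inputs and the values it would return (the PARTUUID case is omitted because it depends on what exists on disk):

    from cloudinit import util

    util.rootdev_from_cmdline('ro root=/dev/vda1')
    #  -> '/dev/vda1'
    util.rootdev_from_cmdline('root=LABEL=cloudimg-rootfs quiet')
    #  -> '/dev/disk/by-label/cloudimg-rootfs'
    util.rootdev_from_cmdline('root=UUID=1234-abcd')
    #  -> '/dev/disk/by-uuid/1234-abcd'
    util.rootdev_from_cmdline('console=ttyS0')
    #  -> None (no root= token on the command line)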
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 92bace1a..dff4af04 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -6,6 +6,13 @@
__VERSION__ = "0.7.9"
+FEATURES = [
+ # supports network config version 1
+ 'NETWORK_CONFIG_V1',
+ # supports network config version 2 (netplan)
+ 'NETWORK_CONFIG_V2',
+]
+
def version_string():
return __VERSION__
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index c03f1026..bd84c641 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -426,14 +426,21 @@ syslog_fix_perms: syslog:root
#
# there is also an option to set multiple users passwords, using 'chpasswd'
# That looks like the following, with 'expire' set to 'True' by default.
-# to not expire users passwords, set 'expire' to 'False':
+# to avoid expiring users' passwords, set 'expire' to 'False'. It is also
+# possible to set a hashed password; here, account 'user3' has its password
+# set to 'cloud-init', hashed with SHA-256:
# chpasswd:
# list: |
# user1:password1
# user2:RANDOM
+# user3:$5$eriogqzq$Dg7PxHsKGzziuEGkZgkLvacjuEFeljJ.rLf.hZqKQLA
# expire: True
# ssh_pwauth: [ True, False, "" or "unchanged" ]
#
+# Hashed passwords can be generated in multiple ways, example with python3:
+# python3 -c 'import crypt,getpass; print(crypt.crypt(getpass.getpass(), crypt.mksalt(crypt.METHOD_SHA512)))'
+# Newer versions of 'mkpasswd' will also work: mkpasswd -m sha-512 password
+#
# So, a simple working example to allow login via ssh, and not expire
# for the default user would look like:
password: passw0rd
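The one-liner documented above, expanded into a small sketch that builds a chpasswd list entry ('cloud-init' is just an example plaintext; the salt is freshly generated on each run, so the resulting hash will differ):

    import crypt

    plaintext = 'cloud-init'
    hashed = crypt.crypt(plaintext, crypt.mksalt(crypt.METHOD_SHA512))
    print('user3:%s' % hashed)   # paste the result into 'chpasswd: list:'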
diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst
index be0802c5..2c8770bd 100644
--- a/doc/rtd/topics/capabilities.rst
+++ b/doc/rtd/topics/capabilities.rst
@@ -3,10 +3,11 @@ Capabilities
************
- Setting a default locale
-- Setting a instance hostname
-- Generating instance ssh private keys
-- Adding ssh keys to a users ``.ssh/authorized_keys`` so they can log in
+- Setting an instance hostname
+- Generating instance SSH private keys
+- Adding SSH keys to a user's ``.ssh/authorized_keys`` so they can log in
- Setting up ephemeral mount points
+- Configuring network devices
User configurability
====================
@@ -22,5 +23,27 @@ ec2-run-instances for example.
string or `user-data` file for usage by cloud-init on instance creation.
+Feature detection
+=================
+
+Newer versions of cloud-init may have a list of additional features that they
+support. This allows other applications to detect what features the installed
+cloud-init supports without having to parse its version number. If present,
+this list of features will be located at ``cloudinit.version.FEATURES``.
+
+When checking whether cloud-init supports a feature, use a script similar to
+the following so that the check does not break on older versions of cloud-init
+that lack the features list. Note that this will exit 0 if the feature is
+supported and 1 otherwise::
+
+ import sys
+ from cloudinit import version
+ sys.exit('<FEATURE_NAME>' not in getattr(version, 'FEATURES', []))
+
+Currently defined feature names include:
+
+ - ``NETWORK_CONFIG_V1`` support for v1 networking configuration; see the
+ curtin documentation for examples.
+
.. _Cloud-init: https://launchpad.net/cloud-init
.. vi: textwidth=78
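As a companion to the exit-code check shown in the documentation above, a sketch that simply prints the advertised feature list (empty on versions of cloud-init that predate FEATURES):

    from cloudinit import version

    features = getattr(version, 'FEATURES', [])
    print('\n'.join(features) if features else 'no FEATURES attribute')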
diff --git a/doc/rtd/topics/datasources/configdrive.rst b/doc/rtd/topics/datasources/configdrive.rst
index acdab6a2..11dd52ab 100644
--- a/doc/rtd/topics/datasources/configdrive.rst
+++ b/doc/rtd/topics/datasources/configdrive.rst
@@ -18,12 +18,13 @@ support listed below)
Version 1
---------
+**Note:** Version 1 is legacy and should be considered deprecated. Version 2
+has been supported in OpenStack since 2012.2 (Folsom).
The following criteria are required to as a config drive:
1. Must be formatted with `vfat`_ filesystem
-2. Must be a un-partitioned block device (/dev/vdb, not /dev/vdb1)
-3. Must contain *one* of the following files
+2. Must contain *one* of the following files
::
@@ -56,8 +57,7 @@ The following criteria are required to as a config drive:
1. Must be formatted with `vfat`_ or `iso9660`_ filesystem
or have a *filesystem* label of **config-2**
-2. Must be a un-partitioned block device (/dev/vdb, not /dev/vdb1)
-3. The files that will typically be present in the config drive are:
+2. The files that will typically be present in the config drive are:
::
diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
index 00c63c63..0663811e 100644
--- a/doc/rtd/topics/tests.rst
+++ b/doc/rtd/topics/tests.rst
@@ -238,6 +238,20 @@ without the more lengthy collect process. This can be done by running:
The above command will run the verify scripts on the data discovered in
`/tmp/collection`.
+Run via tox
+-----------
+To avoid the need for extra dependencies and to ease setup and configuration,
+users can run the integration tests via tox:
+
+.. code-block:: bash
+
+ $ tox -e citest -- run [integration test arguments]
+ $ tox -e citest -- run -v -n zesty --deb=cloud-init_all.deb
+ $ tox -e citest -- run -t module/user_groups.yaml
+
+Users need to invoke the citest environment and then pass any additional
+arguments.
+
Architecture
============
diff --git a/systemd/cloud-init.service b/systemd/cloud-init.service
index fb3b918c..39acc20a 100644
--- a/systemd/cloud-init.service
+++ b/systemd/cloud-init.service
@@ -5,6 +5,7 @@ Wants=cloud-init-local.service
Wants=sshd-keygen.service
Wants=sshd.service
After=cloud-init-local.service
+After=systemd-networkd-wait-online.service
After=networking.service
Before=network-online.target
Before=sshd-keygen.service
diff --git a/tests/cloud_tests/configs/modules/set_password_list.yaml b/tests/cloud_tests/configs/modules/set_password_list.yaml
index 36129047..a2a89c9d 100644
--- a/tests/cloud_tests/configs/modules/set_password_list.yaml
+++ b/tests/cloud_tests/configs/modules/set_password_list.yaml
@@ -6,22 +6,29 @@ cloud_config: |
ssh_pwauth: yes
users:
- name: tom
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
+ # md5 gotomgo
+ passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0"
lock_passwd: false
- name: dick
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
+ # md5 gocubsgo
+ passwd: "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
lock_passwd: false
- name: harry
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
+ # sha512 goharrygo
+ passwd: "$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsGJEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/"
lock_passwd: false
- name: jane
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
+ # sha256 gojanego
+ passwd: "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
+ lock_passwd: false
+ - name: "mikey"
lock_passwd: false
chpasswd:
- list: |
- tom:mypassword123!
- dick:R
- harry:Random
+ list:
+ - tom:mypassword123!
+ - dick:RANDOM
+ - harry:RANDOM
+ - mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
collect_scripts:
shadow: |
#!/bin/bash
diff --git a/tests/cloud_tests/configs/modules/set_password_list_string.yaml b/tests/cloud_tests/configs/modules/set_password_list_string.yaml
new file mode 100644
index 00000000..c2a0f631
--- /dev/null
+++ b/tests/cloud_tests/configs/modules/set_password_list_string.yaml
@@ -0,0 +1,40 @@
+#
+# Set password of list of users as a string
+#
+cloud_config: |
+ #cloud-config
+ ssh_pwauth: yes
+ users:
+ - name: tom
+ # md5 gotomgo
+ passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0"
+ lock_passwd: false
+ - name: dick
+ # md5 gocubsgo
+ passwd: "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
+ lock_passwd: false
+ - name: harry
+ # sha512 goharrygo
+ passwd: "$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsGJEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/"
+ lock_passwd: false
+ - name: jane
+ # sha256 gojanego
+ passwd: "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
+ lock_passwd: false
+ - name: "mikey"
+ lock_passwd: false
+ chpasswd:
+ list: |
+ tom:mypassword123!
+ dick:RANDOM
+ harry:RANDOM
+ mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
+collect_scripts:
+ shadow: |
+ #!/bin/bash
+ cat /etc/shadow
+ sshd_config: |
+ #!/bin/bash
+ grep '^PasswordAuth' /etc/ssh/sshd_config
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/configs/modules/timezone.yaml b/tests/cloud_tests/configs/modules/timezone.yaml
index 6a05aba1..8c96ed47 100644
--- a/tests/cloud_tests/configs/modules/timezone.yaml
+++ b/tests/cloud_tests/configs/modules/timezone.yaml
@@ -7,6 +7,8 @@ cloud_config: |
collect_scripts:
timezone: |
#!/bin/bash
- date +%Z
+ # date will convert this to the system's configured time zone.
+ # use a static date to avoid dealing with daylight saving time.
+ date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py
index 182c090a..a1d86d45 100644
--- a/tests/cloud_tests/testcases/__init__.py
+++ b/tests/cloud_tests/testcases/__init__.py
@@ -21,7 +21,7 @@ def discover_tests(test_name):
raise ValueError('no test verifier found at: {}'.format(testmod_name))
return [mod for name, mod in inspect.getmembers(testmod)
- if inspect.isclass(mod) and base_test in mod.__bases__ and
+ if inspect.isclass(mod) and base_test in inspect.getmro(mod) and
getattr(mod, '__test__', True)]
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index 5395b9a3..64d5507a 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -2,6 +2,7 @@
from cloudinit import util as c_util
+import crypt
import json
import unittest
@@ -14,6 +15,9 @@ class CloudTestCase(unittest.TestCase):
conf = None
_cloud_config = None
+ def shortDescription(self):
+ return None
+
@property
def cloud_config(self):
"""
@@ -78,4 +82,56 @@ class CloudTestCase(unittest.TestCase):
result = self.get_status_data(self.get_data_file('result.json'))
self.assertEqual(len(result['errors']), 0)
+
+class PasswordListTest(CloudTestCase):
+ def test_shadow_passwords(self):
+ shadow = self.get_data_file('shadow')
+ users = {}
+ dupes = []
+ for line in shadow.splitlines():
+ user, encpw = line.split(":")[0:2]
+ if user in users:
+ dupes.append(user)
+ users[user] = encpw
+
+ jane_enc = "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
+ self.assertEqual([], dupes)
+ self.assertEqual(jane_enc, users['jane'])
+
+ mikey_enc = "$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89"
+ self.assertEqual(mikey_enc, users['mikey'])
+
+ # shadow entry is $N$salt$, so we encrypt with the same format
+ # and salt and expect the result.
+ tom = "mypassword123!"
+ fmtsalt = users['tom'][0:users['tom'].rfind("$") + 1]
+ tom_enc = crypt.crypt(tom, fmtsalt)
+ self.assertEqual(tom_enc, users['tom'])
+
+ harry_enc = ("$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsG"
+ "JEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/")
+ dick_enc = "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
+
+ # these should have been changed to random values.
+ self.assertNotEqual(harry_enc, users['harry'])
+ self.assertTrue(users['harry'].startswith("$"))
+ self.assertNotEqual(dick_enc, users['dick'])
+ self.assertTrue(users['dick'].startswith("$"))
+
+ self.assertNotEqual(users['harry'], users['dick'])
+
+ def test_shadow_expected_users(self):
+ """Test every tom, dick, and harry user in shadow"""
+ out = self.get_data_file('shadow')
+ self.assertIn('tom:', out)
+ self.assertIn('dick:', out)
+ self.assertIn('harry:', out)
+ self.assertIn('jane:', out)
+ self.assertIn('mikey:', out)
+
+ def test_sshd_config(self):
+ """Test sshd config allows passwords"""
+ out = self.get_data_file('sshd_config')
+ self.assertIn('PasswordAuthentication yes', out)
+
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list.py b/tests/cloud_tests/testcases/modules/set_password_list.py
index b764362f..6819d259 100644
--- a/tests/cloud_tests/testcases/modules/set_password_list.py
+++ b/tests/cloud_tests/testcases/modules/set_password_list.py
@@ -4,22 +4,8 @@
from tests.cloud_tests.testcases import base
-class TestPasswordList(base.CloudTestCase):
- """Test password module"""
-
- # TODO: Verify dick and harry passwords are random
- # TODO: Verify tom's password was changed
-
- def test_shadow(self):
- """Test every tom, dick, and harry user in shadow"""
- out = self.get_data_file('shadow')
- self.assertIn('tom:', out)
- self.assertIn('dick:', out)
- self.assertIn('harry:', out)
-
- def test_sshd_config(self):
- """Test sshd config allows passwords"""
- out = self.get_data_file('sshd_config')
- self.assertIn('PasswordAuthentication yes', out)
+class TestPasswordList(base.PasswordListTest, base.CloudTestCase):
+ """Test password setting via list in chpasswd/list"""
+ __test__ = True
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list_string.py b/tests/cloud_tests/testcases/modules/set_password_list_string.py
new file mode 100644
index 00000000..2c34fada
--- /dev/null
+++ b/tests/cloud_tests/testcases/modules/set_password_list_string.py
@@ -0,0 +1,11 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""cloud-init Integration Test Verify Script"""
+from tests.cloud_tests.testcases import base
+
+
+class TestPasswordListString(base.PasswordListTest, base.CloudTestCase):
+ """Test password setting via string in chpasswd/list"""
+ __test__ = True
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/timezone.py b/tests/cloud_tests/testcases/modules/timezone.py
index 272c266f..bf91d490 100644
--- a/tests/cloud_tests/testcases/modules/timezone.py
+++ b/tests/cloud_tests/testcases/modules/timezone.py
@@ -10,6 +10,6 @@ class TestTimezone(base.CloudTestCase):
def test_timezone(self):
"""Test date prints correct timezone"""
out = self.get_data_file('timezone')
- self.assertIn('HST', out)
+ self.assertEqual('HDT', out.rstrip())
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 55153357..337be667 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -645,7 +645,7 @@ class TestConvertNetworkData(TestCase):
routes)
eni_renderer = eni.Renderer()
eni_renderer.render_network_state(
- self.tmp, network_state.parse_net_config_data(ncfg))
+ network_state.parse_net_config_data(ncfg), self.tmp)
with open(os.path.join(self.tmp, "etc",
"network", "interfaces"), 'r') as f:
eni_rendering = f.read()
@@ -665,8 +665,9 @@ class TestConvertNetworkData(TestCase):
ncfg = openstack.convert_net_json(NETWORK_DATA_BOND,
known_macs=KNOWN_MACS)
eni_renderer = eni.Renderer()
+
eni_renderer.render_network_state(
- self.tmp, network_state.parse_net_config_data(ncfg))
+ network_state.parse_net_config_data(ncfg), self.tmp)
with open(os.path.join(self.tmp, "etc",
"network", "interfaces"), 'r') as f:
eni_rendering = f.read()
@@ -697,7 +698,7 @@ class TestConvertNetworkData(TestCase):
known_macs=KNOWN_MACS)
eni_renderer = eni.Renderer()
eni_renderer.render_network_state(
- self.tmp, network_state.parse_net_config_data(ncfg))
+ network_state.parse_net_config_data(ncfg), self.tmp)
with open(os.path.join(self.tmp, "etc",
"network", "interfaces"), 'r') as f:
eni_rendering = f.read()
diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py
index 9be6bc19..61d6e001 100644
--- a/tests/unittests/test_datasource/test_digitalocean.py
+++ b/tests/unittests/test_datasource/test_digitalocean.py
@@ -194,7 +194,12 @@ class TestDataSourceDigitalOcean(TestCase):
class TestNetworkConvert(TestCase):
- def _get_networking(self):
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ def _get_networking(self, m_get_by_mac):
+ m_get_by_mac.return_value = {
+ '04:01:57:d1:9e:01': 'ens1', '04:01:57:d1:9e:02': 'ens2',
+ 'b8:ae:ed:75:5f:9a': 'enp0s25',
+ 'ae:cc:08:7c:88:00': 'meta2p1'}
netcfg = digitalocean.convert_network_configuration(
DO_META['interfaces'], DO_META['dns']['nameservers'])
self.assertIn('config', netcfg)
@@ -302,10 +307,15 @@ class TestNetworkConvert(TestCase):
self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
self.assertNotIn('gateway', subn_def)
- def test_convert_without_private(self):
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ def test_convert_without_private(self, m_get_by_mac):
+ m_get_by_mac.return_value = {
+ 'b8:ae:ed:75:5f:9a': 'enp0s25',
+ 'ae:cc:08:7c:88:00': 'meta2p1'}
netcfg = digitalocean.convert_network_configuration(
DO_META_2['interfaces'], DO_META_2['dns']['nameservers'])
+ # print(netcfg)
byname = {}
for i in netcfg['config']:
if 'name' in i:
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 4f83454e..3eaa58e3 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -5,6 +5,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import httpretty
+import mock
import re
from base64 import b64encode, b64decode
@@ -71,6 +72,11 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.ds = DataSourceGCE.DataSourceGCE(
settings.CFG_BUILTIN, None,
helpers.Paths({}))
+ self.m_platform_reports_gce = mock.patch(
+ 'cloudinit.sources.DataSourceGCE.platform_reports_gce',
+ return_value=True)
+ self.m_platform_reports_gce.start()
+ self.addCleanup(self.m_platform_reports_gce.stop)
super(TestDataSourceGCE, self).setUp()
def test_connection(self):
@@ -153,7 +159,13 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
def test_only_last_part_of_zone_used_for_availability_zone(self):
_set_mock_metadata()
- self.ds.get_data()
+ r = self.ds.get_data()
+ self.assertEqual(True, r)
self.assertEqual('bar', self.ds.availability_zone)
+ def test_get_data_returns_false_if_not_on_gce(self):
+ self.m_platform_reports_gce.return_value = False
+ self.assertEqual(False, self.ds.get_data())
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index a266e952..bce66125 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -195,7 +195,9 @@ class TestOpenNebulaDataSource(TestCase):
self.assertTrue('userdata' in results)
self.assertEqual(USER_DATA, results['userdata'])
- def test_hostname(self):
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_hostname(self, m_get_phys_by_mac):
+ m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'}
for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: PUBLIC_IP})
@@ -205,11 +207,14 @@ class TestOpenNebulaDataSource(TestCase):
self.assertTrue('local-hostname' in results['metadata'])
self.assertEqual(PUBLIC_IP, results['metadata']['local-hostname'])
- def test_network_interfaces(self):
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_network_interfaces(self, m_get_phys_by_mac):
+ m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'}
populate_context_dir(self.seed_dir, {'ETH0_IP': '1.2.3.4'})
results = ds.read_context_disk_dir(self.seed_dir)
self.assertTrue('network-interfaces' in results)
+ self.assertTrue('1.2.3.4' in results['network-interfaces'])
def test_find_candidates(self):
def my_devs_with(criteria):
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index bde3bb50..88370669 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -17,6 +17,7 @@ from ..helpers import TestCase
from cloudinit import distros
from cloudinit.distros.parsers.sys_conf import SysConf
from cloudinit import helpers
+from cloudinit.net import eni
from cloudinit import settings
from cloudinit import util
@@ -28,10 +29,10 @@ iface lo inet loopback
auto eth0
iface eth0 inet static
address 192.168.1.5
- netmask 255.255.255.0
- network 192.168.0.0
broadcast 192.168.1.0
gateway 192.168.1.254
+ netmask 255.255.255.0
+ network 192.168.0.0
auto eth1
iface eth1 inet dhcp
@@ -67,6 +68,100 @@ iface eth1 inet6 static
gateway 2607:f0d0:1002:0011::1
'''
+V1_NET_CFG = {'config': [{'name': 'eth0',
+
+ 'subnets': [{'address': '192.168.1.5',
+ 'broadcast': '192.168.1.0',
+ 'gateway': '192.168.1.254',
+ 'netmask': '255.255.255.0',
+ 'type': 'static'}],
+ 'type': 'physical'},
+ {'name': 'eth1',
+ 'subnets': [{'control': 'auto', 'type': 'dhcp4'}],
+ 'type': 'physical'}],
+ 'version': 1}
+
+V1_NET_CFG_OUTPUT = """
+# This file is generated from information provided by
+# the datasource. Changes to it will not persist across an instance.
+# To disable cloud-init's network configuration capabilities, write a file
+# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
+# network: {config: disabled}
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 192.168.1.5
+ broadcast 192.168.1.0
+ gateway 192.168.1.254
+ netmask 255.255.255.0
+
+auto eth1
+iface eth1 inet dhcp
+"""
+
+V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0',
+ 'subnets': [{'address':
+ '2607:f0d0:1002:0011::2',
+ 'gateway':
+ '2607:f0d0:1002:0011::1',
+ 'netmask': '64',
+ 'type': 'static'}],
+ 'type': 'physical'},
+ {'name': 'eth1',
+ 'subnets': [{'control': 'auto',
+ 'type': 'dhcp4'}],
+ 'type': 'physical'}],
+ 'version': 1}
+
+
+V1_TO_V2_NET_CFG_OUTPUT = """
+# This file is generated from information provided by
+# the datasource. Changes to it will not persist across an instance.
+# To disable cloud-init's network configuration capabilities, write a file
+# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
+# network: {config: disabled}
+network:
+ version: 2
+ ethernets:
+ eth0:
+ addresses:
+ - 192.168.1.5/255.255.255.0
+ gateway4: 192.168.1.254
+ eth1:
+ dhcp4: true
+"""
+
+V2_NET_CFG = {
+ 'ethernets': {
+ 'eth7': {
+ 'addresses': ['192.168.1.5/255.255.255.0'],
+ 'gateway4': '192.168.1.254'},
+ 'eth9': {
+ 'dhcp4': True}
+ },
+ 'version': 2
+}
+
+
+V2_TO_V2_NET_CFG_OUTPUT = """
+# This file is generated from information provided by
+# the datasource. Changes to it will not persist across an instance.
+# To disable cloud-init's network configuration capabilities, write a file
+# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
+# network: {config: disabled}
+network:
+ version: 2
+ ethernets:
+ eth7:
+ addresses:
+ - 192.168.1.5/255.255.255.0
+ gateway4: 192.168.1.254
+ eth9:
+ dhcp4: true
+"""
+
class WriteBuffer(object):
def __init__(self):
@@ -83,12 +178,14 @@ class WriteBuffer(object):
class TestNetCfgDistro(TestCase):
- def _get_distro(self, dname):
+ def _get_distro(self, dname, renderers=None):
cls = distros.fetch(dname)
cfg = settings.CFG_BUILTIN
cfg['system_info']['distro'] = dname
+ if renderers:
+ cfg['system_info']['network'] = {'renderers': renderers}
paths = helpers.Paths({})
- return cls(dname, cfg, paths)
+ return cls(dname, cfg.get('system_info'), paths)
def test_simple_write_ub(self):
ub_distro = self._get_distro('ubuntu')
@@ -116,6 +213,110 @@ class TestNetCfgDistro(TestCase):
self.assertEqual(str(write_buf).strip(), BASE_NET_CFG.strip())
self.assertEqual(write_buf.mode, 0o644)
+ def test_apply_network_config_eni_ub(self):
+ ub_distro = self._get_distro('ubuntu')
+ with ExitStack() as mocks:
+ write_bufs = {}
+
+ def replace_write(filename, content, mode=0o644, omode="wb"):
+ buf = WriteBuffer()
+ buf.mode = mode
+ buf.omode = omode
+ buf.write(content)
+ write_bufs[filename] = buf
+
+ # eni availability checks
+ mocks.enter_context(
+ mock.patch.object(util, 'which', return_value=True))
+ mocks.enter_context(
+ mock.patch.object(eni, 'available', return_value=True))
+ mocks.enter_context(
+ mock.patch.object(util, 'ensure_dir'))
+ mocks.enter_context(
+ mock.patch.object(util, 'write_file', replace_write))
+ mocks.enter_context(
+ mock.patch.object(os.path, 'isfile', return_value=False))
+ mocks.enter_context(
+ mock.patch("cloudinit.net.eni.glob.glob",
+ return_value=[]))
+
+ ub_distro.apply_network_config(V1_NET_CFG, False)
+
+ self.assertEqual(len(write_bufs), 2)
+ eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg'
+ self.assertIn(eni_name, write_bufs)
+ write_buf = write_bufs[eni_name]
+ self.assertEqual(str(write_buf).strip(), V1_NET_CFG_OUTPUT.strip())
+ self.assertEqual(write_buf.mode, 0o644)
+
+ def test_apply_network_config_v1_to_netplan_ub(self):
+ renderers = ['netplan']
+ ub_distro = self._get_distro('ubuntu', renderers=renderers)
+ with ExitStack() as mocks:
+ write_bufs = {}
+
+ def replace_write(filename, content, mode=0o644, omode="wb"):
+ buf = WriteBuffer()
+ buf.mode = mode
+ buf.omode = omode
+ buf.write(content)
+ write_bufs[filename] = buf
+
+ mocks.enter_context(
+ mock.patch.object(util, 'which', return_value=True))
+ mocks.enter_context(
+ mock.patch.object(util, 'write_file', replace_write))
+ mocks.enter_context(
+ mock.patch.object(util, 'ensure_dir'))
+ mocks.enter_context(
+ mock.patch.object(util, 'subp', return_value=(0, 0)))
+ mocks.enter_context(
+ mock.patch.object(os.path, 'isfile', return_value=False))
+
+ ub_distro.apply_network_config(V1_NET_CFG, False)
+
+ self.assertEqual(len(write_bufs), 1)
+ netplan_name = '/etc/netplan/50-cloud-init.yaml'
+ self.assertIn(netplan_name, write_bufs)
+ write_buf = write_bufs[netplan_name]
+ self.assertEqual(str(write_buf).strip(),
+ V1_TO_V2_NET_CFG_OUTPUT.strip())
+ self.assertEqual(write_buf.mode, 0o644)
+
+ def test_apply_network_config_v2_passthrough_ub(self):
+ renderers = ['netplan']
+ ub_distro = self._get_distro('ubuntu', renderers=renderers)
+ with ExitStack() as mocks:
+ write_bufs = {}
+
+ def replace_write(filename, content, mode=0o644, omode="wb"):
+ buf = WriteBuffer()
+ buf.mode = mode
+ buf.omode = omode
+ buf.write(content)
+ write_bufs[filename] = buf
+
+ mocks.enter_context(
+ mock.patch.object(util, 'which', return_value=True))
+ mocks.enter_context(
+ mock.patch.object(util, 'write_file', replace_write))
+ mocks.enter_context(
+ mock.patch.object(util, 'ensure_dir'))
+ mocks.enter_context(
+ mock.patch.object(util, 'subp', return_value=(0, 0)))
+ mocks.enter_context(
+ mock.patch.object(os.path, 'isfile', return_value=False))
+
+ ub_distro.apply_network_config(V2_NET_CFG, False)
+
+ self.assertEqual(len(write_bufs), 1)
+ netplan_name = '/etc/netplan/50-cloud-init.yaml'
+ self.assertIn(netplan_name, write_bufs)
+ write_buf = write_bufs[netplan_name]
+ self.assertEqual(str(write_buf).strip(),
+ V2_TO_V2_NET_CFG_OUTPUT.strip())
+ self.assertEqual(write_buf.mode, 0o644)
+
def assertCfgEquals(self, blob1, blob2):
b1 = dict(SysConf(blob1.strip().splitlines()))
b2 = dict(SysConf(blob2.strip().splitlines()))
@@ -195,6 +396,79 @@ NETWORKING=yes
self.assertCfgEquals(expected_buf, str(write_buf))
self.assertEqual(write_buf.mode, 0o644)
+ def test_apply_network_config_rh(self):
+ renderers = ['sysconfig']
+ rh_distro = self._get_distro('rhel', renderers=renderers)
+
+ write_bufs = {}
+
+ def replace_write(filename, content, mode=0o644, omode="wb"):
+ buf = WriteBuffer()
+ buf.mode = mode
+ buf.omode = omode
+ buf.write(content)
+ write_bufs[filename] = buf
+
+ with ExitStack() as mocks:
+ # sysconfig availability checks
+ mocks.enter_context(
+ mock.patch.object(util, 'which', return_value=True))
+ mocks.enter_context(
+ mock.patch.object(util, 'write_file', replace_write))
+ mocks.enter_context(
+ mock.patch.object(util, 'load_file', return_value=''))
+ mocks.enter_context(
+ mock.patch.object(os.path, 'isfile', return_value=True))
+
+ rh_distro.apply_network_config(V1_NET_CFG, False)
+
+ self.assertEqual(len(write_bufs), 5)
+
+ # eth0
+ self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0',
+ write_bufs)
+ write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0']
+ expected_buf = '''
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=static
+DEVICE=eth0
+IPADDR=192.168.1.5
+NETMASK=255.255.255.0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+'''
+ self.assertCfgEquals(expected_buf, str(write_buf))
+ self.assertEqual(write_buf.mode, 0o644)
+
+ # eth1
+ self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1',
+ write_bufs)
+ write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1']
+ expected_buf = '''
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=dhcp
+DEVICE=eth1
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+'''
+ self.assertCfgEquals(expected_buf, str(write_buf))
+ self.assertEqual(write_buf.mode, 0o644)
+
+ self.assertIn('/etc/sysconfig/network', write_bufs)
+ write_buf = write_bufs['/etc/sysconfig/network']
+ expected_buf = '''
+# Created by cloud-init v. 0.7
+NETWORKING=yes
+'''
+ self.assertCfgEquals(expected_buf, str(write_buf))
+ self.assertEqual(write_buf.mode, 0o644)
+
def test_write_ipv6_rhel(self):
rh_distro = self._get_distro('rhel')
@@ -274,6 +548,78 @@ IPV6_AUTOCONF=no
self.assertCfgEquals(expected_buf, str(write_buf))
self.assertEqual(write_buf.mode, 0o644)
+ def test_apply_network_config_ipv6_rh(self):
+ renderers = ['sysconfig']
+ rh_distro = self._get_distro('rhel', renderers=renderers)
+
+ write_bufs = {}
+
+ def replace_write(filename, content, mode=0o644, omode="wb"):
+ buf = WriteBuffer()
+ buf.mode = mode
+ buf.omode = omode
+ buf.write(content)
+ write_bufs[filename] = buf
+
+ with ExitStack() as mocks:
+ mocks.enter_context(
+ mock.patch.object(util, 'which', return_value=True))
+ mocks.enter_context(
+ mock.patch.object(util, 'write_file', replace_write))
+ mocks.enter_context(
+ mock.patch.object(util, 'load_file', return_value=''))
+ mocks.enter_context(
+ mock.patch.object(os.path, 'isfile', return_value=True))
+
+ rh_distro.apply_network_config(V1_NET_CFG_IPV6, False)
+
+ self.assertEqual(len(write_bufs), 5)
+
+ self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0',
+ write_bufs)
+ write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0']
+ expected_buf = '''
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=static
+DEVICE=eth0
+IPV6ADDR=2607:f0d0:1002:0011::2
+IPV6INIT=yes
+NETMASK=64
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+'''
+ self.assertCfgEquals(expected_buf, str(write_buf))
+ self.assertEqual(write_buf.mode, 0o644)
+ self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1',
+ write_bufs)
+ write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1']
+ expected_buf = '''
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=dhcp
+DEVICE=eth1
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+'''
+ self.assertCfgEquals(expected_buf, str(write_buf))
+ self.assertEqual(write_buf.mode, 0o644)
+
+ self.assertIn('/etc/sysconfig/network', write_bufs)
+ write_buf = write_bufs['/etc/sysconfig/network']
+ expected_buf = '''
+# Created by cloud-init v. 0.7
+NETWORKING=yes
+NETWORKING_IPV6=yes
+IPV6_AUTOCONF=no
+'''
+ self.assertCfgEquals(expected_buf, str(write_buf))
+ self.assertEqual(write_buf.mode, 0o644)
+
def test_simple_write_freebsd(self):
fbsd_distro = self._get_distro('freebsd')
diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py
index 6b535a95..c9d03475 100644
--- a/tests/unittests/test_distros/test_resolv.py
+++ b/tests/unittests/test_distros/test_resolv.py
@@ -46,7 +46,7 @@ class TestResolvHelper(TestCase):
self.assertNotIn('10.3', rp.nameservers)
self.assertEqual(len(rp.nameservers), 3)
rp.add_nameserver('10.2')
- self.assertRaises(ValueError, rp.add_nameserver, '10.3')
+ rp.add_nameserver('10.3')
self.assertNotIn('10.3', rp.nameservers)
def test_search_domains(self):
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
index 227f0497..7ff39225 100644
--- a/tests/unittests/test_handler/test_handler_disk_setup.py
+++ b/tests/unittests/test_handler/test_handler_disk_setup.py
@@ -103,4 +103,48 @@ class TestGetPartitionMbrLayout(TestCase):
',{0},83\n,,82'.format(expected_partition_size),
cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]]))
+
+class TestUpdateFsSetupDevices(TestCase):
+ def test_regression_1634678(self):
+ # Cf. https://bugs.launchpad.net/cloud-init/+bug/1634678
+ fs_setup = {
+ 'partition': 'auto',
+ 'device': '/dev/xvdb1',
+ 'overwrite': False,
+ 'label': 'test',
+ 'filesystem': 'ext4'
+ }
+
+ cc_disk_setup.update_fs_setup_devices([fs_setup],
+ lambda device: device)
+
+ self.assertEqual({
+ '_origname': '/dev/xvdb1',
+ 'partition': 'auto',
+ 'device': '/dev/xvdb1',
+ 'overwrite': False,
+ 'label': 'test',
+ 'filesystem': 'ext4'
+ }, fs_setup)
+
+ def test_dotted_devname(self):
+ fs_setup = {
+ 'partition': 'auto',
+ 'device': 'ephemeral0.0',
+ 'label': 'test2',
+ 'filesystem': 'xfs'
+ }
+
+ cc_disk_setup.update_fs_setup_devices([fs_setup],
+ lambda device: device)
+
+ self.assertEqual({
+ '_origname': 'ephemeral0.0',
+ '_partition': 'auto',
+ 'partition': '0',
+ 'device': 'ephemeral0',
+ 'label': 'test2',
+ 'filesystem': 'xfs'
+ }, fs_setup)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 4b03ff72..9cc5e4ab 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -3,7 +3,9 @@
from cloudinit import net
from cloudinit.net import cmdline
from cloudinit.net import eni
+from cloudinit.net import netplan
from cloudinit.net import network_state
+from cloudinit.net import renderers
from cloudinit.net import sysconfig
from cloudinit.sources.helpers import openstack
from cloudinit import util
@@ -248,6 +250,100 @@ nameserver 172.19.0.12
('etc/udev/rules.d/70-persistent-net.rules',
"".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
+ },
+ {
+ 'in_data': {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [{
+ "network_id": "public-ipv4",
+ "type": "ipv4", "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [{
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }],
+ "ip_address": "172.19.1.34", "id": "network0"
+ }, {
+ "network_id": "public-ipv6",
+ "type": "ipv6", "netmask": "",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "gateway": "2001:DB8::1",
+ "netmask": "::",
+ "network": "::"
+ }
+ ],
+ "ip_address": "2001:DB8::10", "id": "network1"
+ }],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None, "type": "bridge", "id":
+ "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ },
+ ],
+ },
+ 'in_macs': {
+ 'fa:16:3e:ed:9a:59': 'eth0',
+ },
+ 'out_sysconfig': [
+ ('etc/sysconfig/network-scripts/ifcfg-eth0',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=none
+DEVICE=eth0
+HWADDR=fa:16:3e:ed:9a:59
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+""".lstrip()),
+ ('etc/sysconfig/network-scripts/ifcfg-eth0:0',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=static
+DEFROUTE=yes
+DEVICE=eth0:0
+GATEWAY=172.19.3.254
+HWADDR=fa:16:3e:ed:9a:59
+IPADDR=172.19.1.34
+NETMASK=255.255.252.0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+""".lstrip()),
+ ('etc/sysconfig/network-scripts/ifcfg-eth0:1',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=static
+DEFROUTE=yes
+DEVICE=eth0:1
+HWADDR=fa:16:3e:ed:9a:59
+IPV6ADDR=2001:DB8::10
+IPV6INIT=yes
+IPV6_DEFAULTGW=2001:DB8::1
+NETMASK=
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+""".lstrip()),
+ ('etc/resolv.conf',
+ """
+; Created by cloud-init on instance boot automatically, do not edit.
+;
+nameserver 172.19.0.12
+""".lstrip()),
+ ('etc/udev/rules.d/70-persistent-net.rules',
+ "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
}
]
@@ -313,6 +409,41 @@ NETWORK_CONFIGS = {
post-up route add default gw 65.61.151.37 || true
pre-down route del default gw 65.61.151.37 || true
""").rstrip(' '),
+ 'expected_netplan': textwrap.dedent("""
+ network:
+ version: 2
+ ethernets:
+ eth1:
+ match:
+ macaddress: cf:d6:af:48:e8:80
+ nameservers:
+ addresses:
+ - 1.2.3.4
+ - 5.6.7.8
+ search:
+ - wark.maas
+ set-name: eth1
+ eth99:
+ addresses:
+ - 192.168.21.3/24
+ dhcp4: true
+ match:
+ macaddress: c0:d6:9f:2c:e8:80
+ nameservers:
+ addresses:
+ - 8.8.8.8
+ - 8.8.4.4
+ - 1.2.3.4
+ - 5.6.7.8
+ search:
+ - barley.maas
+ - sach.maas
+ - wark.maas
+ routes:
+ - to: 0.0.0.0/0.0.0.0
+ via: 65.61.151.37
+ set-name: eth99
+ """).rstrip(' '),
'yaml': textwrap.dedent("""
version: 1
config:
@@ -355,6 +486,14 @@ NETWORK_CONFIGS = {
# control-alias iface0
iface iface0 inet6 dhcp
""").rstrip(' '),
+ 'expected_netplan': textwrap.dedent("""
+ network:
+ version: 2
+ ethernets:
+ iface0:
+ dhcp4: true
+ dhcp6: true
+ """).rstrip(' '),
'yaml': textwrap.dedent("""\
version: 1
config:
@@ -429,6 +568,126 @@ iface eth0.101 inet static
post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
"""),
+ 'expected_netplan': textwrap.dedent("""
+ network:
+ version: 2
+ ethernets:
+ eth0:
+ match:
+ macaddress: c0:d6:9f:2c:e8:80
+ nameservers:
+ addresses:
+ - 8.8.8.8
+ - 4.4.4.4
+ - 8.8.4.4
+ search:
+ - barley.maas
+ - wark.maas
+ - foobar.maas
+ set-name: eth0
+ eth1:
+ match:
+ macaddress: aa:d6:9f:2c:e8:80
+ nameservers:
+ addresses:
+ - 8.8.8.8
+ - 4.4.4.4
+ - 8.8.4.4
+ search:
+ - barley.maas
+ - wark.maas
+ - foobar.maas
+ set-name: eth1
+ eth2:
+ match:
+ macaddress: c0:bb:9f:2c:e8:80
+ nameservers:
+ addresses:
+ - 8.8.8.8
+ - 4.4.4.4
+ - 8.8.4.4
+ search:
+ - barley.maas
+ - wark.maas
+ - foobar.maas
+ set-name: eth2
+ eth3:
+ match:
+ macaddress: 66:bb:9f:2c:e8:80
+ nameservers:
+ addresses:
+ - 8.8.8.8
+ - 4.4.4.4
+ - 8.8.4.4
+ search:
+ - barley.maas
+ - wark.maas
+ - foobar.maas
+ set-name: eth3
+ eth4:
+ match:
+ macaddress: 98:bb:9f:2c:e8:80
+ nameservers:
+ addresses:
+ - 8.8.8.8
+ - 4.4.4.4
+ - 8.8.4.4
+ search:
+ - barley.maas
+ - wark.maas
+ - foobar.maas
+ set-name: eth4
+ eth5:
+ dhcp4: true
+ match:
+ macaddress: 98:bb:9f:2c:e8:8a
+ nameservers:
+ addresses:
+ - 8.8.8.8
+ - 4.4.4.4
+ - 8.8.4.4
+ search:
+ - barley.maas
+ - wark.maas
+ - foobar.maas
+ set-name: eth5
+ bonds:
+ bond0:
+ dhcp6: true
+ interfaces:
+ - eth1
+ - eth2
+ parameters:
+ mode: active-backup
+ bridges:
+ br0:
+ addresses:
+ - 192.168.14.2/24
+ - 2001:1::1/64
+ interfaces:
+ - eth3
+ - eth4
+ vlans:
+ bond0.200:
+ dhcp4: true
+ id: 200
+ link: bond0
+ eth0.101:
+ addresses:
+ - 192.168.0.2/24
+ - 192.168.2.10/24
+ gateway4: 192.168.0.1
+ id: 101
+ link: eth0
+ nameservers:
+ addresses:
+ - 192.168.0.10
+ - 10.23.23.134
+ search:
+ - barley.maas
+ - sacchromyces.maas
+ - brettanomyces.maas
+ """).rstrip(' '),
'yaml': textwrap.dedent("""
version: 1
config:
@@ -543,6 +802,14 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
}
}
+CONFIG_V1_EXPLICIT_LOOPBACK = {
+ 'version': 1,
+ 'config': [{'name': 'eth0', 'type': 'physical',
+ 'subnets': [{'control': 'auto', 'type': 'dhcp'}]},
+ {'name': 'lo', 'type': 'loopback',
+ 'subnets': [{'control': 'auto', 'type': 'loopback'}]},
+ ]}
+
def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net,
mock_sys_dev_path):
@@ -595,7 +862,7 @@ class TestSysConfigRendering(CiTestCase):
os.makedirs(render_dir)
renderer = sysconfig.Renderer()
- renderer.render_network_state(render_dir, ns)
+ renderer.render_network_state(ns, render_dir)
render_file = 'etc/sysconfig/network-scripts/ifcfg-eth1000'
with open(os.path.join(render_dir, render_file)) as fh:
@@ -623,11 +890,32 @@ USERCTL=no
ns = network_state.parse_net_config_data(network_cfg,
skip_broken=False)
renderer = sysconfig.Renderer()
- renderer.render_network_state(render_dir, ns)
+ renderer.render_network_state(ns, render_dir)
for fn, expected_content in os_sample.get('out_sysconfig', []):
with open(os.path.join(render_dir, fn)) as fh:
self.assertEqual(expected_content, fh.read())
+ def test_config_with_explicit_loopback(self):
+ ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
+ render_dir = self.tmp_path("render")
+ os.makedirs(render_dir)
+ renderer = sysconfig.Renderer()
+ renderer.render_network_state(ns, render_dir)
+ found = dir2dict(render_dir)
+ nspath = '/etc/sysconfig/network-scripts/'
+ self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ expected = """\
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=dhcp
+DEVICE=eth0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+"""
+ self.assertEqual(expected, found[nspath + 'ifcfg-eth0'])
+
class TestEniNetRendering(CiTestCase):
@@ -652,7 +940,7 @@ class TestEniNetRendering(CiTestCase):
{'links_path_prefix': None,
'eni_path': 'interfaces', 'netrules_path': None,
})
- renderer.render_network_state(render_dir, ns)
+ renderer.render_network_state(ns, render_dir)
self.assertTrue(os.path.exists(os.path.join(render_dir,
'interfaces')))
@@ -668,6 +956,179 @@ iface eth1000 inet dhcp
"""
self.assertEqual(expected.lstrip(), contents.lstrip())
+ def test_config_with_explicit_loopback(self):
+ tmp_dir = self.tmp_dir()
+ ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
+ renderer = eni.Renderer()
+ renderer.render_network_state(ns, tmp_dir)
+ expected = """\
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet dhcp
+"""
+ self.assertEqual(
+ expected, dir2dict(tmp_dir)['/etc/network/interfaces'])
+
+
+class TestNetplanNetRendering(CiTestCase):
+
+ @mock.patch("cloudinit.net.netplan._clean_default")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_default_generation(self, mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ mock_clean_default):
+ tmp_dir = self.tmp_dir()
+ _setup_test(tmp_dir, mock_get_devicelist,
+ mock_read_sys_net, mock_sys_dev_path)
+
+ network_cfg = net.generate_fallback_config()
+ ns = network_state.parse_net_config_data(network_cfg,
+ skip_broken=False)
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ render_target = 'netplan.yaml'
+ renderer = netplan.Renderer(
+ {'netplan_path': render_target, 'postcmds': False})
+ renderer.render_network_state(render_dir, ns)
+
+ self.assertTrue(os.path.exists(os.path.join(render_dir,
+ render_target)))
+ with open(os.path.join(render_dir, render_target)) as fh:
+ contents = fh.read()
+ print(contents)
+
+ expected = """
+network:
+ version: 2
+ ethernets:
+ eth1000:
+ dhcp4: true
+ match:
+ macaddress: 07-1c-c6-75-a4-be
+ set-name: eth1000
+"""
+ self.assertEqual(expected.lstrip(), contents.lstrip())
+ self.assertEqual(1, mock_clean_default.call_count)
+
+
+class TestNetplanCleanDefault(CiTestCase):
+ snapd_known_path = 'etc/netplan/00-snapd-config.yaml'
+ snapd_known_content = textwrap.dedent("""\
+ # This is the initial network config.
+ # It can be overwritten by cloud-init or console-conf.
+ network:
+ version: 2
+ ethernets:
+ all-en:
+ match:
+ name: "en*"
+ dhcp4: true
+ all-eth:
+ match:
+ name: "eth*"
+ dhcp4: true
+ """)
+ stub_known = {
+ 'run/systemd/network/10-netplan-all-en.network': 'foo-en',
+ 'run/systemd/network/10-netplan-all-eth.network': 'foo-eth',
+ 'run/systemd/generator/netplan.stamp': 'stamp',
+ }
+
+ def test_clean_known_config_cleaned(self):
+ content = {self.snapd_known_path: self.snapd_known_content, }
+ content.update(self.stub_known)
+ tmpd = self.tmp_dir()
+ files = sorted(populate_dir(tmpd, content))
+ netplan._clean_default(target=tmpd)
+ found = [t for t in files if os.path.exists(t)]
+ self.assertEqual([], found)
+
+ def test_clean_unknown_config_not_cleaned(self):
+ content = {self.snapd_known_path: self.snapd_known_content, }
+ content.update(self.stub_known)
+ content[self.snapd_known_path] += "# user put a comment\n"
+ tmpd = self.tmp_dir()
+ files = sorted(populate_dir(tmpd, content))
+ netplan._clean_default(target=tmpd)
+ found = [t for t in files if os.path.exists(t)]
+ self.assertEqual(files, found)
+
+ def test_clean_known_config_cleans_only_expected(self):
+ astamp = "run/systemd/generator/another.stamp"
+ anet = "run/systemd/network/10-netplan-all-lo.network"
+ ayaml = "etc/netplan/01-foo-config.yaml"
+ content = {
+ self.snapd_known_path: self.snapd_known_content,
+ astamp: "stamp",
+ anet: "network",
+ ayaml: "yaml",
+ }
+ content.update(self.stub_known)
+
+ tmpd = self.tmp_dir()
+ files = sorted(populate_dir(tmpd, content))
+ netplan._clean_default(target=tmpd)
+ found = [t for t in files if os.path.exists(t)]
+ expected = [util.target_path(tmpd, f) for f in (astamp, anet, ayaml)]
+ self.assertEqual(sorted(expected), found)
+
+
+class TestNetplanPostcommands(CiTestCase):
+ mycfg = {
+ 'config': [{"type": "physical", "name": "eth0",
+ "mac_address": "c0:d6:9f:2c:e8:80",
+ "subnets": [{"type": "dhcp"}]}],
+ 'version': 1}
+
+ @mock.patch.object(netplan.Renderer, '_netplan_generate')
+ @mock.patch.object(netplan.Renderer, '_net_setup_link')
+ def test_netplan_render_calls_postcmds(self, mock_netplan_generate,
+ mock_net_setup_link):
+ tmp_dir = self.tmp_dir()
+ ns = network_state.parse_net_config_data(self.mycfg,
+ skip_broken=False)
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ render_target = 'netplan.yaml'
+ renderer = netplan.Renderer(
+ {'netplan_path': render_target, 'postcmds': True})
+ renderer.render_network_state(render_dir, ns)
+
+ mock_netplan_generate.assert_called_with(run=True)
+ mock_net_setup_link.assert_called_with(run=True)
+
+ @mock.patch.object(netplan, "get_devicelist")
+ @mock.patch('cloudinit.util.subp')
+ def test_netplan_postcmds(self, mock_subp, mock_devlist):
+ mock_devlist.side_effect = [['lo']]
+ tmp_dir = self.tmp_dir()
+ ns = network_state.parse_net_config_data(self.mycfg,
+ skip_broken=False)
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ render_target = 'netplan.yaml'
+ renderer = netplan.Renderer(
+ {'netplan_path': render_target, 'postcmds': True})
+ renderer.render_network_state(render_dir, ns)
+
+ expected = [
+ mock.call(['netplan', 'generate'], capture=True),
+ mock.call(['udevadm', 'test-builtin', 'net_setup_link',
+ '/sys/class/net/lo'], capture=True),
+ ]
+ mock_subp.assert_has_calls(expected)
+
class TestEniNetworkStateToEni(CiTestCase):
mycfg = {
@@ -814,6 +1275,50 @@ class TestCmdlineReadKernelConfig(CiTestCase):
self.assertEqual(found['config'], expected)
+class TestNetplanRoundTrip(CiTestCase):
+ def _render_and_read(self, network_config=None, state=None,
+ netplan_path=None, dir=None):
+ if dir is None:
+ dir = self.tmp_dir()
+
+ if network_config:
+ ns = network_state.parse_net_config_data(network_config)
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ if netplan_path is None:
+ netplan_path = 'etc/netplan/50-cloud-init.yaml'
+
+ renderer = netplan.Renderer(
+ config={'netplan_path': netplan_path})
+
+ renderer.render_network_state(dir, ns)
+ return dir2dict(dir)
+
+ def testsimple_render_small_netplan(self):
+ entry = NETWORK_CONFIGS['small']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self.assertEqual(
+ entry['expected_netplan'].splitlines(),
+ files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
+ def testsimple_render_v4_and_v6(self):
+ entry = NETWORK_CONFIGS['v4_and_v6']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self.assertEqual(
+ entry['expected_netplan'].splitlines(),
+ files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
+ def testsimple_render_all(self):
+ entry = NETWORK_CONFIGS['all']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self.assertEqual(
+ entry['expected_netplan'].splitlines(),
+ files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
+
class TestEniRoundTrip(CiTestCase):
def _render_and_read(self, network_config=None, state=None, eni_path=None,
links_prefix=None, netrules_path=None, dir=None):
@@ -834,7 +1339,7 @@ class TestEniRoundTrip(CiTestCase):
config={'eni_path': eni_path, 'links_path_prefix': links_prefix,
'netrules_path': netrules_path})
- renderer.render_network_state(dir, ns)
+ renderer.render_network_state(ns, dir)
return dir2dict(dir)
def testsimple_convert_and_render(self):
@@ -912,6 +1417,126 @@ class TestEniRoundTrip(CiTestCase):
expected, [line for line in found if line])
+class TestNetRenderers(CiTestCase):
+ @mock.patch("cloudinit.net.renderers.sysconfig.available")
+ @mock.patch("cloudinit.net.renderers.eni.available")
+ def test_eni_and_sysconfig_available(self, m_eni_avail, m_sysc_avail):
+ m_eni_avail.return_value = True
+ m_sysc_avail.return_value = True
+ found = renderers.search(priority=['sysconfig', 'eni'], first=False)
+ names = [f[0] for f in found]
+ self.assertEqual(['sysconfig', 'eni'], names)
+
+ @mock.patch("cloudinit.net.renderers.eni.available")
+ def test_search_returns_empty_on_none(self, m_eni_avail):
+ m_eni_avail.return_value = False
+ found = renderers.search(priority=['eni'], first=False)
+ self.assertEqual([], found)
+
+ @mock.patch("cloudinit.net.renderers.sysconfig.available")
+ @mock.patch("cloudinit.net.renderers.eni.available")
+ def test_first_in_priority(self, m_eni_avail, m_sysc_avail):
+ # available should only be called until one is found.
+ m_eni_avail.return_value = True
+ m_sysc_avail.side_effect = Exception("Should not call me")
+ found = renderers.search(priority=['eni', 'sysconfig'], first=True)
+ self.assertEqual(['eni'], [found[0]])
+
+ @mock.patch("cloudinit.net.renderers.sysconfig.available")
+ @mock.patch("cloudinit.net.renderers.eni.available")
+ def test_select_positive(self, m_eni_avail, m_sysc_avail):
+ m_eni_avail.return_value = True
+ m_sysc_avail.return_value = False
+ found = renderers.select(priority=['sysconfig', 'eni'])
+ self.assertEqual('eni', found[0])
+
+ @mock.patch("cloudinit.net.renderers.sysconfig.available")
+ @mock.patch("cloudinit.net.renderers.eni.available")
+ def test_select_none_found_raises(self, m_eni_avail, m_sysc_avail):
+ # if select finds nothing, should raise exception.
+ m_eni_avail.return_value = False
+ m_sysc_avail.return_value = False
+
+ self.assertRaises(net.RendererNotFoundError, renderers.select,
+ priority=['sysconfig', 'eni'])
+
+
+class TestGetInterfacesByMac(CiTestCase):
+ _data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1',
+ 'bridge1-nic', 'tun0'],
+ 'bonds': ['bond1'],
+ 'bridges': ['bridge1'],
+ 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1'],
+ 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
+ 'enp0s2': 'aa:aa:aa:aa:aa:02',
+ 'bond1': 'aa:aa:aa:aa:aa:01',
+ 'bridge1': 'aa:aa:aa:aa:aa:03',
+ 'bridge1-nic': 'aa:aa:aa:aa:aa:03',
+ 'tun0': None}}
+ data = {}
+
+ def _se_get_devicelist(self):
+ return self.data['devices']
+
+ def _se_get_interface_mac(self, name):
+ return self.data['macs'][name]
+
+ def _se_is_bridge(self, name):
+ return name in self.data['bridges']
+
+ def _se_interface_has_own_mac(self, name):
+ return name in self.data['own_macs']
+
+ def _mock_setup(self):
+ self.data = copy.deepcopy(self._data)
+ mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
+ 'interface_has_own_mac')
+ self.mocks = {}
+ for n in mocks:
+ m = mock.patch('cloudinit.net.' + n,
+ side_effect=getattr(self, '_se_' + n))
+ self.addCleanup(m.stop)
+ self.mocks[n] = m.start()
+
+ def test_raise_exception_on_duplicate_macs(self):
+ self._mock_setup()
+ self.data['macs']['bridge1-nic'] = self.data['macs']['enp0s1']
+ self.assertRaises(RuntimeError, net.get_interfaces_by_mac)
+
+ def test_excludes_any_without_mac_address(self):
+ self._mock_setup()
+ ret = net.get_interfaces_by_mac()
+ self.assertIn('tun0', self._se_get_devicelist())
+ self.assertNotIn('tun0', ret.values())
+
+ def test_excludes_stolen_macs(self):
+ self._mock_setup()
+ ret = net.get_interfaces_by_mac()
+ self.mocks['interface_has_own_mac'].assert_has_calls(
+ [mock.call('enp0s1'), mock.call('bond1')], any_order=True)
+ self.assertEqual(
+ {'aa:aa:aa:aa:aa:01': 'enp0s1', 'aa:aa:aa:aa:aa:02': 'enp0s2',
+ 'aa:aa:aa:aa:aa:03': 'bridge1-nic'},
+ ret)
+
+ def test_excludes_bridges(self):
+ self._mock_setup()
+ # add a device 'b1', have every device report its "own mac",
+ # and mark everything other than 'b1' as a bridge.
+ # then expect b1 to be the only interface left in the result.
+ self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1'
+ self.data['devices'].append('b1')
+ self.data['bonds'] = []
+ self.data['own_macs'] = self.data['devices']
+ self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"]
+ ret = net.get_interfaces_by_mac()
+ self.assertEqual({'aa:aa:aa:aa:aa:b1': 'b1'}, ret)
+ self.mocks['is_bridge'].assert_has_calls(
+ [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'),
+ mock.call('b1')],
+ any_order=True)
+
+
def _gzip_data(data):
with io.BytesIO() as iobuf:
gzfp = gzip.GzipFile(mode="wb", fileobj=iobuf)
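
The new render tests above all follow the same parse-render-read shape. Below is a minimal, self-contained sketch of that pattern, using the eni renderer and the (network_state, target) argument order the diff switches the call sites to; the output path matches the renderer default asserted in test_config_with_explicit_loopback, and any other detail is illustrative rather than taken from the diff:

    import os
    import tempfile

    from cloudinit.net import eni, network_state


    def render_round_trip(net_cfg):
        # parse a v1/v2 network config dict into a NetworkState object
        ns = network_state.parse_net_config_data(net_cfg)
        target = tempfile.mkdtemp()
        # render with network_state first, target directory second
        eni.Renderer().render_network_state(ns, target)
        # default eni output lands in etc/network/interfaces under the target
        with open(os.path.join(target, 'etc/network/interfaces')) as fh:
            return fh.read()

Feeding it a config shaped like the mycfg dict in TestNetplanPostcommands (a single physical eth0 with a dhcp subnet) should return an interfaces stanza comparable to the expected strings in the eni tests.
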
diff --git a/tests/unittests/test_version.py b/tests/unittests/test_version.py
new file mode 100644
index 00000000..1662ce09
--- /dev/null
+++ b/tests/unittests/test_version.py
@@ -0,0 +1,14 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from .helpers import CiTestCase
+from cloudinit import version
+
+
+class TestExportsFeatures(CiTestCase):
+ def test_has_network_config_v1(self):
+ self.assertIn('NETWORK_CONFIG_V1', version.FEATURES)
+
+ def test_has_network_config_v2(self):
+ self.assertIn('NETWORK_CONFIG_V2', version.FEATURES)
+
+# vi: ts=4 expandtab
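
The feature flags asserted above are intended for programmatic consumption. A minimal sketch of the kind of check a consumer might make (the helper name is hypothetical):

    from cloudinit import version


    def supports_network_config_v2():
        # NETWORK_CONFIG_V2 is one of the flags the test asserts is exported
        return 'NETWORK_CONFIG_V2' in version.FEATURES
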
diff --git a/tools/ds-identify b/tools/ds-identify
index e138d780..5d390ef7 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -70,7 +70,9 @@ PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}"
PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}"
PATH_PROC_1_ENVIRON="${PATH_PROC_1_ENVIRON:-${PATH_ROOT}/proc/1/environ}"
PATH_PROC_UPTIME=${PATH_PROC_UPTIME:-${PATH_ROOT}/proc/uptime}
-PATH_CLOUD_CONFD="${PATH_CLOUD_CONFD:-${PATH_ROOT}/etc/cloud}"
+PATH_ETC_CLOUD="${PATH_ETC_CLOUD:-${PATH_ROOT}/etc/cloud}"
+PATH_ETC_CI_CFG="${PATH_ETC_CI_CFG:-${PATH_ETC_CLOUD}/cloud.cfg}"
+PATH_ETC_CI_CFG_D="${PATH_ETC_CI_CFG_D:-${PATH_ETC_CI_CFG}.d}"
PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}"
PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg}
PATH_RUN_DI_RESULT=${PATH_RUN_DI_RESULT:-${PATH_RUN_CI}/.ds-identify.result}
@@ -108,7 +110,7 @@ DI_DSNAME=""
# this has to match the builtin list in cloud-init, it is what will
# be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
-CloudSigma CloudStack DigitalOcean Ec2 OpenNebula OpenStack OVF SmartOS"
+CloudSigma CloudStack DigitalOcean Ec2 GCE OpenNebula OpenStack OVF SmartOS"
DI_DSLIST=""
DI_MODE=""
DI_ON_FOUND=""
@@ -383,6 +385,14 @@ dmi_product_name_matches() {
return 1
}
+dmi_product_serial_matches() {
+ is_container && return 1
+ case "${DI_DMI_PRODUCT_SERIAL}" in
+ $1) return 0;;
+ esac
+ return 1
+}
+
dmi_product_name_is() {
is_container && return 1
[ "${DI_DMI_PRODUCT_NAME}" = "$1" ]
@@ -464,16 +474,19 @@ dscheck_CloudSigma() {
}
check_config() {
- # somewhat hackily read config for 'key' in files matching 'files'
- # currently does not respect any hierarchy.
- local key="$1" files="" bp="${PATH_CLOUD_CONFD}/cloud.cfg"
- if [ $# -eq 1 ]; then
- files="$bp ${bp}.d/*.cfg"
+ # check_config(key [,file_globs])
+ # somewhat hackily read through file_globs for 'key'
+ # file_globs are expanded via path expansion and
+ # default to /etc/cloud/cloud.cfg /etc/cloud/cloud.cfg.d/*.cfg
+ # currently does not respect any hierarchy in searching for key.
+ local key="$1" files=""
+ shift
+ if [ $# -eq 0 ]; then
+ files="${PATH_ETC_CI_CFG} ${PATH_ETC_CI_CFG_D}/*.cfg"
else
files="$*"
fi
- shift
- set +f; set -- $files; set +f;
+ set +f; set -- $files; set -f;
if [ "$1" = "$files" -a ! -f "$1" ]; then
return 1
fi
@@ -512,9 +525,7 @@ dscheck_MAAS() {
esac
# check config files written by maas for installed system.
- local confd="${PATH_CLOUD_CONFD}"
- local fnmatch="$confd/*maas*.cfg $confd/*kernel_cmdline*.cfg"
- if check_config "MAAS" "$fnmatch"; then
+ if check_config "MAAS"; then
return "${DS_FOUND}"
fi
return ${DS_NOT_FOUND}
@@ -538,6 +549,19 @@ check_configdrive_v2() {
if has_fs_with_label "config-2"; then
return ${DS_FOUND}
fi
+ # look in /config-drive and ${PATH_VAR_LIB_CLOUD}/seed/config_drive for a
+ # directory named openstack/YYYY-MM-DD containing a file meta_data.json
+ local d=""
+ for d in /config-drive "${PATH_VAR_LIB_CLOUD}/seed/config_drive"; do
+ set +f; set -- "$d/openstack/"2???-??-??/meta_data.json; set -f;
+ [ -f "$1" ] && return ${DS_FOUND}
+ done
+ # at least one cloud (softlayer) seeds config drive with only 'latest'.
+ local lpath="openstack/latest/meta_data.json"
+ if [ -e "${PATH_VAR_LIB_CLOUD}/$lpath" ]; then
+ debug 1 "config drive seeded directory had only 'latest'"
+ return ${DS_FOUND}
+ fi
return ${DS_NOT_FOUND}
}
@@ -586,9 +610,7 @@ ovf_vmware_guest_customization() {
# (disable_vmware_customization=true). If it is set to false, then
# user has requested customization.
local key="disable_vmware_customization"
- local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg"
- match="$bp $bp.d/*[Oo][Vv][Ff]*.cfg"
- if check_config "$key" "$match"; then
+ if check_config "$key"; then
debug 2 "${_RET_fname} set $key to $_RET"
case "$_RET" in
0|false|False) return 0;;
@@ -633,7 +655,8 @@ dscheck_Azure() {
dscheck_Bigstep() {
# bigstep is activated by presence of seed file 'url'
- check_seed_dir "bigstep" url && return ${DS_FOUND}
+ [ -f "${PATH_VAR_LIB_CLOUD}/data/seed/bigstep/url" ] &&
+ return ${DS_FOUND}
return ${DS_NOT_FOUND}
}
@@ -658,9 +681,9 @@ ec2_read_strict_setting() {
esac
# 3. look for the key 'strict_id' (datasource/Ec2/strict_id)
- local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg"
- match="$bp $bp.d/*[Ee][Cc]2*.cfg"
- if check_config strict_id "$match"; then
+ # only in cloud.cfg or cloud.cfg.d/EC2.cfg (case insensitive)
+ local cfg="${PATH_ETC_CI_CFG}" cfg_d="${PATH_ETC_CI_CFG_D}"
+ if check_config strict_id $cfg "$cfg_d/*[Ee][Cc]2*.cfg"; then
debug 2 "${_RET_fname} set strict_id to $_RET"
return 0
fi
@@ -756,6 +779,10 @@ dscheck_GCE() {
if dmi_product_name_is "Google Compute Engine"; then
return ${DS_FOUND}
fi
+ # product name is not guaranteed (LP: #1674861)
+ if dmi_product_serial_matches "GoogleCloud-*"; then
+ return ${DS_FOUND}
+ fi
return ${DS_NOT_FOUND}
}
@@ -769,10 +796,15 @@ dscheck_OpenStack() {
if [ $? -eq ${DS_FOUND} ]; then
return ${DS_NOT_FOUND}
fi
- if dmi_product_name_is "OpenStack Nova"; then
+ local nova="OpenStack Nova" compute="OpenStack Compute"
+ if dmi_product_name_is "$nova"; then
+ return ${DS_FOUND}
+ fi
+ if dmi_product_name_is "$compute"; then
+ # RDO installed nova (LP: #1675349).
return ${DS_FOUND}
fi
- if [ "${DI_PID_1_PLATFORM}" = "OpenStack Nova" ]; then
+ if [ "${DI_PID_1_PLATFORM}" = "$nova" ]; then
return ${DS_FOUND}
fi
@@ -923,7 +955,9 @@ found() {
shift
fi
# always write the None datasource last.
- list="${list:+${list}, }None"
+ if [ "$list" != "None" ]; then
+ list="${list:+${list}, }None"
+ fi
write_result "datasource_list: [ $list ]" "$@"
return
}
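
The found() change above keeps the fallback None datasource from being listed twice when None is the only result. A hedged Python restatement of that rule (the function name is illustrative; the behaviour mirrors the shell):

    def datasource_list_line(names):
        # append the None fallback exactly once; a bare ['None'] stays as-is
        if names != ['None']:
            names = names + ['None']
        return 'datasource_list: [ %s ]' % ', '.join(names)

So datasource_list_line(['MAAS']) yields 'datasource_list: [ MAAS, None ]', while datasource_list_line(['None']) stays 'datasource_list: [ None ]'.
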
diff --git a/tools/net-convert.py b/tools/net-convert.py
new file mode 100755
index 00000000..870da639
--- /dev/null
+++ b/tools/net-convert.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python3
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import argparse
+import json
+import os
+import yaml
+
+from cloudinit.sources.helpers import openstack
+
+from cloudinit.net import eni
+from cloudinit.net import network_state
+from cloudinit.net import netplan
+from cloudinit.net import sysconfig
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--network-data", "-p", type=open,
+ metavar="PATH", required=True)
+ parser.add_argument("--kind", "-k",
+ choices=['eni', 'network_data.json', 'yaml'],
+ required=True)
+ parser.add_argument("-d", "--directory",
+ metavar="PATH",
+ help="directory to place output in",
+ required=True)
+ parser.add_argument("-m", "--mac",
+ metavar="name,mac",
+ action='append',
+ help="interface name to mac mapping")
+ parser.add_argument("--output-kind", "-ok",
+ choices=['eni', 'netplan', 'sysconfig'],
+ required=True)
+ args = parser.parse_args()
+
+ if not os.path.isdir(args.directory):
+ os.makedirs(args.directory)
+
+ if args.mac:
+ known_macs = {}
+ for item in args.mac:
+ iface_name, iface_mac = item.split(",", 1)
+ known_macs[iface_mac] = iface_name
+ else:
+ known_macs = None
+
+ net_data = args.network_data.read()
+ if args.kind == "eni":
+ pre_ns = eni.convert_eni_data(net_data)
+ ns = network_state.parse_net_config_data(pre_ns)
+ elif args.kind == "yaml":
+ pre_ns = yaml.load(net_data)
+ if 'network' in pre_ns:
+ pre_ns = pre_ns.get('network')
+ print("Input YAML")
+ print(yaml.dump(pre_ns, default_flow_style=False, indent=4))
+ ns = network_state.parse_net_config_data(pre_ns)
+ else:
+ pre_ns = openstack.convert_net_json(
+ json.loads(net_data), known_macs=known_macs)
+ ns = network_state.parse_net_config_data(pre_ns)
+
+ if not ns:
+ raise RuntimeError("No valid network_state object created from"
+ "input data")
+
+ print("\nInternal State")
+ print(yaml.dump(ns, default_flow_style=False, indent=4))
+ if args.output_kind == "eni":
+ r_cls = eni.Renderer
+ elif args.output_kind == "netplan":
+ r_cls = netplan.Renderer
+ else:
+ r_cls = sysconfig.Renderer
+
+ r = r_cls()
+ r.render_network_state(ns, target=args.directory)
+
+
+if __name__ == '__main__':
+ main()
+
+# vi: ts=4 expandtab
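
As a usage sketch for the new converter (file and directory names here are illustrative; the flags come from the argparse definitions above): reading a v1/v2 YAML config and emitting netplan output into out/ would look like ./tools/net-convert.py --network-data net.yaml --kind yaml --output-kind netplan -d out/, and a repeated -m name,mac option can supply the interface-name-to-MAC mapping used when the input kind is network_data.json.
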
diff --git a/tox.ini b/tox.ini
index ca5d8b87..bf9046af 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py27, py3, flake8, xenial
+envlist = py27, py3, flake8, xenial, pylint
recreate = True
[testenv]
@@ -17,6 +17,10 @@ commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
setenv =
LC_ALL = en_US.utf-8
+[testenv:pylint]
+deps = pylint==1.6.5
+commands = {envpython} -m pylint {posargs:cloudinit}
+
[testenv:py3]
basepython = python3
commands = {envpython} -m nose {posargs:--with-coverage \
@@ -87,3 +91,14 @@ deps = pycodestyle
[testenv:tip-pyflakes]
commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
deps = pyflakes
+
+[testenv:tip-pylint]
+commands = {envpython} -m pylint {posargs:cloudinit}
+deps = pylint
+
+[testenv:citest]
+basepython = python3
+commands = {envpython} -m tests.cloud_tests {posargs}
+passenv = HOME
+deps =
+ pylxd==2.1.3
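
The tox additions give three new targets: a pinned pylint environment (pylint==1.6.5, included in the default envlist), an unpinned tip-pylint, and a citest environment that drives tests.cloud_tests against LXD via pylxd. citest is not in the default envlist, so it is invoked explicitly, e.g. tox -e citest (and tox -e pylint runs only the lint pass); HOME is passed through, presumably so the LXD client can find the user's configuration.
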