summaryrefslogtreecommitdiff
path: root/cloudinit
diff options
context:
space:
mode:
authorScott Moser <smoser@brickies.net>2017-07-31 14:46:00 -0400
committerScott Moser <smoser@brickies.net>2017-07-31 14:46:00 -0400
commit19c248d009af6a7cff26fbb2febf5c958987084d (patch)
tree521cc4c8cd303fd7a9eb56bc4eb5975c48996298 /cloudinit
parentf47c7ac027fc905ca7f6bee776007e2a922c117e (diff)
parente586fe35a692b7519000005c8024ebd2bcbc82e0 (diff)
downloadvyos-cloud-init-19c248d009af6a7cff26fbb2febf5c958987084d.tar.gz
vyos-cloud-init-19c248d009af6a7cff26fbb2febf5c958987084d.zip
merge from master at 0.7.9-233-ge586fe35
Diffstat (limited to 'cloudinit')
-rw-r--r--cloudinit/cmd/main.py24
-rw-r--r--cloudinit/config/cc_chef.py2
-rw-r--r--cloudinit/config/cc_growpart.py2
-rw-r--r--cloudinit/config/cc_ntp.py69
-rw-r--r--cloudinit/config/cc_power_state_change.py2
-rw-r--r--cloudinit/config/cc_write_files.py33
-rw-r--r--cloudinit/config/schema.py222
-rwxr-xr-xcloudinit/distros/__init__.py2
-rw-r--r--cloudinit/distros/arch.py2
-rw-r--r--cloudinit/distros/centos.py12
-rw-r--r--cloudinit/distros/debian.py48
-rw-r--r--cloudinit/distros/parsers/networkmanager_conf.py23
-rw-r--r--cloudinit/net/__init__.py315
-rw-r--r--cloudinit/net/eni.py46
-rw-r--r--cloudinit/net/netplan.py17
-rw-r--r--cloudinit/net/network_state.py244
-rw-r--r--cloudinit/net/renderer.py8
-rw-r--r--cloudinit/net/sysconfig.py155
-rw-r--r--cloudinit/net/tests/__init__.py0
-rw-r--r--cloudinit/net/tests/test_init.py522
-rw-r--r--cloudinit/net/udev.py7
-rw-r--r--cloudinit/netinfo.py7
-rw-r--r--cloudinit/settings.py2
-rw-r--r--cloudinit/sources/DataSourceAliYun.py14
-rw-r--r--cloudinit/sources/DataSourceAzure.py151
-rw-r--r--cloudinit/sources/DataSourceEc2.py21
-rw-r--r--cloudinit/sources/DataSourceNoCloud.py12
-rw-r--r--cloudinit/sources/DataSourceScaleway.py234
-rw-r--r--cloudinit/sources/__init__.py15
-rw-r--r--cloudinit/stages.py5
-rw-r--r--cloudinit/url_helper.py10
-rw-r--r--cloudinit/util.py52
32 files changed, 2020 insertions, 258 deletions
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 26cc2654..139e03b3 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -3,10 +3,12 @@
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
+# Copyright (C) 2017 Amazon.com, Inc. or its affiliates
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Andrew Jorgensen <ajorgens@amazon.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -25,7 +27,6 @@ from cloudinit import netinfo
from cloudinit import signal_handler
from cloudinit import sources
from cloudinit import stages
-from cloudinit import templater
from cloudinit import url_helper
from cloudinit import util
from cloudinit import version
@@ -42,9 +43,9 @@ from cloudinit import atomic_helper
from cloudinit.dhclient_hook import LogDhclient
-# Pretty little cheetah formatted welcome message template
-WELCOME_MSG_TPL = ("Cloud-init v. ${version} running '${action}' at "
- "${timestamp}. Up ${uptime} seconds.")
+# Welcome message template
+WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at "
+ "{timestamp}. Up {uptime} seconds.")
# Module section template
MOD_SECTION_TPL = "cloud_%s_modules"
@@ -88,13 +89,11 @@ def welcome(action, msg=None):
def welcome_format(action):
- tpl_params = {
- 'version': version.version_string(),
- 'uptime': util.uptime(),
- 'timestamp': util.time_rfc2822(),
- 'action': action,
- }
- return templater.render_string(WELCOME_MSG_TPL, tpl_params)
+ return WELCOME_MSG_TPL.format(
+ version=version.version_string(),
+ uptime=util.uptime(),
+ timestamp=util.time_rfc2822(),
+ action=action)
def extract_fns(args):
@@ -373,6 +372,9 @@ def main_init(name, args):
LOG.debug("[%s] %s is in local mode, will apply init modules now.",
mode, init.datasource)
+ # Give the datasource a chance to use network resources.
+ # This is used on Azure to communicate with the fabric over network.
+ init.setup_datasource()
# update fully realizes user-data (pulling in #include if necessary)
init.update()
# Stage 7
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 2be2532c..02c70b10 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -92,7 +92,7 @@ REQUIRED_CHEF_DIRS = tuple([
])
# Used if fetching chef from a omnibus style package
-OMNIBUS_URL = "https://www.getchef.com/chef/install.sh"
+OMNIBUS_URL = "https://www.chef.io/chef/install.sh"
OMNIBUS_URL_RETRIES = 5
CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index d2bc6e6c..bafca9d8 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -214,7 +214,7 @@ def device_part_info(devpath):
# FreeBSD doesn't know of sysfs so just get everything we need from
# the device, like /dev/vtbd0p2.
- if util.system_info()["platform"].startswith('FreeBSD'):
+ if util.is_FreeBSD():
m = re.search('^(/dev/.+)p([0-9])$', devpath)
return (m.group(1), m.group(2))
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 5cc54536..31ed64e3 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -36,6 +36,7 @@ servers or pools are provided, 4 pools will be used in the format
- 192.168.23.2
"""
+from cloudinit.config.schema import validate_cloudconfig_schema
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import templater
@@ -43,6 +44,7 @@ from cloudinit import type_utils
from cloudinit import util
import os
+from textwrap import dedent
LOG = logging.getLogger(__name__)
@@ -52,21 +54,84 @@ NR_POOL_SERVERS = 4
distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
+# The schema definition for each cloud-config module is a strict contract for
+# describing supported configuration parameters for each cloud-config section.
+# It allows cloud-config to validate and alert users to invalid or ignored
+# configuration options before actually attempting to deploy with said
+# configuration.
+
+schema = {
+ 'id': 'cc_ntp',
+ 'name': 'NTP',
+ 'title': 'enable and configure ntp',
+ 'description': dedent("""\
+ Handle ntp configuration. If ntp is not installed on the system and
+ ntp configuration is specified, ntp will be installed. If there is a
+ default ntp config file in the image or one is present in the
+ distro's ntp package, it will be copied to ``/etc/ntp.conf.dist``
+ before any changes are made. A list of ntp pools and ntp servers can
+ be provided under the ``ntp`` config key. If no ntp ``servers`` or
+ ``pools`` are provided, 4 pools will be used in the format
+ ``{0-3}.{distro}.pool.ntp.org``."""),
+ 'distros': distros,
+ 'examples': [
+ {'ntp': {'pools': ['0.company.pool.ntp.org', '1.company.pool.ntp.org',
+ 'ntp.myorg.org'],
+ 'servers': ['my.ntp.server.local', 'ntp.ubuntu.com',
+ '192.168.23.2']}}],
+ 'frequency': PER_INSTANCE,
+ 'type': 'object',
+ 'properties': {
+ 'ntp': {
+ 'type': ['object', 'null'],
+ 'properties': {
+ 'pools': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ 'format': 'hostname'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ List of ntp pools. If both pools and servers are
+ empty, 4 default pool servers will be provided of
+ the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ },
+ 'servers': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ 'format': 'hostname'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ List of ntp servers. If both pools and servers are
+ empty, 4 default pool servers will be provided with
+ the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ }
+ },
+ 'required': [],
+ 'additionalProperties': False
+ }
+ }
+}
+
+
def handle(name, cfg, cloud, log, _args):
"""Enable and configure ntp."""
-
if 'ntp' not in cfg:
LOG.debug(
"Skipping module named %s, not present or disabled by cfg", name)
return
-
ntp_cfg = cfg.get('ntp', {})
+ # TODO drop this when validate_cloudconfig_schema is strict=True
if not isinstance(ntp_cfg, (dict)):
raise RuntimeError(("'ntp' key existed in config,"
" but not a dictionary type,"
" is a %s %instead"), type_utils.obj_name(ntp_cfg))
+ validate_cloudconfig_schema(cfg, schema)
rename_ntp_conf()
# ensure when ntp is installed it has a configuration file
# to use instead of starting up with packaged defaults
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index c1c6fe7e..eba58b02 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -71,7 +71,7 @@ def givecmdline(pid):
# Example output from procstat -c 1
# PID COMM ARGS
# 1 init /bin/init --
- if util.system_info()["platform"].startswith('FreeBSD'):
+ if util.is_FreeBSD():
(output, _err) = util.subp(['procstat', '-c', str(pid)])
line = output.splitlines()[1]
m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 72e1cdd6..54ae3a68 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -50,15 +50,19 @@ import base64
import os
import six
+from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
+
frequency = PER_INSTANCE
DEFAULT_OWNER = "root:root"
DEFAULT_PERMS = 0o644
UNKNOWN_ENC = 'text/plain'
+LOG = logging.getLogger(__name__)
+
def handle(name, cfg, _cloud, log, _args):
files = cfg.get('write_files')
@@ -66,10 +70,10 @@ def handle(name, cfg, _cloud, log, _args):
log.debug(("Skipping module named %s,"
" no/empty 'write_files' key in configuration"), name)
return
- write_files(name, files, log)
+ write_files(name, files)
-def canonicalize_extraction(encoding_type, log):
+def canonicalize_extraction(encoding_type):
if not encoding_type:
encoding_type = ''
encoding_type = encoding_type.lower().strip()
@@ -84,31 +88,31 @@ def canonicalize_extraction(encoding_type, log):
if encoding_type in ['b64', 'base64']:
return ['application/base64']
if encoding_type:
- log.warn("Unknown encoding type %s, assuming %s",
- encoding_type, UNKNOWN_ENC)
+ LOG.warning("Unknown encoding type %s, assuming %s",
+ encoding_type, UNKNOWN_ENC)
return [UNKNOWN_ENC]
-def write_files(name, files, log):
+def write_files(name, files):
if not files:
return
for (i, f_info) in enumerate(files):
path = f_info.get('path')
if not path:
- log.warn("No path provided to write for entry %s in module %s",
- i + 1, name)
+ LOG.warning("No path provided to write for entry %s in module %s",
+ i + 1, name)
continue
path = os.path.abspath(path)
- extractions = canonicalize_extraction(f_info.get('encoding'), log)
+ extractions = canonicalize_extraction(f_info.get('encoding'))
contents = extract_contents(f_info.get('content', ''), extractions)
(u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER))
- perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS, log)
+ perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS)
util.write_file(path, contents, mode=perms)
util.chownbyname(path, u, g)
-def decode_perms(perm, default, log):
+def decode_perms(perm, default):
if perm is None:
return default
try:
@@ -119,7 +123,14 @@ def decode_perms(perm, default, log):
# Force to string and try octal conversion
return int(str(perm), 8)
except (TypeError, ValueError):
- log.warn("Undecodable permissions %s, assuming %s", perm, default)
+ reps = []
+ for r in (perm, default):
+ try:
+ reps.append("%o" % r)
+ except TypeError:
+ reps.append("%r" % r)
+ LOG.warning(
+ "Undecodable permissions %s, returning default %s", *reps)
return default
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
new file mode 100644
index 00000000..6400f005
--- /dev/null
+++ b/cloudinit/config/schema.py
@@ -0,0 +1,222 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""schema.py: Set of module functions for processing cloud-config schema."""
+
+from __future__ import print_function
+
+from cloudinit.util import read_file_or_url
+
+import argparse
+import logging
+import os
+import sys
+import yaml
+
+SCHEMA_UNDEFINED = b'UNDEFINED'
+CLOUD_CONFIG_HEADER = b'#cloud-config'
+SCHEMA_DOC_TMPL = """
+{name}
+---
+**Summary:** {title}
+
+{description}
+
+**Internal name:** ``{id}``
+
+**Module frequency:** {frequency}
+
+**Supported distros:** {distros}
+
+**Config schema**:
+{property_doc}
+{examples}
+"""
+SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'
+
+
+class SchemaValidationError(ValueError):
+ """Raised when validating a cloud-config file against a schema."""
+
+ def __init__(self, schema_errors=()):
+        """Init the exception with an n-tuple of schema errors.
+
+ @param schema_errors: An n-tuple of the format:
+ ((flat.config.key, msg),)
+ """
+ self.schema_errors = schema_errors
+ error_messages = [
+ '{0}: {1}'.format(config_key, message)
+ for config_key, message in schema_errors]
+ message = "Cloud config schema errors: {0}".format(
+ ', '.join(error_messages))
+ super(SchemaValidationError, self).__init__(message)
+
+
+def validate_cloudconfig_schema(config, schema, strict=False):
+ """Validate provided config meets the schema definition.
+
+ @param config: Dict of cloud configuration settings validated against
+ schema.
+ @param schema: jsonschema dict describing the supported schema definition
+ for the cloud config module (config.cc_*).
+ @param strict: Boolean, when True raise SchemaValidationErrors instead of
+ logging warnings.
+
+ @raises: SchemaValidationError when provided config does not validate
+ against the provided schema.
+ """
+ try:
+ from jsonschema import Draft4Validator, FormatChecker
+ except ImportError:
+ logging.warning(
+ 'Ignoring schema validation. python-jsonschema is not present')
+ return
+ validator = Draft4Validator(schema, format_checker=FormatChecker())
+ errors = ()
+ for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
+ path = '.'.join([str(p) for p in error.path])
+ errors += ((path, error.message),)
+ if errors:
+ if strict:
+ raise SchemaValidationError(errors)
+ else:
+ messages = ['{0}: {1}'.format(k, msg) for k, msg in errors]
+ logging.warning('Invalid config:\n%s', '\n'.join(messages))
+
+
+def validate_cloudconfig_file(config_path, schema):
+ """Validate cloudconfig file adheres to a specific jsonschema.
+
+ @param config_path: Path to the yaml cloud-config file to parse.
+ @param schema: Dict describing a valid jsonschema to validate against.
+
+ @raises SchemaValidationError containing any of schema_errors encountered.
+ @raises RuntimeError when config_path does not exist.
+ """
+ if not os.path.exists(config_path):
+ raise RuntimeError('Configfile {0} does not exist'.format(config_path))
+ content = read_file_or_url('file://{0}'.format(config_path)).contents
+ if not content.startswith(CLOUD_CONFIG_HEADER):
+ errors = (
+ ('header', 'File {0} needs to begin with "{1}"'.format(
+ config_path, CLOUD_CONFIG_HEADER.decode())),)
+ raise SchemaValidationError(errors)
+
+ try:
+ cloudconfig = yaml.safe_load(content)
+ except yaml.parser.ParserError as e:
+ errors = (
+ ('format', 'File {0} is not valid yaml. {1}'.format(
+ config_path, str(e))),)
+ raise SchemaValidationError(errors)
+ validate_cloudconfig_schema(
+ cloudconfig, schema, strict=True)
+
+
+def _get_property_type(property_dict):
+ """Return a string representing a property type from a given jsonschema."""
+ property_type = property_dict.get('type', SCHEMA_UNDEFINED)
+ if isinstance(property_type, list):
+ property_type = '/'.join(property_type)
+ item_type = property_dict.get('items', {}).get('type')
+ if item_type:
+ property_type = '{0} of {1}'.format(property_type, item_type)
+ return property_type
+
+
+def _get_property_doc(schema, prefix=' '):
+ """Return restructured text describing the supported schema properties."""
+ new_prefix = prefix + ' '
+ properties = []
+ for prop_key, prop_config in schema.get('properties', {}).items():
+        # Define prop_name and description for SCHEMA_PROPERTY_TMPL
+ description = prop_config.get('description', '')
+ properties.append(SCHEMA_PROPERTY_TMPL.format(
+ prefix=prefix,
+ prop_name=prop_key,
+ type=_get_property_type(prop_config),
+ description=description.replace('\n', '')))
+ if 'properties' in prop_config:
+ properties.append(
+ _get_property_doc(prop_config, prefix=new_prefix))
+ return '\n\n'.join(properties)
+
+
+def _get_schema_examples(schema, prefix=''):
+ """Return restructured text describing the schema examples if present."""
+ examples = schema.get('examples')
+ if not examples:
+ return ''
+ rst_content = '\n**Examples**::\n\n'
+ for example in examples:
+ example_yaml = yaml.dump(example, default_flow_style=False)
+        # Python 2.6 is missing textwrap.indent
+ lines = example_yaml.split('\n')
+ indented_lines = [' {0}'.format(line) for line in lines]
+ rst_content += '\n'.join(indented_lines)
+ return rst_content
+
+
+def get_schema_doc(schema):
+ """Return reStructured text rendering the provided jsonschema.
+
+ @param schema: Dict of jsonschema to render.
+ @raise KeyError: If schema lacks an expected key.
+ """
+ schema['property_doc'] = _get_property_doc(schema)
+ schema['examples'] = _get_schema_examples(schema)
+ schema['distros'] = ', '.join(schema['distros'])
+ return SCHEMA_DOC_TMPL.format(**schema)
+
+
+def get_schema(section_key=None):
+ """Return a dict of jsonschema defined in any cc_* module.
+
+ @param: section_key: Optionally limit schema to a specific top-level key.
+ """
+ # TODO use util.find_modules in subsequent branch
+ from cloudinit.config.cc_ntp import schema
+ return schema
+
+
+def error(message):
+ print(message, file=sys.stderr)
+ return 1
+
+
+def get_parser():
+ """Return a parser for supported cmdline arguments."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-c', '--config-file',
+ help='Path of the cloud-config yaml file to validate')
+ parser.add_argument('-d', '--doc', action="store_true", default=False,
+ help='Print schema documentation')
+ parser.add_argument('-k', '--key',
+ help='Limit validation or docs to a section key')
+ return parser
+
+
+def main():
+ """Tool to validate schema of a cloud-config file or print schema docs."""
+ parser = get_parser()
+ args = parser.parse_args()
+ exclusive_args = [args.config_file, args.doc]
+ if not any(exclusive_args) or all(exclusive_args):
+ return error('Expected either --config-file argument or --doc')
+
+ schema = get_schema()
+ if args.config_file:
+ try:
+ validate_cloudconfig_file(args.config_file, schema)
+ except (SchemaValidationError, RuntimeError) as e:
+ return error(str(e))
+ print("Valid cloud-config file {0}".format(args.config_file))
+ if args.doc:
+ print(get_schema_doc(schema))
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index f56c0cf7..1fd48a7b 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -32,7 +32,7 @@ from cloudinit.distros.parsers import hosts
OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
- 'redhat': ['fedora', 'rhel'],
+ 'redhat': ['centos', 'fedora', 'rhel'],
'gentoo': ['gentoo'],
'freebsd': ['freebsd'],
'suse': ['sles'],
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 75d46201..b4c0ba72 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -119,7 +119,7 @@ class Distro(distros.Distro):
if not conf:
conf = HostnameConf('')
conf.set_hostname(your_hostname)
- util.write_file(out_fn, conf, 0o644)
+ util.write_file(out_fn, str(conf), omode="w", mode=0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/centos.py b/cloudinit/distros/centos.py
new file mode 100644
index 00000000..4b803d2e
--- /dev/null
+++ b/cloudinit/distros/centos.py
@@ -0,0 +1,12 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+from cloudinit import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(rhel.Distro):
+ pass
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index d06d46a6..abfb81f4 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -37,11 +37,11 @@ ENI_HEADER = """# This file is generated from information provided by
"""
NETWORK_CONF_FN = "/etc/network/interfaces.d/50-cloud-init.cfg"
+LOCALE_CONF_FN = "/etc/default/locale"
class Distro(distros.Distro):
hostname_conf_fn = "/etc/hostname"
- locale_conf_fn = "/etc/default/locale"
network_conf_fn = {
"eni": "/etc/network/interfaces.d/50-cloud-init.cfg",
"netplan": "/etc/netplan/50-cloud-init.yaml"
@@ -64,16 +64,8 @@ class Distro(distros.Distro):
def apply_locale(self, locale, out_fn=None):
if not out_fn:
- out_fn = self.locale_conf_fn
- util.subp(['locale-gen', locale], capture=False)
- util.subp(['update-locale', locale], capture=False)
- # "" provides trailing newline during join
- lines = [
- util.make_header(),
- 'LANG="%s"' % (locale),
- "",
- ]
- util.write_file(out_fn, "\n".join(lines))
+ out_fn = LOCALE_CONF_FN
+ apply_locale(locale, out_fn)
def install_packages(self, pkglist):
self.update_package_sources()
@@ -225,4 +217,38 @@ def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
LOG.warning(msg)
+
+def apply_locale(locale, sys_path=LOCALE_CONF_FN, keyname='LANG'):
+ """Apply the locale.
+
+ Run locale-gen for the provided locale and set the default
+ system variable `keyname` appropriately in the provided `sys_path`.
+
+ If sys_path indicates that `keyname` is already set to `locale`
+ then no changes will be made and locale-gen not called.
+ This allows images built with a locale already generated to not re-run
+ locale-gen which can be very heavy.
+ """
+ if not locale:
+ raise ValueError('Failed to provide locale value.')
+
+ if not sys_path:
+ raise ValueError('Invalid path: %s' % sys_path)
+
+ if os.path.exists(sys_path):
+ locale_content = util.load_file(sys_path)
+ # if LANG isn't present, regen
+ sys_defaults = util.load_shell_content(locale_content)
+ sys_val = sys_defaults.get(keyname, "")
+ if sys_val.lower() == locale.lower():
+ LOG.debug(
+ "System has '%s=%s' requested '%s', skipping regeneration.",
+ keyname, sys_val, locale)
+ return
+
+ util.subp(['locale-gen', locale], capture=False)
+ util.subp(
+ ['update-locale', '--locale-file=' + sys_path,
+ '%s=%s' % (keyname, locale)], capture=False)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/parsers/networkmanager_conf.py b/cloudinit/distros/parsers/networkmanager_conf.py
new file mode 100644
index 00000000..ac51f122
--- /dev/null
+++ b/cloudinit/distros/parsers/networkmanager_conf.py
@@ -0,0 +1,23 @@
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# Author: Ryan McCabe <rmccabe@redhat.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import configobj
+
+# This module is used to set additional NetworkManager configuration
+# in /etc/NetworkManager/conf.d
+#
+
+
+class NetworkManagerConf(configobj.ConfigObj):
+ def __init__(self, contents):
+ configobj.ConfigObj.__init__(self, contents,
+ interpolation=False,
+ write_empty_values=False)
+
+ def set_section_keypair(self, section_name, key, value):
+ if section_name not in self.sections:
+ self.main[section_name] = {}
+ self.main[section_name] = {key: value}
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 8c6cd057..46cb9c85 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -10,6 +10,7 @@ import logging
import os
import re
+from cloudinit.net.network_state import mask_to_net_prefix
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -17,8 +18,24 @@ SYS_CLASS_NET = "/sys/class/net/"
DEFAULT_PRIMARY_INTERFACE = 'eth0'
+def _natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
+    """Sorting for Humans: natural sort order. Can be used as the key for
+    sort functions.
+ This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as
+ ['ens0', 'ens3', 'ens8', 'ens10', 'ens12', 'eth0'] instead of the simple
+ python way which will produce ['ens0', 'ens10', 'ens12', 'ens3', 'ens8',
+ 'eth0']."""
+ return [int(text) if text.isdigit() else text.lower()
+ for text in re.split(_nsre, s)]
+
+
+def get_sys_class_path():
+ """Simple function to return the global SYS_CLASS_NET."""
+ return SYS_CLASS_NET
+
+
def sys_dev_path(devname, path=""):
- return SYS_CLASS_NET + devname + "/" + path
+ return get_sys_class_path() + devname + "/" + path
def read_sys_net(devname, path, translate=None,
@@ -66,7 +83,7 @@ def read_sys_net_int(iface, field):
return None
try:
return int(val)
- except TypeError:
+ except ValueError:
return None
@@ -86,6 +103,10 @@ def is_bridge(devname):
return os.path.exists(sys_dev_path(devname, "bridge"))
+def is_bond(devname):
+ return os.path.exists(sys_dev_path(devname, "bonding"))
+
+
def is_vlan(devname):
uevent = str(read_sys_net_safe(devname, "uevent"))
return 'DEVTYPE=vlan' in uevent.splitlines()
@@ -113,8 +134,35 @@ def is_present(devname):
return os.path.exists(sys_dev_path(devname))
+def device_driver(devname):
+ """Return the device driver for net device named 'devname'."""
+ driver = None
+ driver_path = sys_dev_path(devname, "device/driver")
+ # driver is a symlink to the driver *dir*
+ if os.path.islink(driver_path):
+ driver = os.path.basename(os.readlink(driver_path))
+
+ return driver
+
+
+def device_devid(devname):
+ """Return the device id string for net device named 'devname'."""
+ dev_id = read_sys_net_safe(devname, "device/device")
+ if dev_id is False:
+ return None
+
+ return dev_id
+
+
def get_devicelist():
- return os.listdir(SYS_CLASS_NET)
+ try:
+ devs = os.listdir(get_sys_class_path())
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ devs = []
+ else:
+ raise
+ return devs
class ParserError(Exception):
@@ -127,12 +175,21 @@ def is_disabled_cfg(cfg):
return cfg.get('config') == "disabled"
-def generate_fallback_config():
+def generate_fallback_config(blacklist_drivers=None, config_driver=None):
"""Determine which attached net dev is most likely to have a connection and
generate network state to run dhcp on that interface"""
+
+ if not config_driver:
+ config_driver = False
+
+ if not blacklist_drivers:
+ blacklist_drivers = []
+
# get list of interfaces that could have connections
invalid_interfaces = set(['lo'])
- potential_interfaces = set(get_devicelist())
+ potential_interfaces = set([device for device in get_devicelist()
+ if device_driver(device) not in
+ blacklist_drivers])
potential_interfaces = potential_interfaces.difference(invalid_interfaces)
# sort into interfaces with carrier, interfaces which could have carrier,
# and ignore interfaces that are definitely disconnected
@@ -144,6 +201,9 @@ def generate_fallback_config():
if is_bridge(interface):
# skip any bridges
continue
+ if is_bond(interface):
+ # skip any bonds
+ continue
carrier = read_sys_net_int(interface, 'carrier')
if carrier:
connected.append(interface)
@@ -169,7 +229,7 @@ def generate_fallback_config():
# if eth0 exists use it above anything else, otherwise get the interface
# that we can read 'first' (using the sorted definition of first).
- names = list(sorted(potential_interfaces))
+ names = list(sorted(potential_interfaces, key=_natural_sort_key))
if DEFAULT_PRIMARY_INTERFACE in names:
names.remove(DEFAULT_PRIMARY_INTERFACE)
names.insert(0, DEFAULT_PRIMARY_INTERFACE)
@@ -183,9 +243,18 @@ def generate_fallback_config():
break
if target_mac and target_name:
nconf = {'config': [], 'version': 1}
- nconf['config'].append(
- {'type': 'physical', 'name': target_name,
- 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]})
+ cfg = {'type': 'physical', 'name': target_name,
+ 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}
+ # inject the device driver name, dev_id into config if enabled and
+ # device has a valid device driver value
+ if config_driver:
+ driver = device_driver(target_name)
+ if driver:
+ cfg['params'] = {
+ 'driver': driver,
+ 'device_id': device_devid(target_name),
+ }
+ nconf['config'].append(cfg)
return nconf
else:
# can't read any interfaces addresses (or there are none); give up
@@ -206,10 +275,16 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
if ent.get('type') != 'physical':
continue
mac = ent.get('mac_address')
- name = ent.get('name')
if not mac:
continue
- renames.append([mac, name])
+ name = ent.get('name')
+ driver = ent.get('params', {}).get('driver')
+ device_id = ent.get('params', {}).get('device_id')
+ if not driver:
+ driver = device_driver(name)
+ if not device_id:
+ device_id = device_devid(name)
+ renames.append([mac, name, driver, device_id])
return _rename_interfaces(renames)
@@ -234,15 +309,27 @@ def _get_current_rename_info(check_downable=True):
"""Collect information necessary for rename_interfaces.
returns a dictionary by mac address like:
- {mac:
- {'name': name
- 'up': boolean: is_up(name),
+ {name:
+ {
'downable': None or boolean indicating that the
- device has only automatically assigned ip addrs.}}
+ device has only automatically assigned ip addrs.
+ 'device_id': Device id value (if it has one)
+ 'driver': Device driver (if it has one)
+ 'mac': mac address (in lower case)
+ 'name': name
+ 'up': boolean: is_up(name)
+ }}
"""
- bymac = {}
- for mac, name in get_interfaces_by_mac().items():
- bymac[mac] = {'name': name, 'up': is_up(name), 'downable': None}
+ cur_info = {}
+ for (name, mac, driver, device_id) in get_interfaces():
+ cur_info[name] = {
+ 'downable': None,
+ 'device_id': device_id,
+ 'driver': driver,
+ 'mac': mac.lower(),
+ 'name': name,
+ 'up': is_up(name),
+ }
if check_downable:
nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
@@ -254,11 +341,11 @@ def _get_current_rename_info(check_downable=True):
for bytes_out in (ipv6, ipv4):
nics_with_addresses.update(nmatch.findall(bytes_out))
- for d in bymac.values():
+ for d in cur_info.values():
d['downable'] = (d['up'] is False or
d['name'] not in nics_with_addresses)
- return bymac
+ return cur_info
def _rename_interfaces(renames, strict_present=True, strict_busy=True,
@@ -271,15 +358,17 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
if current_info is None:
current_info = _get_current_rename_info()
- cur_bymac = {}
- for mac, data in current_info.items():
+ cur_info = {}
+ for name, data in current_info.items():
cur = data.copy()
- cur['mac'] = mac
- cur_bymac[mac] = cur
+ if cur.get('mac'):
+ cur['mac'] = cur['mac'].lower()
+ cur['name'] = name
+ cur_info[name] = cur
def update_byname(bymac):
return dict((data['name'], data)
- for data in bymac.values())
+ for data in cur_info.values())
def rename(cur, new):
util.subp(["ip", "link", "set", cur, "name", new], capture=True)
@@ -293,14 +382,50 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
ops = []
errors = []
ups = []
- cur_byname = update_byname(cur_bymac)
+ cur_byname = update_byname(cur_info)
tmpname_fmt = "cirename%d"
tmpi = -1
- for mac, new_name in renames:
- cur = cur_bymac.get(mac, {})
- cur_name = cur.get('name')
+ def entry_match(data, mac, driver, device_id):
+ """match if set and in data"""
+ if mac and driver and device_id:
+ return (data['mac'] == mac and
+ data['driver'] == driver and
+ data['device_id'] == device_id)
+ elif mac and driver:
+ return (data['mac'] == mac and
+ data['driver'] == driver)
+ elif mac:
+ return (data['mac'] == mac)
+
+ return False
+
+ def find_entry(mac, driver, device_id):
+ match = [data for data in cur_info.values()
+ if entry_match(data, mac, driver, device_id)]
+ if len(match):
+ if len(match) > 1:
+ msg = ('Failed to match a single device. Matched devices "%s"'
+ ' with search values "(mac:%s driver:%s device_id:%s)"'
+ % (match, mac, driver, device_id))
+ raise ValueError(msg)
+ return match[0]
+
+ return None
+
+ for mac, new_name, driver, device_id in renames:
+ if mac:
+ mac = mac.lower()
cur_ops = []
+ cur = find_entry(mac, driver, device_id)
+ if not cur:
+ if strict_present:
+ errors.append(
+ "[nic not present] Cannot rename mac=%s to %s"
+ ", not available." % (mac, new_name))
+ continue
+
+ cur_name = cur.get('name')
if cur_name == new_name:
# nothing to do
continue
@@ -340,13 +465,13 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
cur_ops.append(("rename", mac, new_name, (new_name, tmp_name)))
target['name'] = tmp_name
- cur_byname = update_byname(cur_bymac)
+ cur_byname = update_byname(cur_info)
if target['up']:
ups.append(("up", mac, new_name, (tmp_name,)))
cur_ops.append(("rename", mac, new_name, (cur['name'], new_name)))
cur['name'] = new_name
- cur_byname = update_byname(cur_bymac)
+ cur_byname = update_byname(cur_info)
ops += cur_ops
opmap = {'rename': rename, 'down': down, 'up': up}
@@ -385,14 +510,8 @@ def get_interfaces_by_mac():
"""Build a dictionary of tuples {mac: name}.
Bridges and any devices that have a 'stolen' mac are excluded."""
- try:
- devs = get_devicelist()
- except OSError as e:
- if e.errno == errno.ENOENT:
- devs = []
- else:
- raise
ret = {}
+ devs = get_devicelist()
empty_mac = '00:00:00:00:00:00'
for name in devs:
if not interface_has_own_mac(name):
@@ -415,6 +534,126 @@ def get_interfaces_by_mac():
return ret
+def get_interfaces():
+ """Return list of interface tuples (name, mac, driver, device_id)
+
+ Bridges and any devices that have a 'stolen' mac are excluded."""
+ ret = []
+ devs = get_devicelist()
+ empty_mac = '00:00:00:00:00:00'
+ for name in devs:
+ if not interface_has_own_mac(name):
+ continue
+ if is_bridge(name):
+ continue
+ if is_vlan(name):
+ continue
+ mac = get_interface_mac(name)
+ # some devices may not have a mac (tun0)
+ if not mac:
+ continue
+ if mac == empty_mac and name != 'lo':
+ continue
+ ret.append((name, mac, device_driver(name), device_devid(name)))
+ return ret
+
+
+class EphemeralIPv4Network(object):
+ """Context manager which sets up temporary static network configuration.
+
+ No operations are performed if the provided interface is already connected.
+ If unconnected, bring up the interface with valid ip, prefix and broadcast.
+ If router is provided setup a default route for that interface. Upon
+ context exit, clean up the interface leaving no configuration behind.
+ """
+
+ def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None):
+ """Setup context manager and validate call signature.
+
+ @param interface: Name of the network interface to bring up.
+ @param ip: IP address to assign to the interface.
+ @param prefix_or_mask: Either netmask of the format X.X.X.X or an int
+ prefix.
+ @param broadcast: Broadcast address for the IPv4 network.
+ @param router: Optionally the default gateway IP.
+ """
+ if not all([interface, ip, prefix_or_mask, broadcast]):
+ raise ValueError(
+ 'Cannot init network on {0} with {1}/{2} and bcast {3}'.format(
+ interface, ip, prefix_or_mask, broadcast))
+ try:
+ self.prefix = mask_to_net_prefix(prefix_or_mask)
+ except ValueError as e:
+ raise ValueError(
+ 'Cannot setup network: {0}'.format(e))
+ self.interface = interface
+ self.ip = ip
+ self.broadcast = broadcast
+ self.router = router
+ self.cleanup_cmds = [] # List of commands to run to cleanup state.
+
+ def __enter__(self):
+ """Perform ephemeral network setup if interface is not connected."""
+ self._bringup_device()
+ if self.router:
+ self._bringup_router()
+
+ def __exit__(self, excp_type, excp_value, excp_traceback):
+ for cmd in self.cleanup_cmds:
+ util.subp(cmd, capture=True)
+
+ def _delete_address(self, address, prefix):
+ """Perform the ip command to remove the specified address."""
+ util.subp(
+ ['ip', '-family', 'inet', 'addr', 'del',
+ '%s/%s' % (address, prefix), 'dev', self.interface],
+ capture=True)
+
+ def _bringup_device(self):
+ """Perform the ip comands to fully setup the device."""
+ cidr = '{0}/{1}'.format(self.ip, self.prefix)
+ LOG.debug(
+ 'Attempting setup of ephemeral network on %s with %s brd %s',
+ self.interface, cidr, self.broadcast)
+ try:
+ util.subp(
+ ['ip', '-family', 'inet', 'addr', 'add', cidr, 'broadcast',
+ self.broadcast, 'dev', self.interface],
+ capture=True, update_env={'LANG': 'C'})
+ except util.ProcessExecutionError as e:
+ if "File exists" not in e.stderr:
+ raise
+ LOG.debug(
+ 'Skip ephemeral network setup, %s already has address %s',
+ self.interface, self.ip)
+ else:
+ # Address creation success, bring up device and queue cleanup
+ util.subp(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface,
+ 'up'], capture=True)
+ self.cleanup_cmds.append(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface,
+ 'down'])
+ self.cleanup_cmds.append(
+ ['ip', '-family', 'inet', 'addr', 'del', cidr, 'dev',
+ self.interface])
+
+ def _bringup_router(self):
+ """Perform the ip commands to fully setup the router if needed."""
+ # Check if a default route exists and exit if it does
+ out, _ = util.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True)
+ if 'default' in out:
+ LOG.debug(
+ 'Skip ephemeral route setup. %s already has default route: %s',
+ self.interface, out.strip())
+ return
+ util.subp(
+ ['ip', '-4', 'route', 'add', 'default', 'via', self.router,
+ 'dev', self.interface], capture=True)
+ self.cleanup_cmds.insert(
+ 0, ['ip', '-4', 'route', 'del', 'default', 'dev', self.interface])
+
+
class RendererNotFoundError(RuntimeError):
pass
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 9819d4f5..bb80ec02 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -46,6 +46,10 @@ def _iface_add_subnet(iface, subnet):
'dns_nameservers',
]
for key, value in subnet.items():
+ if key == 'netmask':
+ continue
+ if key == 'address':
+ value = "%s/%s" % (subnet['address'], subnet['prefix'])
if value and key in valid_map:
if type(value) == list:
value = " ".join(value)
@@ -68,6 +72,8 @@ def _iface_add_attrs(iface, index):
content = []
ignore_map = [
'control',
+ 'device_id',
+ 'driver',
'index',
'inet',
'mode',
@@ -75,6 +81,15 @@ def _iface_add_attrs(iface, index):
'subnets',
'type',
]
+
+ # The following parameters require repetitive entries of the key for
+ # each of the values
+ multiline_keys = [
+ 'bridge_pathcost',
+ 'bridge_portprio',
+ 'bridge_waitport',
+ ]
+
renames = {'mac_address': 'hwaddress'}
if iface['type'] not in ['bond', 'bridge', 'vlan']:
ignore_map.append('mac_address')
@@ -82,6 +97,10 @@ def _iface_add_attrs(iface, index):
for key, value in iface.items():
if not value or key in ignore_map:
continue
+ if key in multiline_keys:
+ for v in value:
+ content.append(" {0} {1}".format(renames.get(key, key), v))
+ continue
if type(value) == list:
value = " ".join(value)
content.append(" {0} {1}".format(renames.get(key, key), value))
@@ -304,8 +323,6 @@ class Renderer(renderer.Renderer):
config = {}
self.eni_path = config.get('eni_path', 'etc/network/interfaces')
self.eni_header = config.get('eni_header', None)
- self.links_path_prefix = config.get(
- 'links_path_prefix', 'etc/systemd/network/50-cloud-init-')
self.netrules_path = config.get(
'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
@@ -338,7 +355,7 @@ class Renderer(renderer.Renderer):
default_gw = " default gw %s" % route['gateway']
content.append(up + default_gw + or_true)
content.append(down + default_gw + or_true)
- elif route['network'] == '::' and route['netmask'] == 0:
+ elif route['network'] == '::' and route['prefix'] == 0:
# ipv6!
default_gw = " -A inet6 default gw %s" % route['gateway']
content.append(up + default_gw + or_true)
@@ -451,28 +468,6 @@ class Renderer(renderer.Renderer):
util.write_file(netrules,
self._render_persistent_net(network_state))
- if self.links_path_prefix:
- self._render_systemd_links(target, network_state,
- links_prefix=self.links_path_prefix)
-
- def _render_systemd_links(self, target, network_state, links_prefix):
- fp_prefix = util.target_path(target, links_prefix)
- for f in glob.glob(fp_prefix + "*"):
- os.unlink(f)
- for iface in network_state.iter_interfaces():
- if (iface['type'] == 'physical' and 'name' in iface and
- iface.get('mac_address')):
- fname = fp_prefix + iface['name'] + ".link"
- content = "\n".join([
- "[Match]",
- "MACAddress=" + iface['mac_address'],
- "",
- "[Link]",
- "Name=" + iface['name'],
- ""
- ])
- util.write_file(fname, content)
-
def network_state_to_eni(network_state, header=None, render_hwaddress=False):
# render the provided network state, return a string of equivalent eni
@@ -480,7 +475,6 @@ def network_state_to_eni(network_state, header=None, render_hwaddress=False):
renderer = Renderer(config={
'eni_path': eni_path,
'eni_header': header,
- 'links_path_prefix': None,
'netrules_path': None,
})
if not header:
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index a715f3b0..9f35b72b 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -4,7 +4,7 @@ import copy
import os
from . import renderer
-from .network_state import mask2cidr, subnet_is_ipv6
+from .network_state import subnet_is_ipv6
from cloudinit import log as logging
from cloudinit import util
@@ -118,10 +118,9 @@ def _extract_addresses(config, entry):
sn_type += '4'
entry.update({sn_type: True})
elif sn_type in ['static']:
- addr = '%s' % subnet.get('address')
- netmask = subnet.get('netmask')
- if netmask and '/' not in addr:
- addr += '/%s' % mask2cidr(netmask)
+ addr = "%s" % subnet.get('address')
+ if 'prefix' in subnet:
+ addr += "/%d" % subnet.get('prefix')
if 'gateway' in subnet and subnet.get('gateway'):
gateway = subnet.get('gateway')
if ":" in gateway:
@@ -138,9 +137,8 @@ def _extract_addresses(config, entry):
mtukey += '6'
entry.update({mtukey: subnet.get('mtu')})
for route in subnet.get('routes', []):
- network = route.get('network')
- netmask = route.get('netmask')
- to_net = '%s/%s' % (network, mask2cidr(netmask))
+ to_net = "%s/%s" % (route.get('network'),
+ route.get('prefix'))
route = {
'via': route.get('gateway'),
'to': to_net,
@@ -211,7 +209,8 @@ class Renderer(renderer.Renderer):
# check network state for version
# if v2, then extract network_state.config
# else render_v2_from_state
- fpnplan = os.path.join(target, self.netplan_path)
+ fpnplan = os.path.join(util.target_path(target), self.netplan_path)
+
util.ensure_dir(os.path.dirname(fpnplan))
header = self.netplan_header if self.netplan_header else ""
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 9e9c05a0..87a7222d 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -289,19 +289,15 @@ class NetworkStateInterpreter(object):
iface.update({param: val})
# convert subnet ipv6 netmask to cidr as needed
- subnets = command.get('subnets')
- if subnets:
+ subnets = _normalize_subnets(command.get('subnets'))
+
+ # automatically set 'use_ipv6' if any addresses are ipv6
+ if not self.use_ipv6:
for subnet in subnets:
- if subnet['type'] == 'static':
- if ':' in subnet['address']:
- self.use_ipv6 = True
- if 'netmask' in subnet and ':' in subnet['address']:
- subnet['netmask'] = mask2cidr(subnet['netmask'])
- for route in subnet.get('routes', []):
- if 'netmask' in route:
- route['netmask'] = mask2cidr(route['netmask'])
- elif subnet['type'].endswith('6'):
+ if (subnet.get('type').endswith('6') or
+ is_ipv6_addr(subnet.get('address'))):
self.use_ipv6 = True
+ break
iface.update({
'name': command.get('name'),
@@ -456,16 +452,7 @@ class NetworkStateInterpreter(object):
@ensure_command_keys(['destination'])
def handle_route(self, command):
- routes = self._network_state.get('routes', [])
- network, cidr = command['destination'].split("/")
- netmask = cidr2mask(int(cidr))
- route = {
- 'network': network,
- 'netmask': netmask,
- 'gateway': command.get('gateway'),
- 'metric': command.get('metric'),
- }
- routes.append(route)
+ self._network_state['routes'].append(_normalize_route(command))
# V2 handlers
def handle_bonds(self, command):
@@ -666,18 +653,9 @@ class NetworkStateInterpreter(object):
routes = []
for route in cfg.get('routes', []):
- route_addr = route.get('to')
- if "/" in route_addr:
- route_addr, route_cidr = route_addr.split("/")
- route_netmask = cidr2mask(route_cidr)
- subnet_route = {
- 'address': route_addr,
- 'netmask': route_netmask,
- 'gateway': route.get('via')
- }
- routes.append(subnet_route)
- if len(routes) > 0:
- subnet.update({'routes': routes})
+ routes.append(_normalize_route(
+ {'address': route.get('to'), 'gateway': route.get('via')}))
+ subnet['routes'] = routes
if ":" in address:
if 'gateway6' in cfg and gateway6 is None:
@@ -692,53 +670,219 @@ class NetworkStateInterpreter(object):
return subnets
+def _normalize_subnet(subnet):
+ # Prune all keys with None values.
+ subnet = copy.deepcopy(subnet)
+ normal_subnet = dict((k, v) for k, v in subnet.items() if v)
+
+ if subnet.get('type') in ('static', 'static6'):
+ normal_subnet.update(
+ _normalize_net_keys(normal_subnet, address_keys=('address',)))
+ normal_subnet['routes'] = [_normalize_route(r)
+ for r in subnet.get('routes', [])]
+ return normal_subnet
+
+
+def _normalize_net_keys(network, address_keys=()):
+ """Normalize dictionary network keys returning prefix and address keys.
+
+ @param network: A dict of network-related definition containing prefix,
+ netmask and address_keys.
+ @param address_keys: A tuple of keys to search for representing the address
+ or cidr. The first address_key discovered will be used for
+ normalization.
+
+ @returns: A dict containing normalized prefix and matching addr_key.
+ """
+ net = dict((k, v) for k, v in network.items() if v)
+ addr_key = None
+ for key in address_keys:
+ if net.get(key):
+ addr_key = key
+ break
+ if not addr_key:
+ message = (
+ 'No config network address keys [%s] found in %s' %
+ (','.join(address_keys), network))
+ LOG.error(message)
+ raise ValueError(message)
+
+ addr = net.get(addr_key)
+ ipv6 = is_ipv6_addr(addr)
+ netmask = net.get('netmask')
+ if "/" in addr:
+ addr_part, _, maybe_prefix = addr.partition("/")
+ net[addr_key] = addr_part
+ try:
+ prefix = int(maybe_prefix)
+ except ValueError:
+ # this supports input of <address>/255.255.255.0
+ prefix = mask_to_net_prefix(maybe_prefix)
+ elif netmask:
+ prefix = mask_to_net_prefix(netmask)
+ elif 'prefix' in net:
+ prefix = int(net['prefix'])
+ else:
+ prefix = 64 if ipv6 else 24
+
+ if 'prefix' in net and str(net['prefix']) != str(prefix):
+ LOG.warning("Overwriting existing 'prefix' with '%s' in "
+ "network info: %s", prefix, net)
+ net['prefix'] = prefix
+
+ if ipv6:
+ # TODO: we could/maybe should add this back with the very uncommon
+ # 'netmask' for ipv6. We need a 'net_prefix_to_ipv6_mask' for that.
+ if 'netmask' in net:
+ del net['netmask']
+ else:
+ net['netmask'] = net_prefix_to_ipv4_mask(net['prefix'])
+
+ return net
+
+
+def _normalize_route(route):
+ """normalize a route.
+ return a dictionary with only:
+ 'type': 'route' (only present if it was present in input)
+ 'network': the network portion of the route as a string.
+ 'prefix': the network prefix for address as an integer.
+ 'metric': integer metric (only if present in input).
+ 'netmask': netmask (string) equivalent to prefix iff network is ipv4.
+ """
+ # Prune None-value keys. Specifically allow 0 (a valid metric).
+ normal_route = dict((k, v) for k, v in route.items()
+ if v not in ("", None))
+ if 'destination' in normal_route:
+ normal_route['network'] = normal_route['destination']
+ del normal_route['destination']
+
+ normal_route.update(
+ _normalize_net_keys(
+ normal_route, address_keys=('network', 'destination')))
+
+ metric = normal_route.get('metric')
+ if metric:
+ try:
+ normal_route['metric'] = int(metric)
+ except ValueError:
+ raise TypeError(
+ 'Route config metric {} is not an integer'.format(metric))
+ return normal_route
+
+
+def _normalize_subnets(subnets):
+ if not subnets:
+ subnets = []
+ return [_normalize_subnet(s) for s in subnets]
+
+
+def is_ipv6_addr(address):
+ if not address:
+ return False
+ return ":" in str(address)
+
+
def subnet_is_ipv6(subnet):
"""Common helper for checking network_state subnets for ipv6."""
# 'static6' or 'dhcp6'
if subnet['type'].endswith('6'):
# This is a request for DHCPv6.
return True
- elif subnet['type'] == 'static' and ":" in subnet['address']:
+ elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')):
return True
return False
-def cidr2mask(cidr):
+def net_prefix_to_ipv4_mask(prefix):
+ """Convert a network prefix to an ipv4 netmask.
+
+ This is the inverse of ipv4_mask_to_net_prefix.
+ 24 -> "255.255.255.0"
+ Also supports input as a string."""
+
mask = [0, 0, 0, 0]
- for i in list(range(0, cidr)):
+ for i in list(range(0, int(prefix))):
idx = int(i / 8)
mask[idx] = mask[idx] + (1 << (7 - i % 8))
return ".".join([str(x) for x in mask])
-def ipv4mask2cidr(mask):
- if '.' not in mask:
+def ipv4_mask_to_net_prefix(mask):
+ """Convert an ipv4 netmask into a network prefix length.
+
+ If the input is already an integer or a string representation of
+ an integer, then int(mask) will be returned.
+ "255.255.255.0" => 24
+ str(24) => 24
+ "24" => 24
+ """
+ if isinstance(mask, int):
return mask
- return sum([bin(int(x)).count('1') for x in mask.split('.')])
+ if isinstance(mask, six.string_types):
+ try:
+ return int(mask)
+ except ValueError:
+ pass
+ else:
+ raise TypeError("mask '%s' is not a string or int" % mask)
+ if '.' not in mask:
+ raise ValueError("netmask '%s' does not contain a '.'" % mask)
-def ipv6mask2cidr(mask):
- if ':' not in mask:
+ toks = mask.split(".")
+ if len(toks) != 4:
+ raise ValueError("netmask '%s' had only %d parts" % (mask, len(toks)))
+
+ return sum([bin(int(x)).count('1') for x in toks])
+
+
+def ipv6_mask_to_net_prefix(mask):
+ """Convert an ipv6 netmask (very uncommon) or prefix (64) to prefix.
+
+ If 'mask' is an integer or string representation of one then
+ int(mask) will be returned.
+ """
+
+ if isinstance(mask, int):
return mask
+ if isinstance(mask, six.string_types):
+ try:
+ return int(mask)
+ except ValueError:
+ pass
+ else:
+ raise TypeError("mask '%s' is not a string or int" % mask)
+
+ if ':' not in mask:
+ raise ValueError("mask '%s' does not have a ':'" % mask)
bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00,
0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc,
0xfffe, 0xffff]
- cidr = 0
+ prefix = 0
for word in mask.split(':'):
if not word or int(word, 16) == 0:
break
- cidr += bitCount.index(int(word, 16))
+ prefix += bitCount.index(int(word, 16))
+
+ return prefix
- return cidr
+def mask_to_net_prefix(mask):
+ """Return the network prefix for the netmask provided.
-def mask2cidr(mask):
- if ':' in str(mask):
- return ipv6mask2cidr(mask)
- elif '.' in str(mask):
- return ipv4mask2cidr(mask)
+ Supports ipv4 or ipv6 netmasks."""
+ try:
+ # if 'mask' is a prefix that is an integer.
+ # then just return it.
+ return int(mask)
+ except ValueError:
+ pass
+ if is_ipv6_addr(mask):
+ return ipv6_mask_to_net_prefix(mask)
else:
- return mask
+ return ipv4_mask_to_net_prefix(mask)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index c68658dc..57652e27 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -20,6 +20,10 @@ def filter_by_name(match_name):
return lambda iface: match_name == iface['name']
+def filter_by_attr(match_name):
+ return lambda iface: (match_name in iface and iface[match_name])
+
+
filter_by_physical = filter_by_type('physical')
@@ -34,8 +38,10 @@ class Renderer(object):
for iface in network_state.iter_interfaces(filter_by_physical):
# for physical interfaces write out a persist net udev rule
if 'name' in iface and iface.get('mac_address'):
+ driver = iface.get('driver', None)
content.write(generate_udev_rule(iface['name'],
- iface['mac_address']))
+ iface['mac_address'],
+ driver=driver))
return content.getvalue()
@abc.abstractmethod
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 58c5713f..a550f97c 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -5,11 +5,13 @@ import re
import six
+from cloudinit.distros.parsers import networkmanager_conf
from cloudinit.distros.parsers import resolv_conf
from cloudinit import util
from . import renderer
-from .network_state import subnet_is_ipv6
+from .network_state import (
+ is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6)
def _make_header(sep='#'):
@@ -26,11 +28,8 @@ def _make_header(sep='#'):
def _is_default_route(route):
- if route['network'] == '::' and route['netmask'] == 0:
- return True
- if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
- return True
- return False
+ default_nets = ('::', '0.0.0.0')
+ return route['prefix'] == 0 and route['network'] in default_nets
def _quote_value(value):
@@ -62,6 +61,9 @@ class ConfigMap(object):
def __getitem__(self, key):
return self._conf[key]
+ def __contains__(self, key):
+ return key in self._conf
+
def drop(self, key):
self._conf.pop(key, None)
@@ -153,9 +155,10 @@ class Route(ConfigMap):
elif proto == "ipv6" and self.is_ipv6_route(address_value):
netmask_value = str(self._conf['NETMASK' + index])
gateway_value = str(self._conf['GATEWAY' + index])
- buf.write("%s/%s via %s\n" % (address_value,
- netmask_value,
- gateway_value))
+ buf.write("%s/%s via %s dev %s\n" % (address_value,
+ netmask_value,
+ gateway_value,
+ self._route_name))
return buf.getvalue()
@@ -252,6 +255,9 @@ class Renderer(renderer.Renderer):
self.netrules_path = config.get(
'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
self.dns_path = config.get('dns_path', 'etc/resolv.conf')
+ nm_conf_path = 'etc/NetworkManager/conf.d/99-cloud-init.conf'
+ self.networkmanager_conf_path = config.get('networkmanager_conf_path',
+ nm_conf_path)
@classmethod
def _render_iface_shared(cls, iface, iface_cfg):
@@ -261,6 +267,9 @@ class Renderer(renderer.Renderer):
for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]:
old_value = iface.get(old_key)
if old_value is not None:
+ # only set HWADDR on physical interfaces
+ if old_key == 'mac_address' and iface['type'] != 'physical':
+ continue
iface_cfg[new_key] = old_value
@classmethod
@@ -270,6 +279,7 @@ class Renderer(renderer.Renderer):
# modifying base values according to subnets
for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
+ mtu_key = 'MTU'
subnet_type = subnet.get('type')
if subnet_type == 'dhcp6':
iface_cfg['IPV6INIT'] = True
@@ -289,11 +299,20 @@ class Renderer(renderer.Renderer):
# if iface_cfg['BOOTPROTO'] == 'none':
# iface_cfg['BOOTPROTO'] = 'static'
if subnet_is_ipv6(subnet):
+ mtu_key = 'IPV6_MTU'
iface_cfg['IPV6INIT'] = True
+ if 'mtu' in subnet:
+ iface_cfg[mtu_key] = subnet['mtu']
+ elif subnet_type == 'manual':
+ # If the subnet has an MTU setting, then ONBOOT=True
+ # to apply the setting
+ iface_cfg['ONBOOT'] = mtu_key in iface_cfg
else:
raise ValueError("Unknown subnet type '%s' found"
" for interface '%s'" % (subnet_type,
iface_cfg.name))
+ if subnet.get('control') == 'manual':
+ iface_cfg['ONBOOT'] = False
# set IPv4 and IPv6 static addresses
ipv4_index = -1
@@ -307,38 +326,32 @@ class Renderer(renderer.Renderer):
elif subnet_type == 'static':
if subnet_is_ipv6(subnet):
ipv6_index = ipv6_index + 1
- if 'netmask' in subnet and str(subnet['netmask']) != "":
- ipv6_cidr = (subnet['address'] +
- '/' +
- str(subnet['netmask']))
- else:
- ipv6_cidr = subnet['address']
+ ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix'])
if ipv6_index == 0:
iface_cfg['IPV6ADDR'] = ipv6_cidr
elif ipv6_index == 1:
iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
else:
- iface_cfg['IPV6ADDR_SECONDARIES'] = (
- iface_cfg['IPV6ADDR_SECONDARIES'] +
- " " + ipv6_cidr)
+ iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr
else:
ipv4_index = ipv4_index + 1
- if ipv4_index == 0:
- iface_cfg['IPADDR'] = subnet['address']
- if 'netmask' in subnet:
- iface_cfg['NETMASK'] = subnet['netmask']
+ suff = "" if ipv4_index == 0 else str(ipv4_index)
+ iface_cfg['IPADDR' + suff] = subnet['address']
+ iface_cfg['NETMASK' + suff] = \
+ net_prefix_to_ipv4_mask(subnet['prefix'])
+
+ if 'gateway' in subnet:
+ iface_cfg['DEFROUTE'] = True
+ if is_ipv6_addr(subnet['gateway']):
+ iface_cfg['IPV6_DEFAULTGW'] = subnet['gateway']
else:
- iface_cfg['IPADDR' + str(ipv4_index)] = \
- subnet['address']
- if 'netmask' in subnet:
- iface_cfg['NETMASK' + str(ipv4_index)] = \
- subnet['netmask']
+ iface_cfg['GATEWAY'] = subnet['gateway']
@classmethod
def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets):
for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
for route in subnet.get('routes', []):
- is_ipv6 = subnet.get('ipv6')
+ is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway'])
if _is_default_route(route):
if (
@@ -360,7 +373,7 @@ class Renderer(renderer.Renderer):
# also provided the default route?
iface_cfg['DEFROUTE'] = True
if 'gateway' in route:
- if is_ipv6:
+ if is_ipv6 or is_ipv6_addr(route['gateway']):
iface_cfg['IPV6_DEFAULTGW'] = route['gateway']
route_cfg.has_set_default_ipv6 = True
else:
@@ -372,11 +385,13 @@ class Renderer(renderer.Renderer):
nm_key = 'NETMASK%s' % route_cfg.last_idx
addr_key = 'ADDRESS%s' % route_cfg.last_idx
route_cfg.last_idx += 1
- for (old_key, new_key) in [('gateway', gw_key),
- ('netmask', nm_key),
- ('network', addr_key)]:
- if old_key in route:
- route_cfg[new_key] = route[old_key]
+ # add default routes only to ifcfg files, not
+ # to route-* or route6-*
+ for (old_key, new_key) in [('gateway', gw_key),
+ ('netmask', nm_key),
+ ('network', addr_key)]:
+ if old_key in route:
+ route_cfg[new_key] = route[old_key]
@classmethod
def _render_bonding_opts(cls, iface_cfg, iface):
@@ -409,24 +424,45 @@ class Renderer(renderer.Renderer):
@classmethod
def _render_bond_interfaces(cls, network_state, iface_contents):
bond_filter = renderer.filter_by_type('bond')
+ slave_filter = renderer.filter_by_attr('bond-master')
for iface in network_state.iter_interfaces(bond_filter):
iface_name = iface['name']
iface_cfg = iface_contents[iface_name]
cls._render_bonding_opts(iface_cfg, iface)
- iface_master_name = iface['bond-master']
- iface_cfg['MASTER'] = iface_master_name
- iface_cfg['SLAVE'] = True
+
# Ensure that the master interface (and any of its children)
# are actually marked as being bond types...
- master_cfg = iface_contents[iface_master_name]
- master_cfgs = [master_cfg]
- master_cfgs.extend(master_cfg.children)
+ master_cfgs = [iface_cfg]
+ master_cfgs.extend(iface_cfg.children)
for master_cfg in master_cfgs:
master_cfg['BONDING_MASTER'] = True
master_cfg.kind = 'bond'
- @staticmethod
- def _render_vlan_interfaces(network_state, iface_contents):
+ if iface.get('mac_address'):
+ iface_cfg['MACADDR'] = iface.get('mac_address')
+
+ iface_subnets = iface.get("subnets", [])
+ route_cfg = iface_cfg.routes
+ cls._render_subnets(iface_cfg, iface_subnets)
+ cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+
+ # iter_interfaces on network-state is not sorted to produce
+ # consistent numbers we need to sort.
+ bond_slaves = sorted(
+ [slave_iface['name'] for slave_iface in
+ network_state.iter_interfaces(slave_filter)
+ if slave_iface['bond-master'] == iface_name])
+
+ for index, bond_slave in enumerate(bond_slaves):
+ slavestr = 'BONDING_SLAVE%s' % index
+ iface_cfg[slavestr] = bond_slave
+
+ slave_cfg = iface_contents[bond_slave]
+ slave_cfg['MASTER'] = iface_name
+ slave_cfg['SLAVE'] = True
+
+ @classmethod
+ def _render_vlan_interfaces(cls, network_state, iface_contents):
vlan_filter = renderer.filter_by_type('vlan')
for iface in network_state.iter_interfaces(vlan_filter):
iface_name = iface['name']
@@ -434,6 +470,11 @@ class Renderer(renderer.Renderer):
iface_cfg['VLAN'] = True
iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')]
+ iface_subnets = iface.get("subnets", [])
+ route_cfg = iface_cfg.routes
+ cls._render_subnets(iface_cfg, iface_subnets)
+ cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+
@staticmethod
def _render_dns(network_state, existing_dns_path=None):
content = resolv_conf.ResolvConf("")
@@ -445,6 +486,21 @@ class Renderer(renderer.Renderer):
content.add_search_domain(searchdomain)
return "\n".join([_make_header(';'), str(content)])
+ @staticmethod
+ def _render_networkmanager_conf(network_state):
+ content = networkmanager_conf.NetworkManagerConf("")
+
+ # If DNS server information is provided, configure
+ # NetworkManager to not manage dns, so that /etc/resolv.conf
+ # does not get clobbered.
+ if network_state.dns_nameservers:
+ content.set_section_keypair('main', 'dns', 'none')
+
+ if len(content) == 0:
+ return None
+ out = "".join([_make_header(), "\n", "\n".join(content.write()), "\n"])
+ return out
+
@classmethod
def _render_bridge_interfaces(cls, network_state, iface_contents):
bridge_filter = renderer.filter_by_type('bridge')
@@ -455,6 +511,10 @@ class Renderer(renderer.Renderer):
for old_key, new_key in cls.bridge_opts_keys:
if old_key in iface:
iface_cfg[new_key] = iface[old_key]
+
+ if iface.get('mac_address'):
+ iface_cfg['MACADDR'] = iface.get('mac_address')
+
# Is this the right key to get all the connected interfaces?
for bridged_iface_name in iface.get('bridge_ports', []):
# Ensure all bridged interfaces are correctly tagged
@@ -465,6 +525,11 @@ class Renderer(renderer.Renderer):
for bridge_cfg in bridged_cfgs:
bridge_cfg['BRIDGE'] = iface_name
+ iface_subnets = iface.get("subnets", [])
+ route_cfg = iface_cfg.routes
+ cls._render_subnets(iface_cfg, iface_subnets)
+ cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+
@classmethod
def _render_sysconfig(cls, base_sysconf_dir, network_state):
'''Given state, return /etc/sysconfig files + contents'''
@@ -505,6 +570,12 @@ class Renderer(renderer.Renderer):
resolv_content = self._render_dns(network_state,
existing_dns_path=dns_path)
util.write_file(dns_path, resolv_content, file_mode)
+ if self.networkmanager_conf_path:
+ nm_conf_path = util.target_path(target,
+ self.networkmanager_conf_path)
+ nm_conf_content = self._render_networkmanager_conf(network_state)
+ if nm_conf_content:
+ util.write_file(nm_conf_path, nm_conf_content, file_mode)
if self.netrules_path:
netrules_content = self._render_persistent_net(network_state)
netrules_path = util.target_path(target, self.netrules_path)
diff --git a/cloudinit/net/tests/__init__.py b/cloudinit/net/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/net/tests/__init__.py
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
new file mode 100644
index 00000000..272a6ebd
--- /dev/null
+++ b/cloudinit/net/tests/test_init.py
@@ -0,0 +1,522 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import errno
+import mock
+import os
+
+import cloudinit.net as net
+from cloudinit.util import ensure_file, write_file, ProcessExecutionError
+from tests.unittests.helpers import CiTestCase
+
+
+class TestSysDevPath(CiTestCase):
+
+ def test_sys_dev_path(self):
+ """sys_dev_path returns a path under SYS_CLASS_NET for a device."""
+ dev = 'something'
+ path = 'attribute'
+ expected = net.SYS_CLASS_NET + dev + '/' + path
+ self.assertEqual(expected, net.sys_dev_path(dev, path))
+
+ def test_sys_dev_path_without_path(self):
+ """When path param isn't provided it defaults to empty string."""
+ dev = 'something'
+ expected = net.SYS_CLASS_NET + dev + '/'
+ self.assertEqual(expected, net.sys_dev_path(dev))
+
+
+class TestReadSysNet(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(TestReadSysNet, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_read_sys_net_strips_contents_of_sys_path(self):
+ """read_sys_net strips whitespace from the contents of a sys file."""
+ content = 'some stuff with trailing whitespace\t\r\n'
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
+ self.assertEqual(content.strip(), net.read_sys_net('dev', 'attr'))
+
+ def test_read_sys_net_reraises_oserror(self):
+ """read_sys_net raises OSError/IOError when file doesn't exist."""
+ # Non-specific Exception because versions of python OSError vs IOError.
+ with self.assertRaises(Exception) as context_manager: # noqa: H202
+ net.read_sys_net('dev', 'attr')
+ error = context_manager.exception
+ self.assertIn('No such file or directory', str(error))
+
+ def test_read_sys_net_handles_error_with_on_enoent(self):
+ """read_sys_net handles OSError/IOError with on_enoent if provided."""
+ handled_errors = []
+
+ def on_enoent(e):
+ handled_errors.append(e)
+
+ net.read_sys_net('dev', 'attr', on_enoent=on_enoent)
+ error = handled_errors[0]
+ self.assertIsInstance(error, Exception)
+ self.assertIn('No such file or directory', str(error))
+
+ def test_read_sys_net_translates_content(self):
+ """read_sys_net translates content when translate dict is provided."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
+ translate = {"you're welcome": 'de nada'}
+ self.assertEqual(
+ 'de nada',
+ net.read_sys_net('dev', 'attr', translate=translate))
+
+ def test_read_sys_net_errors_on_translation_failures(self):
+ """read_sys_net raises a KeyError and logs details on failure."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
+ with self.assertRaises(KeyError) as context_manager:
+ net.read_sys_net('dev', 'attr', translate={})
+ error = context_manager.exception
+ self.assertEqual('"you\'re welcome"', str(error))
+ self.assertIn(
+ "Found unexpected (not translatable) value 'you're welcome' in "
+ "'{0}dev/attr".format(self.sysdir),
+ self.logs.getvalue())
+
+ def test_read_sys_net_handles_handles_with_onkeyerror(self):
+ """read_sys_net handles translation errors calling on_keyerror."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
+ handled_errors = []
+
+ def on_keyerror(e):
+ handled_errors.append(e)
+
+ net.read_sys_net('dev', 'attr', translate={}, on_keyerror=on_keyerror)
+ error = handled_errors[0]
+ self.assertIsInstance(error, KeyError)
+ self.assertEqual('"you\'re welcome"', str(error))
+
+ def test_read_sys_net_safe_false_on_translate_failure(self):
+ """read_sys_net_safe returns False on translation failures."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
+ self.assertFalse(net.read_sys_net_safe('dev', 'attr', translate={}))
+
+ def test_read_sys_net_safe_returns_false_on_noent_failure(self):
+ """read_sys_net_safe returns False on file not found failures."""
+ self.assertFalse(net.read_sys_net_safe('dev', 'attr'))
+
+ def test_read_sys_net_int_returns_none_on_error(self):
+ """read_sys_net_safe returns None on failures."""
+ self.assertFalse(net.read_sys_net_int('dev', 'attr'))
+
+ def test_read_sys_net_int_returns_none_on_valueerror(self):
+ """read_sys_net_safe returns None when content is not an int."""
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), 'NOTINT\n')
+ self.assertFalse(net.read_sys_net_int('dev', 'attr'))
+
+ def test_read_sys_net_int_returns_integer_from_content(self):
+ """read_sys_net_safe returns None on failures."""
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), '1\n')
+ self.assertEqual(1, net.read_sys_net_int('dev', 'attr'))
+
+ def test_is_up_true(self):
+ """is_up is True if sys/net/devname/operstate is 'up' or 'unknown'."""
+ for state in ['up', 'unknown']:
+ write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
+ self.assertTrue(net.is_up('eth0'))
+
+ def test_is_up_false(self):
+ """is_up is False if sys/net/devname/operstate is 'down' or invalid."""
+ for state in ['down', 'incomprehensible']:
+ write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
+ self.assertFalse(net.is_up('eth0'))
+
+ def test_is_wireless(self):
+ """is_wireless is True when /sys/net/devname/wireless exists."""
+ self.assertFalse(net.is_wireless('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless'))
+ self.assertTrue(net.is_wireless('eth0'))
+
+ def test_is_bridge(self):
+ """is_bridge is True when /sys/net/devname/bridge exists."""
+ self.assertFalse(net.is_bridge('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge'))
+ self.assertTrue(net.is_bridge('eth0'))
+
+ def test_is_bond(self):
+ """is_bond is True when /sys/net/devname/bonding exists."""
+ self.assertFalse(net.is_bond('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
+ self.assertTrue(net.is_bond('eth0'))
+
+ def test_is_vlan(self):
+ """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan."""
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'uevent'))
+ self.assertFalse(net.is_vlan('eth0'))
+ content = 'junk\nDEVTYPE=vlan\njunk\n'
+ write_file(os.path.join(self.sysdir, 'eth0', 'uevent'), content)
+ self.assertTrue(net.is_vlan('eth0'))
+
+ def test_is_connected_when_physically_connected(self):
+ """is_connected is True when /sys/net/devname/iflink reports 2."""
+ self.assertFalse(net.is_connected('eth0'))
+ write_file(os.path.join(self.sysdir, 'eth0', 'iflink'), "2")
+ self.assertTrue(net.is_connected('eth0'))
+
+ def test_is_connected_when_wireless_and_carrier_active(self):
+ """is_connected is True if wireless /sys/net/devname/carrier is 1."""
+ self.assertFalse(net.is_connected('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless'))
+ self.assertFalse(net.is_connected('eth0'))
+ write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), "1")
+ self.assertTrue(net.is_connected('eth0'))
+
+ def test_is_physical(self):
+ """is_physical is True when /sys/net/devname/device exists."""
+ self.assertFalse(net.is_physical('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'device'))
+ self.assertTrue(net.is_physical('eth0'))
+
+ def test_is_present(self):
+ """is_present is True when /sys/net/devname exists."""
+ self.assertFalse(net.is_present('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'device'))
+ self.assertTrue(net.is_present('eth0'))
+
+
+class TestGenerateFallbackConfig(CiTestCase):
+
+ def setUp(self):
+ super(TestGenerateFallbackConfig, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_generate_fallback_finds_connected_eth_with_mac(self):
+ """generate_fallback_config finds any connected device with a mac."""
+ write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
+ write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1')
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
+ expected = {
+ 'config': [{'type': 'physical', 'mac_address': mac,
+ 'name': 'eth1', 'subnets': [{'type': 'dhcp'}]}],
+ 'version': 1}
+ self.assertEqual(expected, net.generate_fallback_config())
+
+ def test_generate_fallback_finds_dormant_eth_with_mac(self):
+ """generate_fallback_config finds any dormant device with a mac."""
+ write_file(os.path.join(self.sysdir, 'eth0', 'dormant'), '1')
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+ expected = {
+ 'config': [{'type': 'physical', 'mac_address': mac,
+ 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}],
+ 'version': 1}
+ self.assertEqual(expected, net.generate_fallback_config())
+
+ def test_generate_fallback_finds_eth_by_operstate(self):
+ """generate_fallback_config finds any dormant device with a mac."""
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+ expected = {
+ 'config': [{'type': 'physical', 'mac_address': mac,
+ 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}],
+ 'version': 1}
+ valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown']
+ for state in valid_operstates:
+ write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
+ self.assertEqual(expected, net.generate_fallback_config())
+ write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'noworky')
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_veth(self):
+ """generate_fallback_config will skip any veth interfaces."""
+ # A connected veth which gets ignored
+ write_file(os.path.join(self.sysdir, 'veth0', 'carrier'), '1')
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_bridges(self):
+ """generate_fallback_config will skip any bridges interfaces."""
+ # A connected bridge which gets ignored
+ write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge'))
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_bonds(self):
+ """generate_fallback_config will skip any bonded interfaces."""
+ # A connected bond which gets ignored
+ write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
+ self.assertIsNone(net.generate_fallback_config())
+
+
+class TestGetDeviceList(CiTestCase):
+
+ def setUp(self):
+ super(TestGetDeviceList, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_get_devicelist_raise_oserror(self):
+ """get_devicelist raise any non-ENOENT OSerror."""
+ error = OSError('Can not do it')
+ error.errno = errno.EPERM # Set non-ENOENT
+ self.m_sys_path.side_effect = error
+ with self.assertRaises(OSError) as context_manager:
+ net.get_devicelist()
+ exception = context_manager.exception
+ self.assertEqual('Can not do it', str(exception))
+
+ def test_get_devicelist_empty_without_sys_net(self):
+ """get_devicelist returns empty list when missing SYS_CLASS_NET."""
+ self.m_sys_path.return_value = 'idontexist'
+ self.assertEqual([], net.get_devicelist())
+
+ def test_get_devicelist_empty_with_no_devices_in_sys_net(self):
+ """get_devicelist returns empty directoty listing for SYS_CLASS_NET."""
+ self.assertEqual([], net.get_devicelist())
+
+ def test_get_devicelist_lists_any_subdirectories_in_sys_net(self):
+ """get_devicelist returns a directory listing for SYS_CLASS_NET."""
+ write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'up')
+ write_file(os.path.join(self.sysdir, 'eth1', 'operstate'), 'up')
+ self.assertItemsEqual(['eth0', 'eth1'], net.get_devicelist())
+
+
+class TestGetInterfaceMAC(CiTestCase):
+
+ def setUp(self):
+ super(TestGetInterfaceMAC, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_get_interface_mac_false_with_no_mac(self):
+ """get_device_list returns False when no mac is reported."""
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
+ mac_path = os.path.join(self.sysdir, 'eth0', 'address')
+ self.assertFalse(os.path.exists(mac_path))
+ self.assertFalse(net.get_interface_mac('eth0'))
+
+ def test_get_interface_mac(self):
+ """get_interfaces returns the mac from SYS_CLASS_NET/dev/address."""
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
+ self.assertEqual(mac, net.get_interface_mac('eth1'))
+
+ def test_get_interface_mac_grabs_bonding_address(self):
+ """get_interfaces returns the source device mac for bonded devices."""
+ source_dev_mac = 'aa:bb:cc:aa:bb:cc'
+ bonded_mac = 'dd:ee:ff:dd:ee:ff'
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), bonded_mac)
+ write_file(
+ os.path.join(self.sysdir, 'eth1', 'bonding_slave', 'perm_hwaddr'),
+ source_dev_mac)
+ self.assertEqual(source_dev_mac, net.get_interface_mac('eth1'))
+
+ def test_get_interfaces_empty_list_without_sys_net(self):
+ """get_interfaces returns an empty list when missing SYS_CLASS_NET."""
+ self.m_sys_path.return_value = 'idontexist'
+ self.assertEqual([], net.get_interfaces())
+
+ def test_get_interfaces_by_mac_skips_empty_mac(self):
+ """Ignore 00:00:00:00:00:00 addresses from get_interfaces_by_mac."""
+ empty_mac = '00:00:00:00:00:00'
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), empty_mac)
+ write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
+ write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
+ write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac)
+ expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ def test_get_interfaces_by_mac_skips_missing_mac(self):
+ """Ignore interfaces without an address from get_interfaces_by_mac."""
+ write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
+ address_path = os.path.join(self.sysdir, 'eth1', 'address')
+ self.assertFalse(os.path.exists(address_path))
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
+ write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac)
+ expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+
+class TestInterfaceHasOwnMAC(CiTestCase):
+
+ def setUp(self):
+ super(TestInterfaceHasOwnMAC, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_interface_has_own_mac_false_when_stolen(self):
+ """Return False from interface_has_own_mac when address is stolen."""
+ write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '2')
+ self.assertFalse(net.interface_has_own_mac('eth1'))
+
+ def test_interface_has_own_mac_true_when_not_stolen(self):
+ """Return False from interface_has_own_mac when mac isn't stolen."""
+ valid_assign_types = ['0', '1', '3']
+ assign_path = os.path.join(self.sysdir, 'eth1', 'addr_assign_type')
+ for _type in valid_assign_types:
+ write_file(assign_path, _type)
+ self.assertTrue(net.interface_has_own_mac('eth1'))
+
+ def test_interface_has_own_mac_strict_errors_on_absent_assign_type(self):
+ """When addr_assign_type is absent, interface_has_own_mac errors."""
+ with self.assertRaises(ValueError):
+ net.interface_has_own_mac('eth1', strict=True)
+
+
+@mock.patch('cloudinit.net.util.subp')
+class TestEphemeralIPV4Network(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestEphemeralIPV4Network, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_ephemeral_ipv4_network_errors_on_missing_params(self, m_subp):
+ """No required params for EphemeralIPv4Network can be None."""
+ required_params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
+ for key in required_params.keys():
+ params = copy.deepcopy(required_params)
+ params[key] = None
+ with self.assertRaises(ValueError) as context_manager:
+ net.EphemeralIPv4Network(**params)
+ error = context_manager.exception
+ self.assertIn('Cannot init network on', str(error))
+ self.assertEqual(0, m_subp.call_count)
+
+ def test_ephemeral_ipv4_network_errors_invalid_mask(self, m_subp):
+ """Raise an error when prefix_or_mask is not a netmask or prefix."""
+ params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'broadcast': '192.168.2.255'}
+ invalid_masks = ('invalid', 'invalid.', '123.123.123')
+ for error_val in invalid_masks:
+ params['prefix_or_mask'] = error_val
+ with self.assertRaises(ValueError) as context_manager:
+ with net.EphemeralIPv4Network(**params):
+ pass
+ error = context_manager.exception
+ self.assertIn('Cannot setup network: netmask', str(error))
+ self.assertEqual(0, m_subp.call_count)
+
+ def test_ephemeral_ipv4_network_performs_teardown(self, m_subp):
+ """EphemeralIPv4Network performs teardown on the device if setup."""
+ expected_setup_calls = [
+ mock.call(
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
+ 'broadcast', '192.168.2.255', 'dev', 'eth0'],
+ capture=True, update_env={'LANG': 'C'}),
+ mock.call(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
+ capture=True)]
+ expected_teardown_calls = [
+ mock.call(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0',
+ 'down'], capture=True),
+ mock.call(
+ ['ip', '-family', 'inet', 'addr', 'del', '192.168.2.2/24',
+ 'dev', 'eth0'], capture=True)]
+ params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(expected_setup_calls, m_subp.call_args_list)
+ m_subp.assert_has_calls(expected_teardown_calls)
+
+ def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp):
+ """EphemeralIPv4Network handles exception when address is setup.
+
+ It performs no cleanup as the interface was already setup.
+ """
+ params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
+ m_subp.side_effect = ProcessExecutionError(
+ '', 'RTNETLINK answers: File exists', 2)
+ expected_calls = [
+ mock.call(
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
+ 'broadcast', '192.168.2.255', 'dev', 'eth0'],
+ capture=True, update_env={'LANG': 'C'})]
+ with net.EphemeralIPv4Network(**params):
+ pass
+ self.assertEqual(expected_calls, m_subp.call_args_list)
+ self.assertIn(
+ 'Skip ephemeral network setup, eth0 already has address',
+ self.logs.getvalue())
+
+ def test_ephemeral_ipv4_network_with_prefix(self, m_subp):
+ """EphemeralIPv4Network takes a valid prefix to setup the network."""
+ params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'prefix_or_mask': '24', 'broadcast': '192.168.2.255'}
+ for prefix_val in ['24', 16]: # prefix can be int or string
+ params['prefix_or_mask'] = prefix_val
+ with net.EphemeralIPv4Network(**params):
+ pass
+ m_subp.assert_has_calls([mock.call(
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
+ 'broadcast', '192.168.2.255', 'dev', 'eth0'],
+ capture=True, update_env={'LANG': 'C'})])
+ m_subp.assert_has_calls([mock.call(
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/16',
+ 'broadcast', '192.168.2.255', 'dev', 'eth0'],
+ capture=True, update_env={'LANG': 'C'})])
+
+ def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp):
+ """Add the route when router is set and no default route exists."""
+ params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255',
+ 'router': '192.168.2.1'}
+ m_subp.return_value = '', '' # Empty response from ip route gw check
+ expected_setup_calls = [
+ mock.call(
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
+ 'broadcast', '192.168.2.255', 'dev', 'eth0'],
+ capture=True, update_env={'LANG': 'C'}),
+ mock.call(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
+ capture=True),
+ mock.call(
+ ['ip', 'route', 'show', '0.0.0.0/0'], capture=True),
+ mock.call(
+ ['ip', '-4', 'route', 'add', 'default', 'via',
+ '192.168.2.1', 'dev', 'eth0'], capture=True)]
+ expected_teardown_calls = [mock.call(
+ ['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'],
+ capture=True)]
+
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(expected_setup_calls, m_subp.call_args_list)
+ m_subp.assert_has_calls(expected_teardown_calls)
diff --git a/cloudinit/net/udev.py b/cloudinit/net/udev.py
index fd2fd8c7..58c0a708 100644
--- a/cloudinit/net/udev.py
+++ b/cloudinit/net/udev.py
@@ -23,7 +23,7 @@ def compose_udev_setting(key, value):
return '%s="%s"' % (key, value)
-def generate_udev_rule(interface, mac):
+def generate_udev_rule(interface, mac, driver=None):
"""Return a udev rule to set the name of network interface with `mac`.
The rule ends up as a single line looking something like:
@@ -31,10 +31,13 @@ def generate_udev_rule(interface, mac):
SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*",
ATTR{address}="ff:ee:dd:cc:bb:aa", NAME="eth0"
"""
+ if not driver:
+ driver = '?*'
+
rule = ', '.join([
compose_udev_equality('SUBSYSTEM', 'net'),
compose_udev_equality('ACTION', 'add'),
- compose_udev_equality('DRIVERS', '?*'),
+ compose_udev_equality('DRIVERS', driver),
compose_udev_attr_equality('address', mac),
compose_udev_setting('NAME', interface),
])
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index ed374a36..39c79dee 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -20,7 +20,7 @@ LOG = logging.getLogger()
def netdev_info(empty=""):
fields = ("hwaddr", "addr", "bcast", "mask")
- (ifcfg_out, _err) = util.subp(["ifconfig", "-a"])
+ (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
devs = {}
for line in str(ifcfg_out).splitlines():
if len(line) == 0:
@@ -85,7 +85,7 @@ def netdev_info(empty=""):
def route_info():
- (route_out, _err) = util.subp(["netstat", "-rn"])
+ (route_out, _err) = util.subp(["netstat", "-rn"], rcs=[0, 1])
routes = {}
routes['ipv4'] = []
@@ -125,7 +125,8 @@ def route_info():
routes['ipv4'].append(entry)
try:
- (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"])
+ (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"],
+ rcs=[0, 1])
except util.ProcessExecutionError:
pass
else:
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 411960d8..c120498f 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -29,11 +29,13 @@ CFG_BUILTIN = {
'MAAS',
'GCE',
'OpenStack',
+ 'AliYun',
'Ec2',
'CloudSigma',
'CloudStack',
'SmartOS',
'Bigstep',
+ 'Scaleway',
# At the end to act as a 'catch' when none of the above work...
'None',
],
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 9debe947..380e27cb 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -4,8 +4,10 @@ import os
from cloudinit import sources
from cloudinit.sources import DataSourceEc2 as EC2
+from cloudinit import util
DEF_MD_VERSION = "2016-01-01"
+ALIYUN_PRODUCT = "Alibaba Cloud ECS"
class DataSourceAliYun(EC2.DataSourceEc2):
@@ -24,7 +26,17 @@ class DataSourceAliYun(EC2.DataSourceEc2):
@property
def cloud_platform(self):
- return EC2.Platforms.ALIYUN
+ if self._cloud_platform is None:
+ if _is_aliyun():
+ self._cloud_platform = EC2.Platforms.ALIYUN
+ else:
+ self._cloud_platform = EC2.Platforms.NO_EC2_METADATA
+
+ return self._cloud_platform
+
+
+def _is_aliyun():
+ return util.read_dmi_data('system-product-name') == ALIYUN_PRODUCT
def parse_public_keys(public_keys):
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index b9458ffa..b5a95a1f 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -16,6 +16,7 @@ from xml.dom import minidom
import xml.etree.ElementTree as ET
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import sources
from cloudinit.sources.helpers.azure import get_metadata_from_fabric
from cloudinit import util
@@ -36,6 +37,8 @@ RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource'
DEFAULT_PRIMARY_NIC = 'eth0'
LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases'
DEFAULT_FS = 'ext4'
+# DMI chassis-asset-tag is set static for all azure instances
+AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
@@ -99,7 +102,7 @@ def get_dev_storvsc_sysctl():
sysctl_out, err = util.subp(['sysctl', 'dev.storvsc'])
except util.ProcessExecutionError:
LOG.debug("Fail to execute sysctl dev.storvsc")
- return None
+ sysctl_out = ""
return sysctl_out
@@ -175,6 +178,11 @@ if util.is_FreeBSD():
RESOURCE_DISK_PATH = "/dev/" + res_disk
else:
LOG.debug("resource disk is None")
+ BOUNCE_COMMAND = [
+ 'sh', '-xc',
+ ("i=$interface; x=0; ifconfig down $i || x=$?; "
+ "ifconfig up $i || x=$?; exit $x")
+ ]
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START_BUILTIN,
@@ -238,7 +246,9 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
set_hostname(previous_hostname, hostname_command)
-class DataSourceAzureNet(sources.DataSource):
+class DataSourceAzure(sources.DataSource):
+ _negotiated = False
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'azure')
@@ -248,6 +258,7 @@ class DataSourceAzureNet(sources.DataSource):
util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
BUILTIN_DS_CONFIG])
self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
+ self._network_config = None
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -320,6 +331,11 @@ class DataSourceAzureNet(sources.DataSource):
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ if asset_tag != AZURE_CHASSIS_ASSET_TAG:
+ LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
+ return False
+
ddir = self.ds_cfg['data_dir']
candidates = [self.seed_dir]
@@ -364,13 +380,14 @@ class DataSourceAzureNet(sources.DataSource):
LOG.debug("using files cached in %s", ddir)
# azure / hyper-v provides random data here
+ # TODO. find the seed on FreeBSD platform
+ # now update ds_cfg to reflect contents pass in config
if not util.is_FreeBSD():
seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
quiet=True, decode=False)
if seed:
self.metadata['random_seed'] = seed
- # TODO. find the seed on FreeBSD platform
- # now update ds_cfg to reflect contents pass in config
+
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
@@ -378,6 +395,40 @@ class DataSourceAzureNet(sources.DataSource):
# the directory to be protected.
write_files(ddir, files, dirmode=0o700)
+ self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
+
+ return True
+
+ def device_name_to_device(self, name):
+ return self.ds_cfg['disk_aliases'].get(name)
+
+ def get_config_obj(self):
+ return self.cfg
+
+ def check_instance_id(self, sys_cfg):
+ # quickly (local check only) if self.instance_id is still valid
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
+ def setup(self, is_new_instance):
+ if self._negotiated is False:
+ LOG.debug("negotiating for %s (new_instance=%s)",
+ self.get_instance_id(), is_new_instance)
+ fabric_data = self._negotiate()
+ LOG.debug("negotiating returned %s", fabric_data)
+ if fabric_data:
+ self.metadata.update(fabric_data)
+ self._negotiated = True
+ else:
+ LOG.debug("negotiating already done for %s",
+ self.get_instance_id())
+
+ def _negotiate(self):
+ """Negotiate with fabric and return data from it.
+
+ On success, returns a dictionary including 'public_keys'.
+ On failure, returns False.
+ """
+
if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
self.bounce_network_with_azure_hostname()
@@ -387,31 +438,64 @@ class DataSourceAzureNet(sources.DataSource):
else:
metadata_func = self.get_metadata_from_agent
+ LOG.debug("negotiating with fabric via agent command %s",
+ self.ds_cfg['agent_command'])
try:
fabric_data = metadata_func()
except Exception as exc:
- LOG.info("Error communicating with Azure fabric; assume we aren't"
- " on Azure.", exc_info=True)
+ LOG.warning(
+ "Error communicating with Azure fabric; You may experience."
+ "connectivity issues.", exc_info=True)
return False
- self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
- self.metadata.update(fabric_data)
-
- return True
-
- def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
- def get_config_obj(self):
- return self.cfg
-
- def check_instance_id(self, sys_cfg):
- # quickly (local check only) if self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
+ return fabric_data
def activate(self, cfg, is_new_instance):
address_ephemeral_resize(is_new_instance=is_new_instance)
return
+ @property
+ def network_config(self):
+ """Generate a network config like net.generate_fallback_network() with
+ the following exceptions.
+
+ 1. Probe the drivers of the net-devices present and inject them in
+ the network configuration under params: driver: <driver> value
+ 2. If the driver value is 'mlx4_core', the control mode should be
+ set to manual. The device will be later used to build a bond,
+ for now we want to ensure the device gets named but does not
+ break any network configuration
+ """
+ blacklist = ['mlx4_core']
+ if not self._network_config:
+ LOG.debug('Azure: generating fallback configuration')
+ # generate a network config, blacklist picking any mlx4_core devs
+ netconfig = net.generate_fallback_config(
+ blacklist_drivers=blacklist, config_driver=True)
+
+ # if we have any blacklisted devices, update the network_config to
+ # include the device, mac, and driver values, but with no ip
+ # config; this ensures udev rules are generated but won't affect
+ # ip configuration
+ bl_found = 0
+ for bl_dev in [dev for dev in net.get_devicelist()
+ if net.device_driver(dev) in blacklist]:
+ bl_found += 1
+ cfg = {
+ 'type': 'physical',
+ 'name': 'vf%d' % bl_found,
+ 'mac_address': net.get_interface_mac(bl_dev),
+ 'params': {
+ 'driver': net.device_driver(bl_dev),
+ 'device_id': net.device_devid(bl_dev),
+ },
+ }
+ netconfig['config'].append(cfg)
+
+ self._network_config = netconfig
+
+ return self._network_config
+
def _partitions_on_device(devpath, maxnum=16):
# return a list of tuples (ptnum, path) for each part on devpath
@@ -694,7 +778,7 @@ def read_azure_ovf(contents):
try:
dom = minidom.parseString(contents)
except Exception as e:
- raise BrokenAzureDataSource("invalid xml: %s" % e)
+ raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e)
results = find_child(dom.documentElement,
lambda n: n.localName == "ProvisioningSection")
@@ -792,19 +876,23 @@ def encrypt_pass(password, salt_id="$6$"):
return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
+def _check_freebsd_cdrom(cdrom_dev):
+ """Return boolean indicating path to cdrom device has content."""
+ try:
+ with open(cdrom_dev) as fp:
+ fp.read(1024)
+ return True
+ except IOError:
+ LOG.debug("cdrom (%s) is not configured", cdrom_dev)
+ return False
+
+
def list_possible_azure_ds_devs():
- # return a sorted list of devices that might have a azure datasource
devlist = []
if util.is_FreeBSD():
cdrom_dev = "/dev/cd0"
- try:
- util.subp(["mount", "-o", "ro", "-t", "udf", cdrom_dev,
- "/mnt/cdrom/secure"])
- except util.ProcessExecutionError:
- LOG.debug("Fail to mount cd")
- return devlist
- util.subp(["umount", "/mnt/cdrom/secure"])
- devlist.append(cdrom_dev)
+ if _check_freebsd_cdrom(cdrom_dev):
+ return [cdrom_dev]
else:
for fstype in ("iso9660", "udf"):
devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
@@ -834,9 +922,12 @@ class NonAzureDataSource(Exception):
pass
+# Legacy: Must be present in case we load an old pkl object
+DataSourceAzureNet = DataSourceAzure
+
# Used to match classes to dependencies
datasources = [
- (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceAzure, (sources.DEP_FILESYSTEM, )),
]
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 2f9c7edf..4ec9592f 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -32,7 +32,12 @@ class Platforms(object):
AWS = "AWS"
BRIGHTBOX = "Brightbox"
SEEDED = "Seeded"
+ # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false',
+ # then an attempt at the Ec2 Metadata service will be made.
UNKNOWN = "Unknown"
+ # NO_EC2_METADATA indicates this platform does not have a Ec2 metadata
+ # service available. No attempt at the Ec2 Metadata service will be made.
+ NO_EC2_METADATA = "No-EC2-Metadata"
class DataSourceEc2(sources.DataSource):
@@ -65,6 +70,8 @@ class DataSourceEc2(sources.DataSource):
strict_mode, self.cloud_platform)
if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN:
return False
+ elif self.cloud_platform == Platforms.NO_EC2_METADATA:
+ return False
try:
if not self.wait_for_metadata_service():
@@ -309,10 +316,16 @@ def identify_platform():
def _collect_platform_data():
- # returns a dictionary with all lower case values:
- # uuid: system-uuid from dmi or /sys/hypervisor
- # uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
- # serial: dmi 'system-serial-number' (/sys/.../product_serial)
+ """Returns a dictionary of platform info from dmi or /sys/hypervisor.
+
+ Keys in the dictionary are as follows:
+ uuid: system-uuid from dmi or /sys/hypervisor
+ uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
+ serial: dmi 'system-serial-number' (/sys/.../product_serial)
+
+ On Ec2 instances experimentation is that product_serial is upper case,
+ and product_uuid is lower case. This returns lower case values for both.
+ """
data = {}
try:
uuid = util.load_file("/sys/hypervisor/uuid").strip()
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index c68f6b8c..e641244d 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -43,6 +43,18 @@ class DataSourceNoCloud(sources.DataSource):
'network-config': None}
try:
+ # Parse the system serial label from dmi. If not empty, try parsing
+ # like the commandline
+ md = {}
+ serial = util.read_dmi_data('system-serial-number')
+ if serial and load_cmdline_data(md, serial):
+ found.append("dmi")
+ mydata = _merge_new_seed(mydata, {'meta-data': md})
+ except Exception:
+ util.logexc(LOG, "Unable to parse dmi data")
+ return False
+
+ try:
# Parse the kernel command line, getting data passed in
md = {}
if load_cmdline_data(md):
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
new file mode 100644
index 00000000..3a8a8e8f
--- /dev/null
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -0,0 +1,234 @@
+# Author: Julien Castets <castets.j@gmail.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Scaleway API:
+# https://developer.scaleway.com/#metadata
+
+import json
+import os
+import socket
+import time
+
+import requests
+
+# pylint fails to import the two modules below.
+# These are imported via requests.packages rather than urllib3 because:
+# a.) the provider of the requests package should ensure that urllib3
+# contained in it is consistent/correct.
+# b.) cloud-init does not specifically have a dependency on urllib3
+#
+# For future reference, see:
+# https://github.com/kennethreitz/requests/pull/2375
+# https://github.com/requests/requests/issues/4104
+# pylint: disable=E0401
+from requests.packages.urllib3.connection import HTTPConnection
+from requests.packages.urllib3.poolmanager import PoolManager
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import url_helper
+from cloudinit import util
+
+
+LOG = logging.getLogger(__name__)
+
+DS_BASE_URL = 'http://169.254.42.42'
+
+BUILTIN_DS_CONFIG = {
+ 'metadata_url': DS_BASE_URL + '/conf?format=json',
+ 'userdata_url': DS_BASE_URL + '/user_data/cloud-init',
+ 'vendordata_url': DS_BASE_URL + '/vendor_data/cloud-init'
+}
+
+DEF_MD_RETRIES = 5
+DEF_MD_TIMEOUT = 10
+
+
+def on_scaleway():
+ """
+ There are three ways to detect if you are on Scaleway:
+
+ * check DMI data: not yet implemented by Scaleway, but the check is made to
+ be future-proof.
+ * the initrd created the file /var/run/scaleway.
+ * "scaleway" is in the kernel cmdline.
+ """
+ vendor_name = util.read_dmi_data('system-manufacturer')
+ if vendor_name == 'Scaleway':
+ return True
+
+ if os.path.exists('/var/run/scaleway'):
+ return True
+
+ cmdline = util.get_cmdline()
+ if 'scaleway' in cmdline:
+ return True
+
+ return False
+
+
+class SourceAddressAdapter(requests.adapters.HTTPAdapter):
+ """
+ Adapter for requests to choose the local address to bind to.
+ """
+ def __init__(self, source_address, **kwargs):
+ self.source_address = source_address
+ super(SourceAddressAdapter, self).__init__(**kwargs)
+
+ def init_poolmanager(self, connections, maxsize, block=False):
+ socket_options = HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ ]
+ self.poolmanager = PoolManager(num_pools=connections,
+ maxsize=maxsize,
+ block=block,
+ source_address=self.source_address,
+ socket_options=socket_options)
+
+
+def query_data_api_once(api_address, timeout, requests_session):
+ """
+ Retrieve user data or vendor data.
+
+ Scaleway user/vendor data API returns HTTP/404 if user/vendor data is not
+ set.
+
+ This function calls `url_helper.readurl` but instead of considering
+ HTTP/404 as an error that requires a retry, it considers it as empty
+ user/vendor data.
+
+ Also, be aware the user data/vendor API requires the source port to be
+ below 1024 to ensure the client is root (since non-root users can't bind
+ ports below 1024). If requests raises ConnectionError (EADDRINUSE), the
+ caller should retry to call this function on an other port.
+ """
+ try:
+ resp = url_helper.readurl(
+ api_address,
+ data=None,
+ timeout=timeout,
+ # It's the caller's responsibility to recall this function in case
+ # of exception. Don't let url_helper.readurl() retry by itself.
+ retries=0,
+ session=requests_session,
+ # If the error is a HTTP/404 or a ConnectionError, go into raise
+ # block below.
+ exception_cb=lambda _, exc: exc.code == 404 or (
+ isinstance(exc.cause, requests.exceptions.ConnectionError)
+ )
+ )
+ return util.decode_binary(resp.contents)
+ except url_helper.UrlError as exc:
+ # Empty user data.
+ if exc.code == 404:
+ return None
+ raise
+
+
+def query_data_api(api_type, api_address, retries, timeout):
+ """Get user or vendor data.
+
+ Handle the retrying logic in case the source port is used.
+
+ Scaleway metadata service requires the source port of the client to
+ be a privileged port (<1024). This is done to ensure that only a
+ privileged user on the system can access the metadata service.
+ """
+ # Query user/vendor data. Try to make a request on the first privileged
+ # port available.
+ for port in range(1, max(retries, 2)):
+ try:
+ LOG.debug(
+ 'Trying to get %s data (bind on port %d)...',
+ api_type, port
+ )
+ requests_session = requests.Session()
+ requests_session.mount(
+ 'http://',
+ SourceAddressAdapter(source_address=('0.0.0.0', port))
+ )
+ data = query_data_api_once(
+ api_address,
+ timeout=timeout,
+ requests_session=requests_session
+ )
+ LOG.debug('%s-data downloaded', api_type)
+ return data
+
+ except url_helper.UrlError as exc:
+ # Local port already in use or HTTP/429.
+ LOG.warning('Error while trying to get %s data: %s', api_type, exc)
+ time.sleep(5)
+ last_exc = exc
+ continue
+
+ # Max number of retries reached.
+ raise last_exc
+
+
+class DataSourceScaleway(sources.DataSource):
+
+ def __init__(self, sys_cfg, distro, paths):
+ super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
+
+ self.ds_cfg = util.mergemanydict([
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
+ BUILTIN_DS_CONFIG
+ ])
+
+ self.metadata_address = self.ds_cfg['metadata_url']
+ self.userdata_address = self.ds_cfg['userdata_url']
+ self.vendordata_address = self.ds_cfg['vendordata_url']
+
+ self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
+ self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
+
+ def get_data(self):
+ if not on_scaleway():
+ return False
+
+ resp = url_helper.readurl(self.metadata_address,
+ timeout=self.timeout,
+ retries=self.retries)
+ self.metadata = json.loads(util.decode_binary(resp.contents))
+
+ self.userdata_raw = query_data_api(
+ 'user-data', self.userdata_address,
+ self.retries, self.timeout
+ )
+ self.vendordata_raw = query_data_api(
+ 'vendor-data', self.vendordata_address,
+ self.retries, self.timeout
+ )
+ return True
+
+ @property
+ def launch_index(self):
+ return None
+
+ def get_instance_id(self):
+ return self.metadata['id']
+
+ def get_public_ssh_keys(self):
+ return [key['key'] for key in self.metadata['ssh_public_keys']]
+
+ def get_hostname(self, fqdn=False, resolve_ip=False):
+ return self.metadata['hostname']
+
+ @property
+ def availability_zone(self):
+ return None
+
+ @property
+ def region(self):
+ return None
+
+
+datasources = [
+ (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index c3ce36d6..952caf35 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -251,10 +251,23 @@ class DataSource(object):
def first_instance_boot(self):
return
+ def setup(self, is_new_instance):
+ """setup(is_new_instance)
+
+ This is called before user-data and vendor-data have been processed.
+
+ Unless the datasource has set mode to 'local', then networking
+ per 'fallback' or per 'network_config' will have been written and
+ brought up the OS at this point.
+ """
+ return
+
def activate(self, cfg, is_new_instance):
"""activate(cfg, is_new_instance)
- This is called before the init_modules will be called.
+ This is called before the init_modules will be called but after
+ the user-data and vendor-data have been fully processed.
+
The cfg is fully up to date config, it contains a merged view of
system config, datasource config, user config, vendor config.
It should be used rather than the sys_cfg passed to __init__.
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index ad557827..a1c4a517 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -362,6 +362,11 @@ class Init(object):
self._store_userdata()
self._store_vendordata()
+ def setup_datasource(self):
+ if self.datasource is None:
+ raise RuntimeError("Datasource is None, cannot setup.")
+ self.datasource.setup(is_new_instance=self.is_new_instance())
+
def activate_datasource(self):
if self.datasource is None:
raise RuntimeError("Datasource is None, cannot activate.")
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index d2b92e6a..7cf76aae 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -172,7 +172,8 @@ def _get_ssl_args(url, ssl_details):
def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
headers=None, headers_cb=None, ssl_details=None,
- check_status=True, allow_redirects=True, exception_cb=None):
+ check_status=True, allow_redirects=True, exception_cb=None,
+ session=None):
url = _cleanurl(url)
req_args = {
'url': url,
@@ -231,7 +232,12 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
LOG.debug("[%s/%s] open '%s' with %s configuration", i,
manual_tries, url, filtered_req_args)
- r = requests.request(**req_args)
+ if session is None:
+ session = requests.Session()
+
+ with session as sess:
+ r = sess.request(**req_args)
+
if check_status:
r.raise_for_status()
LOG.debug("Read from %s (%s, %sb) after %s attempts", url,
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 135e4608..ce2c6034 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -330,7 +330,11 @@ class SeLinuxGuard(object):
LOG.debug("Restoring selinux mode for %s (recursive=%s)",
path, self.recursive)
- self.selinux.restorecon(path, recursive=self.recursive)
+ try:
+ self.selinux.restorecon(path, recursive=self.recursive)
+ except OSError as e:
+ LOG.warning('restorecon failed on %s,%s maybe badness? %s',
+ path, self.recursive, e)
class MountFailedError(Exception):
@@ -569,7 +573,7 @@ def is_ipv4(instr):
def is_FreeBSD():
- return system_info()['platform'].startswith('FreeBSD')
+ return system_info()['variant'] == "freebsd"
def get_cfg_option_bool(yobj, key, default=False):
@@ -592,13 +596,32 @@ def get_cfg_option_int(yobj, key, default=0):
def system_info():
- return {
+ info = {
'platform': platform.platform(),
+ 'system': platform.system(),
'release': platform.release(),
'python': platform.python_version(),
'uname': platform.uname(),
- 'dist': platform.linux_distribution(), # pylint: disable=W1505
+ 'dist': platform.dist(), # pylint: disable=W1505
}
+ system = info['system'].lower()
+ var = 'unknown'
+ if system == "linux":
+ linux_dist = info['dist'][0].lower()
+ if linux_dist in ('centos', 'fedora', 'debian'):
+ var = linux_dist
+ elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
+ var = 'ubuntu'
+ elif linux_dist == 'redhat':
+ var = 'rhel'
+ else:
+ var = 'linux'
+ elif system in ('windows', 'darwin', "freebsd"):
+ var = system
+
+ info['variant'] = var
+
+ return info
def get_cfg_option_list(yobj, key, default=None):
@@ -1105,14 +1128,14 @@ def is_resolvable(name):
we have to append '.'.
The top level 'invalid' domain is invalid per RFC. And example.com
- should also not exist. The random entry will be resolved inside
- the search list.
+ should also not exist. The '__cloud_init_expected_not_found__' entry will
+ be resolved inside the search list.
"""
global _DNS_REDIRECT_IP
if _DNS_REDIRECT_IP is None:
badips = set()
badnames = ("does-not-exist.example.com.", "example.invalid.",
- rand_str())
+ "__cloud_init_expected_not_found__")
badresults = {}
for iname in badnames:
try:
@@ -1720,8 +1743,12 @@ def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False):
else:
content = decode_binary(content)
write_type = 'characters'
+ try:
+ mode_r = "%o" % mode
+ except TypeError:
+ mode_r = "%r" % mode
LOG.debug("Writing to %s - %s: [%s] %s %s",
- filename, omode, mode, len(content), write_type)
+ filename, omode, mode_r, len(content), write_type)
with SeLinuxGuard(path=filename):
with open(filename, omode) as fh:
fh.write(content)
@@ -2370,6 +2397,10 @@ def read_dmi_data(key):
"""
Wrapper for reading DMI data.
+ If running in a container return None. This is because DMI data is
+ assumed to be not useful in a container as it does not represent the
+ container but rather the host.
+
This will do the following (returning the first that produces a
result):
1) Use a mapping to translate `key` from dmidecode naming to
@@ -2380,6 +2411,9 @@ def read_dmi_data(key):
If all of the above fail to find a value, None will be returned.
"""
+ if is_container():
+ return None
+
syspath_value = _read_dmi_syspath(key)
if syspath_value is not None:
return syspath_value
@@ -2495,7 +2529,7 @@ def load_shell_content(content, add_empty=False, empty_val=None):
if PY26 and isinstance(blob, six.text_type):
# Older versions don't support unicode input
blob = blob.encode("utf8")
- return shlex.split(blob)
+ return shlex.split(blob, comments=True)
data = {}
for line in _shlex_split(content):