author    Scott Moser <smoser@brickies.net>  2016-09-09 21:46:49 -0400
committer Scott Moser <smoser@brickies.net>  2016-09-09 21:46:49 -0400
commit    ea732e69516983b1d9838b0d80540a832594748a (patch)
tree      f23cbf03e360f913e98e15d232bcf871770806e8 /cloudinit
parent    eb5860ec6ed76a90fb837001ab2ed54e1dcf78de (diff)
parent    34a26f7f59f2963691e36ca0476bec9fc9ccef63 (diff)
Merge branch 'master' into ubuntu/xenial
Diffstat (limited to 'cloudinit')
 cloudinit/atomic_helper.py                   |  31
 cloudinit/cmd/main.py                        |  55
 cloudinit/config/cc_apt_configure.py         | 717
 cloudinit/config/cc_lxd.py                   |   2
 cloudinit/config/cc_mcollective.py           |  96
 cloudinit/config/cc_ntp.py                   | 106
 cloudinit/config/cc_phone_home.py            |   2
 cloudinit/config/cc_rh_subscription.py       |   2
 cloudinit/config/cc_salt_minion.py           |   7
 cloudinit/config/cc_snappy.py                |   2
 cloudinit/config/cc_spacewalk.py             |  85
 cloudinit/config/cc_ubuntu_init_switch.py    |   2
 cloudinit/config/cc_yum_add_repo.py          |   2
 cloudinit/dhclient_hook.py                   |  50
 cloudinit/distros/__init__.py                |   2
 cloudinit/distros/gentoo.py                  |  95
 cloudinit/gpg.py                             |   8
 cloudinit/net/__init__.py                    |   9
 cloudinit/net/eni.py                         |   2
 cloudinit/signal_handler.py                  |   2
 cloudinit/sources/DataSourceAltCloud.py      |   6
 cloudinit/sources/DataSourceAzure.py         |  20
 cloudinit/sources/DataSourceCloudSigma.py    |   6
 cloudinit/sources/DataSourceConfigDrive.py   |   2
 cloudinit/sources/DataSourceDigitalOcean.py  | 106
 cloudinit/sources/DataSourceGCE.py           |   2
 cloudinit/sources/DataSourceMAAS.py          | 199
 cloudinit/sources/DataSourceNoCloud.py       |   2
 cloudinit/sources/DataSourceOpenNebula.py    |   2
 cloudinit/sources/DataSourceOpenStack.py     |   2
 cloudinit/sources/DataSourceSmartOS.py       | 122
 cloudinit/sources/__init__.py                |  27
 cloudinit/sources/helpers/azure.py           | 102
 cloudinit/sources/helpers/openstack.py       |  85
 cloudinit/util.py                            | 159
 cloudinit/version.py                         |   8
 36 files changed, 1632 insertions(+), 495 deletions(-)
diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py
new file mode 100644
index 00000000..a3cfd942
--- /dev/null
+++ b/cloudinit/atomic_helper.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+# vi: ts=4 expandtab
+
+import json
+import os
+import tempfile
+
+_DEF_PERMS = 0o644
+
+
+def write_file(filename, content, mode=_DEF_PERMS, omode="wb"):
+ # open filename in mode 'omode', write content, set permissions to 'mode'
+ tf = None
+ try:
+ tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
+ delete=False, mode=omode)
+ tf.write(content)
+ tf.close()
+ os.chmod(tf.name, mode)
+ os.rename(tf.name, filename)
+ except Exception as e:
+ if tf is not None:
+ os.unlink(tf.name)
+ raise e
+
+
+def write_json(filename, data, mode=_DEF_PERMS):
+ # dump json representation of data to file filename.
+ return write_file(
+ filename, json.dumps(data, indent=1, sort_keys=True) + "\n",
+ omode="w", mode=mode)
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 63621c1d..83eb02c9 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -25,7 +25,6 @@ import argparse
import json
import os
import sys
-import tempfile
import time
import traceback
@@ -47,6 +46,10 @@ from cloudinit.reporting import events
from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
CLOUD_CONFIG)
+from cloudinit import atomic_helper
+
+from cloudinit.dhclient_hook import LogDhclient
+
# Pretty little cheetah formatted welcome message template
WELCOME_MSG_TPL = ("Cloud-init v. ${version} running '${action}' at "
@@ -452,22 +455,10 @@ def main_single(name, args):
return 0
-def atomic_write_file(path, content, mode='w'):
- tf = None
- try:
- tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(path),
- delete=False, mode=mode)
- tf.write(content)
- tf.close()
- os.rename(tf.name, path)
- except Exception as e:
- if tf is not None:
- os.unlink(tf.name)
- raise e
-
-
-def atomic_write_json(path, data):
- return atomic_write_file(path, json.dumps(data, indent=1) + "\n")
+def dhclient_hook(name, args):
+ record = LogDhclient(args)
+ record.check_hooks_dir()
+ record.record()
def status_wrapper(name, args, data_d=None, link_d=None):
@@ -522,7 +513,7 @@ def status_wrapper(name, args, data_d=None, link_d=None):
v1['stage'] = mode
v1[mode]['start'] = time.time()
- atomic_write_json(status_path, status)
+ atomic_helper.write_json(status_path, status)
util.sym_link(os.path.relpath(status_path, link_d), status_link,
force=True)
@@ -545,7 +536,7 @@ def status_wrapper(name, args, data_d=None, link_d=None):
v1[mode]['finished'] = time.time()
v1['stage'] = None
- atomic_write_json(status_path, status)
+ atomic_helper.write_json(status_path, status)
if mode == "modules-final":
# write the 'finished' file
@@ -554,9 +545,9 @@ def status_wrapper(name, args, data_d=None, link_d=None):
if v1[m]['errors']:
errors.extend(v1[m].get('errors', []))
- atomic_write_json(result_path,
- {'v1': {'datasource': v1['datasource'],
- 'errors': errors}})
+ atomic_helper.write_json(
+ result_path, {'v1': {'datasource': v1['datasource'],
+ 'errors': errors}})
util.sym_link(os.path.relpath(result_path, link_d), result_link,
force=True)
@@ -627,7 +618,6 @@ def main(sysv_args=None):
# This subcommand allows you to run a single module
parser_single = subparsers.add_parser('single',
help=('run a single module '))
- parser_single.set_defaults(action=('single', main_single))
parser_single.add_argument("--name", '-n', action="store",
help="module name to run",
required=True)
@@ -644,6 +634,16 @@ def main(sysv_args=None):
' pass to this module'))
parser_single.set_defaults(action=('single', main_single))
+ parser_dhclient = subparsers.add_parser('dhclient-hook',
+ help=('run the dhclient hook '
+ 'to record network info'))
+ parser_dhclient.add_argument("net_action",
+ help=('action taken on the interface'))
+ parser_dhclient.add_argument("net_interface",
+ help=('the network interface being acted'
+ ' upon'))
+ parser_dhclient.set_defaults(action=('dhclient_hook', dhclient_hook))
+
args = parser.parse_args(args=sysv_args)
try:
@@ -677,9 +677,18 @@ def main(sysv_args=None):
"running single module %s" % args.name)
report_on = args.report
+ elif name == 'dhclient_hook':
+ rname, rdesc = ("dhclient-hook",
+ "running dhclient-hook module")
+
args.reporter = events.ReportEventStack(
rname, rdesc, reporting_enabled=report_on)
+
with args.reporter:
return util.log_time(
logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
get_uptime=True, func=functor, args=(name, args))
+
+
+if __name__ == '__main__':
+ main(sys.argv)
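
How the new subcommand parses, as a minimal stand-in for the wiring above
(the real entry point is `cloud-init dhclient-hook <action> <interface>`,
presumably invoked from dhclient hook scripts):

    import argparse

    parser = argparse.ArgumentParser(prog='cloud-init')
    subparsers = parser.add_subparsers()
    p = subparsers.add_parser('dhclient-hook')
    p.add_argument('net_action')
    p.add_argument('net_interface')
    args = parser.parse_args(['dhclient-hook', 'up', 'eth0'])
    assert (args.net_action, args.net_interface) == ('up', 'eth0')
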
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 05ad4b03..fa9505a7 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -23,80 +23,182 @@ import os
import re
from cloudinit import gpg
+from cloudinit import log as logging
from cloudinit import templater
from cloudinit import util
-distros = ['ubuntu', 'debian']
-
-PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n"
-APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config"
-APT_PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy"
+LOG = logging.getLogger(__name__)
# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+# place where apt stores cached repository data
+APT_LISTS = "/var/lib/apt/lists"
-def handle(name, cfg, cloud, log, _args):
- if util.is_false(cfg.get('apt_configure_enabled', True)):
- log.debug("Skipping module named %s, disabled by config.", name)
- return
-
- release = get_release()
- mirrors = find_apt_mirror_info(cloud, cfg)
- if not mirrors or "primary" not in mirrors:
- log.debug(("Skipping module named %s,"
- " no package 'mirror' located"), name)
- return
-
- # backwards compatibility
- mirror = mirrors["primary"]
- mirrors["mirror"] = mirror
-
- log.debug("Mirror info: %s" % mirrors)
-
- if not util.get_cfg_option_bool(cfg,
- 'apt_preserve_sources_list', False):
- generate_sources_list(cfg, release, mirrors, cloud, log)
- old_mirrors = cfg.get('apt_old_mirrors',
- {"primary": "archive.ubuntu.com/ubuntu",
- "security": "security.ubuntu.com/ubuntu"})
- rename_apt_lists(old_mirrors, mirrors)
+# Files to store proxy information
+APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config"
+APT_PROXY_FN = "/etc/apt/apt.conf.d/90cloud-init-aptproxy"
+
+# Default keyserver to use
+DEFAULT_KEYSERVER = "keyserver.ubuntu.com"
+
+# Default archive mirrors
+PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/",
+ "SECURITY": "http://security.ubuntu.com/ubuntu/"}
+PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
+ "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"}
+PRIMARY_ARCHES = ['amd64', 'i386']
+PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']
+
+
+def get_default_mirrors(arch=None, target=None):
+ """returns the default mirrors for the target. These depend on the
+ architecture, for more see:
+ https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
+ if arch is None:
+ arch = util.get_architecture(target)
+ if arch in PRIMARY_ARCHES:
+ return PRIMARY_ARCH_MIRRORS.copy()
+ if arch in PORTS_ARCHES:
+ return PORTS_MIRRORS.copy()
+ raise ValueError("No default mirror known for arch %s" % arch)
+
+
+def handle(name, ocfg, cloud, log, _):
+ """process the config for apt_config. This can be called from
+ curthooks if a global apt config was provided or via the "apt"
+ standalone command."""
+ # keeping code close to curtin codebase via entry handler
+ target = None
+ if log is not None:
+ global LOG
+ LOG = log
+ # feed back converted config, but only work on the subset under 'apt'
+ ocfg = convert_to_v3_apt_format(ocfg)
+ cfg = ocfg.get('apt', {})
+
+ if not isinstance(cfg, dict):
+ raise ValueError("Expected dictionary for 'apt' config, found %s"
+ % type(cfg))
+
+ LOG.debug("handling apt (module %s) with apt config '%s'", name, cfg)
+
+ release = util.lsb_release(target=target)['codename']
+ arch = util.get_architecture(target)
+ mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
+ LOG.debug("Apt Mirror info: %s", mirrors)
+
+ apply_debconf_selections(cfg, target)
+
+ if util.is_false(cfg.get('preserve_sources_list', False)):
+ generate_sources_list(cfg, release, mirrors, cloud)
+ rename_apt_lists(mirrors, target)
try:
apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
- except Exception as e:
- log.warn("failed to proxy or apt config info: %s", e)
+ except (IOError, OSError):
+ LOG.exception("Failed to apply proxy or apt config info:")
- # Process 'apt_sources'
- if 'apt_sources' in cfg:
+ # Process 'apt_source -> sources {dict}'
+ if 'sources' in cfg:
params = mirrors
params['RELEASE'] = release
- params['MIRROR'] = mirror
+ params['MIRROR'] = mirrors["MIRROR"]
+ matcher = None
matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
if matchcfg:
matcher = re.compile(matchcfg).search
+
+ add_apt_sources(cfg['sources'], cloud, target=target,
+ template_params=params, aa_repo_match=matcher)
+
+
+def debconf_set_selections(selections, target=None):
+ util.subp(['debconf-set-selections'], data=selections, target=target,
+ capture=True)
+
+
+def dpkg_reconfigure(packages, target=None):
+ # For any packages that are already installed, but have preseed data
+ # we populate the debconf database, but the filesystem configuration
+ # would be preferred on a subsequent dpkg-reconfigure.
+ # so, what we have to do is "know" information about certain packages
+ # to unconfigure them.
+ unhandled = []
+ to_config = []
+ for pkg in packages:
+ if pkg in CONFIG_CLEANERS:
+ LOG.debug("unconfiguring %s", pkg)
+ CONFIG_CLEANERS[pkg](target)
+ to_config.append(pkg)
else:
- def matcher(x):
- return False
+ unhandled.append(pkg)
+
+ if len(unhandled):
+ LOG.warn("The following packages were installed and preseeded, "
+ "but cannot be unconfigured: %s", unhandled)
+
+ if len(to_config):
+ util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
+ list(to_config), data=None, target=target, capture=True)
+
+
+def apply_debconf_selections(cfg, target=None):
+ """apply_debconf_selections - push content to debconf"""
+ # debconf_selections:
+ # set1: |
+ # cloud-init cloud-init/datasources multiselect MAAS
+ # set2: pkg pkg/value string bar
+ selsets = cfg.get('debconf_selections')
+ if not selsets:
+ LOG.debug("debconf_selections was not set in config")
+ return
- errors = add_apt_sources(cfg['apt_sources'], params,
- aa_repo_match=matcher)
- for e in errors:
- log.warn("Add source error: %s", ':'.join(e))
+ selections = '\n'.join(
+ [selsets[key] for key in sorted(selsets.keys())])
+ debconf_set_selections(selections.encode() + b"\n", target=target)
- dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
- if dconf_sel:
- log.debug("Setting debconf selections per cloud config")
- try:
- util.subp(('debconf-set-selections', '-'), dconf_sel)
- except Exception:
- util.logexc(log, "Failed to run debconf-set-selections")
+ # get a complete list of packages listed in input
+ pkgs_cfgd = set()
+ for key, content in selsets.items():
+ for line in content.splitlines():
+ if line.startswith("#"):
+ continue
+ pkg = re.sub(r"[:\s].*", "", line)
+ pkgs_cfgd.add(pkg)
+
+ pkgs_installed = util.get_installed_packages(target)
+
+ LOG.debug("pkgs_cfgd: %s", pkgs_cfgd)
+ need_reconfig = pkgs_cfgd.intersection(pkgs_installed)
+
+ if len(need_reconfig) == 0:
+ LOG.debug("no need for reconfig")
+ return
+
+ dpkg_reconfigure(need_reconfig, target=target)
+
+
+def clean_cloud_init(target):
+ """clean out any local cloud-init config"""
+ flist = glob.glob(
+ util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
+
+ LOG.debug("cleaning cloud-init config from: %s", flist)
+ for dpkg_cfg in flist:
+ os.unlink(dpkg_cfg)
def mirrorurl_to_apt_fileprefix(mirror):
+ """mirrorurl_to_apt_fileprefix
+ Convert a mirror url to the file prefix used by apt on disk to
+ store cache information for that mirror.
+ To do so:
+ - take off ???://
+ - drop trailing /
+ - convert any remaining / in the string to _"""
string = mirror
- # take off http:// or ftp://
if string.endswith("/"):
string = string[0:-1]
pos = string.find("://")
@@ -106,174 +208,379 @@ def mirrorurl_to_apt_fileprefix(mirror):
return string
-def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
- for (name, omirror) in old_mirrors.items():
+def rename_apt_lists(new_mirrors, target=None):
+ """rename_apt_lists - rename apt lists to preserve old cache data"""
+ default_mirrors = get_default_mirrors(util.get_architecture(target))
+
+ pre = util.target_path(target, APT_LISTS)
+ for (name, omirror) in default_mirrors.items():
nmirror = new_mirrors.get(name)
if not nmirror:
continue
- oprefix = os.path.join(lists_d, mirrorurl_to_apt_fileprefix(omirror))
- nprefix = os.path.join(lists_d, mirrorurl_to_apt_fileprefix(nmirror))
+
+ oprefix = pre + os.path.sep + mirrorurl_to_apt_fileprefix(omirror)
+ nprefix = pre + os.path.sep + mirrorurl_to_apt_fileprefix(nmirror)
if oprefix == nprefix:
continue
olen = len(oprefix)
for filename in glob.glob("%s_*" % oprefix):
- util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
-
-
-def get_release():
- (stdout, _stderr) = util.subp(['lsb_release', '-cs'])
- return stdout.strip()
-
-
-def generate_sources_list(cfg, codename, mirrors, cloud, log):
- params = {'codename': codename}
+ newname = "%s%s" % (nprefix, filename[olen:])
+ LOG.debug("Renaming apt list %s to %s", filename, newname)
+ try:
+ os.rename(filename, newname)
+ except OSError:
+ # since this is a best-effort task, warn but don't fail
+ LOG.warn("Failed to rename apt list:", exc_info=True)
+
+
+def mirror_to_placeholder(tmpl, mirror, placeholder):
+ """mirror_to_placeholder
+ replace the specified mirror in a template with a placeholder string
+ Checks for existence of the expected mirror and warns if not found"""
+ if mirror not in tmpl:
+ LOG.warn("Expected mirror '%s' not found in: %s", mirror, tmpl)
+ return tmpl.replace(mirror, placeholder)
+
+
+def map_known_suites(suite):
+ """there are a few default names which will be auto-extended.
+ The cost is that those names cannot be used literally as suites,
+ but on the other hand it increases readability of the cfg quite a lot"""
+ mapping = {'updates': '$RELEASE-updates',
+ 'backports': '$RELEASE-backports',
+ 'security': '$RELEASE-security',
+ 'proposed': '$RELEASE-proposed',
+ 'release': '$RELEASE'}
+ try:
+ retsuite = mapping[suite]
+ except KeyError:
+ retsuite = suite
+ return retsuite
+
+
+def disable_suites(disabled, src, release):
+ """reads the config for suites to be disabled and removes those
+ from the template"""
+ if not disabled:
+ return src
+
+ retsrc = src
+ for suite in disabled:
+ suite = map_known_suites(suite)
+ releasesuite = templater.render_string(suite, {'RELEASE': release})
+ LOG.debug("Disabling suite %s as %s", suite, releasesuite)
+
+ newsrc = ""
+ for line in retsrc.splitlines(True):
+ if line.startswith("#"):
+ newsrc += line
+ continue
+
+ # sources.list allow options in cols[1] which can have spaces
+ # so the actual suite can be [2] or later. example:
+ # deb [ arch=amd64,armel k=v ] http://example.com/debian
+ cols = line.split()
+ if len(cols) > 1:
+ pcol = 2
+ if cols[1].startswith("["):
+ for col in cols[1:]:
+ pcol += 1
+ if col.endswith("]"):
+ break
+
+ if cols[pcol] == releasesuite:
+ line = '# suite disabled by cloud-init: %s' % line
+ newsrc += line
+ retsrc = newsrc
+
+ return retsrc
+
+
+def generate_sources_list(cfg, release, mirrors, cloud):
+ """generate_sources_list
+ create a sources.list file based on a custom or default template
+ by replacing mirrors and release in the template"""
+ aptsrc = "/etc/apt/sources.list"
+ params = {'RELEASE': release, 'codename': release}
for k in mirrors:
params[k] = mirrors[k]
+ params[k.lower()] = mirrors[k]
- custtmpl = cfg.get('apt_custom_sources_list', None)
- if custtmpl is not None:
- templater.render_string_to_file(custtmpl,
- '/etc/apt/sources.list', params)
- return
-
- template_fn = cloud.get_template_filename('sources.list.%s' %
- (cloud.distro.name))
- if not template_fn:
- template_fn = cloud.get_template_filename('sources.list')
+ tmpl = cfg.get('sources_list', None)
+ if tmpl is None:
+ LOG.info("No custom template provided, fall back to builtin")
+ template_fn = cloud.get_template_filename('sources.list.%s' %
+ (cloud.distro.name))
if not template_fn:
- log.warn("No template found, not rendering /etc/apt/sources.list")
+ template_fn = cloud.get_template_filename('sources.list')
+ if not template_fn:
+ LOG.warn("No template found, not rendering /etc/apt/sources.list")
return
+ tmpl = util.load_file(template_fn)
- templater.render_to_file(template_fn, '/etc/apt/sources.list', params)
+ rendered = templater.render_string(tmpl, params)
+ disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
+ util.write_file(aptsrc, disabled, mode=0o644)
-def add_apt_key_raw(key):
+def add_apt_key_raw(key, target=None):
"""
actual adding of a key as defined in key argument
to the system
"""
+ LOG.debug("Adding key:\n'%s'", key)
try:
- util.subp(('apt-key', 'add', '-'), key)
+ util.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
except util.ProcessExecutionError:
- raise ValueError('failed to add apt GPG Key to apt keyring')
+ LOG.exception("failed to add apt GPG Key to apt keyring")
+ raise
-def add_apt_key(ent):
+def add_apt_key(ent, target=None):
"""
- add key to the system as defined in ent (if any)
- supports raw keys or keyid's
- The latter will as a first step fetch the raw key from a keyserver
+ Add key to the system as defined in ent (if any).
+ Supports raw keys or keyids.
+ The latter will first be fetched from a keyserver to get the raw key
"""
if 'keyid' in ent and 'key' not in ent:
- keyserver = "keyserver.ubuntu.com"
+ keyserver = DEFAULT_KEYSERVER
if 'keyserver' in ent:
keyserver = ent['keyserver']
- ent['key'] = gpg.get_key_by_id(ent['keyid'], keyserver)
- if 'key' in ent:
- add_apt_key_raw(ent['key'])
+ ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver)
+ if 'key' in ent:
+ add_apt_key_raw(ent['key'], target)
-def convert_to_new_format(srclist):
- """convert_to_new_format
- convert the old list based format to the new dict based one
- """
- srcdict = {}
- if isinstance(srclist, list):
- for srcent in srclist:
- if 'filename' not in srcent:
- # file collides for multiple !filename cases for compatibility
- # yet we need them all processed, so not same dictionary key
- srcent['filename'] = "cloud_config_sources.list"
- key = util.rand_dict_key(srcdict, "cloud_config_sources.list")
- else:
- # all with filename use that as key (matching new format)
- key = srcent['filename']
- srcdict[key] = srcent
- elif isinstance(srclist, dict):
- srcdict = srclist
- else:
- raise ValueError("unknown apt_sources format")
- return srcdict
+def update_packages(cloud):
+ cloud.distro.update_package_sources()
-def add_apt_sources(srclist, template_params=None, aa_repo_match=None):
+def add_apt_sources(srcdict, cloud, target=None, template_params=None,
+ aa_repo_match=None):
"""
add entries in /etc/apt/sources.list.d for each abbreviated
- sources.list entry in 'srclist'. When rendering template, also
+ sources.list entry in 'srcdict'. When rendering template, also
include the values in dictionary searchList
"""
if template_params is None:
template_params = {}
if aa_repo_match is None:
- def _aa_repo_match(x):
- return False
- aa_repo_match = _aa_repo_match
+ raise ValueError('did not get a valid repo matcher')
- errorlist = []
- srcdict = convert_to_new_format(srclist)
+ if not isinstance(srcdict, dict):
+ raise TypeError('unknown apt format: %s' % (srcdict))
for filename in srcdict:
ent = srcdict[filename]
+ LOG.debug("adding source/key '%s'", ent)
if 'filename' not in ent:
ent['filename'] = filename
- # keys can be added without specifying a source
- try:
- add_apt_key(ent)
- except ValueError as detail:
- errorlist.append([ent, detail])
+ add_apt_key(ent, target)
if 'source' not in ent:
- errorlist.append(["", "missing source"])
continue
source = ent['source']
source = templater.render_string(source, template_params)
- if not ent['filename'].startswith(os.path.sep):
+ if not ent['filename'].startswith("/"):
ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
ent['filename'])
+ if not ent['filename'].endswith(".list"):
+ ent['filename'] += ".list"
if aa_repo_match(source):
try:
- util.subp(["add-apt-repository", source])
- except util.ProcessExecutionError as e:
- errorlist.append([source,
- ("add-apt-repository failed. " + str(e))])
+ util.subp(["add-apt-repository", source], target=target)
+ except util.ProcessExecutionError:
+ LOG.exception("add-apt-repository failed.")
+ raise
continue
+ sourcefn = util.target_path(target, ent['filename'])
try:
contents = "%s\n" % (source)
- util.write_file(ent['filename'], contents, omode="ab")
- except Exception:
- errorlist.append([source,
- "failed write to file %s" % ent['filename']])
+ util.write_file(sourcefn, contents, omode="a")
+ except IOError as detail:
+ LOG.exception("failed write to file %s: %s", sourcefn, detail)
+ raise
- return errorlist
+ update_packages(cloud)
+ return
-def find_apt_mirror_info(cloud, cfg):
- """find an apt_mirror given the cloud and cfg provided."""
- mirror = None
+def convert_v1_to_v2_apt_format(srclist):
+ """convert v1 apt format to v2 (dict in apt_sources)"""
+ srcdict = {}
+ if isinstance(srclist, list):
+ LOG.debug("apt config: convert V1 to V2 format (source list to dict)")
+ for srcent in srclist:
+ if 'filename' not in srcent:
+ # file collides for multiple !filename cases for compatibility
+ # yet we need them all processed, so not same dictionary key
+ srcent['filename'] = "cloud_config_sources.list"
+ key = util.rand_dict_key(srcdict, "cloud_config_sources.list")
+ else:
+ # all with filename use that as key (matching new format)
+ key = srcent['filename']
+ srcdict[key] = srcent
+ elif isinstance(srclist, dict):
+ srcdict = srclist
+ else:
+ raise ValueError("unknown apt_sources format")
+
+ return srcdict
- # this is less preferred way of specifying mirror preferred would be to
- # use the distro's search or package_mirror.
- mirror = cfg.get("apt_mirror", None)
- search = cfg.get("apt_mirror_search", None)
- if not mirror and search:
- mirror = util.search_for_mirror(search)
+def convert_key(oldcfg, aptcfg, oldkey, newkey):
+ """convert an old key to the new one if the old one exists
+ returns true if a key was found and converted"""
+ if oldcfg.get(oldkey, None) is not None:
+ aptcfg[newkey] = oldcfg.get(oldkey)
+ del oldcfg[oldkey]
+ return True
+ return False
+
+
+def convert_mirror(oldcfg, aptcfg):
+ """convert old apt_mirror keys into the new more advanced mirror spec"""
+ keymap = [('apt_mirror', 'uri'),
+ ('apt_mirror_search', 'search'),
+ ('apt_mirror_search_dns', 'search_dns')]
+ converted = False
+ newmcfg = {'arches': ['default']}
+ for oldkey, newkey in keymap:
+ if convert_key(oldcfg, newmcfg, oldkey, newkey):
+ converted = True
+
+ # only insert new style config if anything was converted
+ if converted:
+ aptcfg['primary'] = [newmcfg]
+
+
+def convert_v2_to_v3_apt_format(oldcfg):
+ """convert old to new keys and adapt restructured mirror spec"""
+ mapoldkeys = {'apt_sources': 'sources',
+ 'apt_mirror': None,
+ 'apt_mirror_search': None,
+ 'apt_mirror_search_dns': None,
+ 'apt_proxy': 'proxy',
+ 'apt_http_proxy': 'http_proxy',
+ 'apt_ftp_proxy': 'ftp_proxy',
+ 'apt_https_proxy': 'https_proxy',
+ 'apt_preserve_sources_list': 'preserve_sources_list',
+ 'apt_custom_sources_list': 'sources_list',
+ 'add_apt_repo_match': 'add_apt_repo_match'}
+ needtoconvert = []
+ for oldkey in mapoldkeys:
+ if oldkey in oldcfg:
+ if oldcfg[oldkey] in (None, ""):
+ del oldcfg[oldkey]
+ else:
+ needtoconvert.append(oldkey)
+
+ # no old config, so no new one to be created
+ if not needtoconvert:
+ return oldcfg
+ LOG.debug("apt config: convert V2 to V3 format for keys '%s'",
+ ", ".join(needtoconvert))
+
+ # if old AND new config are provided, prefer the new one (LP #1616831)
+ newaptcfg = oldcfg.get('apt', None)
+ if newaptcfg is not None:
+ LOG.debug("apt config: V1/2 and V3 format specified, preferring V3")
+ for oldkey in needtoconvert:
+ newkey = mapoldkeys[oldkey]
+ verify = oldcfg[oldkey] # drop, but keep a ref for verification
+ del oldcfg[oldkey]
+ if newkey is None or newaptcfg.get(newkey, None) is None:
+ # no simple mapping or no collision on this particular key
+ continue
+ if verify != newaptcfg[newkey]:
+ raise ValueError("Old and New apt format defined with unequal "
+ "values %s vs %s @ %s" % (verify,
+ newaptcfg[newkey],
+ oldkey))
+ # return conf after clearing conflicting V1/2 keys
+ return oldcfg
+
+ # create new format from old keys
+ aptcfg = {}
+
+ # simple renames / moves under the apt key
+ for oldkey in mapoldkeys:
+ if mapoldkeys[oldkey] is not None:
+ convert_key(oldcfg, aptcfg, oldkey, mapoldkeys[oldkey])
+
+ # mirrors changed in a more complex way
+ convert_mirror(oldcfg, aptcfg)
+
+ for oldkey in mapoldkeys:
+ if oldcfg.get(oldkey, None) is not None:
+ raise ValueError("old apt key '%s' left after conversion" % oldkey)
+
+ # insert new format into config and return full cfg with only v3 content
+ oldcfg['apt'] = aptcfg
+ return oldcfg
+
+
+def convert_to_v3_apt_format(cfg):
+ """convert the old list based format to the new dict based one. After that
+ convert the old dict keys/format to v3 a.k.a 'new apt config'"""
+ # V1 -> V2, the apt_sources entry from list to dict
+ apt_sources = cfg.get('apt_sources', None)
+ if apt_sources is not None:
+ cfg['apt_sources'] = convert_v1_to_v2_apt_format(apt_sources)
+
+ # V2 -> V3, move all former globals under the "apt" key
+ # Restructure into new key names and mirror hierarchy
+ cfg = convert_v2_to_v3_apt_format(cfg)
+
+ return cfg
+
+
+def search_for_mirror(candidates):
+ """
+ Search through a list of mirror urls for one that works
+ This needs to return quickly.
+ """
+ if candidates is None:
+ return None
+
+ LOG.debug("search for mirror in candidates: '%s'", candidates)
+ for cand in candidates:
+ try:
+ if util.is_resolvable_url(cand):
+ LOG.debug("found working mirror: '%s'", cand)
+ return cand
+ except Exception:
+ pass
+ return None
+
+
+def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
+ """
+ Try to resolve a list of predefines DNS names to pick mirrors
+ """
+ mirror = None
- if (not mirror and
- util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
+ if configured:
mydom = ""
doms = []
+ if mirrortype == "primary":
+ mirrordns = "mirror"
+ elif mirrortype == "security":
+ mirrordns = "security-mirror"
+ else:
+ raise ValueError("unknown mirror type")
+
# if we have a fqdn, then search its domain portion first
- (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ (_, fqdn) = util.get_hostname_fqdn(cfg, cloud)
mydom = ".".join(fqdn.split(".")[1:])
if mydom:
doms.append(".%s" % mydom)
@@ -282,38 +589,136 @@ def find_apt_mirror_info(cloud, cfg):
mirror_list = []
distro = cloud.distro.name
- mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
+ mirrorfmt = "http://%s-%s%s/%s" % (distro, mirrordns, "%s", distro)
for post in doms:
mirror_list.append(mirrorfmt % (post))
- mirror = util.search_for_mirror(mirror_list)
+ mirror = search_for_mirror(mirror_list)
+
+ return mirror
+
+def update_mirror_info(pmirror, smirror, arch, cloud):
+ """sets security mirror to primary if not defined.
+ returns defaults if no mirrors are defined"""
+ if pmirror is not None:
+ if smirror is None:
+ smirror = pmirror
+ return {'PRIMARY': pmirror,
+ 'SECURITY': smirror}
+
+ # None specified at all, get default mirrors from cloud
mirror_info = cloud.datasource.get_package_mirror_info()
+ if mirror_info:
+ # get_package_mirror_info() returns a dictionary with
+ # arbitrary key/value pairs including 'primary' and 'security' keys.
+ # caller expects dict with PRIMARY and SECURITY.
+ m = mirror_info.copy()
+ m['PRIMARY'] = m['primary']
+ m['SECURITY'] = m['security']
+
+ return m
+
+ # if neither apt nor cloud configured a mirror, fall back to defaults
+ return get_default_mirrors(arch)
+
+
+def get_arch_mirrorconfig(cfg, mirrortype, arch):
+ """out of a list of potential mirror configurations select
+ and return the one matching the architecture (or default)"""
+ # select the mirror specification (if-any)
+ mirror_cfg_list = cfg.get(mirrortype, None)
+ if mirror_cfg_list is None:
+ return None
+
+ # select the specification matching the target arch
+ default = None
+ for mirror_cfg_elem in mirror_cfg_list:
+ arches = mirror_cfg_elem.get("arches")
+ if arch in arches:
+ return mirror_cfg_elem
+ if "default" in arches:
+ default = mirror_cfg_elem
+ return default
+
+
+def get_mirror(cfg, mirrortype, arch, cloud):
+ """pass the three potential stages of mirror specification
+ returns None if none of them found anything, otherwise the first
+ hit is returned"""
+ mcfg = get_arch_mirrorconfig(cfg, mirrortype, arch)
+ if mcfg is None:
+ return None
+
+ # directly specified
+ mirror = mcfg.get("uri", None)
+
+ # fallback to search if specified
+ if mirror is None:
+ # list of mirrors to try to resolve
+ mirror = search_for_mirror(mcfg.get("search", None))
+
+ # fallback to search_dns if specified
+ if mirror is None:
+ # list of mirrors to try to resolve
+ mirror = search_for_mirror_dns(mcfg.get("search_dns", None),
+ mirrortype, cfg, cloud)
+
+ return mirror
+
+
+def find_apt_mirror_info(cfg, cloud, arch=None):
+ """find_apt_mirror_info
+ find an apt_mirror given the cfg provided.
+ It can check for separate config of primary and security mirrors.
+ If only primary is given, security is assumed to be equal to primary.
+ If the generic apt_mirror is given, it defines both.
+ """
- # this is a bit strange.
- # if mirror is set, then one of the legacy options above set it
- # but they do not cover security. so we need to get that from
- # get_package_mirror_info
- if mirror:
- mirror_info.update({'primary': mirror})
+ if arch is None:
+ arch = util.get_architecture()
+ LOG.debug("got arch for mirror selection: %s", arch)
+ pmirror = get_mirror(cfg, "primary", arch, cloud)
+ LOG.debug("got primary mirror: %s", pmirror)
+ smirror = get_mirror(cfg, "security", arch, cloud)
+ LOG.debug("got security mirror: %s", smirror)
+
+ mirror_info = update_mirror_info(pmirror, smirror, arch, cloud)
+
+ # less complex replacements use only MIRROR, derive from primary
+ mirror_info["MIRROR"] = mirror_info["PRIMARY"]
return mirror_info
def apply_apt_config(cfg, proxy_fname, config_fname):
+ """apply_apt_config
+ Applies any apt proxy or apt config settings if specified
+ """
# Set up any apt proxy
- cfgs = (('apt_proxy', 'Acquire::HTTP::Proxy "%s";'),
- ('apt_http_proxy', 'Acquire::HTTP::Proxy "%s";'),
- ('apt_ftp_proxy', 'Acquire::FTP::Proxy "%s";'),
- ('apt_https_proxy', 'Acquire::HTTPS::Proxy "%s";'))
+ cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
+ ('http_proxy', 'Acquire::http::Proxy "%s";'),
+ ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
+ ('https_proxy', 'Acquire::https::Proxy "%s";'))
proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
if len(proxies):
+ LOG.debug("write apt proxy info to %s", proxy_fname)
util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
elif os.path.isfile(proxy_fname):
util.del_file(proxy_fname)
+ LOG.debug("no apt proxy configured, removed %s", proxy_fname)
- if cfg.get('apt_config', None):
- util.write_file(config_fname, cfg.get('apt_config'))
+ if cfg.get('conf', None):
+ LOG.debug("write apt config info to %s", config_fname)
+ util.write_file(config_fname, cfg.get('conf'))
elif os.path.isfile(config_fname):
util.del_file(config_fname)
+ LOG.debug("no apt config configured, removed %s", config_fname)
+
+
+CONFIG_CLEANERS = {
+ 'cloud-init': clean_cloud_init,
+}
+
+# vi: ts=4 expandtab syntax=python
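
A before/after sketch of the config conversion this module now performs
(values below are made up; the function mutates and returns the passed dict):

    from cloudinit.config.cc_apt_configure import convert_to_v3_apt_format

    oldcfg = {'apt_mirror': 'http://us.archive.ubuntu.com/ubuntu/',
              'apt_proxy': 'http://proxy:3128'}
    newcfg = convert_to_v3_apt_format(oldcfg)
    # newcfg == {'apt': {'proxy': 'http://proxy:3128',
    #                    'primary': [{'arches': ['default'],
    #                                 'uri': 'http://us.archive.ubuntu.com/ubuntu/'}]}}
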
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 70d4e7c3..0086840f 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,6 +47,8 @@ Example config:
from cloudinit import util
+distros = ['ubuntu']
+
def handle(name, cfg, cloud, log, args):
# Get config
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 0c84d600..b3089f30 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -19,6 +19,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import errno
+
import six
from six import BytesIO
@@ -36,49 +38,61 @@ SERVER_CFG = '/etc/mcollective/server.cfg'
LOG = logging.getLogger(__name__)
-def configure(config):
- # Read server.cfg values from the
- # original file in order to be able to mix the rest up
+def configure(config, server_cfg=SERVER_CFG,
+ pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
+ # Read server.cfg (if it exists) values from the
+ # original file in order to be able to mix the rest up.
try:
- mcollective_config = ConfigObj(SERVER_CFG, file_error=True)
- except IOError:
- LOG.warn("Did not find file %s", SERVER_CFG)
- mcollective_config = ConfigObj(config)
- else:
- for (cfg_name, cfg) in config.items():
- if cfg_name == 'public-cert':
- util.write_file(PUBCERT_FILE, cfg, mode=0o644)
- mcollective_config[
- 'plugin.ssl_server_public'] = PUBCERT_FILE
- mcollective_config['securityprovider'] = 'ssl'
- elif cfg_name == 'private-cert':
- util.write_file(PRICERT_FILE, cfg, mode=0o600)
- mcollective_config[
- 'plugin.ssl_server_private'] = PRICERT_FILE
- mcollective_config['securityprovider'] = 'ssl'
+ old_contents = util.load_file(server_cfg, quiet=False, decode=False)
+ mcollective_config = ConfigObj(BytesIO(old_contents))
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ LOG.debug("Did not find file %s (starting with an empty"
+ " config)", server_cfg)
+ mcollective_config = ConfigObj()
+ for (cfg_name, cfg) in config.items():
+ if cfg_name == 'public-cert':
+ util.write_file(pubcert_file, cfg, mode=0o644)
+ mcollective_config[
+ 'plugin.ssl_server_public'] = pubcert_file
+ mcollective_config['securityprovider'] = 'ssl'
+ elif cfg_name == 'private-cert':
+ util.write_file(pricert_file, cfg, mode=0o600)
+ mcollective_config[
+ 'plugin.ssl_server_private'] = pricert_file
+ mcollective_config['securityprovider'] = 'ssl'
+ else:
+ if isinstance(cfg, six.string_types):
+ # Just set it in the 'main' section
+ mcollective_config[cfg_name] = cfg
+ elif isinstance(cfg, (dict)):
+ # Iterate through the config items, create a section if
+ # it is needed and then add/or create items as needed
+ if cfg_name not in mcollective_config.sections:
+ mcollective_config[cfg_name] = {}
+ for (o, v) in cfg.items():
+ mcollective_config[cfg_name][o] = v
else:
- if isinstance(cfg, six.string_types):
- # Just set it in the 'main' section
- mcollective_config[cfg_name] = cfg
- elif isinstance(cfg, (dict)):
- # Iterate through the config items, create a section if
- # it is needed and then add/or create items as needed
- if cfg_name not in mcollective_config.sections:
- mcollective_config[cfg_name] = {}
- for (o, v) in cfg.items():
- mcollective_config[cfg_name][o] = v
- else:
- # Otherwise just try to convert it to a string
- mcollective_config[cfg_name] = str(cfg)
- # We got all our config as wanted we'll rename
- # the previous server.cfg and create our new one
- util.rename(SERVER_CFG, "%s.old" % (SERVER_CFG))
-
- # Now we got the whole file, write to disk...
+ # Otherwise just try to convert it to a string
+ mcollective_config[cfg_name] = str(cfg)
+
+ try:
+ # Now that we have all our config as wanted, we'll copy
+ # the previous server.cfg and then overwrite the old with our new one
+ util.copy(server_cfg, "%s.old" % (server_cfg))
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ # Doesn't exist to copy...
+ pass
+ else:
+ raise
+
+ # Now that we have the whole (new) file, write it to disk...
contents = BytesIO()
mcollective_config.write(contents)
- contents = contents.getvalue()
- util.write_file(SERVER_CFG, contents, mode=0o644)
+ util.write_file(server_cfg, contents.getvalue(), mode=0o644)
def handle(name, cfg, cloud, log, _args):
@@ -98,5 +112,5 @@ def handle(name, cfg, cloud, log, _args):
if 'conf' in mcollective_cfg:
configure(config=mcollective_cfg['conf'])
- # Start mcollective
- util.subp(['service', 'mcollective', 'start'], capture=False)
+ # restart mcollective to handle updated config
+ util.subp(['service', 'mcollective', 'restart'], capture=False)
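
Usage sketch for the reworked configure() (path and values illustrative):
string values land in the main section, dicts become sections, and the
'public-cert'/'private-cert' keys are written to disk with the
securityprovider switched to 'ssl'.

    from cloudinit.config.cc_mcollective import configure

    configure(config={'loglevel': 'debug',  # plain value -> main section
                      'plugin.rabbitmq': {'vhost': 'mcollective'}},
              server_cfg='/tmp/mcollective-server.cfg')
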
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
new file mode 100644
index 00000000..ad69aa34
--- /dev/null
+++ b/cloudinit/config/cc_ntp.py
@@ -0,0 +1,106 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Ryan Harper <ryan.harper@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import templater
+from cloudinit import type_utils
+from cloudinit import util
+
+import os
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+NTP_CONF = '/etc/ntp.conf'
+NR_POOL_SERVERS = 4
+distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
+
+
+def handle(name, cfg, cloud, log, _args):
+ """
+ Enable and configure ntp
+
+ ntp:
+ pools: ['0.{{distro}}.pool.ntp.org', '1.{{distro}}.pool.ntp.org']
+ servers: ['192.168.2.1']
+
+ """
+
+ ntp_cfg = cfg.get('ntp', {})
+
+ if not isinstance(ntp_cfg, (dict)):
+ raise RuntimeError(("'ntp' key existed in config,"
+ " but not a dictionary type,"
+ " is a %s %instead"), type_utils.obj_name(ntp_cfg))
+
+ if 'ntp' not in cfg:
+ LOG.debug("Skipping module named %s,"
+ "not present or disabled by cfg", name)
+ return True
+
+ install_ntp(cloud.distro.install_packages, packages=['ntp'],
+ check_exe="ntpd")
+ rename_ntp_conf()
+ write_ntp_config_template(ntp_cfg, cloud)
+
+
+def install_ntp(install_func, packages=None, check_exe="ntpd"):
+ if util.which(check_exe):
+ return
+ if packages is None:
+ packages = ['ntp']
+
+ install_func(packages)
+
+
+def rename_ntp_conf(config=NTP_CONF):
+ if os.path.exists(config):
+ util.rename(config, config + ".dist")
+
+
+def generate_server_names(distro):
+ names = []
+ for x in range(0, NR_POOL_SERVERS):
+ name = "%d.%s.pool.ntp.org" % (x, distro)
+ names.append(name)
+ return names
+
+
+def write_ntp_config_template(cfg, cloud):
+ servers = cfg.get('servers', [])
+ pools = cfg.get('pools', [])
+
+ if len(servers) == 0 and len(pools) == 0:
+ LOG.debug('Adding distro default ntp pool servers')
+ pools = generate_server_names(cloud.distro.name)
+
+ params = {
+ 'servers': servers,
+ 'pools': pools,
+ }
+
+ template_fn = cloud.get_template_filename('ntp.conf.%s' %
+ (cloud.distro.name))
+ if not template_fn:
+ template_fn = cloud.get_template_filename('ntp.conf')
+ if not template_fn:
+ raise RuntimeError(("No template found, "
+ "not rendering %s"), NTP_CONF)
+
+ templater.render_to_file(template_fn, NTP_CONF, params)
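
With neither 'servers' nor 'pools' configured, the module falls back to
NR_POOL_SERVERS distro pool names, e.g. on an Ubuntu cloud:

    from cloudinit.config.cc_ntp import generate_server_names

    assert generate_server_names('ubuntu') == [
        '0.ubuntu.pool.ntp.org', '1.ubuntu.pool.ntp.org',
        '2.ubuntu.pool.ntp.org', '3.ubuntu.pool.ntp.org']
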
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 72176d42..ae720bd2 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -31,7 +31,7 @@ POST_LIST_ALL = [
'pub_key_ecdsa',
'instance_id',
'hostname',
- 'fdqn'
+ 'fqdn'
]
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 3a113aea..d4ad724a 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -18,6 +18,8 @@
from cloudinit import util
+distros = ['fedora', 'rhel']
+
def handle(name, cfg, _cloud, log, _args):
sm = SubscriptionManager(cfg)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index f5786a31..13d70c8e 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -46,7 +46,12 @@ def handle(name, cfg, cloud, log, _args):
# ... copy the key pair if specified
if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
- pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
+ if os.path.isdir("/etc/salt/pki/minion"):
+ pki_dir_default = "/etc/salt/pki/minion"
+ else:
+ pki_dir_default = "/etc/salt/pki"
+
+ pki_dir = salt_cfg.get('pki_dir', pki_dir_default)
with util.umask(0o77):
util.ensure_dir(pki_dir)
pub_name = os.path.join(pki_dir, 'minion.pub')
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 1a485ee6..6bcd8382 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -68,6 +68,8 @@ BUILTIN_CFG = {
'config': {},
}
+distros = ['ubuntu']
+
def parse_filename(fname):
fname = os.path.basename(fname)
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
new file mode 100644
index 00000000..f3c1a664
--- /dev/null
+++ b/cloudinit/config/cc_spacewalk.py
@@ -0,0 +1,85 @@
+# vi: ts=4 expandtab
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+**Summary:** helper to set up https://fedorahosted.org/spacewalk/
+
+**Description:** This module enables configuring the actions needed
+to set up spacewalk on redhat based systems.
+
+It can be configured with the following option structure::
+
+ spacewalk:
+ server: spacewalk api server (required)
+"""
+
+from cloudinit import util
+
+
+distros = ['redhat', 'fedora']
+required_packages = ['rhn-setup']
+def_ca_cert_path = "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT"
+
+
+def is_registered():
+ # Check to see if the system is already registered and don't bother;
+ # this is done by trying to sync, and if that fails we assume we
+ # aren't registered; a crude but workable heuristic...
+ already_registered = False
+ try:
+ util.subp(['rhn-profile-sync', '--verbose'], capture=False)
+ already_registered = True
+ except util.ProcessExecutionError as e:
+ if e.exit_code != 1:
+ raise
+ return already_registered
+
+
+def do_register(server, profile_name,
+ ca_cert_path=def_ca_cert_path,
+ proxy=None, log=None,
+ activation_key=None):
+ if log is not None:
+ log.info("Registering using `rhnreg_ks` profile '%s'"
+ " into server '%s'", profile_name, server)
+ cmd = ['rhnreg_ks']
+ cmd.extend(['--serverUrl', 'https://%s/XMLRPC' % server])
+ cmd.extend(['--profilename', str(profile_name)])
+ if proxy:
+ cmd.extend(["--proxy", str(proxy)])
+ if ca_cert_path:
+ cmd.extend(['--sslCACert', str(ca_cert_path)])
+ if activation_key:
+ cmd.extend(['--activationkey', str(activation_key)])
+ util.subp(cmd, capture=False)
+
+
+def handle(name, cfg, cloud, log, _args):
+ if 'spacewalk' not in cfg:
+ log.debug(("Skipping module named %s,"
+ " no 'spacewalk' key in configuration"), name)
+ return
+ cfg = cfg['spacewalk']
+ spacewalk_server = cfg.get('server')
+ if spacewalk_server:
+ # Need to have this installed before further things will work.
+ cloud.distro.install_packages(required_packages)
+ if not is_registered():
+ do_register(spacewalk_server,
+ cloud.datasource.get_hostname(fqdn=True),
+ proxy=cfg.get("proxy"), log=log,
+ activation_key=cfg.get('activation_key'))
+ else:
+ log.debug("Skipping module named %s, 'spacewalk/server' key"
+ " was not found in configuration", name)
diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py
index 884d79f1..bffb4380 100644
--- a/cloudinit/config/cc_ubuntu_init_switch.py
+++ b/cloudinit/config/cc_ubuntu_init_switch.py
@@ -86,6 +86,8 @@ else
fi
"""
+distros = ['ubuntu']
+
def handle(name, cfg, cloud, log, args):
"""Handler method activated by cloud-init."""
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 64fba869..22549e62 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -23,6 +23,8 @@ import six
from cloudinit import util
+distros = ['fedora', 'rhel']
+
def _canonicalize_id(repo_id):
repo_id = repo_id.lower().replace("-", "_")
diff --git a/cloudinit/dhclient_hook.py b/cloudinit/dhclient_hook.py
new file mode 100644
index 00000000..82cb1855
--- /dev/null
+++ b/cloudinit/dhclient_hook.py
@@ -0,0 +1,50 @@
+#!/usr/bin/python
+# vi: ts=4 expandtab
+
+import os
+
+from cloudinit import atomic_helper
+from cloudinit import log as logging
+from cloudinit import stages
+
+LOG = logging.getLogger(__name__)
+
+
+class LogDhclient(object):
+
+ def __init__(self, cli_args):
+ self.hooks_dir = self._get_hooks_dir()
+ self.net_interface = cli_args.net_interface
+ self.net_action = cli_args.net_action
+ self.hook_file = os.path.join(self.hooks_dir,
+ self.net_interface + ".json")
+
+ @staticmethod
+ def _get_hooks_dir():
+ i = stages.Init()
+ return os.path.join(i.paths.get_runpath(), 'dhclient.hooks')
+
+ def check_hooks_dir(self):
+ if not os.path.exists(self.hooks_dir):
+ os.makedirs(self.hooks_dir)
+ else:
+ # If the action is down and the json file exists, we need to
+ # delete the file
+ if self.net_action == 'down' and os.path.exists(self.hook_file):
+ os.remove(self.hook_file)
+
+ @staticmethod
+ def get_vals(info):
+ new_info = {}
+ for k, v in info.items():
+ if k.startswith("DHCP4_") or k.startswith("new_"):
+ key = (k.replace('DHCP4_', '').replace('new_', '')).lower()
+ new_info[key] = v
+ return new_info
+
+ def record(self):
+ envs = os.environ
+ if self.hook_file is None:
+ return
+ atomic_helper.write_json(self.hook_file, self.get_vals(envs))
+ LOG.debug("Wrote dhclient options in %s", self.hook_file)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 40af8802..b1192e84 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -112,7 +112,7 @@ class Distro(object):
raise NotImplementedError()
def get_primary_arch(self):
- arch = os.uname[4]
+ arch = os.uname()[4]
if arch in ("i386", "i486", "i586", "i686"):
return "i386"
return arch
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 6267dd6e..1865dc69 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -1,8 +1,10 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2014 Rackspace, US Inc.
+# Copyright (C) 2016 Matthew Thode.
#
# Author: Nate House <nathan.house@rackspace.com>
+# Author: Matthew Thode <prometheanfire@gentoo.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -21,6 +23,7 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros import net_util
from cloudinit.distros.parsers.hostname import HostnameConf
from cloudinit.settings import PER_INSTANCE
@@ -29,9 +32,11 @@ LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
- locale_conf_fn = "/etc/locale.gen"
- network_conf_fn = "/etc/conf.d/net"
- init_cmd = [''] # init scripts
+ locale_conf_fn = '/etc/locale.gen'
+ network_conf_fn = '/etc/conf.d/net'
+ resolve_conf_fn = '/etc/resolv.conf'
+ hostname_conf_fn = '/etc/conf.d/hostname'
+ init_cmd = ['service'] # init scripts
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -50,7 +55,7 @@ class Distro(distros.Distro):
# "" provides trailing newline during join
lines = [
util.make_header(),
- 'LANG="%s"' % (locale),
+ 'LANG="%s"' % locale,
"",
]
util.write_file(out_fn, "\n".join(lines))
@@ -60,8 +65,66 @@ class Distro(distros.Distro):
self.package_command('', pkgs=pkglist)
def _write_network(self, settings):
- util.write_file(self.network_conf_fn, settings)
- return ['all']
+ entries = net_util.translate_network(settings)
+ LOG.debug("Translated ubuntu style network settings %s into %s",
+ settings, entries)
+ dev_names = entries.keys()
+ nameservers = []
+
+ for (dev, info) in entries.items():
+ if 'dns-nameservers' in info:
+ nameservers.extend(info['dns-nameservers'])
+ if dev == 'lo':
+ continue
+ net_fn = self.network_conf_fn + '.' + dev
+ dns_nameservers = info.get('dns-nameservers')
+ if isinstance(dns_nameservers, (list, tuple)):
+ dns_nameservers = ' '.join(dns_nameservers)  # space-separated list
+ # eth0, {'auto': True, 'ipv6': {}, 'bootproto': 'dhcp'}
+ # lo, {'dns-nameservers': ['10.0.1.3'], 'ipv6': {}, 'auto': True}
+ results = ''
+ if info.get('bootproto') == 'dhcp':
+ results += 'config_{name}="dhcp"'.format(name=dev)
+ else:
+ results += (
+ 'config_{name}="{ip_address} netmask {netmask}"\n'
+ 'mac_{name}="{hwaddr}"\n'
+ ).format(name=dev, ip_address=info.get('address'),
+ netmask=info.get('netmask'),
+ hwaddr=info.get('hwaddress'))
+ results += 'routes_{name}="default via {gateway}"\n'.format(
+ name=dev,
+ gateway=info.get('gateway')
+ )
+ if info.get('dns-nameservers'):
+ results += 'dns_servers_{name}="{dnsservers}"\n'.format(
+ name=dev,
+ dnsservers=dns_nameservers)
+ util.write_file(net_fn, results)
+ self._create_network_symlink(dev)
+ if info.get('auto'):
+ cmd = ['rc-update', 'add', 'net.{name}'.format(name=dev),
+ 'default']
+ try:
+ (_out, err) = util.subp(cmd)
+ if len(err):
+ LOG.warn("Running %s resulted in stderr output: %s",
+ cmd, err)
+ except util.ProcessExecutionError:
+ util.logexc(LOG, "Running interface command %s failed",
+ cmd)
+
+ if nameservers:
+ util.write_file(self.resolve_conf_fn,
+ convert_resolv_conf(nameservers))
+
+ return dev_names
+
+ @staticmethod
+ def _create_network_symlink(interface_name):
+ file_path = '/etc/init.d/net.{name}'.format(name=interface_name)
+ if not util.is_link(file_path):
+ util.sym_link('/etc/init.d/net.lo', file_path)
def _bring_up_interface(self, device_name):
cmd = ['/etc/init.d/net.%s' % device_name, 'restart']
@@ -108,13 +171,16 @@ class Distro(distros.Distro):
if not conf:
conf = HostnameConf('')
conf.set_hostname(your_hostname)
- util.write_file(out_fn, conf, 0o644)
+ gentoo_hostname_config = 'hostname="%s"' % conf
+ gentoo_hostname_config = gentoo_hostname_config.replace('\n', '')
+ util.write_file(out_fn, gentoo_hostname_config, 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
+ return self.hostname_conf_fn, sys_hostname
- def _read_hostname_conf(self, filename):
+ @staticmethod
+ def _read_hostname_conf(filename):
conf = HostnameConf(util.load_file(filename))
conf.parse()
return conf
@@ -137,7 +203,7 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- cmd = ['emerge']
+ cmd = ['emerge']  # base package command
# Redirect output
cmd.append("--quiet")
@@ -158,3 +224,12 @@ class Distro(distros.Distro):
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
["-u", "world"], freq=PER_INSTANCE)
+
+
+def convert_resolv_conf(settings):
+ """Returns a settings string formatted for resolv.conf."""
+ result = ''
+ if isinstance(settings, list):
+ for ns in settings:
+ result += 'nameserver %s\n' % ns
+ return result
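
The nameservers gathered in _write_network() end up in resolv.conf via the
new helper, e.g. (addresses illustrative):

    from cloudinit.distros.gentoo import convert_resolv_conf

    assert convert_resolv_conf(['10.0.1.3', '8.8.8.8']) == (
        'nameserver 10.0.1.3\nnameserver 8.8.8.8\n')
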
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index 6a76d785..5bbff513 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -36,11 +36,11 @@ def export_armour(key):
return armour
-def receive_key(key, keyserver):
+def recv_key(key, keyserver):
"""Receive gpg key from the specified keyserver"""
LOG.debug('Receive gpg key "%s"', key)
try:
- util.subp(["gpg", "--keyserver", keyserver, "--recv-keys", key],
+ util.subp(["gpg", "--keyserver", keyserver, "--recv", key],
capture=True)
except util.ProcessExecutionError as error:
raise ValueError(('Failed to import key "%s" '
@@ -57,12 +57,12 @@ def delete_key(key):
LOG.warn('Failed delete key "%s": %s', key, error)
-def get_key_by_id(keyid, keyserver="keyserver.ubuntu.com"):
+def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'):
"""get gpg keyid from keyserver"""
armour = export_armour(keyid)
if not armour:
try:
- receive_key(keyid, keyserver=keyserver)
+ recv_key(keyid, keyserver=keyserver)
armour = export_armour(keyid)
except ValueError:
LOG.exception('Failed to obtain gpg key %s', keyid)
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 21cc602b..7e58bfea 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -36,7 +36,7 @@ def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None):
try:
contents = util.load_file(sys_dev_path(devname, path))
except (OSError, IOError) as e:
- if getattr(e, 'errno', None) == errno.ENOENT:
+ if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR):
if enoent is not None:
return enoent
raise
@@ -347,7 +347,12 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
def get_interface_mac(ifname):
"""Returns the string value of an interface's MAC Address"""
- return read_sys_net(ifname, "address", enoent=False)
+ path = "address"
+ if os.path.isdir(sys_dev_path(ifname, "bonding_slave")):
+ # for a bond slave, get the nic's hwaddress, not the address it
+ # is using because its part of a bond.
+ path = "bonding_slave/perm_hwaddr"
+ return read_sys_net(ifname, path, enoent=False)
def get_interfaces_by_mac(devs=None):
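
A standalone sketch of the path selection above (sysfs layout per the
kernel bonding driver; device names are examples):

    import os.path

    def mac_sysfs_path(ifname, sysfs='/sys/class/net'):
        # a bond slave's plain 'address' reports the bond's MAC, so
        # prefer the permanent hardware address when one exists
        slave_dir = os.path.join(sysfs, ifname, 'bonding_slave')
        if os.path.isdir(slave_dir):
            return os.path.join(slave_dir, 'perm_hwaddr')
        return os.path.join(sysfs, ifname, 'address')
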
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index eff5b924..cd533ddb 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -399,7 +399,7 @@ class Renderer(renderer.Renderer):
else:
# ifenslave docs say to auto the slave devices
lines = []
- if 'bond-master' in iface:
+ if 'bond-master' in iface or 'bond-slaves' in iface:
lines.append("auto {name}".format(**iface))
lines.append("iface {name} {inet} {mode}".format(**iface))
lines.extend(_iface_add_attrs(iface, index=0))
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
index 0d95f506..d76223d1 100644
--- a/cloudinit/signal_handler.py
+++ b/cloudinit/signal_handler.py
@@ -54,7 +54,7 @@ def _pprint_frame(frame, depth, max_depth, contents):
def _handle_exit(signum, frame):
(msg, rc) = EXIT_FOR[signum]
- msg = msg % ({'version': vr.version()})
+ msg = msg % ({'version': vr.version_string()})
contents = StringIO()
contents.write("%s\n" % (msg))
_pprint_frame(frame, 1, BACK_FRAME_TRACE_DEPTH, contents)
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index a3529609..48136f7c 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -110,12 +110,6 @@ class DataSourceAltCloud(sources.DataSource):
'''
- uname_arch = os.uname()[4]
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmi data is not available on ARM processors
- LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
- return 'UNKNOWN'
-
system_name = util.read_dmi_data("system-product-name")
if not system_name:
return 'UNKNOWN'
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 8c7e8673..dbc2bb68 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -20,18 +20,17 @@ import base64
import contextlib
import crypt
import fnmatch
+from functools import partial
import os
import os.path
import time
-import xml.etree.ElementTree as ET
-
from xml.dom import minidom
-
-from cloudinit.sources.helpers.azure import get_metadata_from_fabric
+import xml.etree.ElementTree as ET
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
from cloudinit import sources
+from cloudinit.sources.helpers.azure import get_metadata_from_fabric
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -55,6 +54,7 @@ BUILTIN_DS_CONFIG = {
'hostname_command': 'hostname',
},
'disk_aliases': {'ephemeral0': '/dev/sdb'},
+ 'dhclient_lease_file': '/var/lib/dhcp/dhclient.eth0.leases',
}
BUILTIN_CLOUD_CONFIG = {
@@ -115,6 +115,7 @@ class DataSourceAzureNet(sources.DataSource):
self.ds_cfg = util.mergemanydict([
util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
BUILTIN_DS_CONFIG])
+ self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -123,6 +124,9 @@ class DataSourceAzureNet(sources.DataSource):
def get_metadata_from_agent(self):
temp_hostname = self.metadata.get('local-hostname')
hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
+ agent_cmd = self.ds_cfg['agent_command']
+ LOG.debug("Getting metadata via agent. hostname=%s cmd=%s",
+ temp_hostname, agent_cmd)
with temporary_hostname(temp_hostname, self.ds_cfg,
hostname_command=hostname_command) \
as previous_hostname:
@@ -138,7 +142,7 @@ class DataSourceAzureNet(sources.DataSource):
util.logexc(LOG, "handling set_hostname failed")
try:
- invoke_agent(self.ds_cfg['agent_command'])
+ invoke_agent(agent_cmd)
except util.ProcessExecutionError:
# claim the datasource even if the command failed
util.logexc(LOG, "agent command '%s' failed.",
@@ -226,16 +230,18 @@ class DataSourceAzureNet(sources.DataSource):
write_files(ddir, files, dirmode=0o700)
if self.ds_cfg['agent_command'] == '__builtin__':
- metadata_func = get_metadata_from_fabric
+ metadata_func = partial(get_metadata_from_fabric,
+ fallback_lease_file=self.
+ dhclient_lease_file)
else:
metadata_func = self.get_metadata_from_agent
+
try:
fabric_data = metadata_func()
except Exception as exc:
LOG.info("Error communicating with Azure fabric; assume we aren't"
" on Azure.", exc_info=True)
return False
-
self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
self.metadata.update(fabric_data)
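
functools.partial is used here so that the later metadata_func() call needs no arguments in either branch: the builtin path pre-binds the lease file, the agent path is already a bound method. A sketch of the pattern with stand-in functions (not the real cloud-init helpers):

    from functools import partial

    def get_metadata_from_fabric(fallback_lease_file=None):
        return {'from': 'fabric', 'lease': fallback_lease_file}

    def get_metadata_from_agent():
        return {'from': 'agent'}

    agent_command = '__builtin__'
    lease_file = '/var/lib/dhcp/dhclient.eth0.leases'

    if agent_command == '__builtin__':
        # pre-bind the keyword so both branches expose a no-arg callable
        metadata_func = partial(get_metadata_from_fabric,
                                fallback_lease_file=lease_file)
    else:
        metadata_func = get_metadata_from_agent

    print(metadata_func())   # called uniformly, no arguments
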
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index d1f806d6..be74503b 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -16,7 +16,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from base64 import b64decode
-import os
import re
from cloudinit.cs_utils import Cepko
@@ -45,11 +44,6 @@ class DataSourceCloudSigma(sources.DataSource):
Uses dmi data to detect if this instance of cloud-init is running
in the CloudSigma's infrastructure.
"""
- uname_arch = os.uname()[4]
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmi data on ARM processors
- LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
- return False
LOG.debug("determining hypervisor product name via dmi data")
sys_product_name = util.read_dmi_data("system-product-name")
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 91d6ff13..5c9edabe 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -134,7 +134,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
vd = results.get('vendordata')
self.vendordata_pure = vd
try:
- self.vendordata_raw = openstack.convert_vendordata_json(vd)
+ self.vendordata_raw = sources.convert_vendordata(vd)
except ValueError as e:
LOG.warn("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 44a17a00..fc596e17 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -1,6 +1,7 @@
# vi: ts=4 expandtab
#
# Author: Neal Shrader <neal@digitalocean.com>
+# Author: Ben Howard <bh@digitalocean.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -14,22 +15,27 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit import ec2_utils
+# DigitalOcean Droplet API:
+# https://developers.digitalocean.com/documentation/metadata/
+
+import json
+
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import url_helper
from cloudinit import util
-import functools
-
-
LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://169.254.169.254/metadata/v1/',
- 'mirrors_url': 'http://mirrors.digitalocean.com/'
+ 'metadata_url': 'http://169.254.169.254/metadata/v1.json',
}
-MD_RETRIES = 0
-MD_TIMEOUT = 1
+
+# Wait for up to a minute, retrying the meta-data server
+# every 2 seconds.
+MD_RETRIES = 30
+MD_TIMEOUT = 2
+MD_WAIT_RETRY = 2
class DataSourceDigitalOcean(sources.DataSource):
@@ -40,43 +46,61 @@ class DataSourceDigitalOcean(sources.DataSource):
util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
BUILTIN_DS_CONFIG])
self.metadata_address = self.ds_cfg['metadata_url']
+ self.retries = self.ds_cfg.get('retries', MD_RETRIES)
+ self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
+ self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
- if self.ds_cfg.get('retries'):
- self.retries = self.ds_cfg['retries']
- else:
- self.retries = MD_RETRIES
+ def _get_sysinfo(self):
+ # DigitalOcean embeds vendor ID and instance/droplet_id in the
+ # SMBIOS information
- if self.ds_cfg.get('timeout'):
- self.timeout = self.ds_cfg['timeout']
- else:
- self.timeout = MD_TIMEOUT
+ LOG.debug("checking if instance is a DigitalOcean droplet")
+
+ # Detect if we are on DigitalOcean and return the Droplet's ID
+ vendor_name = util.read_dmi_data("system-manufacturer")
+ if vendor_name != "DigitalOcean":
+ return (False, None)
- def get_data(self):
- caller = functools.partial(util.read_file_or_url,
- timeout=self.timeout, retries=self.retries)
+ LOG.info("running on DigitalOcean")
- def mcaller(url):
- return caller(url).contents
+ droplet_id = util.read_dmi_data("system-serial-number")
+ if droplet_id:
+ LOG.debug(("system identified via SMBIOS as DigitalOcean Droplet"
+ "{}").format(droplet_id))
+ else:
+ LOG.critical(("system identified via SMBIOS as a DigitalOcean "
+ "Droplet, but did not provide an ID. Please file a "
+ "support ticket at: "
+ "https://cloud.digitalocean.com/support/tickets/"
+ "new"))
- md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address),
- base_url=self.metadata_address,
- caller=mcaller)
+ return (True, droplet_id)
- self.metadata = md.materialize()
+ def get_data(self, apply_filter=False):
+ (is_do, droplet_id) = self._get_sysinfo()
- if self.metadata.get('id'):
- return True
- else:
+ # only proceed if we know we are on DigitalOcean
+ if not is_do:
return False
- def get_userdata_raw(self):
- return "\n".join(self.metadata['user-data'])
+ LOG.debug("reading metadata from {}".format(self.metadata_address))
+ response = url_helper.readurl(self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries)
- def get_vendordata_raw(self):
- return "\n".join(self.metadata['vendor-data'])
+ contents = util.decode_binary(response.contents)
+ decoded = json.loads(contents)
+
+ self.metadata = decoded
+ self.metadata['instance-id'] = decoded.get('droplet_id', droplet_id)
+ self.metadata['local-hostname'] = decoded.get('hostname', droplet_id)
+ self.vendordata_raw = decoded.get("vendor_data", None)
+ self.userdata_raw = decoded.get("user_data", None)
+ return True
def get_public_ssh_keys(self):
- public_keys = self.metadata['public-keys']
+ public_keys = self.metadata.get('public_keys', [])
if isinstance(public_keys, list):
return public_keys
else:
@@ -84,21 +108,17 @@ class DataSourceDigitalOcean(sources.DataSource):
@property
def availability_zone(self):
- return self.metadata['region']
-
- def get_instance_id(self):
- return self.metadata['id']
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- return self.metadata['hostname']
-
- def get_package_mirror_info(self):
- return self.ds_cfg['mirrors_url']
+ return self.metadata.get('region', 'default')
@property
def launch_index(self):
return None
+ def check_instance_id(self, sys_cfg):
+ return sources.instance_id_matches_system_uuid(
+ self.get_instance_id(), 'system-serial-number')
+
+
# Used to match classes to dependencies
datasources = [
(DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
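
The rewrite splits detection (DMI system-manufacturer equal to "DigitalOcean") from a single JSON fetch of the droplet metadata endpoint. A hedged sketch of the fetch-and-map step using only the standard library (the real code goes through url_helper.readurl with retries, and the link-local address only answers from inside a droplet):

    import json
    import urllib.request

    MD_URL = 'http://169.254.169.254/metadata/v1.json'

    def read_droplet_metadata(url=MD_URL, timeout=2):
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            decoded = json.loads(resp.read().decode('utf-8'))
        # map the droplet's field names onto the keys cloud-init expects
        return {
            'instance-id': decoded.get('droplet_id'),
            'local-hostname': decoded.get('hostname'),
            'user-data': decoded.get('user_data'),
            'vendor-data': decoded.get('vendor_data'),
            'public-keys': decoded.get('public_keys', []),
            'region': decoded.get('region', 'default'),
        }
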
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index c660a350..6c12d703 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -31,7 +31,7 @@ REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
class GoogleMetadataFetcher(object):
- headers = {'X-Google-Metadata-Request': True}
+ headers = {'X-Google-Metadata-Request': 'True'}
def __init__(self, metadata_address):
self.metadata_address = metadata_address
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index d828f078..ab93c0a2 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -20,7 +20,6 @@
from __future__ import print_function
-import errno
import os
import time
@@ -32,7 +31,14 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
MD_VERSION = "2012-03-01"
-BINARY_FIELDS = ('user-data',)
+DS_FIELDS = [
+ # remote path, location in dictionary, binary data?, optional?
+ ("meta-data/instance-id", 'meta-data/instance-id', False, False),
+ ("meta-data/local-hostname", 'meta-data/local-hostname', False, False),
+ ("meta-data/public-keys", 'meta-data/public-keys', False, True),
+ ('meta-data/vendor-data', 'vendor-data', True, True),
+ ('user-data', 'user-data', True, True),
+]
class DataSourceMAAS(sources.DataSource):
@@ -43,6 +49,7 @@ class DataSourceMAAS(sources.DataSource):
instance-id
user-data
hostname
+ vendor-data
"""
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -71,10 +78,7 @@ class DataSourceMAAS(sources.DataSource):
mcfg = self.ds_cfg
try:
- (userdata, metadata) = read_maas_seed_dir(self.seed_dir)
- self.userdata_raw = userdata
- self.metadata = metadata
- self.base_url = self.seed_dir
+ self._set_data(self.seed_dir, read_maas_seed_dir(self.seed_dir))
return True
except MAASSeedDirNone:
pass
@@ -95,18 +99,29 @@ class DataSourceMAAS(sources.DataSource):
if not self.wait_for_metadata_service(url):
return False
- self.base_url = url
-
- (userdata, metadata) = read_maas_seed_url(
- self.base_url, read_file_or_url=self.oauth_helper.readurl,
- paths=self.paths, retries=1)
- self.userdata_raw = userdata
- self.metadata = metadata
+ self._set_data(
+ url, read_maas_seed_url(
+ url, read_file_or_url=self.oauth_helper.readurl,
+ paths=self.paths, retries=1))
return True
except Exception:
util.logexc(LOG, "Failed fetching metadata from url %s", url)
return False
+ def _set_data(self, url, data):
+ # takes a url for base_url and a tuple of userdata, metadata, vd.
+ self.base_url = url
+ ud, md, vd = data
+ self.userdata_raw = ud
+ self.metadata = md
+ self.vendordata_pure = vd
+ if vd:
+ try:
+ self.vendordata_raw = sources.convert_vendordata(vd)
+ except ValueError as e:
+ LOG.warn("Invalid content in vendor-data: %s", e)
+ self.vendordata_raw = None
+
def wait_for_metadata_service(self, url):
mcfg = self.ds_cfg
max_wait = 120
@@ -126,6 +141,8 @@ class DataSourceMAAS(sources.DataSource):
LOG.warn("Failed to get timeout, using %s" % timeout)
starttime = time.time()
+ if url.endswith("/"):
+ url = url[:-1]
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
url = self.oauth_helper.wait_for_url(
@@ -141,27 +158,13 @@ class DataSourceMAAS(sources.DataSource):
def read_maas_seed_dir(seed_d):
- """
- Return user-data and metadata for a maas seed dir in seed_d.
- Expected format of seed_d are the following files:
- * instance-id
- * local-hostname
- * user-data
- """
- if not os.path.isdir(seed_d):
+ if seed_d.startswith("file://"):
+ seed_d = seed_d[7:]
+ if not os.path.isdir(seed_d) or len(os.listdir(seed_d)) == 0:
raise MAASSeedDirNone("%s: not a directory")
- files = ('local-hostname', 'instance-id', 'user-data', 'public-keys')
- md = {}
- for fname in files:
- try:
- md[fname] = util.load_file(os.path.join(seed_d, fname),
- decode=fname not in BINARY_FIELDS)
- except IOError as e:
- if e.errno != errno.ENOENT:
- raise
-
- return check_seed_contents(md, seed_d)
+ # with version=None, files are read from seed_d itself, not seed_d/VERSION
+ return read_maas_seed_url("file://%s" % seed_d, version=None)
def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
@@ -175,73 +178,78 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
* <seed_url>/<version>/meta-data/instance-id
* <seed_url>/<version>/meta-data/local-hostname
* <seed_url>/<version>/user-data
+ If version is None, then <version>/ will not be used.
"""
- base_url = "%s/%s" % (seed_url, version)
- file_order = [
- 'local-hostname',
- 'instance-id',
- 'public-keys',
- 'user-data',
- ]
- files = {
- 'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'),
- 'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'),
- 'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
- 'user-data': "%s/%s" % (base_url, 'user-data'),
- }
-
if read_file_or_url is None:
read_file_or_url = util.read_file_or_url
+ if seed_url.endswith("/"):
+ seed_url = seed_url[:-1]
+
md = {}
- for name in file_order:
- url = files.get(name)
- if name == 'user-data':
- item_retries = 0
+ for path, dictname, binary, optional in DS_FIELDS:
+ if version is None:
+ url = "%s/%s" % (seed_url, path)
else:
- item_retries = retries
-
+ url = "%s/%s/%s" % (seed_url, version, path)
try:
ssl_details = util.fetch_ssl_details(paths)
- resp = read_file_or_url(url, retries=item_retries,
- timeout=timeout, ssl_details=ssl_details)
+ resp = read_file_or_url(url, retries=retries, timeout=timeout,
+ ssl_details=ssl_details)
if resp.ok():
- if name in BINARY_FIELDS:
- md[name] = resp.contents
+ if binary:
+ md[path] = resp.contents
else:
- md[name] = util.decode_binary(resp.contents)
+ md[path] = util.decode_binary(resp.contents)
else:
LOG.warn(("Fetching from %s resulted in"
" an invalid http code %s"), url, resp.code)
except url_helper.UrlError as e:
- if e.code != 404:
- raise
+ if e.code == 404 and not optional:
+ raise MAASSeedDirMalformed(
+ "Missing required %s: %s" % (path, e))
+ elif e.code != 404:
+ raise e
+
return check_seed_contents(md, seed_url)
def check_seed_contents(content, seed):
- """Validate if content is Is the content a dict that is valid as a
- return for a datasource.
- Either return a (userdata, metadata) tuple or
+ """Validate if dictionary content valid as a return for a datasource.
+ Either return a (userdata, metadata, vendordata) tuple or
Raise MAASSeedDirMalformed or MAASSeedDirNone
"""
- md_required = ('instance-id', 'local-hostname')
- if len(content) == 0:
+ ret = {}
+ missing = []
+ for spath, dpath, _binary, optional in DS_FIELDS:
+ if spath not in content:
+ if not optional:
+ missing.append(spath)
+ continue
+
+ if "/" in dpath:
+ top, _, p = dpath.partition("/")
+ if top not in ret:
+ ret[top] = {}
+ ret[top][p] = content[spath]
+ else:
+ ret[dpath] = content[spath]
+
+ if len(ret) == 0:
raise MAASSeedDirNone("%s: no data files found" % seed)
- found = list(content.keys())
- missing = [k for k in md_required if k not in found]
- if len(missing):
+ if missing:
raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
- userdata = content.get('user-data', b"")
- md = {}
- for (key, val) in content.items():
- if key == 'user-data':
- continue
- md[key] = val
+ vd_data = None
+ if ret.get('vendor-data'):
+ err = object()
+ vd_data = util.load_yaml(ret.get('vendor-data'), default=err,
+ allowed=(object))
+ if vd_data is err:
+ raise MAASSeedDirMalformed("vendor-data was not loadable as yaml.")
- return (userdata, md)
+ return ret.get('user-data'), ret.get('meta-data'), vd_data
class MAASSeedDirNone(Exception):
@@ -272,6 +280,7 @@ if __name__ == "__main__":
"""
import argparse
import pprint
+ import sys
parser = argparse.ArgumentParser(description='Interact with MAAS DS')
parser.add_argument("--config", metavar="file",
@@ -289,17 +298,25 @@ if __name__ == "__main__":
default=MD_VERSION)
subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
- subcmds.add_parser('crawl', help="crawl the datasource")
- subcmds.add_parser('get', help="do a single GET of provided url")
- subcmds.add_parser('check-seed', help="read andn verify seed at url")
-
- parser.add_argument("url", help="the data source to query")
+ for (name, help) in (('crawl', 'crawl the datasource'),
+ ('get', 'do a single GET of provided url'),
+ ('check-seed', 'read and verify seed at url')):
+ p = subcmds.add_parser(name, help=help)
+ p.add_argument("url", help="the datasource url", nargs='?',
+ default=None)
args = parser.parse_args()
creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
'token_secret': args.tsec, 'consumer_secret': args.csec}
+ maaspkg_cfg = "/etc/cloud/cloud.cfg.d/90_dpkg_maas.cfg"
+ if (args.config is None and args.url is None and
+ os.path.exists(maaspkg_cfg) and
+ os.access(maaspkg_cfg, os.R_OK)):
+ sys.stderr.write("Used config in %s.\n" % maaspkg_cfg)
+ args.config = maaspkg_cfg
+
if args.config:
cfg = util.read_conf(args.config)
if 'datasource' in cfg:
@@ -307,6 +324,12 @@ if __name__ == "__main__":
for key in creds.keys():
if key in cfg and creds[key] is None:
creds[key] = cfg[key]
+ if args.url is None and 'metadata_url' in cfg:
+ args.url = cfg['metadata_url']
+
+ if args.url is None:
+ sys.stderr.write("Must provide a url or a config with url.\n")
+ sys.exit(1)
oauth_helper = url_helper.OauthUrlHelper(**creds)
@@ -331,16 +354,20 @@ if __name__ == "__main__":
printurl(url)
if args.subcmd == "check-seed":
+ sys.stderr.write("Checking seed at %s\n" % args.url)
readurl = oauth_helper.readurl
if args.url[0] == "/" or args.url.startswith("file://"):
- readurl = None
- (userdata, metadata) = read_maas_seed_url(
- args.url, version=args.apiver, read_file_or_url=readurl,
- retries=2)
- print("=== userdata ===")
- print(userdata.decode())
- print("=== metadata ===")
+ (userdata, metadata, vd) = read_maas_seed_dir(args.url)
+ else:
+ (userdata, metadata, vd) = read_maas_seed_url(
+ args.url, version=args.apiver, read_file_or_url=readurl,
+ retries=2)
+ print("=== user-data ===")
+ print("N/A" if userdata is None else userdata.decode())
+ print("=== meta-data ===")
pprint.pprint(metadata)
+ print("=== vendor-data ===")
+ pprint.pprint("N/A" if vd is None else vd)
elif args.subcmd == "get":
printurl(args.url)
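
The DS_FIELDS table added at the top of this file drives both the fetch loop and check_seed_contents(): each tuple is (remote path, destination key, binary?, optional?), and destination keys containing '/' are fanned out into a nested dict. A sketch of just the fan-out, reusing the same tuple layout:

    DS_FIELDS = [
        # remote path, location in dictionary, binary data?, optional?
        ("meta-data/instance-id", 'meta-data/instance-id', False, False),
        ("meta-data/local-hostname", 'meta-data/local-hostname', False, False),
        ("meta-data/public-keys", 'meta-data/public-keys', False, True),
        ('meta-data/vendor-data', 'vendor-data', True, True),
        ('user-data', 'user-data', True, True),
    ]

    def fan_out(content):
        # nest 'meta-data/x' keys under a 'meta-data' dict, as
        # check_seed_contents() does
        ret, missing = {}, []
        for spath, dpath, _binary, optional in DS_FIELDS:
            if spath not in content:
                if not optional:
                    missing.append(spath)
                continue
            if '/' in dpath:
                top, _, rest = dpath.partition('/')
                ret.setdefault(top, {})[rest] = content[spath]
            else:
                ret[dpath] = content[spath]
        return ret, missing

    print(fan_out({'meta-data/instance-id': 'i-1',
                   'meta-data/local-hostname': 'host1'}))
    # ({'meta-data': {'instance-id': 'i-1', 'local-hostname': 'host1'}}, [])
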
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index cdc9eef5..e6a0b5fe 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -52,7 +52,7 @@ class DataSourceNoCloud(sources.DataSource):
found = []
mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
- 'network-config': {}}
+ 'network-config': None}
try:
# Parse the kernel command line, getting data passed in
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 7b3a76b9..635a836c 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -407,7 +407,7 @@ def read_context_disk_dir(source_dir, asuser=None):
# http://opennebula.org/documentation:rel3.8:cong#network_configuration
for k in context:
if re.match(r'^ETH\d+_IP$', k):
- (out, _) = util.subp(['/sbin/ip', 'link'])
+ (out, _) = util.subp(['ip', 'link'])
net = OpenNebulaNetwork(out, context)
results['network-interfaces'] = net.gen_conf()
break
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index c06d17f3..82558214 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -138,7 +138,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
vd = results.get('vendordata')
self.vendordata_pure = vd
try:
- self.vendordata_raw = openstack.convert_vendordata_json(vd)
+ self.vendordata_raw = sources.convert_vendordata(vd)
except ValueError as e:
LOG.warn("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index ccc86883..143ab368 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -60,11 +60,15 @@ SMARTOS_ATTRIB_MAP = {
'availability_zone': ('sdc:datacenter_name', True),
'vendor-data': ('sdc:vendor-data', False),
'operator-script': ('sdc:operator-script', False),
+ 'hostname': ('sdc:hostname', True),
+ 'dns_domain': ('sdc:dns_domain', True),
}
SMARTOS_ATTRIB_JSON = {
# Cloud-init Key : (SmartOS Key known JSON)
'network-data': 'sdc:nics',
+ 'dns_servers': 'sdc:resolvers',
+ 'routes': 'sdc:routes',
}
SMARTOS_ENV_LX_BRAND = "lx-brand"
@@ -311,7 +315,10 @@ class DataSourceSmartOS(sources.DataSource):
if self._network_config is None:
if self.network_data is not None:
self._network_config = (
- convert_smartos_network_data(self.network_data))
+ convert_smartos_network_data(
+ network_data=self.network_data,
+ dns_servers=self.metadata['dns_servers'],
+ dns_domain=self.metadata['dns_domain']))
return self._network_config
@@ -445,7 +452,8 @@ class JoyentMetadataClient(object):
class JoyentMetadataSocketClient(JoyentMetadataClient):
- def __init__(self, socketpath):
+ def __init__(self, socketpath, smartos_type=SMARTOS_ENV_LX_BRAND):
+ super(JoyentMetadataSocketClient, self).__init__(smartos_type)
self.socketpath = socketpath
def open_transport(self):
@@ -461,7 +469,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
class JoyentMetadataSerialClient(JoyentMetadataClient):
- def __init__(self, device, timeout=10, smartos_type=None):
+ def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM):
super(JoyentMetadataSerialClient, self).__init__(smartos_type)
self.device = device
self.timeout = timeout
@@ -583,7 +591,8 @@ def jmc_client_factory(
device=serial_device, timeout=serial_timeout,
smartos_type=smartos_type)
elif smartos_type == SMARTOS_ENV_LX_BRAND:
- return JoyentMetadataSocketClient(socketpath=metadata_sockfile)
+ return JoyentMetadataSocketClient(socketpath=metadata_sockfile,
+ smartos_type=smartos_type)
raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
@@ -644,14 +653,8 @@ def write_boot_content(content, content_f, link=None, shebang=False,
util.logexc(LOG, "failed establishing content link: %s", e)
-def get_smartos_environ(uname_version=None, product_name=None,
- uname_arch=None):
+def get_smartos_environ(uname_version=None, product_name=None):
uname = os.uname()
- if uname_arch is None:
- uname_arch = uname[4]
-
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- return None
# SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
# report 'BrandZ virtual linux' as the kernel version
@@ -671,8 +674,9 @@ def get_smartos_environ(uname_version=None, product_name=None,
return None
-# Covert SMARTOS 'sdc:nics' data to network_config yaml
-def convert_smartos_network_data(network_data=None):
+# Convert SMARTOS 'sdc:nics' data to network_config yaml
+def convert_smartos_network_data(network_data=None,
+ dns_servers=None, dns_domain=None):
"""Return a dictionary of network_config by parsing provided
SMARTOS sdc:nics configuration data
@@ -706,9 +710,7 @@ def convert_smartos_network_data(network_data=None):
'broadcast',
'dns_nameservers',
'dns_search',
- 'gateway',
'metric',
- 'netmask',
'pointopoint',
'routes',
'scope',
@@ -716,6 +718,29 @@ def convert_smartos_network_data(network_data=None):
],
}
+ if dns_servers:
+ if not isinstance(dns_servers, (list, tuple)):
+ dns_servers = [dns_servers]
+ else:
+ dns_servers = []
+
+ if dns_domain:
+ if not isinstance(dns_domain, (list, tuple)):
+ dns_domain = [dns_domain]
+ else:
+ dns_domain = []
+
+ def is_valid_ipv4(addr):
+ return '.' in addr
+
+ def is_valid_ipv6(addr):
+ return ':' in addr
+
+ pgws = {
+ 'ipv4': {'match': is_valid_ipv4, 'gw': None},
+ 'ipv6': {'match': is_valid_ipv6, 'gw': None},
+ }
+
config = []
for nic in network_data:
cfg = dict((k, v) for k, v in nic.items()
@@ -727,18 +752,40 @@ def convert_smartos_network_data(network_data=None):
cfg.update({'mac_address': nic['mac']})
subnets = []
- for ip, gw in zip(nic['ips'], nic['gateways']):
- subnet = dict((k, v) for k, v in nic.items()
- if k in valid_keys['subnet'])
- subnet.update({
- 'type': 'static',
- 'address': ip,
- 'gateway': gw,
- })
+ for ip in nic.get('ips', []):
+ if ip == "dhcp":
+ subnet = {'type': 'dhcp4'}
+ else:
+ subnet = dict((k, v) for k, v in nic.items()
+ if k in valid_keys['subnet'])
+ subnet.update({
+ 'type': 'static',
+ 'address': ip,
+ })
+
+ proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6'
+ # Only use gateways for 'primary' nics
+ if 'primary' in nic and nic.get('primary', False):
+ # the ips and gateways list may be N to M, here
+ # we map the ip index into the gateways list,
+ # and handle the case that we could have more ips
+ # than gateways. we only consume the first gateway
+ if not pgws[proto]['gw']:
+ gateways = [gw for gw in nic.get('gateways', [])
+ if pgws[proto]['match'](gw)]
+ if len(gateways):
+ pgws[proto]['gw'] = gateways[0]
+ subnet.update({'gateway': pgws[proto]['gw']})
+
subnets.append(subnet)
cfg.update({'subnets': subnets})
config.append(cfg)
+ if dns_servers:
+ config.append(
+ {'type': 'nameserver', 'address': dns_servers,
+ 'search': dns_domain})
+
return {'version': 1, 'config': config}
@@ -761,21 +808,36 @@ if __name__ == "__main__":
sys.exit(1)
if len(sys.argv) == 1:
keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
- list(SMARTOS_ATTRIB_MAP.keys()))
+ list(SMARTOS_ATTRIB_MAP.keys()) + ['network_config'])
else:
keys = sys.argv[1:]
- data = {}
- for key in keys:
+ def load_key(client, key, data):
+ if key in data:
+ return data[key]
+
if key in SMARTOS_ATTRIB_JSON:
keyname = SMARTOS_ATTRIB_JSON[key]
- data[key] = jmc.get_json(keyname)
+ data[key] = client.get_json(keyname)
+ elif key == "network_config":
+ for depkey in ('network-data', 'dns_servers', 'dns_domain'):
+ load_key(client, depkey, data)
+ data[key] = convert_smartos_network_data(
+ network_data=data['network-data'],
+ dns_servers=data['dns_servers'],
+ dns_domain=data['dns_domain'])
else:
if key in SMARTOS_ATTRIB_MAP:
keyname, strip = SMARTOS_ATTRIB_MAP[key]
else:
keyname, strip = (key, False)
- val = jmc.get(keyname, strip=strip)
- data[key] = jmc.get(keyname, strip=strip)
+ data[key] = client.get(keyname, strip=strip)
+
+ return data[key]
+
+ data = {}
+ for key in keys:
+ load_key(client=jmc, key=key, data=data)
- print(json.dumps(data, indent=1))
+ print(json.dumps(data, indent=1, sort_keys=True,
+ separators=(',', ': ')))
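
The new gateway handling only assigns gateways from nics marked primary, and consumes at most one per address family; the simple '.'-versus-':' family test is the same one the hunk adds. A trimmed-down sketch of that selection rule over sample sdc:nics data:

    def pick_primary_gateways(nics):
        # one gateway per address family, taken from the first 'primary'
        # nic that offers one -- mirrors the pgws bookkeeping above
        pgws = {'ipv4': None, 'ipv6': None}
        for nic in nics:
            if not nic.get('primary', False):
                continue
            for gw in nic.get('gateways', []):
                proto = 'ipv4' if '.' in gw else 'ipv6'
                if pgws[proto] is None:
                    pgws[proto] = gw
        return pgws

    nics = [{'primary': True, 'gateways': ['8.12.42.1', '2001:db8::1']},
            {'gateways': ['10.0.0.1']}]
    print(pick_primary_gateways(nics))
    # {'ipv4': '8.12.42.1', 'ipv6': '2001:db8::1'}
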
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 87b8e524..d1395270 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -21,8 +21,8 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
+import copy
import os
-
import six
from cloudinit import importer
@@ -355,6 +355,31 @@ def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
return instance_id.lower() == dmi_value.lower()
+def convert_vendordata(data, recurse=True):
+ """data: a loaded object (strings, arrays, dicts).
+ return something suitable for cloudinit vendordata_raw.
+
+ if data is:
+ None: return None
+ string: return string
+ list: return data
+ the list is then processed in UserDataProcessor
+ dict: return convert_vendordata(data.get('cloud-init'))
+ """
+ if not data:
+ return None
+ if isinstance(data, six.string_types):
+ return data
+ if isinstance(data, list):
+ return copy.deepcopy(data)
+ if isinstance(data, dict):
+ if recurse is True:
+ return convert_vendordata(data.get('cloud-init'),
+ recurse=False)
+ raise ValueError("vendordata['cloud-init'] cannot be dict")
+ raise ValueError("Unknown data type for vendordata: %s" % type(data))
+
+
# 'depends' is a list of dependencies (DEP_FILESYSTEM)
# ds_list is a list of 2 item lists
# ds_list = [
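
The relocated convert_vendordata() (moved here from helpers/openstack, removed below) normalizes whatever a datasource found under vendor-data. Its behaviour, case by case, assuming cloudinit is importable:

    from cloudinit.sources import convert_vendordata

    assert convert_vendordata(None) is None
    assert convert_vendordata("#cloud-config\n") == "#cloud-config\n"
    assert convert_vendordata(['snap', 'install']) == ['snap', 'install']
    assert convert_vendordata({'cloud-init': 'cc'}) == 'cc'  # one level only
    for bad in ({'cloud-init': {'nested': 'dict'}}, 42):
        try:
            convert_vendordata(bad)
            raise AssertionError('expected ValueError')
        except ValueError:
            pass
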
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 63ccf10e..689ed4cc 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -1,3 +1,4 @@
+import json
import logging
import os
import re
@@ -6,6 +7,7 @@ import struct
import tempfile
import time
+from cloudinit import stages
from contextlib import contextmanager
from xml.etree import ElementTree
@@ -187,19 +189,33 @@ class WALinuxAgentShim(object):
' </Container>',
'</Health>'])
- def __init__(self):
- LOG.debug('WALinuxAgentShim instantiated...')
- self.endpoint = self.find_endpoint()
+ def __init__(self, fallback_lease_file=None):
+ LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
+ fallback_lease_file)
+ self.dhcpoptions = None
+ self._endpoint = None
self.openssl_manager = None
self.values = {}
+ self.lease_file = fallback_lease_file
def clean_up(self):
if self.openssl_manager is not None:
self.openssl_manager.clean_up()
@staticmethod
- def get_ip_from_lease_value(lease_value):
- unescaped_value = lease_value.replace('\\', '')
+ def _get_hooks_dir():
+ _paths = stages.Init()
+ return os.path.join(_paths.paths.get_runpath(), "dhclient.hooks")
+
+ @property
+ def endpoint(self):
+ if self._endpoint is None:
+ self._endpoint = self.find_endpoint(self.lease_file)
+ return self._endpoint
+
+ @staticmethod
+ def get_ip_from_lease_value(fallback_lease_value):
+ unescaped_value = fallback_lease_value.replace('\\', '')
if len(unescaped_value) > 4:
hex_string = ''
for hex_pair in unescaped_value.split(':'):
@@ -213,15 +229,75 @@ class WALinuxAgentShim(object):
return socket.inet_ntoa(packed_bytes)
@staticmethod
- def find_endpoint():
- LOG.debug('Finding Azure endpoint...')
- content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
- value = None
+ def _get_value_from_leases_file(fallback_lease_file):
+ leases = []
+ content = util.load_file(fallback_lease_file)
+ LOG.debug("content is {}".format(content))
for line in content.splitlines():
if 'unknown-245' in line:
- value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
+ # Example line from Ubuntu
+ # option unknown-245 a8:3f:81:10;
+ leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"'))
+ # Return the "most recent" one in the list
+ if len(leases) < 1:
+ return None
+ else:
+ return leases[-1]
+
+ @staticmethod
+ def _load_dhclient_json():
+ dhcp_options = {}
+ hooks_dir = WALinuxAgentShim._get_hooks_dir()
+ if not os.path.exists(hooks_dir):
+ LOG.debug("%s not found.", hooks_dir)
+ return None
+ hook_files = [os.path.join(hooks_dir, x)
+ for x in os.listdir(hooks_dir)]
+ for hook_file in hook_files:
+ try:
+ name = os.path.basename(hook_file).replace('.json', '')
+ dhcp_options[name] = json.loads(util.load_file(hook_file))
+ except ValueError:
+ raise ValueError("%s is not valid JSON data" % hook_file)
+ return dhcp_options
+
+ @staticmethod
+ def _get_value_from_dhcpoptions(dhcp_options):
+ if dhcp_options is None:
+ return None
+ # the MS endpoint server is given to us as DHCP option 245
+ _value = None
+ for interface in dhcp_options:
+ _value = dhcp_options[interface].get('unknown_245', None)
+ if _value is not None:
+ LOG.debug("Endpoint server found in dhclient options")
+ break
+ return _value
+
+ @staticmethod
+ def find_endpoint(fallback_lease_file=None):
+ LOG.debug('Finding Azure endpoint...')
+ value = None
+ # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
+ # a dhclient exit hook that calls cloud-init-dhclient-hook
+ dhcp_options = WALinuxAgentShim._load_dhclient_json()
+ value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
if value is None:
- raise ValueError('No endpoint found in DHCP config.')
+ # Fallback and check the leases file if unsuccessful
+ LOG.debug("Unable to find endpoint in dhclient logs. "
+ " Falling back to check lease files")
+ if fallback_lease_file is None:
+ LOG.warn("No fallback lease file was specified.")
+ value = None
+ else:
+ LOG.debug("Looking for endpoint in lease file %s",
+ fallback_lease_file)
+ value = WALinuxAgentShim._get_value_from_leases_file(
+ fallback_lease_file)
+
+ if value is None:
+ raise ValueError('No endpoint found.')
+
endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
return endpoint_ip_address
@@ -271,8 +347,8 @@ class WALinuxAgentShim(object):
LOG.info('Reported ready to Azure fabric.')
-def get_metadata_from_fabric():
- shim = WALinuxAgentShim()
+def get_metadata_from_fabric(fallback_lease_file=None):
+ shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file)
try:
return shim.register_with_azure_and_fetch_data()
finally:
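
get_ip_from_lease_value() turns the stored option 245 value back into the fabric endpoint's IP. A sketch of the hex-pair branch (the real helper also accepts a short raw-byte form, omitted here); the example value from the comment above decodes to 168.63.129.16:

    import socket

    def ip_from_option_245(value):
        # hex-pair form as dhclient writes it, e.g. 'a8:3f:81:10'
        value = value.replace('\\', '')
        packed = bytes(int(pair, 16) for pair in value.split(':'))
        return socket.inet_ntoa(packed)

    print(ip_from_option_245('a8:3f:81:10'))   # 168.63.129.16
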
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 2e7a1d47..a5a2a1d6 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -539,6 +539,10 @@ def convert_net_json(network_json=None, known_macs=None):
networks = network_json.get('networks', [])
services = network_json.get('services', [])
+ link_updates = []
+ link_id_info = {}
+ bond_name_fmt = "bond%d"
+ bond_number = 0
config = []
for link in links:
subnets = []
@@ -551,6 +555,13 @@ def convert_net_json(network_json=None, known_macs=None):
if 'name' in link:
cfg['name'] = link['name']
+ if link.get('ethernet_mac_address'):
+ link_id_info[link['id']] = link.get('ethernet_mac_address')
+
+ curinfo = {'name': cfg.get('name'),
+ 'mac': link.get('ethernet_mac_address'),
+ 'id': link['id'], 'type': link['type']}
+
for network in [n for n in networks
if n['link'] == link['id']]:
subnet = dict((k, v) for k, v in network.items()
@@ -571,7 +582,7 @@ def convert_net_json(network_json=None, known_macs=None):
subnet['ipv6'] = True
subnets.append(subnet)
cfg.update({'subnets': subnets})
- if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
+ if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge', 'tap']:
cfg.update({
'type': 'physical',
'mac_address': link['ethernet_mac_address']})
@@ -582,31 +593,56 @@ def convert_net_json(network_json=None, known_macs=None):
continue
elif k.startswith('bond'):
params.update({k: v})
- cfg.update({
- 'bond_interfaces': copy.deepcopy(link['bond_links']),
- 'params': params,
- })
+
+ # openstack does not provide a name for the bond.
+ # they do provide an 'id', but that is possibly nonsensical.
+ # so we just create our own name.
+ link_name = bond_name_fmt % bond_number
+ bond_number += 1
+
+ # bond_links reference links by their id, but we need to add
+ # to the network config by their nic name.
+ # store that in link_updates, and apply them later.
+ link_updates.append(
+ (cfg, 'bond_interfaces', '%s',
+ copy.deepcopy(link['bond_links']))
+ )
+ cfg.update({'params': params, 'name': link_name})
+
+ curinfo['name'] = link_name
elif link['type'] in ['vlan']:
+ name = "%s.%s" % (link['vlan_link'], link['vlan_id'])
cfg.update({
- 'name': "%s.%s" % (link['vlan_link'],
- link['vlan_id']),
- 'vlan_link': link['vlan_link'],
+ 'name': name,
'vlan_id': link['vlan_id'],
'mac_address': link['vlan_mac_address'],
})
+ link_updates.append((cfg, 'vlan_link', '%s', link['vlan_link']))
+ link_updates.append((cfg, 'name', "%%s.%s" % link['vlan_id'],
+ link['vlan_link']))
+ curinfo.update({'mac': link['vlan_mac_address'],
+ 'name': name})
else:
raise ValueError(
'Unknown network_data link type: %s' % link['type'])
config.append(cfg)
+ link_id_info[curinfo['id']] = curinfo
need_names = [d for d in config
if d.get('type') == 'physical' and 'name' not in d]
- if need_names:
+ if need_names or link_updates:
if known_macs is None:
known_macs = net.get_interfaces_by_mac()
+ # go through and fill out the link_id_info with names
+ for link_id, info in link_id_info.items():
+ if info.get('name'):
+ continue
+ if info.get('mac') in known_macs:
+ info['name'] = known_macs[info['mac']]
+
for d in need_names:
mac = d.get('mac_address')
if not mac:
@@ -615,34 +651,15 @@ def convert_net_json(network_json=None, known_macs=None):
raise ValueError("Unable to find a system nic for %s" % d)
d['name'] = known_macs[mac]
+ for cfg, key, fmt, target in link_updates:
+ if isinstance(target, (list, tuple)):
+ cfg[key] = [fmt % link_id_info[l]['name'] for l in target]
+ else:
+ cfg[key] = fmt % link_id_info[target]['name']
+
for service in services:
cfg = service
cfg.update({'type': 'nameserver'})
config.append(cfg)
return {'version': 1, 'config': config}
-
-
-def convert_vendordata_json(data, recurse=True):
- """data: a loaded json *object* (strings, arrays, dicts).
- return something suitable for cloudinit vendordata_raw.
-
- if data is:
- None: return None
- string: return string
- list: return data
- the list is then processed in UserDataProcessor
- dict: return convert_vendordata_json(data.get('cloud-init'))
- """
- if not data:
- return None
- if isinstance(data, six.string_types):
- return data
- if isinstance(data, list):
- return copy.deepcopy(data)
- if isinstance(data, dict):
- if recurse is True:
- return convert_vendordata_json(data.get('cloud-init'),
- recurse=False)
- raise ValueError("vendordata['cloud-init'] cannot be dict")
- raise ValueError("Unknown data type for vendordata: %s" % type(data))
diff --git a/cloudinit/util.py b/cloudinit/util.py
index e5dd61a0..7c37eb8f 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -61,6 +61,10 @@ from cloudinit import version
from cloudinit.settings import (CFG_BUILTIN)
+try:
+ string_types = (basestring,)
+except NameError:
+ string_types = (str,)
_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)
@@ -82,6 +86,71 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
PROC_CMDLINE = None
+_LSB_RELEASE = {}
+
+
+def get_architecture(target=None):
+ out, _ = subp(['dpkg', '--print-architecture'], capture=True,
+ target=target)
+ return out.strip()
+
+
+def _lsb_release(target=None):
+ fmap = {'Codename': 'codename', 'Description': 'description',
+ 'Distributor ID': 'id', 'Release': 'release'}
+
+ data = {}
+ try:
+ out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
+ for line in out.splitlines():
+ fname, _, val = line.partition(":")
+ if fname in fmap:
+ data[fmap[fname]] = val.strip()
+ missing = [k for k in fmap.values() if k not in data]
+ if len(missing):
+ LOG.warn("Missing fields in lsb_release --all output: %s",
+ ','.join(missing))
+
+ except ProcessExecutionError as err:
+ LOG.warn("Unable to get lsb_release --all: %s", err)
+ data = dict((v, "UNAVAILABLE") for v in fmap.values())
+
+ return data
+
+
+def lsb_release(target=None):
+ if target_path(target) != "/":
+ # do not use or update cache if target is provided
+ return _lsb_release(target)
+
+ global _LSB_RELEASE
+ if not _LSB_RELEASE:
+ data = _lsb_release()
+ _LSB_RELEASE.update(data)
+ return _LSB_RELEASE
+
+
+def target_path(target, path=None):
+ # return 'path' inside target, accepting target as None
+ if target in (None, ""):
+ target = "/"
+ elif not isinstance(target, string_types):
+ raise ValueError("Unexpected input for target: %s" % target)
+ else:
+ target = os.path.abspath(target)
+ # abspath("//") returns "//" specifically for 2 slashes.
+ if target.startswith("//"):
+ target = target[1:]
+
+ if not path:
+ return target
+
+ # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
+ while len(path) and path[0] == "/":
+ path = path[1:]
+
+ return os.path.join(target, path)
+
def decode_binary(blob, encoding='utf-8'):
# Converts a binary type into a text type using given encoding.
@@ -1570,6 +1639,11 @@ def get_builtin_cfg():
return obj_copy.deepcopy(CFG_BUILTIN)
+def is_link(path):
+ LOG.debug("Testing if a link exists for %s", path)
+ return os.path.islink(path)
+
+
def sym_link(source, link, force=False):
LOG.debug("Creating symbolic link from %r => %r", link, source)
if force and os.path.exists(link):
@@ -1688,10 +1762,20 @@ def delete_dir_contents(dirname):
def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
- logstring=False):
+ logstring=False, decode="replace", target=None):
+
+ # not supported in cloud-init (yet), for now kept in the call signature
+ # to ease maintaining code shared between cloud-init and curtin
+ if target is not None:
+ raise ValueError("target arg not supported by cloud-init")
+
if rcs is None:
rcs = [0]
+
+ devnull_fp = None
try:
+ if target_path(target) != "/":
+ args = ['chroot', target] + list(args)
if not logstring:
LOG.debug(("Running command %s with allowed return codes %s"
@@ -1700,33 +1784,52 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
LOG.debug(("Running hidden command to protect sensitive "
"input/output logstring: %s"), logstring)
- if not capture:
- stdout = None
- stderr = None
- else:
+ stdin = None
+ stdout = None
+ stderr = None
+ if capture:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
- stdin = subprocess.PIPE
- kws = dict(stdout=stdout, stderr=stderr, stdin=stdin,
- env=env, shell=shell)
- if six.PY3:
- # Use this so subprocess output will be (Python 3) str, not bytes.
- kws['universal_newlines'] = True
- sp = subprocess.Popen(args, **kws)
+ if data is None:
+ # using devnull ensures any reads get EOF rather
+ # than possibly blocking on input.
+ devnull_fp = open(os.devnull)
+ stdin = devnull_fp
+ else:
+ stdin = subprocess.PIPE
+ if not isinstance(data, bytes):
+ data = data.encode()
+
+ sp = subprocess.Popen(args, stdout=stdout,
+ stderr=stderr, stdin=stdin,
+ env=env, shell=shell)
(out, err) = sp.communicate(data)
+
+ # Just ensure blank instead of none.
+ if not out and capture:
+ out = b''
+ if not err and capture:
+ err = b''
+ if decode:
+ def ldecode(data, m='utf-8'):
+ if not isinstance(data, bytes):
+ return data
+ return data.decode(m, errors=decode)
+
+ out = ldecode(out)
+ err = ldecode(err)
except OSError as e:
raise ProcessExecutionError(cmd=args, reason=e,
errno=e.errno)
+ finally:
+ if devnull_fp:
+ devnull_fp.close()
+
rc = sp.returncode
if rc not in rcs:
raise ProcessExecutionError(stdout=out, stderr=err,
exit_code=rc,
cmd=args)
- # Just ensure blank instead of none?? (iff capturing)
- if not out and capture:
- out = ''
- if not err and capture:
- err = ''
return (out, err)
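
Two behavioural changes in this hunk are easy to miss: with no input, the child's stdin now comes from /dev/null (immediate EOF instead of a blocking read), and captured bytes are decoded with errors='replace' by default. A simplified sketch of just those two pieces, with the cloud-init-specific plumbing removed:

    import os
    import subprocess

    def run(args, data=None, decode='replace'):
        if data is None:
            stdin = open(os.devnull)        # child sees EOF, never blocks
        else:
            stdin = subprocess.PIPE
            if not isinstance(data, bytes):
                data = data.encode()
        try:
            sp = subprocess.Popen(args, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE, stdin=stdin)
            out, err = sp.communicate(data)
        finally:
            if data is None:
                stdin.close()
        if decode:
            out = out.decode('utf-8', errors=decode)
            err = err.decode('utf-8', errors=decode)
        return out, err, sp.returncode

    print(run(['cat'])[0])   # '' -- cat gets immediate EOF from /dev/null
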
@@ -2227,10 +2330,17 @@ def read_dmi_data(key):
If all of the above fail to find a value, None will be returned.
"""
+
syspath_value = _read_dmi_syspath(key)
if syspath_value is not None:
return syspath_value
+ # running dmidecode can be problematic on some arches (LP: #1243287)
+ uname_arch = os.uname()[4]
+ if uname_arch.startswith("arm") or uname_arch == "aarch64":
+ LOG.debug("dmidata is not supported on %s", uname_arch)
+ return None
+
dmidecode_path = which('dmidecode')
if dmidecode_path:
return _call_dmidecode(key, dmidecode_path)
@@ -2244,3 +2354,18 @@ def message_from_string(string):
if sys.version_info[:2] < (2, 7):
return email.message_from_file(six.StringIO(string))
return email.message_from_string(string)
+
+
+def get_installed_packages(target=None):
+ (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
+
+ pkgs_inst = set()
+ for line in out.splitlines():
+ try:
+ (state, pkg, _) = line.split(None, 2)
+ except ValueError:
+ continue
+ if state.startswith("hi") or state.startswith("ii"):
+ pkgs_inst.add(re.sub(":.*", "", pkg))
+
+ return pkgs_inst
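
target_path(), added earlier in this file, exists because os.path.join('/mnt', '/etc/hosts') silently discards '/mnt' when the second argument is absolute, so leading slashes are chomped first. Expected behaviour, assuming cloudinit.util is importable:

    from cloudinit.util import target_path

    # os.path.join would have returned '/etc/hosts' for the second case
    assert target_path(None, '/etc/hosts') == '/etc/hosts'
    assert target_path('/mnt', '/etc/hosts') == '/mnt/etc/hosts'
    assert target_path('/mnt', 'etc/hosts') == '/mnt/etc/hosts'
    assert target_path('/mnt/', None) == '/mnt'
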
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 3d1d1d23..6acada84 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -16,12 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from distutils import version as vr
-
-
-def version():
- return vr.StrictVersion("0.7.7")
+__VERSION__ = "0.7.7"
def version_string():
- return str(version())
+ return __VERSION__