-rw-r--r--  .gitignore | 6
-rw-r--r--  ChangeLog | 3
-rw-r--r--  HACKING.rst | 68
-rw-r--r--  MANIFEST.in | 2
-rw-r--r--  Makefile | 19
-rw-r--r--  cloudinit/atomic_helper.py | 31
-rw-r--r--  cloudinit/cmd/main.py | 55
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 717
-rw-r--r--  cloudinit/config/cc_lxd.py | 2
-rw-r--r--  cloudinit/config/cc_mcollective.py | 96
-rw-r--r--  cloudinit/config/cc_ntp.py | 106
-rw-r--r--  cloudinit/config/cc_phone_home.py | 2
-rw-r--r--  cloudinit/config/cc_rh_subscription.py | 2
-rw-r--r--  cloudinit/config/cc_salt_minion.py | 7
-rw-r--r--  cloudinit/config/cc_snappy.py | 2
-rw-r--r--  cloudinit/config/cc_spacewalk.py | 85
-rw-r--r--  cloudinit/config/cc_ubuntu_init_switch.py | 2
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 2
-rw-r--r--  cloudinit/dhclient_hook.py | 50
-rw-r--r--  cloudinit/distros/__init__.py | 2
-rw-r--r--  cloudinit/distros/gentoo.py | 95
-rw-r--r--  cloudinit/gpg.py | 8
-rw-r--r--  cloudinit/net/__init__.py | 9
-rw-r--r--  cloudinit/net/eni.py | 2
-rw-r--r--  cloudinit/signal_handler.py | 2
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 6
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 20
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py | 6
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 2
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 106
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 2
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 199
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 2
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 2
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 2
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 122
-rw-r--r--  cloudinit/sources/__init__.py | 27
-rw-r--r--  cloudinit/sources/helpers/azure.py | 102
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 85
-rw-r--r--  cloudinit/util.py | 159
-rw-r--r--  cloudinit/version.py | 8
-rw-r--r--  config/cloud.cfg | 17
-rw-r--r--  doc/examples/cloud-config-add-apt-repos.txt | 42
-rw-r--r--  doc/examples/cloud-config-apt.txt | 328
-rw-r--r--  doc/examples/cloud-config-chef-oneiric.txt | 67
-rw-r--r--  doc/examples/cloud-config-chef.txt | 67
-rw-r--r--  doc/examples/cloud-config-ntp.txt | 27
-rw-r--r--  doc/examples/cloud-config.txt | 251
-rw-r--r--  doc/sources/azure/README.rst | 29
-rwxr-xr-x  packages/bddeb | 120
-rwxr-xr-x  packages/brpm | 295
-rw-r--r--  packages/debian/changelog.in | 2
-rwxr-xr-x  packages/debian/rules.in | 2
-rw-r--r--  packages/debian/source/format | 1
-rw-r--r--  packages/redhat/cloud-init.spec.in | 22
-rw-r--r--  packages/suse/cloud-init.spec.in | 14
-rw-r--r--  requirements.txt | 2
-rwxr-xr-x  setup.py | 8
-rw-r--r--  systemd/cloud-final.service | 2
-rwxr-xr-x  systemd/cloud-init-generator | 5
-rw-r--r--  sysvinit/gentoo/cloud-config | 2
-rw-r--r--  sysvinit/gentoo/cloud-final | 2
-rw-r--r--  sysvinit/gentoo/cloud-init | 2
-rw-r--r--  sysvinit/gentoo/cloud-init-local | 2
-rw-r--r--  templates/ntp.conf.debian.tmpl | 63
-rw-r--r--  templates/ntp.conf.fedora.tmpl | 66
-rw-r--r--  templates/ntp.conf.rhel.tmpl | 61
-rw-r--r--  templates/ntp.conf.sles.tmpl | 100
-rw-r--r--  templates/ntp.conf.ubuntu.tmpl | 75
-rw-r--r--  tests/configs/sample1.yaml | 3
-rw-r--r--  tests/unittests/helpers.py | 18
-rw-r--r--  tests/unittests/test_atomic_helper.py | 54
-rw-r--r--  tests/unittests/test_datasource/test_azure_helper.py | 15
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py | 149
-rw-r--r--  tests/unittests/test_datasource/test_digitalocean.py | 67
-rw-r--r--  tests/unittests/test_datasource/test_maas.py | 127
-rw-r--r--  tests/unittests/test_datasource/test_nocloud.py | 85
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py | 3
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py | 350
-rw-r--r--  tests/unittests/test_distros/test_generic.py | 3
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_conf_v1.py (renamed from tests/unittests/test_handler/test_handler_apt_configure.py) | 46
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py (renamed from tests/unittests/test_handler/test_handler_apt_configure_sources_list.py) | 64
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py | 187
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source_v1.py (renamed from tests/unittests/test_handler/test_handler_apt_source.py) | 214
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source_v3.py | 1104
-rw-r--r--  tests/unittests/test_handler/test_handler_mcollective.py | 128
-rw-r--r--  tests/unittests/test_handler/test_handler_ntp.py | 274
-rw-r--r--  tests/unittests/test_handler/test_handler_spacewalk.py | 42
-rw-r--r--  tests/unittests/test_util.py | 95
-rwxr-xr-x  tools/hook-dhclient | 24
-rwxr-xr-x  tools/hook-network-manager | 24
-rwxr-xr-x  tools/hook-rhel.sh | 27
-rwxr-xr-x  tools/make-dist-tarball | 21
-rwxr-xr-x  tools/make-tarball | 80
-rwxr-xr-x  tools/read-dependencies | 22
-rwxr-xr-x  tools/read-version | 105
96 files changed, 5785 insertions, 1445 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..77eb9c74
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+build
+cloud_init.egg-info
+dist
+*.pyc
+__pycache__
+.tox
diff --git a/ChangeLog b/ChangeLog
index bae982e3..8d0f16e3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,6 @@
+0.7.8:
+ - SmartOS: more improvements for network configuration
+ - add ntp configuration module [Ryan Harper]
0.7.7:
- open 0.7.7
- Digital Ocean: add datasource for Digital Ocean. [Neal Shrader]
diff --git a/HACKING.rst b/HACKING.rst
index 6bfe4b4d..63a5bde0 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -2,47 +2,71 @@
Hacking on cloud-init
=====================
-To get changes into cloud-init, the process to follow is:
+This document describes how to contribute changes to cloud-init.
+
+Do these things once
+--------------------
* If you have not already, be sure to sign the CCA:
- `Canonical Contributor Agreement`_
-* Get your changes into a local bzr branch.
- Initialize a repo, and checkout trunk (init repo is to share bzr info across multiple checkouts, its different than git):
+* Clone the `LaunchPad`_ repository:
+
+ git clone YOUR_USERNAME@git.launchpad.net:cloud-init
+ cd cloud-init
+
+ If you would prefer a bzr style `git clone lp:cloud-init`, see
+ the `Instructions on LaunchPad`_ for more information.
- - ``bzr init-repo cloud-init``
- - ``bzr branch lp:cloud-init trunk.dist``
- - ``bzr branch trunk.dist my-topic-branch``
+* Create a new remote pointing to your personal LaunchPad
+ repository::
+
+ git remote add YOUR_USERNAME YOUR_USERNAME@git.launchpad.net:~YOUR_USERNAME/cloud-init
+
+.. _Canonical Contributor Agreement: http://www.canonical.com/contributors
-* Commit your changes (note, you can make multiple commits, fixes, more commits.):
+Do these things for each feature or bug
+---------------------------------------
- - ``bzr commit``
+* Create a new topic branch for your work::
-* Check pep8 and test, and address any issues:
+ git checkout -b my-topic-branch
- - ``make test pep8``
+.. _Instructions on launchpad: https://help.launchpad.net/Code/Git
-* Push to launchpad to a personal branch:
+* Make and commit your changes (note, you can make multiple commits,
+ fixes, more commits.)::
- - ``bzr push lp:~<YOUR_USERNAME>/cloud-init/<BRANCH_NAME>``
+ git commit
-* Propose that for a merge into lp:cloud-init via web browser.
+* Check pep8 and test, and address any issues::
- - Open the branch in `Launchpad`_
+ make test pep8
- - It will typically be at ``https://code.launchpad.net/<YOUR_USERNAME>/<PROJECT>/<BRANCH_NAME>``
- - ie. https://code.launchpad.net/~smoser/cloud-init/mybranch
+* Push your changes to your personal LaunchPad repository::
-* Click 'Propose for merging'
-* Select 'lp:cloud-init' as the target branch
+ git push -u YOUR_USERNAME my-topic-branch
-Then, someone on cloud-init-dev (currently `Scott Moser`_ and `Joshua Harlow`_) will
-review your changes and follow up in the merge request.
+* Use your browser to create a merge request:
-Feel free to ping and/or join #cloud-init on freenode (irc) if you have any questions.
+ - Open the branch on `LaunchPad`_
+
+ - It will typically be at
+ ``https://code.launchpad.net/~YOUR_USERNAME/cloud-init/+git/cloud-init/+ref/BRANCHNAME``
+ for example
+ https://code.launchpad.net/~larsks/cloud-init/+git/cloud-init/+ref/feature/move-to-git
+
+  - Click 'Propose for merging'
+ - Select ``cloud-init`` as the target repository
+ - Select ``master`` as the target reference path
+
+Then, someone on cloud-init-dev (currently `Scott Moser`_ and `Joshua
+Harlow`_) will review your changes and follow up in the merge request.
+
+Feel free to ping and/or join ``#cloud-init`` on freenode (irc) if you
+have any questions.
.. _Launchpad: https://launchpad.net
-.. _Canonical Contributor Agreement: http://www.canonical.com/contributors
.. _Scott Moser: https://launchpad.net/~smoser
.. _Joshua Harlow: https://launchpad.net/~harlowja
diff --git a/MANIFEST.in b/MANIFEST.in
index 90f6c7d5..94264640 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -4,5 +4,7 @@ graft tools
prune build
prune dist
prune .tox
+prune .git
prune .bzr
+exclude .gitignore
exclude .bzrignore
diff --git a/Makefile b/Makefile
index 32c50aee..5d35dcc0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,21 +1,20 @@
CWD=$(shell pwd)
-PYVER ?= 3
+PYVER ?= $(shell for p in python3 python2; do \
+	out=$$(which $$p 2>&1) && echo $$p && exit; done; \
+	exit 1)
noseopts ?= -v
YAML_FILES=$(shell find cloudinit bin tests tools -name "*.yaml" -type f )
YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
-CHANGELOG_VERSION=$(shell $(CWD)/tools/read-version)
-CODE_VERSION=$(shell python -c "from cloudinit import version; print version.version_string()")
-
PIP_INSTALL := pip install
-ifeq ($(PYVER),3)
+ifeq ($(PYVER),python3)
pyflakes = pyflakes3
unittests = unittest3
yaml = yaml
else
-ifeq ($(PYVER),2)
+ifeq ($(PYVER),python2)
pyflakes = pyflakes
unittests = unittest
else
@@ -28,6 +27,10 @@ ifeq ($(distro),)
distro = redhat
endif
+READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version)
+CODE_VERSION=$(shell $(PYVER) -c "from cloudinit import version; print(version.version_string())")
+
+
all: check
check: check_version pep8 $(pyflakes) test $(yaml)
@@ -58,8 +61,8 @@ pip-test-requirements:
test: $(unittests)
check_version:
- @if [ "$(CHANGELOG_VERSION)" != "$(CODE_VERSION)" ]; then \
- echo "Error: ChangeLog version $(CHANGELOG_VERSION)" \
+ @if [ "$(READ_VERSION)" != "$(CODE_VERSION)" ]; then \
+ echo "Error: read-version version $(READ_VERSION)" \
"not equal to code version $(CODE_VERSION)"; exit 2; \
else true; fi
diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py
new file mode 100644
index 00000000..a3cfd942
--- /dev/null
+++ b/cloudinit/atomic_helper.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+# vi: ts=4 expandtab
+
+import json
+import os
+import tempfile
+
+_DEF_PERMS = 0o644
+
+
+def write_file(filename, content, mode=_DEF_PERMS, omode="wb"):
+ # open filename in mode 'omode', write content, set permissions to 'mode'
+ tf = None
+ try:
+ tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
+ delete=False, mode=omode)
+ tf.write(content)
+ tf.close()
+ os.chmod(tf.name, mode)
+ os.rename(tf.name, filename)
+ except Exception as e:
+ if tf is not None:
+ os.unlink(tf.name)
+ raise e
+
+
+def write_json(filename, data, mode=_DEF_PERMS):
+ # dump json representation of data to file filename.
+ return write_file(
+ filename, json.dumps(data, indent=1, sort_keys=True) + "\n",
+ omode="w", mode=mode)
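A rough usage sketch of the new helper (not part of the diff; the path is
hypothetical). Writes go through a temporary file in the same directory
followed by os.rename(), so a reader of the target path sees either the
old or the new content, never a partial write::

    from cloudinit import atomic_helper

    # atomically write JSON state with the default 0o644 permissions
    atomic_helper.write_json("/run/example/state.json",
                             {"stage": "init", "errors": []})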
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 63621c1d..83eb02c9 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -25,7 +25,6 @@ import argparse
import json
import os
import sys
-import tempfile
import time
import traceback
@@ -47,6 +46,10 @@ from cloudinit.reporting import events
from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
CLOUD_CONFIG)
+from cloudinit import atomic_helper
+
+from cloudinit.dhclient_hook import LogDhclient
+
# Pretty little cheetah formatted welcome message template
WELCOME_MSG_TPL = ("Cloud-init v. ${version} running '${action}' at "
@@ -452,22 +455,10 @@ def main_single(name, args):
return 0
-def atomic_write_file(path, content, mode='w'):
- tf = None
- try:
- tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(path),
- delete=False, mode=mode)
- tf.write(content)
- tf.close()
- os.rename(tf.name, path)
- except Exception as e:
- if tf is not None:
- os.unlink(tf.name)
- raise e
-
-
-def atomic_write_json(path, data):
- return atomic_write_file(path, json.dumps(data, indent=1) + "\n")
+def dhclient_hook(name, args):
+ record = LogDhclient(args)
+ record.check_hooks_dir()
+ record.record()
def status_wrapper(name, args, data_d=None, link_d=None):
@@ -522,7 +513,7 @@ def status_wrapper(name, args, data_d=None, link_d=None):
v1['stage'] = mode
v1[mode]['start'] = time.time()
- atomic_write_json(status_path, status)
+ atomic_helper.write_json(status_path, status)
util.sym_link(os.path.relpath(status_path, link_d), status_link,
force=True)
@@ -545,7 +536,7 @@ def status_wrapper(name, args, data_d=None, link_d=None):
v1[mode]['finished'] = time.time()
v1['stage'] = None
- atomic_write_json(status_path, status)
+ atomic_helper.write_json(status_path, status)
if mode == "modules-final":
# write the 'finished' file
@@ -554,9 +545,9 @@ def status_wrapper(name, args, data_d=None, link_d=None):
if v1[m]['errors']:
errors.extend(v1[m].get('errors', []))
- atomic_write_json(result_path,
- {'v1': {'datasource': v1['datasource'],
- 'errors': errors}})
+ atomic_helper.write_json(
+ result_path, {'v1': {'datasource': v1['datasource'],
+ 'errors': errors}})
util.sym_link(os.path.relpath(result_path, link_d), result_link,
force=True)
@@ -627,7 +618,6 @@ def main(sysv_args=None):
# This subcommand allows you to run a single module
parser_single = subparsers.add_parser('single',
help=('run a single module '))
- parser_single.set_defaults(action=('single', main_single))
parser_single.add_argument("--name", '-n', action="store",
help="module name to run",
required=True)
@@ -644,6 +634,16 @@ def main(sysv_args=None):
' pass to this module'))
parser_single.set_defaults(action=('single', main_single))
+ parser_dhclient = subparsers.add_parser('dhclient-hook',
+                                            help=('run the dhclient hook '
+                                                  'to record network info'))
+ parser_dhclient.add_argument("net_action",
+ help=('action taken on the interface'))
+ parser_dhclient.add_argument("net_interface",
+ help=('the network interface being acted'
+ ' upon'))
+ parser_dhclient.set_defaults(action=('dhclient_hook', dhclient_hook))
+
args = parser.parse_args(args=sysv_args)
try:
@@ -677,9 +677,18 @@ def main(sysv_args=None):
"running single module %s" % args.name)
report_on = args.report
+ elif name == 'dhclient_hook':
+ rname, rdesc = ("dhclient-hook",
+ "running dhclient-hook module")
+
args.reporter = events.ReportEventStack(
rname, rdesc, reporting_enabled=report_on)
+
with args.reporter:
return util.log_time(
logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
get_uptime=True, func=functor, args=(name, args))
+
+
+if __name__ == '__main__':
+ main(sys.argv)
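A sketch of what the new subcommand wires together, with made-up arguments,
and assuming main() strips the program name from sysv_args as the
``main(sys.argv)`` guard above implies::

    from cloudinit.cmd import main

    # equivalent to running: cloud-init dhclient-hook up eth0
    main.main(['cloud-init', 'dhclient-hook', 'up', 'eth0'])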
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 05ad4b03..fa9505a7 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -23,80 +23,182 @@ import os
import re
from cloudinit import gpg
+from cloudinit import log as logging
from cloudinit import templater
from cloudinit import util
-distros = ['ubuntu', 'debian']
-
-PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n"
-APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config"
-APT_PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy"
+LOG = logging.getLogger(__name__)
# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+# place where apt stores cached repository data
+APT_LISTS = "/var/lib/apt/lists"
-def handle(name, cfg, cloud, log, _args):
- if util.is_false(cfg.get('apt_configure_enabled', True)):
- log.debug("Skipping module named %s, disabled by config.", name)
- return
-
- release = get_release()
- mirrors = find_apt_mirror_info(cloud, cfg)
- if not mirrors or "primary" not in mirrors:
- log.debug(("Skipping module named %s,"
- " no package 'mirror' located"), name)
- return
-
- # backwards compatibility
- mirror = mirrors["primary"]
- mirrors["mirror"] = mirror
-
- log.debug("Mirror info: %s" % mirrors)
-
- if not util.get_cfg_option_bool(cfg,
- 'apt_preserve_sources_list', False):
- generate_sources_list(cfg, release, mirrors, cloud, log)
- old_mirrors = cfg.get('apt_old_mirrors',
- {"primary": "archive.ubuntu.com/ubuntu",
- "security": "security.ubuntu.com/ubuntu"})
- rename_apt_lists(old_mirrors, mirrors)
+# Files to store proxy information
+APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config"
+APT_PROXY_FN = "/etc/apt/apt.conf.d/90cloud-init-aptproxy"
+
+# Default keyserver to use
+DEFAULT_KEYSERVER = "keyserver.ubuntu.com"
+
+# Default archive mirrors
+PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/",
+ "SECURITY": "http://security.ubuntu.com/ubuntu/"}
+PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
+ "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"}
+PRIMARY_ARCHES = ['amd64', 'i386']
+PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']
+
+
+def get_default_mirrors(arch=None, target=None):
+ """returns the default mirrors for the target. These depend on the
+ architecture, for more see:
+ https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
+ if arch is None:
+ arch = util.get_architecture(target)
+ if arch in PRIMARY_ARCHES:
+ return PRIMARY_ARCH_MIRRORS.copy()
+ if arch in PORTS_ARCHES:
+ return PORTS_MIRRORS.copy()
+ raise ValueError("No default mirror known for arch %s" % arch)
+
+
+def handle(name, ocfg, cloud, log, _):
+ """process the config for apt_config. This can be called from
+ curthooks if a global apt config was provided or via the "apt"
+ standalone command."""
+ # keeping code close to curtin codebase via entry handler
+ target = None
+ if log is not None:
+ global LOG
+ LOG = log
+ # feed back converted config, but only work on the subset under 'apt'
+ ocfg = convert_to_v3_apt_format(ocfg)
+ cfg = ocfg.get('apt', {})
+
+ if not isinstance(cfg, dict):
+        raise ValueError("Expected dictionary for 'apt' config, found %s"
+                         % type(cfg))
+
+ LOG.debug("handling apt (module %s) with apt config '%s'", name, cfg)
+
+ release = util.lsb_release(target=target)['codename']
+ arch = util.get_architecture(target)
+ mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
+ LOG.debug("Apt Mirror info: %s", mirrors)
+
+ apply_debconf_selections(cfg, target)
+
+ if util.is_false(cfg.get('preserve_sources_list', False)):
+ generate_sources_list(cfg, release, mirrors, cloud)
+ rename_apt_lists(mirrors, target)
try:
apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
- except Exception as e:
- log.warn("failed to proxy or apt config info: %s", e)
+ except (IOError, OSError):
+ LOG.exception("Failed to apply proxy or apt config info:")
- # Process 'apt_sources'
- if 'apt_sources' in cfg:
+ # Process 'apt_source -> sources {dict}'
+ if 'sources' in cfg:
params = mirrors
params['RELEASE'] = release
- params['MIRROR'] = mirror
+ params['MIRROR'] = mirrors["MIRROR"]
+ matcher = None
matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
if matchcfg:
matcher = re.compile(matchcfg).search
+
+ add_apt_sources(cfg['sources'], cloud, target=target,
+ template_params=params, aa_repo_match=matcher)
+
+
+def debconf_set_selections(selections, target=None):
+ util.subp(['debconf-set-selections'], data=selections, target=target,
+ capture=True)
+
+
+def dpkg_reconfigure(packages, target=None):
+ # For any packages that are already installed, but have preseed data
+ # we populate the debconf database, but the filesystem configuration
+ # would be preferred on a subsequent dpkg-reconfigure.
+ # so, what we have to do is "know" information about certain packages
+ # to unconfigure them.
+ unhandled = []
+ to_config = []
+ for pkg in packages:
+ if pkg in CONFIG_CLEANERS:
+ LOG.debug("unconfiguring %s", pkg)
+ CONFIG_CLEANERS[pkg](target)
+ to_config.append(pkg)
else:
- def matcher(x):
- return False
+ unhandled.append(pkg)
+
+ if len(unhandled):
+ LOG.warn("The following packages were installed and preseeded, "
+ "but cannot be unconfigured: %s", unhandled)
+
+ if len(to_config):
+ util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
+ list(to_config), data=None, target=target, capture=True)
+
+
+def apply_debconf_selections(cfg, target=None):
+ """apply_debconf_selections - push content to debconf"""
+ # debconf_selections:
+ # set1: |
+ # cloud-init cloud-init/datasources multiselect MAAS
+ # set2: pkg pkg/value string bar
+ selsets = cfg.get('debconf_selections')
+ if not selsets:
+ LOG.debug("debconf_selections was not set in config")
+ return
- errors = add_apt_sources(cfg['apt_sources'], params,
- aa_repo_match=matcher)
- for e in errors:
- log.warn("Add source error: %s", ':'.join(e))
+ selections = '\n'.join(
+ [selsets[key] for key in sorted(selsets.keys())])
+ debconf_set_selections(selections.encode() + b"\n", target=target)
- dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
- if dconf_sel:
- log.debug("Setting debconf selections per cloud config")
- try:
- util.subp(('debconf-set-selections', '-'), dconf_sel)
- except Exception:
- util.logexc(log, "Failed to run debconf-set-selections")
+ # get a complete list of packages listed in input
+ pkgs_cfgd = set()
+ for key, content in selsets.items():
+ for line in content.splitlines():
+ if line.startswith("#"):
+ continue
+ pkg = re.sub(r"[:\s].*", "", line)
+ pkgs_cfgd.add(pkg)
+
+ pkgs_installed = util.get_installed_packages(target)
+
+ LOG.debug("pkgs_cfgd: %s", pkgs_cfgd)
+ need_reconfig = pkgs_cfgd.intersection(pkgs_installed)
+
+ if len(need_reconfig) == 0:
+ LOG.debug("no need for reconfig")
+ return
+
+ dpkg_reconfigure(need_reconfig, target=target)
+
+
+def clean_cloud_init(target):
+ """clean out any local cloud-init config"""
+ flist = glob.glob(
+ util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
+
+ LOG.debug("cleaning cloud-init config from: %s", flist)
+ for dpkg_cfg in flist:
+ os.unlink(dpkg_cfg)
def mirrorurl_to_apt_fileprefix(mirror):
+ """mirrorurl_to_apt_fileprefix
+ Convert a mirror url to the file prefix used by apt on disk to
+ store cache information for that mirror.
+    To do so:
+    - strip off the URL scheme (e.g. http://)
+    - drop any trailing /
+    - convert remaining / in the string to _"""
string = mirror
- # take off http:// or ftp://
if string.endswith("/"):
string = string[0:-1]
pos = string.find("://")
@@ -106,174 +208,379 @@ def mirrorurl_to_apt_fileprefix(mirror):
return string
-def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
- for (name, omirror) in old_mirrors.items():
+def rename_apt_lists(new_mirrors, target=None):
+ """rename_apt_lists - rename apt lists to preserve old cache data"""
+ default_mirrors = get_default_mirrors(util.get_architecture(target))
+
+ pre = util.target_path(target, APT_LISTS)
+ for (name, omirror) in default_mirrors.items():
nmirror = new_mirrors.get(name)
if not nmirror:
continue
- oprefix = os.path.join(lists_d, mirrorurl_to_apt_fileprefix(omirror))
- nprefix = os.path.join(lists_d, mirrorurl_to_apt_fileprefix(nmirror))
+
+ oprefix = pre + os.path.sep + mirrorurl_to_apt_fileprefix(omirror)
+ nprefix = pre + os.path.sep + mirrorurl_to_apt_fileprefix(nmirror)
if oprefix == nprefix:
continue
olen = len(oprefix)
for filename in glob.glob("%s_*" % oprefix):
- util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
-
-
-def get_release():
- (stdout, _stderr) = util.subp(['lsb_release', '-cs'])
- return stdout.strip()
-
-
-def generate_sources_list(cfg, codename, mirrors, cloud, log):
- params = {'codename': codename}
+ newname = "%s%s" % (nprefix, filename[olen:])
+ LOG.debug("Renaming apt list %s to %s", filename, newname)
+ try:
+ os.rename(filename, newname)
+ except OSError:
+                # since this is a best effort task, warn but don't fail
+ LOG.warn("Failed to rename apt list:", exc_info=True)
+
+
+def mirror_to_placeholder(tmpl, mirror, placeholder):
+ """mirror_to_placeholder
+ replace the specified mirror in a template with a placeholder string
+    Checks for existence of the expected mirror and warns if not found"""
+ if mirror not in tmpl:
+ LOG.warn("Expected mirror '%s' not found in: %s", mirror, tmpl)
+ return tmpl.replace(mirror, placeholder)
+
+
+def map_known_suites(suite):
+ """there are a few default names which will be auto-extended.
+    This means those names cannot be used literally as suites,
+    but on the other hand it increases readability of the cfg quite a lot"""
+ mapping = {'updates': '$RELEASE-updates',
+ 'backports': '$RELEASE-backports',
+ 'security': '$RELEASE-security',
+ 'proposed': '$RELEASE-proposed',
+ 'release': '$RELEASE'}
+ try:
+ retsuite = mapping[suite]
+ except KeyError:
+ retsuite = suite
+ return retsuite
+
+
+def disable_suites(disabled, src, release):
+ """reads the config for suites to be disabled and removes those
+ from the template"""
+ if not disabled:
+ return src
+
+ retsrc = src
+ for suite in disabled:
+ suite = map_known_suites(suite)
+ releasesuite = templater.render_string(suite, {'RELEASE': release})
+ LOG.debug("Disabling suite %s as %s", suite, releasesuite)
+
+ newsrc = ""
+ for line in retsrc.splitlines(True):
+ if line.startswith("#"):
+ newsrc += line
+ continue
+
+ # sources.list allow options in cols[1] which can have spaces
+ # so the actual suite can be [2] or later. example:
+ # deb [ arch=amd64,armel k=v ] http://example.com/debian
+ cols = line.split()
+ if len(cols) > 1:
+ pcol = 2
+ if cols[1].startswith("["):
+ for col in cols[1:]:
+ pcol += 1
+ if col.endswith("]"):
+ break
+
+ if cols[pcol] == releasesuite:
+ line = '# suite disabled by cloud-init: %s' % line
+ newsrc += line
+ retsrc = newsrc
+
+ return retsrc
+
+
+def generate_sources_list(cfg, release, mirrors, cloud):
+ """generate_sources_list
+    create a sources.list file based on a custom or default template
+ by replacing mirrors and release in the template"""
+ aptsrc = "/etc/apt/sources.list"
+ params = {'RELEASE': release, 'codename': release}
for k in mirrors:
params[k] = mirrors[k]
+ params[k.lower()] = mirrors[k]
- custtmpl = cfg.get('apt_custom_sources_list', None)
- if custtmpl is not None:
- templater.render_string_to_file(custtmpl,
- '/etc/apt/sources.list', params)
- return
-
- template_fn = cloud.get_template_filename('sources.list.%s' %
- (cloud.distro.name))
- if not template_fn:
- template_fn = cloud.get_template_filename('sources.list')
+ tmpl = cfg.get('sources_list', None)
+ if tmpl is None:
+ LOG.info("No custom template provided, fall back to builtin")
+ template_fn = cloud.get_template_filename('sources.list.%s' %
+ (cloud.distro.name))
if not template_fn:
- log.warn("No template found, not rendering /etc/apt/sources.list")
+ template_fn = cloud.get_template_filename('sources.list')
+ if not template_fn:
+ LOG.warn("No template found, not rendering /etc/apt/sources.list")
return
+ tmpl = util.load_file(template_fn)
- templater.render_to_file(template_fn, '/etc/apt/sources.list', params)
+ rendered = templater.render_string(tmpl, params)
+ disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
+ util.write_file(aptsrc, disabled, mode=0o644)
-def add_apt_key_raw(key):
+def add_apt_key_raw(key, target=None):
"""
actual adding of a key as defined in key argument
to the system
"""
+ LOG.debug("Adding key:\n'%s'", key)
try:
- util.subp(('apt-key', 'add', '-'), key)
+ util.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
except util.ProcessExecutionError:
- raise ValueError('failed to add apt GPG Key to apt keyring')
+ LOG.exception("failed to add apt GPG Key to apt keyring")
+ raise
-def add_apt_key(ent):
+def add_apt_key(ent, target=None):
"""
- add key to the system as defined in ent (if any)
- supports raw keys or keyid's
- The latter will as a first step fetch the raw key from a keyserver
+ Add key to the system as defined in ent (if any).
+    Supports raw keys or keyids.
+    The latter will first be fetched from a keyserver to get the raw key.
"""
if 'keyid' in ent and 'key' not in ent:
- keyserver = "keyserver.ubuntu.com"
+ keyserver = DEFAULT_KEYSERVER
if 'keyserver' in ent:
keyserver = ent['keyserver']
- ent['key'] = gpg.get_key_by_id(ent['keyid'], keyserver)
- if 'key' in ent:
- add_apt_key_raw(ent['key'])
+ ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver)
+ if 'key' in ent:
+ add_apt_key_raw(ent['key'], target)
-def convert_to_new_format(srclist):
- """convert_to_new_format
- convert the old list based format to the new dict based one
- """
- srcdict = {}
- if isinstance(srclist, list):
- for srcent in srclist:
- if 'filename' not in srcent:
- # file collides for multiple !filename cases for compatibility
- # yet we need them all processed, so not same dictionary key
- srcent['filename'] = "cloud_config_sources.list"
- key = util.rand_dict_key(srcdict, "cloud_config_sources.list")
- else:
- # all with filename use that as key (matching new format)
- key = srcent['filename']
- srcdict[key] = srcent
- elif isinstance(srclist, dict):
- srcdict = srclist
- else:
- raise ValueError("unknown apt_sources format")
- return srcdict
+def update_packages(cloud):
+ cloud.distro.update_package_sources()
-def add_apt_sources(srclist, template_params=None, aa_repo_match=None):
+def add_apt_sources(srcdict, cloud, target=None, template_params=None,
+ aa_repo_match=None):
"""
add entries in /etc/apt/sources.list.d for each abbreviated
- sources.list entry in 'srclist'. When rendering template, also
+ sources.list entry in 'srcdict'. When rendering template, also
include the values in dictionary searchList
"""
if template_params is None:
template_params = {}
if aa_repo_match is None:
- def _aa_repo_match(x):
- return False
- aa_repo_match = _aa_repo_match
+ raise ValueError('did not get a valid repo matcher')
- errorlist = []
- srcdict = convert_to_new_format(srclist)
+ if not isinstance(srcdict, dict):
+ raise TypeError('unknown apt format: %s' % (srcdict))
for filename in srcdict:
ent = srcdict[filename]
+ LOG.debug("adding source/key '%s'", ent)
if 'filename' not in ent:
ent['filename'] = filename
- # keys can be added without specifying a source
- try:
- add_apt_key(ent)
- except ValueError as detail:
- errorlist.append([ent, detail])
+ add_apt_key(ent, target)
if 'source' not in ent:
- errorlist.append(["", "missing source"])
continue
source = ent['source']
source = templater.render_string(source, template_params)
- if not ent['filename'].startswith(os.path.sep):
+ if not ent['filename'].startswith("/"):
ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
ent['filename'])
+ if not ent['filename'].endswith(".list"):
+ ent['filename'] += ".list"
if aa_repo_match(source):
try:
- util.subp(["add-apt-repository", source])
- except util.ProcessExecutionError as e:
- errorlist.append([source,
- ("add-apt-repository failed. " + str(e))])
+ util.subp(["add-apt-repository", source], target=target)
+ except util.ProcessExecutionError:
+ LOG.exception("add-apt-repository failed.")
+ raise
continue
+ sourcefn = util.target_path(target, ent['filename'])
try:
contents = "%s\n" % (source)
- util.write_file(ent['filename'], contents, omode="ab")
- except Exception:
- errorlist.append([source,
- "failed write to file %s" % ent['filename']])
+ util.write_file(sourcefn, contents, omode="a")
+ except IOError as detail:
+ LOG.exception("failed write to file %s: %s", sourcefn, detail)
+ raise
- return errorlist
+ update_packages(cloud)
+ return
-def find_apt_mirror_info(cloud, cfg):
- """find an apt_mirror given the cloud and cfg provided."""
- mirror = None
+def convert_v1_to_v2_apt_format(srclist):
+ """convert v1 apt format to v2 (dict in apt_sources)"""
+ srcdict = {}
+ if isinstance(srclist, list):
+ LOG.debug("apt config: convert V1 to V2 format (source list to dict)")
+ for srcent in srclist:
+ if 'filename' not in srcent:
+ # file collides for multiple !filename cases for compatibility
+ # yet we need them all processed, so not same dictionary key
+ srcent['filename'] = "cloud_config_sources.list"
+ key = util.rand_dict_key(srcdict, "cloud_config_sources.list")
+ else:
+ # all with filename use that as key (matching new format)
+ key = srcent['filename']
+ srcdict[key] = srcent
+ elif isinstance(srclist, dict):
+ srcdict = srclist
+ else:
+ raise ValueError("unknown apt_sources format")
+
+ return srcdict
- # this is less preferred way of specifying mirror preferred would be to
- # use the distro's search or package_mirror.
- mirror = cfg.get("apt_mirror", None)
- search = cfg.get("apt_mirror_search", None)
- if not mirror and search:
- mirror = util.search_for_mirror(search)
+def convert_key(oldcfg, aptcfg, oldkey, newkey):
+ """convert an old key to the new one if the old one exists
+ returns true if a key was found and converted"""
+ if oldcfg.get(oldkey, None) is not None:
+ aptcfg[newkey] = oldcfg.get(oldkey)
+ del oldcfg[oldkey]
+ return True
+ return False
+
+
+def convert_mirror(oldcfg, aptcfg):
+ """convert old apt_mirror keys into the new more advanced mirror spec"""
+ keymap = [('apt_mirror', 'uri'),
+ ('apt_mirror_search', 'search'),
+ ('apt_mirror_search_dns', 'search_dns')]
+ converted = False
+ newmcfg = {'arches': ['default']}
+ for oldkey, newkey in keymap:
+ if convert_key(oldcfg, newmcfg, oldkey, newkey):
+ converted = True
+
+ # only insert new style config if anything was converted
+ if converted:
+ aptcfg['primary'] = [newmcfg]
+
+
+def convert_v2_to_v3_apt_format(oldcfg):
+ """convert old to new keys and adapt restructured mirror spec"""
+ mapoldkeys = {'apt_sources': 'sources',
+ 'apt_mirror': None,
+ 'apt_mirror_search': None,
+ 'apt_mirror_search_dns': None,
+ 'apt_proxy': 'proxy',
+ 'apt_http_proxy': 'http_proxy',
+                  'apt_ftp_proxy': 'ftp_proxy',
+                  'apt_https_proxy': 'https_proxy',
+ 'apt_preserve_sources_list': 'preserve_sources_list',
+ 'apt_custom_sources_list': 'sources_list',
+ 'add_apt_repo_match': 'add_apt_repo_match'}
+ needtoconvert = []
+ for oldkey in mapoldkeys:
+ if oldkey in oldcfg:
+ if oldcfg[oldkey] in (None, ""):
+ del oldcfg[oldkey]
+ else:
+ needtoconvert.append(oldkey)
+
+ # no old config, so no new one to be created
+ if not needtoconvert:
+ return oldcfg
+ LOG.debug("apt config: convert V2 to V3 format for keys '%s'",
+ ", ".join(needtoconvert))
+
+ # if old AND new config are provided, prefer the new one (LP #1616831)
+ newaptcfg = oldcfg.get('apt', None)
+ if newaptcfg is not None:
+ LOG.debug("apt config: V1/2 and V3 format specified, preferring V3")
+ for oldkey in needtoconvert:
+ newkey = mapoldkeys[oldkey]
+ verify = oldcfg[oldkey] # drop, but keep a ref for verification
+ del oldcfg[oldkey]
+ if newkey is None or newaptcfg.get(newkey, None) is None:
+ # no simple mapping or no collision on this particular key
+ continue
+ if verify != newaptcfg[newkey]:
+ raise ValueError("Old and New apt format defined with unequal "
+ "values %s vs %s @ %s" % (verify,
+ newaptcfg[newkey],
+ oldkey))
+ # return conf after clearing conflicting V1/2 keys
+ return oldcfg
+
+ # create new format from old keys
+ aptcfg = {}
+
+ # simple renames / moves under the apt key
+ for oldkey in mapoldkeys:
+ if mapoldkeys[oldkey] is not None:
+ convert_key(oldcfg, aptcfg, oldkey, mapoldkeys[oldkey])
+
+ # mirrors changed in a more complex way
+ convert_mirror(oldcfg, aptcfg)
+
+ for oldkey in mapoldkeys:
+ if oldcfg.get(oldkey, None) is not None:
+ raise ValueError("old apt key '%s' left after conversion" % oldkey)
+
+ # insert new format into config and return full cfg with only v3 content
+ oldcfg['apt'] = aptcfg
+ return oldcfg
+
+
+def convert_to_v3_apt_format(cfg):
+ """convert the old list based format to the new dict based one. After that
+ convert the old dict keys/format to v3 a.k.a 'new apt config'"""
+ # V1 -> V2, the apt_sources entry from list to dict
+ apt_sources = cfg.get('apt_sources', None)
+ if apt_sources is not None:
+ cfg['apt_sources'] = convert_v1_to_v2_apt_format(apt_sources)
+
+ # V2 -> V3, move all former globals under the "apt" key
+ # Restructure into new key names and mirror hierarchy
+ cfg = convert_v2_to_v3_apt_format(cfg)
+
+ return cfg
+
+
+def search_for_mirror(candidates):
+ """
+ Search through a list of mirror urls for one that works
+ This needs to return quickly.
+ """
+ if candidates is None:
+ return None
+
+ LOG.debug("search for mirror in candidates: '%s'", candidates)
+ for cand in candidates:
+ try:
+ if util.is_resolvable_url(cand):
+ LOG.debug("found working mirror: '%s'", cand)
+ return cand
+ except Exception:
+ pass
+ return None
+
+
+def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
+ """
+    Try to resolve a list of predefined DNS names to pick mirrors
+ """
+ mirror = None
- if (not mirror and
- util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
+ if configured:
mydom = ""
doms = []
+ if mirrortype == "primary":
+ mirrordns = "mirror"
+ elif mirrortype == "security":
+ mirrordns = "security-mirror"
+ else:
+ raise ValueError("unknown mirror type")
+
# if we have a fqdn, then search its domain portion first
- (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ (_, fqdn) = util.get_hostname_fqdn(cfg, cloud)
mydom = ".".join(fqdn.split(".")[1:])
if mydom:
doms.append(".%s" % mydom)
@@ -282,38 +589,136 @@ def find_apt_mirror_info(cloud, cfg):
mirror_list = []
distro = cloud.distro.name
- mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
+ mirrorfmt = "http://%s-%s%s/%s" % (distro, mirrordns, "%s", distro)
for post in doms:
mirror_list.append(mirrorfmt % (post))
- mirror = util.search_for_mirror(mirror_list)
+ mirror = search_for_mirror(mirror_list)
+
+ return mirror
+
+def update_mirror_info(pmirror, smirror, arch, cloud):
+ """sets security mirror to primary if not defined.
+ returns defaults if no mirrors are defined"""
+ if pmirror is not None:
+ if smirror is None:
+ smirror = pmirror
+ return {'PRIMARY': pmirror,
+ 'SECURITY': smirror}
+
+ # None specified at all, get default mirrors from cloud
mirror_info = cloud.datasource.get_package_mirror_info()
+ if mirror_info:
+ # get_package_mirror_info() returns a dictionary with
+ # arbitrary key/value pairs including 'primary' and 'security' keys.
+ # caller expects dict with PRIMARY and SECURITY.
+ m = mirror_info.copy()
+ m['PRIMARY'] = m['primary']
+ m['SECURITY'] = m['security']
+
+ return m
+
+ # if neither apt nor cloud configured mirrors fall back to
+ return get_default_mirrors(arch)
+
+
+def get_arch_mirrorconfig(cfg, mirrortype, arch):
+ """out of a list of potential mirror configurations select
+ and return the one matching the architecture (or default)"""
+ # select the mirror specification (if-any)
+ mirror_cfg_list = cfg.get(mirrortype, None)
+ if mirror_cfg_list is None:
+ return None
+
+ # select the specification matching the target arch
+ default = None
+ for mirror_cfg_elem in mirror_cfg_list:
+ arches = mirror_cfg_elem.get("arches")
+ if arch in arches:
+ return mirror_cfg_elem
+ if "default" in arches:
+ default = mirror_cfg_elem
+ return default
+
+
+def get_mirror(cfg, mirrortype, arch, cloud):
+ """pass the three potential stages of mirror specification
+    returns None if none of them found anything, otherwise the first
+    hit is returned"""
+ mcfg = get_arch_mirrorconfig(cfg, mirrortype, arch)
+ if mcfg is None:
+ return None
+
+ # directly specified
+ mirror = mcfg.get("uri", None)
+
+ # fallback to search if specified
+ if mirror is None:
+ # list of mirrors to try to resolve
+ mirror = search_for_mirror(mcfg.get("search", None))
+
+ # fallback to search_dns if specified
+ if mirror is None:
+ # list of mirrors to try to resolve
+ mirror = search_for_mirror_dns(mcfg.get("search_dns", None),
+ mirrortype, cfg, cloud)
+
+ return mirror
+
+
+def find_apt_mirror_info(cfg, cloud, arch=None):
+ """find_apt_mirror_info
+ find an apt_mirror given the cfg provided.
+    It can check for separate config of primary and security mirrors.
+    If only primary is given, security is assumed to be equal to primary.
+    If the generic apt_mirror is given, it defines both.
+ """
- # this is a bit strange.
- # if mirror is set, then one of the legacy options above set it
- # but they do not cover security. so we need to get that from
- # get_package_mirror_info
- if mirror:
- mirror_info.update({'primary': mirror})
+ if arch is None:
+ arch = util.get_architecture()
+ LOG.debug("got arch for mirror selection: %s", arch)
+ pmirror = get_mirror(cfg, "primary", arch, cloud)
+ LOG.debug("got primary mirror: %s", pmirror)
+ smirror = get_mirror(cfg, "security", arch, cloud)
+ LOG.debug("got security mirror: %s", smirror)
+
+ mirror_info = update_mirror_info(pmirror, smirror, arch, cloud)
+
+ # less complex replacements use only MIRROR, derive from primary
+ mirror_info["MIRROR"] = mirror_info["PRIMARY"]
return mirror_info
def apply_apt_config(cfg, proxy_fname, config_fname):
+ """apply_apt_config
+    Applies any apt*proxy config if specified
+ """
# Set up any apt proxy
- cfgs = (('apt_proxy', 'Acquire::HTTP::Proxy "%s";'),
- ('apt_http_proxy', 'Acquire::HTTP::Proxy "%s";'),
- ('apt_ftp_proxy', 'Acquire::FTP::Proxy "%s";'),
- ('apt_https_proxy', 'Acquire::HTTPS::Proxy "%s";'))
+ cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
+ ('http_proxy', 'Acquire::http::Proxy "%s";'),
+ ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
+ ('https_proxy', 'Acquire::https::Proxy "%s";'))
proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
if len(proxies):
+ LOG.debug("write apt proxy info to %s", proxy_fname)
util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
elif os.path.isfile(proxy_fname):
util.del_file(proxy_fname)
+ LOG.debug("no apt proxy configured, removed %s", proxy_fname)
- if cfg.get('apt_config', None):
- util.write_file(config_fname, cfg.get('apt_config'))
+ if cfg.get('conf', None):
+ LOG.debug("write apt config info to %s", config_fname)
+ util.write_file(config_fname, cfg.get('conf'))
elif os.path.isfile(config_fname):
util.del_file(config_fname)
+ LOG.debug("no apt config configured, removed %s", config_fname)
+
+
+CONFIG_CLEANERS = {
+ 'cloud-init': clean_cloud_init,
+}
+
+# vi: ts=4 expandtab syntax=python
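A minimal sketch of how the converters above reshape a legacy (V1/V2)
config; the mirror and proxy values are made up for illustration::

    from cloudinit.config.cc_apt_configure import convert_to_v3_apt_format

    cfg = {'apt_mirror': 'http://mymirror.example.com/ubuntu/',
           'apt_proxy': 'http://squid.example.com:3128'}
    cfg = convert_to_v3_apt_format(cfg)
    # cfg['apt'] is now:
    # {'proxy': 'http://squid.example.com:3128',
    #  'primary': [{'arches': ['default'],
    #               'uri': 'http://mymirror.example.com/ubuntu/'}]}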
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 70d4e7c3..0086840f 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,6 +47,8 @@ Example config:
from cloudinit import util
+distros = ['ubuntu']
+
def handle(name, cfg, cloud, log, args):
# Get config
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 0c84d600..b3089f30 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -19,6 +19,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import errno
+
import six
from six import BytesIO
@@ -36,49 +38,61 @@ SERVER_CFG = '/etc/mcollective/server.cfg'
LOG = logging.getLogger(__name__)
-def configure(config):
- # Read server.cfg values from the
- # original file in order to be able to mix the rest up
+def configure(config, server_cfg=SERVER_CFG,
+ pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
+ # Read server.cfg (if it exists) values from the
+ # original file in order to be able to mix the rest up.
try:
- mcollective_config = ConfigObj(SERVER_CFG, file_error=True)
- except IOError:
- LOG.warn("Did not find file %s", SERVER_CFG)
- mcollective_config = ConfigObj(config)
- else:
- for (cfg_name, cfg) in config.items():
- if cfg_name == 'public-cert':
- util.write_file(PUBCERT_FILE, cfg, mode=0o644)
- mcollective_config[
- 'plugin.ssl_server_public'] = PUBCERT_FILE
- mcollective_config['securityprovider'] = 'ssl'
- elif cfg_name == 'private-cert':
- util.write_file(PRICERT_FILE, cfg, mode=0o600)
- mcollective_config[
- 'plugin.ssl_server_private'] = PRICERT_FILE
- mcollective_config['securityprovider'] = 'ssl'
+ old_contents = util.load_file(server_cfg, quiet=False, decode=False)
+ mcollective_config = ConfigObj(BytesIO(old_contents))
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ LOG.debug("Did not find file %s (starting with an empty"
+ " config)", server_cfg)
+ mcollective_config = ConfigObj()
+ for (cfg_name, cfg) in config.items():
+ if cfg_name == 'public-cert':
+ util.write_file(pubcert_file, cfg, mode=0o644)
+ mcollective_config[
+ 'plugin.ssl_server_public'] = pubcert_file
+ mcollective_config['securityprovider'] = 'ssl'
+ elif cfg_name == 'private-cert':
+ util.write_file(pricert_file, cfg, mode=0o600)
+ mcollective_config[
+ 'plugin.ssl_server_private'] = pricert_file
+ mcollective_config['securityprovider'] = 'ssl'
+ else:
+ if isinstance(cfg, six.string_types):
+ # Just set it in the 'main' section
+ mcollective_config[cfg_name] = cfg
+ elif isinstance(cfg, (dict)):
+ # Iterate through the config items, create a section if
+ # it is needed and then add/or create items as needed
+ if cfg_name not in mcollective_config.sections:
+ mcollective_config[cfg_name] = {}
+ for (o, v) in cfg.items():
+ mcollective_config[cfg_name][o] = v
else:
- if isinstance(cfg, six.string_types):
- # Just set it in the 'main' section
- mcollective_config[cfg_name] = cfg
- elif isinstance(cfg, (dict)):
- # Iterate through the config items, create a section if
- # it is needed and then add/or create items as needed
- if cfg_name not in mcollective_config.sections:
- mcollective_config[cfg_name] = {}
- for (o, v) in cfg.items():
- mcollective_config[cfg_name][o] = v
- else:
- # Otherwise just try to convert it to a string
- mcollective_config[cfg_name] = str(cfg)
- # We got all our config as wanted we'll rename
- # the previous server.cfg and create our new one
- util.rename(SERVER_CFG, "%s.old" % (SERVER_CFG))
-
- # Now we got the whole file, write to disk...
+ # Otherwise just try to convert it to a string
+ mcollective_config[cfg_name] = str(cfg)
+
+ try:
+ # We got all our config as wanted we'll copy
+ # the previous server.cfg and overwrite the old with our new one
+ util.copy(server_cfg, "%s.old" % (server_cfg))
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ # Doesn't exist to copy...
+ pass
+ else:
+ raise
+
+ # Now we got the whole (new) file, write to disk...
contents = BytesIO()
mcollective_config.write(contents)
- contents = contents.getvalue()
- util.write_file(SERVER_CFG, contents, mode=0o644)
+ util.write_file(server_cfg, contents.getvalue(), mode=0o644)
def handle(name, cfg, cloud, log, _args):
@@ -98,5 +112,5 @@ def handle(name, cfg, cloud, log, _args):
if 'conf' in mcollective_cfg:
configure(config=mcollective_cfg['conf'])
- # Start mcollective
- util.subp(['service', 'mcollective', 'start'], capture=False)
+ # restart mcollective to handle updated config
+ util.subp(['service', 'mcollective', 'restart'], capture=False)
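For illustration (values assumed, not from the diff): the refactored
configure() takes its file paths as parameters, which is what the new unit
tests exercise; string values land in the 'main' section, while dicts
become their own sections::

    from cloudinit.config.cc_mcollective import configure

    configure(config={'loglevel': 'debug',
                      'plugin.stomp': {'host': 'dbhost'}},
              server_cfg='/tmp/mcollective-server.cfg')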
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
new file mode 100644
index 00000000..ad69aa34
--- /dev/null
+++ b/cloudinit/config/cc_ntp.py
@@ -0,0 +1,106 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Ryan Harper <ryan.harper@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import templater
+from cloudinit import type_utils
+from cloudinit import util
+
+import os
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+NTP_CONF = '/etc/ntp.conf'
+NR_POOL_SERVERS = 4
+distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
+
+
+def handle(name, cfg, cloud, log, _args):
+ """
+ Enable and configure ntp
+
+ ntp:
+ pools: ['0.{{distro}}.pool.ntp.org', '1.{{distro}}.pool.ntp.org']
+ servers: ['192.168.2.1']
+
+ """
+
+ ntp_cfg = cfg.get('ntp', {})
+
+ if not isinstance(ntp_cfg, (dict)):
+        raise RuntimeError("'ntp' key existed in config,"
+                           " but not a dictionary type,"
+                           " is a %s instead"
+                           % type_utils.obj_name(ntp_cfg))
+
+ if 'ntp' not in cfg:
+        LOG.debug("Skipping module named %s,"
+                  " not present or disabled by cfg", name)
+ return True
+
+ install_ntp(cloud.distro.install_packages, packages=['ntp'],
+ check_exe="ntpd")
+ rename_ntp_conf()
+ write_ntp_config_template(ntp_cfg, cloud)
+
+
+def install_ntp(install_func, packages=None, check_exe="ntpd"):
+ if util.which(check_exe):
+ return
+ if packages is None:
+ packages = ['ntp']
+
+ install_func(packages)
+
+
+def rename_ntp_conf(config=NTP_CONF):
+ if os.path.exists(config):
+ util.rename(config, config + ".dist")
+
+
+def generate_server_names(distro):
+ names = []
+ for x in range(0, NR_POOL_SERVERS):
+ name = "%d.%s.pool.ntp.org" % (x, distro)
+ names.append(name)
+ return names
+
+
+def write_ntp_config_template(cfg, cloud):
+ servers = cfg.get('servers', [])
+ pools = cfg.get('pools', [])
+
+ if len(servers) == 0 and len(pools) == 0:
+ LOG.debug('Adding distro default ntp pool servers')
+ pools = generate_server_names(cloud.distro.name)
+
+ params = {
+ 'servers': servers,
+ 'pools': pools,
+ }
+
+ template_fn = cloud.get_template_filename('ntp.conf.%s' %
+ (cloud.distro.name))
+ if not template_fn:
+ template_fn = cloud.get_template_filename('ntp.conf')
+ if not template_fn:
+        raise RuntimeError("No template found,"
+                           " not rendering %s" % NTP_CONF)
+
+ templater.render_to_file(template_fn, NTP_CONF, params)
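A sketch of the defaulting behaviour: with neither servers nor pools
configured, write_ntp_config_template() falls back to four distro pool
names from generate_server_names()::

    from cloudinit.config.cc_ntp import generate_server_names

    print(generate_server_names('ubuntu'))
    # ['0.ubuntu.pool.ntp.org', '1.ubuntu.pool.ntp.org',
    #  '2.ubuntu.pool.ntp.org', '3.ubuntu.pool.ntp.org']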
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 72176d42..ae720bd2 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -31,7 +31,7 @@ POST_LIST_ALL = [
'pub_key_ecdsa',
'instance_id',
'hostname',
- 'fdqn'
+ 'fqdn'
]
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 3a113aea..d4ad724a 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -18,6 +18,8 @@
from cloudinit import util
+distros = ['fedora', 'rhel']
+
def handle(name, cfg, _cloud, log, _args):
sm = SubscriptionManager(cfg)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index f5786a31..13d70c8e 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -46,7 +46,12 @@ def handle(name, cfg, cloud, log, _args):
# ... copy the key pair if specified
if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
- pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
+ if os.path.isdir("/etc/salt/pki/minion"):
+ pki_dir_default = "/etc/salt/pki/minion"
+ else:
+ pki_dir_default = "/etc/salt/pki"
+
+ pki_dir = salt_cfg.get('pki_dir', pki_dir_default)
with util.umask(0o77):
util.ensure_dir(pki_dir)
pub_name = os.path.join(pki_dir, 'minion.pub')
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 1a485ee6..6bcd8382 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -68,6 +68,8 @@ BUILTIN_CFG = {
'config': {},
}
+distros = ['ubuntu']
+
def parse_filename(fname):
fname = os.path.basename(fname)
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
new file mode 100644
index 00000000..f3c1a664
--- /dev/null
+++ b/cloudinit/config/cc_spacewalk.py
@@ -0,0 +1,85 @@
+# vi: ts=4 expandtab
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+**Summary:** helper to setup https://fedorahosted.org/spacewalk/
+
+**Description:** This module enables configuring the actions needed
+to set up spacewalk on redhat based systems.
+
+It can be configured with the following option structure::
+
+ spacewalk:
+ server: spacewalk api server (required)
+"""
+
+from cloudinit import util
+
+
+distros = ['redhat', 'fedora']
+required_packages = ['rhn-setup']
+def_ca_cert_path = "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT"
+
+
+def is_registered():
+    # Check to see if we are already registered; this is
+    # done by trying to sync, and if that fails then we
+    # assume we aren't registered, which is admittedly crude...
+ already_registered = False
+ try:
+ util.subp(['rhn-profile-sync', '--verbose'], capture=False)
+ already_registered = True
+ except util.ProcessExecutionError as e:
+ if e.exit_code != 1:
+ raise
+ return already_registered
+
+
+def do_register(server, profile_name,
+ ca_cert_path=def_ca_cert_path,
+ proxy=None, log=None,
+ activation_key=None):
+ if log is not None:
+ log.info("Registering using `rhnreg_ks` profile '%s'"
+ " into server '%s'", profile_name, server)
+ cmd = ['rhnreg_ks']
+ cmd.extend(['--serverUrl', 'https://%s/XMLRPC' % server])
+ cmd.extend(['--profilename', str(profile_name)])
+ if proxy:
+ cmd.extend(["--proxy", str(proxy)])
+ if ca_cert_path:
+ cmd.extend(['--sslCACert', str(ca_cert_path)])
+ if activation_key:
+ cmd.extend(['--activationkey', str(activation_key)])
+ util.subp(cmd, capture=False)
+
+
+def handle(name, cfg, cloud, log, _args):
+ if 'spacewalk' not in cfg:
+ log.debug(("Skipping module named %s,"
+ " no 'spacewalk' key in configuration"), name)
+ return
+ cfg = cfg['spacewalk']
+ spacewalk_server = cfg.get('server')
+ if spacewalk_server:
+ # Need to have this installed before further things will work.
+ cloud.distro.install_packages(required_packages)
+ if not is_registered():
+ do_register(spacewalk_server,
+ cloud.datasource.get_hostname(fqdn=True),
+ proxy=cfg.get("proxy"), log=log,
+ activation_key=cfg.get('activation_key'))
+ else:
+ log.debug("Skipping module named %s, 'spacewalk/server' key"
+ " was not found in configuration", name)
diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py
index 884d79f1..bffb4380 100644
--- a/cloudinit/config/cc_ubuntu_init_switch.py
+++ b/cloudinit/config/cc_ubuntu_init_switch.py
@@ -86,6 +86,8 @@ else
fi
"""
+distros = ['ubuntu']
+
def handle(name, cfg, cloud, log, args):
"""Handler method activated by cloud-init."""
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 64fba869..22549e62 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -23,6 +23,8 @@ import six
from cloudinit import util
+distros = ['fedora', 'rhel']
+
def _canonicalize_id(repo_id):
repo_id = repo_id.lower().replace("-", "_")
diff --git a/cloudinit/dhclient_hook.py b/cloudinit/dhclient_hook.py
new file mode 100644
index 00000000..82cb1855
--- /dev/null
+++ b/cloudinit/dhclient_hook.py
@@ -0,0 +1,50 @@
+#!/usr/bin/python
+# vi: ts=4 expandtab
+
+import os
+
+from cloudinit import atomic_helper
+from cloudinit import log as logging
+from cloudinit import stages
+
+LOG = logging.getLogger(__name__)
+
+
+class LogDhclient(object):
+
+ def __init__(self, cli_args):
+ self.hooks_dir = self._get_hooks_dir()
+ self.net_interface = cli_args.net_interface
+ self.net_action = cli_args.net_action
+ self.hook_file = os.path.join(self.hooks_dir,
+ self.net_interface + ".json")
+
+ @staticmethod
+ def _get_hooks_dir():
+ i = stages.Init()
+ return os.path.join(i.paths.get_runpath(), 'dhclient.hooks')
+
+ def check_hooks_dir(self):
+ if not os.path.exists(self.hooks_dir):
+ os.makedirs(self.hooks_dir)
+ else:
+ # If the action is down and the json file exists, we need to
+ # delete the file
+            if self.net_action == 'down' and os.path.exists(self.hook_file):
+ os.remove(self.hook_file)
+
+ @staticmethod
+ def get_vals(info):
+ new_info = {}
+ for k, v in info.items():
+ if k.startswith("DHCP4_") or k.startswith("new_"):
+ key = (k.replace('DHCP4_', '').replace('new_', '')).lower()
+ new_info[key] = v
+ return new_info
+
+ def record(self):
+ envs = os.environ
+ if self.hook_file is None:
+ return
+ atomic_helper.write_json(self.hook_file, self.get_vals(envs))
+ LOG.debug("Wrote dhclient options in %s", self.hook_file)
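A small sketch of the environment filtering (sample variables made up):
get_vals() keeps only DHCP4_*/new_* keys and normalises their names::

    from cloudinit.dhclient_hook import LogDhclient

    env = {'DHCP4_IP_ADDRESS': '192.168.1.5',
           'new_subnet_mask': '255.255.255.0',
           'PATH': '/usr/bin'}
    print(LogDhclient.get_vals(env))
    # {'ip_address': '192.168.1.5', 'subnet_mask': '255.255.255.0'}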
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 40af8802..b1192e84 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -112,7 +112,7 @@ class Distro(object):
raise NotImplementedError()
def get_primary_arch(self):
- arch = os.uname[4]
+ arch = os.uname()[4]
if arch in ("i386", "i486", "i586", "i686"):
return "i386"
return arch
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 6267dd6e..1865dc69 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -1,8 +1,10 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2014 Rackspace, US Inc.
+# Copyright (C) 2016 Matthew Thode.
#
# Author: Nate House <nathan.house@rackspace.com>
+# Author: Matthew Thode <prometheanfire@gentoo.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -21,6 +23,7 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros import net_util
from cloudinit.distros.parsers.hostname import HostnameConf
from cloudinit.settings import PER_INSTANCE
@@ -29,9 +32,11 @@ LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
- locale_conf_fn = "/etc/locale.gen"
- network_conf_fn = "/etc/conf.d/net"
- init_cmd = [''] # init scripts
+ locale_conf_fn = '/etc/locale.gen'
+ network_conf_fn = '/etc/conf.d/net'
+ resolve_conf_fn = '/etc/resolv.conf'
+ hostname_conf_fn = '/etc/conf.d/hostname'
+ init_cmd = ['service'] # init scripts
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -50,7 +55,7 @@ class Distro(distros.Distro):
# "" provides trailing newline during join
lines = [
util.make_header(),
- 'LANG="%s"' % (locale),
+ 'LANG="%s"' % locale,
"",
]
util.write_file(out_fn, "\n".join(lines))
@@ -60,8 +65,66 @@ class Distro(distros.Distro):
self.package_command('', pkgs=pkglist)
def _write_network(self, settings):
- util.write_file(self.network_conf_fn, settings)
- return ['all']
+ entries = net_util.translate_network(settings)
+ LOG.debug("Translated ubuntu style network settings %s into %s",
+ settings, entries)
+ dev_names = entries.keys()
+ nameservers = []
+
+ for (dev, info) in entries.items():
+ if 'dns-nameservers' in info:
+ nameservers.extend(info['dns-nameservers'])
+ if dev == 'lo':
+ continue
+ net_fn = self.network_conf_fn + '.' + dev
+ dns_nameservers = info.get('dns-nameservers')
+ if isinstance(dns_nameservers, (list, tuple)):
+ dns_nameservers = str(tuple(dns_nameservers)).replace(',', '')
+ # eth0, {'auto': True, 'ipv6': {}, 'bootproto': 'dhcp'}
+ # lo, {'dns-nameservers': ['10.0.1.3'], 'ipv6': {}, 'auto': True}
+ results = ''
+ if info.get('bootproto') == 'dhcp':
+ results += 'config_{name}="dhcp"'.format(name=dev)
+ else:
+ results += (
+ 'config_{name}="{ip_address} netmask {netmask}"\n'
+ 'mac_{name}="{hwaddr}"\n'
+ ).format(name=dev, ip_address=info.get('address'),
+ netmask=info.get('netmask'),
+ hwaddr=info.get('hwaddress'))
+ results += 'routes_{name}="default via {gateway}"\n'.format(
+ name=dev,
+ gateway=info.get('gateway')
+ )
+ if info.get('dns-nameservers'):
+ results += 'dns_servers_{name}="{dnsservers}"\n'.format(
+ name=dev,
+ dnsservers=dns_nameservers)
+ util.write_file(net_fn, results)
+ self._create_network_symlink(dev)
+ if info.get('auto'):
+ cmd = ['rc-update', 'add', 'net.{name}'.format(name=dev),
+ 'default']
+ try:
+ (_out, err) = util.subp(cmd)
+ if len(err):
+ LOG.warn("Running %s resulted in stderr output: %s",
+ cmd, err)
+ except util.ProcessExecutionError:
+ util.logexc(LOG, "Running interface command %s failed",
+ cmd)
+
+ if nameservers:
+ util.write_file(self.resolve_conf_fn,
+ convert_resolv_conf(nameservers))
+
+ return dev_names
+
+ @staticmethod
+ def _create_network_symlink(interface_name):
+ file_path = '/etc/init.d/net.{name}'.format(name=interface_name)
+ if not util.is_link(file_path):
+ util.sym_link('/etc/init.d/net.lo', file_path)
def _bring_up_interface(self, device_name):
cmd = ['/etc/init.d/net.%s' % device_name, 'restart']
@@ -108,13 +171,16 @@ class Distro(distros.Distro):
if not conf:
conf = HostnameConf('')
conf.set_hostname(your_hostname)
- util.write_file(out_fn, conf, 0o644)
+ gentoo_hostname_config = 'hostname="%s"' % conf
+ gentoo_hostname_config = gentoo_hostname_config.replace('\n', '')
+ util.write_file(out_fn, gentoo_hostname_config, 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
+ return self.hostname_conf_fn, sys_hostname
- def _read_hostname_conf(self, filename):
+ @staticmethod
+ def _read_hostname_conf(filename):
conf = HostnameConf(util.load_file(filename))
conf.parse()
return conf
@@ -137,7 +203,7 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- cmd = ['emerge']
+ cmd = ['emerge']  # fresh list each call
# Redirect output
cmd.append("--quiet")
@@ -158,3 +224,12 @@ class Distro(distros.Distro):
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
["-u", "world"], freq=PER_INSTANCE)
+
+
+def convert_resolv_conf(settings):
+ """Returns a settings string formatted for resolv.conf."""
+ result = ''
+ if isinstance(settings, list):
+ for ns in settings:
+ result += 'nameserver %s\n' % ns
+ return result
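Taken together, _write_network() and convert_resolv_conf() emit plain
shell-style config files. A sketch of the output for one static interface,
with every value fabricated for illustration:

    dev = 'eth0'
    info = {'address': '10.0.0.2', 'netmask': '255.255.255.0',
            'hwaddress': '00:16:3e:00:00:01', 'gateway': '10.0.0.1',
            'dns-nameservers': ['10.0.1.3', '10.0.1.4']}
    # same formatting trick as above: tuple repr with commas stripped
    dns = str(tuple(info['dns-nameservers'])).replace(',', '')
    out = ('config_{name}="{ip} netmask {mask}"\n'
           'mac_{name}="{mac}"\n'
           'routes_{name}="default via {gw}"\n'
           'dns_servers_{name}="{dns}"\n').format(
        name=dev, ip=info['address'], mask=info['netmask'],
        mac=info['hwaddress'], gw=info['gateway'], dns=dns)
    print(out)
    # config_eth0="10.0.0.2 netmask 255.255.255.0"
    # mac_eth0="00:16:3e:00:00:01"
    # routes_eth0="default via 10.0.0.1"
    # dns_servers_eth0="('10.0.1.3' '10.0.1.4')"
    # and convert_resolv_conf(info['dns-nameservers']) yields:
    #   nameserver 10.0.1.3
    #   nameserver 10.0.1.4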
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index 6a76d785..5bbff513 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -36,11 +36,11 @@ def export_armour(key):
return armour
-def receive_key(key, keyserver):
+def recv_key(key, keyserver):
"""Receive gpg key from the specified keyserver"""
LOG.debug('Receive gpg key "%s"', key)
try:
- util.subp(["gpg", "--keyserver", keyserver, "--recv-keys", key],
+ util.subp(["gpg", "--keyserver", keyserver, "--recv", key],
capture=True)
except util.ProcessExecutionError as error:
raise ValueError(('Failed to import key "%s" '
@@ -57,12 +57,12 @@ def delete_key(key):
LOG.warn('Failed delete key "%s": %s', key, error)
-def get_key_by_id(keyid, keyserver="keyserver.ubuntu.com"):
+def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'):
"""get gpg keyid from keyserver"""
armour = export_armour(keyid)
if not armour:
try:
- receive_key(keyid, keyserver=keyserver)
+ recv_key(keyid, keyserver=keyserver)
armour = export_armour(keyid)
except ValueError:
LOG.exception('Failed to obtain gpg key %s', keyid)
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 21cc602b..7e58bfea 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -36,7 +36,7 @@ def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None):
try:
contents = util.load_file(sys_dev_path(devname, path))
except (OSError, IOError) as e:
- if getattr(e, 'errno', None) == errno.ENOENT:
+ if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR):
if enoent is not None:
return enoent
raise
@@ -347,7 +347,12 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
def get_interface_mac(ifname):
"""Returns the string value of an interface's MAC Address"""
- return read_sys_net(ifname, "address", enoent=False)
+ path = "address"
+ if os.path.isdir(sys_dev_path(ifname, "bonding_slave")):
+ # for a bond slave, get the nic's hwaddress, not the address it
+ # is using because it's part of a bond.
+ path = "bonding_slave/perm_hwaddr"
+ return read_sys_net(ifname, path, enoent=False)
def get_interfaces_by_mac(devs=None):
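The bonding_slave check above matters because the bonding driver rewrites a
slave's 'address' attribute to the bond's MAC; the NIC's permanent MAC
survives under bonding_slave/perm_hwaddr. A standalone sketch of that lookup,
assuming the usual sysfs layout:

    import os

    def interface_mac(ifname):
        # sketch of get_interface_mac() above, reading sysfs directly
        base = '/sys/class/net/%s' % ifname
        path = os.path.join(base, 'address')
        if os.path.isdir(os.path.join(base, 'bonding_slave')):
            # bonding rewrites 'address' to the bond's MAC; the permanent
            # hardware address is preserved by the bonding driver here
            path = os.path.join(base, 'bonding_slave', 'perm_hwaddr')
        with open(path) as f:
            return f.read().strip()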
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index eff5b924..cd533ddb 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -399,7 +399,7 @@ class Renderer(renderer.Renderer):
else:
# ifenslave docs say to auto the slave devices
lines = []
- if 'bond-master' in iface:
+ if 'bond-master' in iface or 'bond-slaves' in iface:
lines.append("auto {name}".format(**iface))
lines.append("iface {name} {inet} {mode}".format(**iface))
lines.extend(_iface_add_attrs(iface, index=0))
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
index 0d95f506..d76223d1 100644
--- a/cloudinit/signal_handler.py
+++ b/cloudinit/signal_handler.py
@@ -54,7 +54,7 @@ def _pprint_frame(frame, depth, max_depth, contents):
def _handle_exit(signum, frame):
(msg, rc) = EXIT_FOR[signum]
- msg = msg % ({'version': vr.version()})
+ msg = msg % ({'version': vr.version_string()})
contents = StringIO()
contents.write("%s\n" % (msg))
_pprint_frame(frame, 1, BACK_FRAME_TRACE_DEPTH, contents)
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index a3529609..48136f7c 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -110,12 +110,6 @@ class DataSourceAltCloud(sources.DataSource):
'''
- uname_arch = os.uname()[4]
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmi data is not available on ARM processors
- LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
- return 'UNKNOWN'
-
system_name = util.read_dmi_data("system-product-name")
if not system_name:
return 'UNKNOWN'
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 8c7e8673..dbc2bb68 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -20,18 +20,17 @@ import base64
import contextlib
import crypt
import fnmatch
+from functools import partial
import os
import os.path
import time
-import xml.etree.ElementTree as ET
-
from xml.dom import minidom
-
-from cloudinit.sources.helpers.azure import get_metadata_from_fabric
+import xml.etree.ElementTree as ET
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
from cloudinit import sources
+from cloudinit.sources.helpers.azure import get_metadata_from_fabric
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -55,6 +54,7 @@ BUILTIN_DS_CONFIG = {
'hostname_command': 'hostname',
},
'disk_aliases': {'ephemeral0': '/dev/sdb'},
+ 'dhclient_lease_file': '/var/lib/dhcp/dhclient.eth0.leases',
}
BUILTIN_CLOUD_CONFIG = {
@@ -115,6 +115,7 @@ class DataSourceAzureNet(sources.DataSource):
self.ds_cfg = util.mergemanydict([
util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
BUILTIN_DS_CONFIG])
+ self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -123,6 +124,9 @@ class DataSourceAzureNet(sources.DataSource):
def get_metadata_from_agent(self):
temp_hostname = self.metadata.get('local-hostname')
hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
+ agent_cmd = self.ds_cfg['agent_command']
+ LOG.debug("Getting metadata via agent. hostname=%s cmd=%s",
+ temp_hostname, agent_cmd)
with temporary_hostname(temp_hostname, self.ds_cfg,
hostname_command=hostname_command) \
as previous_hostname:
@@ -138,7 +142,7 @@ class DataSourceAzureNet(sources.DataSource):
util.logexc(LOG, "handling set_hostname failed")
try:
- invoke_agent(self.ds_cfg['agent_command'])
+ invoke_agent(agent_cmd)
except util.ProcessExecutionError:
# claim the datasource even if the command failed
util.logexc(LOG, "agent command '%s' failed.",
@@ -226,16 +230,18 @@ class DataSourceAzureNet(sources.DataSource):
write_files(ddir, files, dirmode=0o700)
if self.ds_cfg['agent_command'] == '__builtin__':
- metadata_func = get_metadata_from_fabric
+ metadata_func = partial(get_metadata_from_fabric,
+ fallback_lease_file=self.
+ dhclient_lease_file)
else:
metadata_func = self.get_metadata_from_agent
+
try:
fabric_data = metadata_func()
except Exception as exc:
LOG.info("Error communicating with Azure fabric; assume we aren't"
" on Azure.", exc_info=True)
return False
-
self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
self.metadata.update(fabric_data)
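The functools.partial() use above just pre-binds the lease file so that both
branches expose a zero-argument callable. A tiny illustration, with a
stand-in for the real helper:

    from functools import partial

    def get_metadata(fallback_lease_file=None):
        # stand-in for get_metadata_from_fabric, for illustration only
        return {'lease-file': fallback_lease_file}

    metadata_func = partial(
        get_metadata,
        fallback_lease_file='/var/lib/dhcp/dhclient.eth0.leases')
    fabric_data = metadata_func()   # no arguments needed at the call site
    # fabric_data == {'lease-file': '/var/lib/dhcp/dhclient.eth0.leases'}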
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index d1f806d6..be74503b 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -16,7 +16,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from base64 import b64decode
-import os
import re
from cloudinit.cs_utils import Cepko
@@ -45,11 +44,6 @@ class DataSourceCloudSigma(sources.DataSource):
Uses dmi data to detect if this instance of cloud-init is running
in the CloudSigma's infrastructure.
"""
- uname_arch = os.uname()[4]
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmi data on ARM processors
- LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
- return False
LOG.debug("determining hypervisor product name via dmi data")
sys_product_name = util.read_dmi_data("system-product-name")
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 91d6ff13..5c9edabe 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -134,7 +134,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
vd = results.get('vendordata')
self.vendordata_pure = vd
try:
- self.vendordata_raw = openstack.convert_vendordata_json(vd)
+ self.vendordata_raw = sources.convert_vendordata(vd)
except ValueError as e:
LOG.warn("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 44a17a00..fc596e17 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -1,6 +1,7 @@
# vi: ts=4 expandtab
#
# Author: Neal Shrader <neal@digitalocean.com>
+# Author: Ben Howard <bh@digitalocean.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -14,22 +15,27 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit import ec2_utils
+# DigitalOcean Droplet API:
+# https://developers.digitalocean.com/documentation/metadata/
+
+import json
+
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import url_helper
from cloudinit import util
-import functools
-
-
LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://169.254.169.254/metadata/v1/',
- 'mirrors_url': 'http://mirrors.digitalocean.com/'
+ 'metadata_url': 'http://169.254.169.254/metadata/v1.json',
}
-MD_RETRIES = 0
-MD_TIMEOUT = 1
+
+# Wait for up to a minute, retrying the metadata server
+# every 2 seconds.
+MD_RETRIES = 30
+MD_TIMEOUT = 2
+MD_WAIT_RETRY = 2
class DataSourceDigitalOcean(sources.DataSource):
@@ -40,43 +46,61 @@ class DataSourceDigitalOcean(sources.DataSource):
util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
BUILTIN_DS_CONFIG])
self.metadata_address = self.ds_cfg['metadata_url']
+ self.retries = self.ds_cfg.get('retries', MD_RETRIES)
+ self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
+ self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
- if self.ds_cfg.get('retries'):
- self.retries = self.ds_cfg['retries']
- else:
- self.retries = MD_RETRIES
+ def _get_sysinfo(self):
+ # DigitalOcean embeds vendor ID and instance/droplet_id in the
+ # SMBIOS information
- if self.ds_cfg.get('timeout'):
- self.timeout = self.ds_cfg['timeout']
- else:
- self.timeout = MD_TIMEOUT
+ LOG.debug("checking if instance is a DigitalOcean droplet")
+
+ # Detect if we are on DigitalOcean and return the Droplet's ID
+ vendor_name = util.read_dmi_data("system-manufacturer")
+ if vendor_name != "DigitalOcean":
+ return (False, None)
- def get_data(self):
- caller = functools.partial(util.read_file_or_url,
- timeout=self.timeout, retries=self.retries)
+ LOG.info("running on DigitalOcean")
- def mcaller(url):
- return caller(url).contents
+ droplet_id = util.read_dmi_data("system-serial-number")
+ if droplet_id:
+ LOG.debug(("system identified via SMBIOS as DigitalOcean Droplet"
+ "{}").format(droplet_id))
+ else:
+ LOG.critical(("system identified via SMBIOS as a DigitalOcean "
+ "Droplet, but did not provide an ID. Please file a "
+ "support ticket at: "
+ "https://cloud.digitalocean.com/support/tickets/"
+ "new"))
- md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address),
- base_url=self.metadata_address,
- caller=mcaller)
+ return (True, droplet_id)
- self.metadata = md.materialize()
+ def get_data(self, apply_filter=False):
+ (is_do, droplet_id) = self._get_sysinfo()
- if self.metadata.get('id'):
- return True
- else:
+ # only proceed if we know we are on DigitalOcean
+ if not is_do:
return False
- def get_userdata_raw(self):
- return "\n".join(self.metadata['user-data'])
+ LOG.debug("reading metadata from {}".format(self.metadata_address))
+ response = url_helper.readurl(self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries)
- def get_vendordata_raw(self):
- return "\n".join(self.metadata['vendor-data'])
+ contents = util.decode_binary(response.contents)
+ decoded = json.loads(contents)
+
+ self.metadata = decoded
+ self.metadata['instance-id'] = decoded.get('droplet_id', droplet_id)
+ self.metadata['local-hostname'] = decoded.get('hostname', droplet_id)
+ self.vendordata_raw = decoded.get("vendor_data", None)
+ self.userdata_raw = decoded.get("user_data", None)
+ return True
def get_public_ssh_keys(self):
- public_keys = self.metadata['public-keys']
+ public_keys = self.metadata.get('public_keys', [])
if isinstance(public_keys, list):
return public_keys
else:
@@ -84,21 +108,17 @@ class DataSourceDigitalOcean(sources.DataSource):
@property
def availability_zone(self):
- return self.metadata['region']
-
- def get_instance_id(self):
- return self.metadata['id']
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- return self.metadata['hostname']
-
- def get_package_mirror_info(self):
- return self.ds_cfg['mirrors_url']
+ return self.metadata.get('region', 'default')
@property
def launch_index(self):
return None
+ def check_instance_id(self, sys_cfg):
+ return sources.instance_id_matches_system_uuid(
+ self.get_instance_id(), 'system-serial-number')
+
+
# Used to match classes to dependencies
datasources = [
(DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
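For orientation, the droplet metadata service returns a single JSON document
and get_data() above maps it straight into the datasource fields. A sketch
with a fabricated payload (the keys are the ones the code reads; the values
are invented):

    import json

    payload = json.loads('''{
        "droplet_id": 1111111,
        "hostname": "example-droplet",
        "region": "nyc3",
        "public_keys": ["ssh-rsa AAAA... user@host"],
        "vendor_data": "#cloud-config\\n{}",
        "user_data": "#!/bin/sh\\necho hello\\n"
    }''')
    metadata = dict(payload)
    metadata['instance-id'] = payload.get('droplet_id')
    metadata['local-hostname'] = payload.get('hostname')
    vendordata_raw = payload.get('vendor_data')
    userdata_raw = payload.get('user_data')
    # availability_zone then comes from metadata.get('region', 'default')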
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index c660a350..6c12d703 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -31,7 +31,7 @@ REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
class GoogleMetadataFetcher(object):
- headers = {'X-Google-Metadata-Request': True}
+ headers = {'X-Google-Metadata-Request': 'True'}
def __init__(self, metadata_address):
self.metadata_address = metadata_address
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index d828f078..ab93c0a2 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -20,7 +20,6 @@
from __future__ import print_function
-import errno
import os
import time
@@ -32,7 +31,14 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
MD_VERSION = "2012-03-01"
-BINARY_FIELDS = ('user-data',)
+DS_FIELDS = [
+ # remote path, location in dictionary, binary data?, optional?
+ ("meta-data/instance-id", 'meta-data/instance-id', False, False),
+ ("meta-data/local-hostname", 'meta-data/local-hostname', False, False),
+ ("meta-data/public-keys", 'meta-data/public-keys', False, True),
+ ('meta-data/vendor-data', 'vendor-data', True, True),
+ ('user-data', 'user-data', True, True),
+]
class DataSourceMAAS(sources.DataSource):
@@ -43,6 +49,7 @@ class DataSourceMAAS(sources.DataSource):
instance-id
user-data
hostname
+ vendor-data
"""
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -71,10 +78,7 @@ class DataSourceMAAS(sources.DataSource):
mcfg = self.ds_cfg
try:
- (userdata, metadata) = read_maas_seed_dir(self.seed_dir)
- self.userdata_raw = userdata
- self.metadata = metadata
- self.base_url = self.seed_dir
+ self._set_data(self.seed_dir, read_maas_seed_dir(self.seed_dir))
return True
except MAASSeedDirNone:
pass
@@ -95,18 +99,29 @@ class DataSourceMAAS(sources.DataSource):
if not self.wait_for_metadata_service(url):
return False
- self.base_url = url
-
- (userdata, metadata) = read_maas_seed_url(
- self.base_url, read_file_or_url=self.oauth_helper.readurl,
- paths=self.paths, retries=1)
- self.userdata_raw = userdata
- self.metadata = metadata
+ self._set_data(
+ url, read_maas_seed_url(
+ url, read_file_or_url=self.oauth_helper.readurl,
+ paths=self.paths, retries=1))
return True
except Exception:
util.logexc(LOG, "Failed fetching metadata from url %s", url)
return False
+ def _set_data(self, url, data):
+ # takes a url for base_url and a tuple of userdata, metadata, vd.
+ self.base_url = url
+ ud, md, vd = data
+ self.userdata_raw = ud
+ self.metadata = md
+ self.vendordata_pure = vd
+ if vd:
+ try:
+ self.vendordata_raw = sources.convert_vendordata(vd)
+ except ValueError as e:
+ LOG.warn("Invalid content in vendor-data: %s", e)
+ self.vendordata_raw = None
+
def wait_for_metadata_service(self, url):
mcfg = self.ds_cfg
max_wait = 120
@@ -126,6 +141,8 @@ class DataSourceMAAS(sources.DataSource):
LOG.warn("Failed to get timeout, using %s" % timeout)
starttime = time.time()
+ if url.endswith("/"):
+ url = url[:-1]
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
url = self.oauth_helper.wait_for_url(
@@ -141,27 +158,13 @@ class DataSourceMAAS(sources.DataSource):
def read_maas_seed_dir(seed_d):
- """
- Return user-data and metadata for a maas seed dir in seed_d.
- Expected format of seed_d are the following files:
- * instance-id
- * local-hostname
- * user-data
- """
- if not os.path.isdir(seed_d):
+ if seed_d.startswith("file://"):
+ seed_d = seed_d[7:]
+ if not os.path.isdir(seed_d) or len(os.listdir(seed_d)) == 0:
raise MAASSeedDirNone("%s: not a directory" % seed_d)
- files = ('local-hostname', 'instance-id', 'user-data', 'public-keys')
- md = {}
- for fname in files:
- try:
- md[fname] = util.load_file(os.path.join(seed_d, fname),
- decode=fname not in BINARY_FIELDS)
- except IOError as e:
- if e.errno != errno.ENOENT:
- raise
-
- return check_seed_contents(md, seed_d)
+ # with version=None the seed dir itself is read, not seed_dir/VERSION
+ return read_maas_seed_url("file://%s" % seed_d, version=None)
def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
@@ -175,73 +178,78 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
* <seed_url>/<version>/meta-data/instance-id
* <seed_url>/<version>/meta-data/local-hostname
* <seed_url>/<version>/user-data
+ If version is None, then <version>/ will not be used.
"""
- base_url = "%s/%s" % (seed_url, version)
- file_order = [
- 'local-hostname',
- 'instance-id',
- 'public-keys',
- 'user-data',
- ]
- files = {
- 'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'),
- 'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'),
- 'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
- 'user-data': "%s/%s" % (base_url, 'user-data'),
- }
-
if read_file_or_url is None:
read_file_or_url = util.read_file_or_url
+ if seed_url.endswith("/"):
+ seed_url = seed_url[:-1]
+
md = {}
- for name in file_order:
- url = files.get(name)
- if name == 'user-data':
- item_retries = 0
+ for path, dictname, binary, optional in DS_FIELDS:
+ if version is None:
+ url = "%s/%s" % (seed_url, path)
else:
- item_retries = retries
-
+ url = "%s/%s/%s" % (seed_url, version, path)
try:
ssl_details = util.fetch_ssl_details(paths)
- resp = read_file_or_url(url, retries=item_retries,
- timeout=timeout, ssl_details=ssl_details)
+ resp = read_file_or_url(url, retries=retries, timeout=timeout,
+ ssl_details=ssl_details)
if resp.ok():
- if name in BINARY_FIELDS:
- md[name] = resp.contents
+ if binary:
+ md[path] = resp.contents
else:
- md[name] = util.decode_binary(resp.contents)
+ md[path] = util.decode_binary(resp.contents)
else:
LOG.warn(("Fetching from %s resulted in"
" an invalid http code %s"), url, resp.code)
except url_helper.UrlError as e:
- if e.code != 404:
- raise
+ if e.code == 404 and not optional:
+ raise MAASSeedDirMalformed(
+ "Missing required %s: %s" % (path, e))
+ elif e.code != 404:
+ raise e
+
return check_seed_contents(md, seed_url)
def check_seed_contents(content, seed):
- """Validate if content is Is the content a dict that is valid as a
- return for a datasource.
- Either return a (userdata, metadata) tuple or
+ """Validate if dictionary content valid as a return for a datasource.
+ Either return a (userdata, metadata, vendordata) tuple or
Raise MAASSeedDirMalformed or MAASSeedDirNone
"""
- md_required = ('instance-id', 'local-hostname')
- if len(content) == 0:
+ ret = {}
+ missing = []
+ for spath, dpath, _binary, optional in DS_FIELDS:
+ if spath not in content:
+ if not optional:
+ missing.append(spath)
+ continue
+
+ if "/" in dpath:
+ top, _, p = dpath.partition("/")
+ if top not in ret:
+ ret[top] = {}
+ ret[top][p] = content[spath]
+ else:
+ ret[dpath] = content[spath]
+
+ if len(ret) == 0:
raise MAASSeedDirNone("%s: no data files found" % seed)
- found = list(content.keys())
- missing = [k for k in md_required if k not in found]
- if len(missing):
+ if missing:
raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
- userdata = content.get('user-data', b"")
- md = {}
- for (key, val) in content.items():
- if key == 'user-data':
- continue
- md[key] = val
+ vd_data = None
+ if ret.get('vendor-data'):
+ err = object()
+ vd_data = util.load_yaml(ret.get('vendor-data'), default=err,
+ allowed=(object))
+ if vd_data is err:
+ raise MAASSeedDirMalformed("vendor-data was not loadable as yaml.")
- return (userdata, md)
+ return ret.get('user-data'), ret.get('meta-data'), vd_data
class MAASSeedDirNone(Exception):
@@ -272,6 +280,7 @@ if __name__ == "__main__":
"""
import argparse
import pprint
+ import sys
parser = argparse.ArgumentParser(description='Interact with MAAS DS')
parser.add_argument("--config", metavar="file",
@@ -289,17 +298,25 @@ if __name__ == "__main__":
default=MD_VERSION)
subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
- subcmds.add_parser('crawl', help="crawl the datasource")
- subcmds.add_parser('get', help="do a single GET of provided url")
- subcmds.add_parser('check-seed', help="read andn verify seed at url")
-
- parser.add_argument("url", help="the data source to query")
+ for (name, help) in (('crawl', 'crawl the datasource'),
+ ('get', 'do a single GET of provided url'),
+ ('check-seed', 'read and verify seed at url')):
+ p = subcmds.add_parser(name, help=help)
+ p.add_argument("url", help="the datasource url", nargs='?',
+ default=None)
args = parser.parse_args()
creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
'token_secret': args.tsec, 'consumer_secret': args.csec}
+ maaspkg_cfg = "/etc/cloud/cloud.cfg.d/90_dpkg_maas.cfg"
+ if (args.config is None and args.url is None and
+ os.path.exists(maaspkg_cfg) and
+ os.access(maaspkg_cfg, os.R_OK)):
+ sys.stderr.write("Used config in %s.\n" % maaspkg_cfg)
+ args.config = maaspkg_cfg
+
if args.config:
cfg = util.read_conf(args.config)
if 'datasource' in cfg:
@@ -307,6 +324,12 @@ if __name__ == "__main__":
for key in creds.keys():
if key in cfg and creds[key] is None:
creds[key] = cfg[key]
+ if args.url is None and 'metadata_url' in cfg:
+ args.url = cfg['metadata_url']
+
+ if args.url is None:
+ sys.stderr.write("Must provide a url or a config with url.\n")
+ sys.exit(1)
oauth_helper = url_helper.OauthUrlHelper(**creds)
@@ -331,16 +354,20 @@ if __name__ == "__main__":
printurl(url)
if args.subcmd == "check-seed":
+ sys.stderr.write("Checking seed at %s\n" % args.url)
readurl = oauth_helper.readurl
if args.url[0] == "/" or args.url.startswith("file://"):
- readurl = None
- (userdata, metadata) = read_maas_seed_url(
- args.url, version=args.apiver, read_file_or_url=readurl,
- retries=2)
- print("=== userdata ===")
- print(userdata.decode())
- print("=== metadata ===")
+ (userdata, metadata, vd) = read_maas_seed_dir(args.url)
+ else:
+ (userdata, metadata, vd) = read_maas_seed_url(
+ args.url, version=args.apiver, read_file_or_url=readurl,
+ retries=2)
+ print("=== user-data ===")
+ print("N/A" if userdata is None else userdata.decode())
+ print("=== meta-data ===")
pprint.pprint(metadata)
+ print("=== vendor-data ===")
+ pprint.pprint("N/A" if vd is None else vd)
elif args.subcmd == "get":
printurl(args.url)
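Each DS_FIELDS tuple drives exactly one fetch in read_maas_seed_url(). A
sketch of the URLs that loop constructs, with an invented seed URL:

    DS_FIELDS = [  # (remote path, dict location, binary?, optional?)
        ('meta-data/instance-id', 'meta-data/instance-id', False, False),
        ('meta-data/local-hostname', 'meta-data/local-hostname', False, False),
        ('meta-data/public-keys', 'meta-data/public-keys', False, True),
        ('meta-data/vendor-data', 'vendor-data', True, True),
        ('user-data', 'user-data', True, True),
    ]

    seed_url = 'http://10.55.60.1/MAAS/metadata'  # invented
    version = '2012-03-01'
    for path, _dictname, _binary, _optional in DS_FIELDS:
        if version is None:
            url = '%s/%s' % (seed_url, path)
        else:
            url = '%s/%s/%s' % (seed_url, version, path)
        print(url)
    # http://10.55.60.1/MAAS/metadata/2012-03-01/meta-data/instance-id
    # ... and so on; with version=None (the seed-dir case) the version
    # component is dropped from every URL.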
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index cdc9eef5..e6a0b5fe 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -52,7 +52,7 @@ class DataSourceNoCloud(sources.DataSource):
found = []
mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
- 'network-config': {}}
+ 'network-config': None}
try:
# Parse the kernel command line, getting data passed in
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 7b3a76b9..635a836c 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -407,7 +407,7 @@ def read_context_disk_dir(source_dir, asuser=None):
# http://opennebula.org/documentation:rel3.8:cong#network_configuration
for k in context:
if re.match(r'^ETH\d+_IP$', k):
- (out, _) = util.subp(['/sbin/ip', 'link'])
+ (out, _) = util.subp(['ip', 'link'])
net = OpenNebulaNetwork(out, context)
results['network-interfaces'] = net.gen_conf()
break
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index c06d17f3..82558214 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -138,7 +138,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
vd = results.get('vendordata')
self.vendordata_pure = vd
try:
- self.vendordata_raw = openstack.convert_vendordata_json(vd)
+ self.vendordata_raw = sources.convert_vendordata(vd)
except ValueError as e:
LOG.warn("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index ccc86883..143ab368 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -60,11 +60,15 @@ SMARTOS_ATTRIB_MAP = {
'availability_zone': ('sdc:datacenter_name', True),
'vendor-data': ('sdc:vendor-data', False),
'operator-script': ('sdc:operator-script', False),
+ 'hostname': ('sdc:hostname', True),
+ 'dns_domain': ('sdc:dns_domain', True),
}
SMARTOS_ATTRIB_JSON = {
# Cloud-init Key : (SmartOS Key known JSON)
'network-data': 'sdc:nics',
+ 'dns_servers': 'sdc:resolvers',
+ 'routes': 'sdc:routes',
}
SMARTOS_ENV_LX_BRAND = "lx-brand"
@@ -311,7 +315,10 @@ class DataSourceSmartOS(sources.DataSource):
if self._network_config is None:
if self.network_data is not None:
self._network_config = (
- convert_smartos_network_data(self.network_data))
+ convert_smartos_network_data(
+ network_data=self.network_data,
+ dns_servers=self.metadata['dns_servers'],
+ dns_domain=self.metadata['dns_domain']))
return self._network_config
@@ -445,7 +452,8 @@ class JoyentMetadataClient(object):
class JoyentMetadataSocketClient(JoyentMetadataClient):
- def __init__(self, socketpath):
+ def __init__(self, socketpath, smartos_type=SMARTOS_ENV_LX_BRAND):
+ super(JoyentMetadataSocketClient, self).__init__(smartos_type)
self.socketpath = socketpath
def open_transport(self):
@@ -461,7 +469,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
class JoyentMetadataSerialClient(JoyentMetadataClient):
- def __init__(self, device, timeout=10, smartos_type=None):
+ def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM):
super(JoyentMetadataSerialClient, self).__init__(smartos_type)
self.device = device
self.timeout = timeout
@@ -583,7 +591,8 @@ def jmc_client_factory(
device=serial_device, timeout=serial_timeout,
smartos_type=smartos_type)
elif smartos_type == SMARTOS_ENV_LX_BRAND:
- return JoyentMetadataSocketClient(socketpath=metadata_sockfile)
+ return JoyentMetadataSocketClient(socketpath=metadata_sockfile,
+ smartos_type=smartos_type)
raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
@@ -644,14 +653,8 @@ def write_boot_content(content, content_f, link=None, shebang=False,
util.logexc(LOG, "failed establishing content link: %s", e)
-def get_smartos_environ(uname_version=None, product_name=None,
- uname_arch=None):
+def get_smartos_environ(uname_version=None, product_name=None):
uname = os.uname()
- if uname_arch is None:
- uname_arch = uname[4]
-
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- return None
# SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
# report 'BrandZ virtual linux' as the kernel version
@@ -671,8 +674,9 @@ def get_smartos_environ(uname_version=None, product_name=None,
return None
-# Covert SMARTOS 'sdc:nics' data to network_config yaml
-def convert_smartos_network_data(network_data=None):
+# Convert SMARTOS 'sdc:nics' data to network_config yaml
+def convert_smartos_network_data(network_data=None,
+ dns_servers=None, dns_domain=None):
"""Return a dictionary of network_config by parsing provided
SMARTOS sdc:nics configuration data
@@ -706,9 +710,7 @@ def convert_smartos_network_data(network_data=None):
'broadcast',
'dns_nameservers',
'dns_search',
- 'gateway',
'metric',
- 'netmask',
'pointopoint',
'routes',
'scope',
@@ -716,6 +718,29 @@ def convert_smartos_network_data(network_data=None):
],
}
+ if dns_servers:
+ if not isinstance(dns_servers, (list, tuple)):
+ dns_servers = [dns_servers]
+ else:
+ dns_servers = []
+
+ if dns_domain:
+ if not isinstance(dns_domain, (list, tuple)):
+ dns_domain = [dns_domain]
+ else:
+ dns_domain = []
+
+ def is_valid_ipv4(addr):
+ return '.' in addr
+
+ def is_valid_ipv6(addr):
+ return ':' in addr
+
+ pgws = {
+ 'ipv4': {'match': is_valid_ipv4, 'gw': None},
+ 'ipv6': {'match': is_valid_ipv6, 'gw': None},
+ }
+
config = []
for nic in network_data:
cfg = dict((k, v) for k, v in nic.items()
@@ -727,18 +752,40 @@ def convert_smartos_network_data(network_data=None):
cfg.update({'mac_address': nic['mac']})
subnets = []
- for ip, gw in zip(nic['ips'], nic['gateways']):
- subnet = dict((k, v) for k, v in nic.items()
- if k in valid_keys['subnet'])
- subnet.update({
- 'type': 'static',
- 'address': ip,
- 'gateway': gw,
- })
+ for ip in nic.get('ips', []):
+ if ip == "dhcp":
+ subnet = {'type': 'dhcp4'}
+ else:
+ subnet = dict((k, v) for k, v in nic.items()
+ if k in valid_keys['subnet'])
+ subnet.update({
+ 'type': 'static',
+ 'address': ip,
+ })
+
+ proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6'
+ # Only use gateways for 'primary' nics
+ if 'primary' in nic and nic.get('primary', False):
+ # the ips and gateways lists may be N to M; we do not
+ # map by index. only the first gateway that matches this
+ # subnet's address family is consumed, and only for the
+ # primary nic.
+ if not pgws[proto]['gw']:
+ gateways = [gw for gw in nic.get('gateways', [])
+ if pgws[proto]['match'](gw)]
+ if len(gateways):
+ pgws[proto]['gw'] = gateways[0]
+ subnet.update({'gateway': pgws[proto]['gw']})
+
subnets.append(subnet)
cfg.update({'subnets': subnets})
config.append(cfg)
+ if dns_servers:
+ config.append(
+ {'type': 'nameserver', 'address': dns_servers,
+ 'search': dns_domain})
+
return {'version': 1, 'config': config}
@@ -761,21 +808,36 @@ if __name__ == "__main__":
sys.exit(1)
if len(sys.argv) == 1:
keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
- list(SMARTOS_ATTRIB_MAP.keys()))
+ list(SMARTOS_ATTRIB_MAP.keys()) + ['network_config'])
else:
keys = sys.argv[1:]
- data = {}
- for key in keys:
+ def load_key(client, key, data):
+ if key in data:
+ return data[key]
+
if key in SMARTOS_ATTRIB_JSON:
keyname = SMARTOS_ATTRIB_JSON[key]
- data[key] = jmc.get_json(keyname)
+ data[key] = client.get_json(keyname)
+ elif key == "network_config":
+ for depkey in ('network-data', 'dns_servers', 'dns_domain'):
+ load_key(client, depkey, data)
+ data[key] = convert_smartos_network_data(
+ network_data=data['network-data'],
+ dns_servers=data['dns_servers'],
+ dns_domain=data['dns_domain'])
else:
if key in SMARTOS_ATTRIB_MAP:
keyname, strip = SMARTOS_ATTRIB_MAP[key]
else:
keyname, strip = (key, False)
- val = jmc.get(keyname, strip=strip)
- data[key] = jmc.get(keyname, strip=strip)
+ data[key] = client.get(keyname, strip=strip)
+
+ return data[key]
+
+ data = {}
+ for key in keys:
+ load_key(client=jmc, key=key, data=data)
- print(json.dumps(data, indent=1))
+ print(json.dumps(data, indent=1, sort_keys=True,
+ separators=(',', ': ')))
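The primary-gateway handling above is the subtle part: per address family,
only the first matching gateway of a nic marked primary is consumed. A
fabricated sdc:nics entry and the (abbreviated) network config it would
convert to:

    nics = [{                              # fabricated sdc:nics entry
        'mac': '00:16:3e:00:00:01',
        'primary': True,
        'ips': ['8.12.42.102/24', 'dhcp'],
        'gateways': ['8.12.42.1'],
    }]
    # convert_smartos_network_data(network_data=nics,
    #                              dns_servers=['8.8.8.8'],
    #                              dns_domain='example.com')
    # would yield, abbreviated:
    # {'version': 1, 'config': [
    #     {'mac_address': '00:16:3e:00:00:01',
    #      'subnets': [{'type': 'static', 'address': '8.12.42.102/24',
    #                   'gateway': '8.12.42.1'},
    #                  {'type': 'dhcp4'}]},
    #     {'type': 'nameserver', 'address': ['8.8.8.8'],
    #      'search': ['example.com']}]}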
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 87b8e524..d1395270 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -21,8 +21,8 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
+import copy
import os
-
import six
from cloudinit import importer
@@ -355,6 +355,31 @@ def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
return instance_id.lower() == dmi_value.lower()
+def convert_vendordata(data, recurse=True):
+ """data: a loaded object (strings, arrays, dicts).
+ return something suitable for cloudinit vendordata_raw.
+
+ if data is:
+ None: return None
+ string: return string
+ list: return data
+ the list is then processed in UserDataProcessor
+ dict: return convert_vendordata(data.get('cloud-init'))
+ """
+ if not data:
+ return None
+ if isinstance(data, six.string_types):
+ return data
+ if isinstance(data, list):
+ return copy.deepcopy(data)
+ if isinstance(data, dict):
+ if recurse is True:
+ return convert_vendordata(data.get('cloud-init'),
+ recurse=False)
+ raise ValueError("vendordata['cloud-init'] cannot be dict")
+ raise ValueError("Unknown data type for vendordata: %s" % type(data))
+
+
# 'depends' is a list of dependencies (DEP_FILESYSTEM)
# ds_list is a list of 2 item lists
# ds_list = [
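The behaviour convert_vendordata() documents, spelled out with hypothetical
inputs (assumes this tree is on the import path):

    from cloudinit.sources import convert_vendordata

    convert_vendordata(None)                       # -> None
    convert_vendordata('#cloud-config\n{}')        # -> the string unchanged
    convert_vendordata(['part-1', 'part-2'])       # -> a deep copy of the list
    convert_vendordata({'cloud-init': 'payload'})  # -> 'payload'
    # convert_vendordata({'cloud-init': {'a': 1}}) raises ValueError:
    # a dict under 'cloud-init' is rejected rather than recursed into again.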
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 63ccf10e..689ed4cc 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -1,3 +1,4 @@
+import json
import logging
import os
import re
@@ -6,6 +7,7 @@ import struct
import tempfile
import time
+from cloudinit import stages
from contextlib import contextmanager
from xml.etree import ElementTree
@@ -187,19 +189,33 @@ class WALinuxAgentShim(object):
' </Container>',
'</Health>'])
- def __init__(self):
- LOG.debug('WALinuxAgentShim instantiated...')
- self.endpoint = self.find_endpoint()
+ def __init__(self, fallback_lease_file=None):
+ LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
+ fallback_lease_file)
+ self.dhcpoptions = None
+ self._endpoint = None
self.openssl_manager = None
self.values = {}
+ self.lease_file = fallback_lease_file
def clean_up(self):
if self.openssl_manager is not None:
self.openssl_manager.clean_up()
@staticmethod
- def get_ip_from_lease_value(lease_value):
- unescaped_value = lease_value.replace('\\', '')
+ def _get_hooks_dir():
+ _paths = stages.Init()
+ return os.path.join(_paths.paths.get_runpath(), "dhclient.hooks")
+
+ @property
+ def endpoint(self):
+ if self._endpoint is None:
+ self._endpoint = self.find_endpoint(self.lease_file)
+ return self._endpoint
+
+ @staticmethod
+ def get_ip_from_lease_value(fallback_lease_value):
+ unescaped_value = fallback_lease_value.replace('\\', '')
if len(unescaped_value) > 4:
hex_string = ''
for hex_pair in unescaped_value.split(':'):
@@ -213,15 +229,75 @@ class WALinuxAgentShim(object):
return socket.inet_ntoa(packed_bytes)
@staticmethod
- def find_endpoint():
- LOG.debug('Finding Azure endpoint...')
- content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
- value = None
+ def _get_value_from_leases_file(fallback_lease_file):
+ leases = []
+ content = util.load_file(fallback_lease_file)
+ LOG.debug("content is {}".format(content))
for line in content.splitlines():
if 'unknown-245' in line:
- value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
+ # Example line from Ubuntu
+ # option unknown-245 a8:3f:81:10;
+ leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"'))
+ # Return the "most recent" one in the list
+ if len(leases) < 1:
+ return None
+ else:
+ return leases[-1]
+
+ @staticmethod
+ def _load_dhclient_json():
+ dhcp_options = {}
+ hooks_dir = WALinuxAgentShim._get_hooks_dir()
+ if not os.path.exists(hooks_dir):
+ LOG.debug("%s not found.", hooks_dir)
+ return None
+ hook_files = [os.path.join(hooks_dir, x)
+ for x in os.listdir(hooks_dir)]
+ for hook_file in hook_files:
+ try:
+ name = os.path.basename(hook_file).replace('.json', '')
+ dhcp_options[name] = json.loads(util.load_file((hook_file)))
+ except ValueError:
+ raise ValueError("%s is not valid JSON data", hook_file)
+ return dhcp_options
+
+ @staticmethod
+ def _get_value_from_dhcpoptions(dhcp_options):
+ if dhcp_options is None:
+ return None
+ # the MS endpoint server is given to us as DHCP option 245
+ _value = None
+ for interface in dhcp_options:
+ _value = dhcp_options[interface].get('unknown_245', None)
+ if _value is not None:
+ LOG.debug("Endpoint server found in dhclient options")
+ break
+ return _value
+
+ @staticmethod
+ def find_endpoint(fallback_lease_file=None):
+ LOG.debug('Finding Azure endpoint...')
+ value = None
+ # Option-245 is stored in /run/cloud-init/dhclient.hooks/<ifc>.json by
+ # a dhclient exit hook that calls cloud-init-dhclient-hook
+ dhcp_options = WALinuxAgentShim._load_dhclient_json()
+ value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
if value is None:
- raise ValueError('No endpoint found in DHCP config.')
+ # Fallback and check the leases file if unsuccessful
+ LOG.debug("Unable to find endpoint in dhclient logs. "
+ " Falling back to check lease files")
+ if fallback_lease_file is None:
+ LOG.warn("No fallback lease file was specified.")
+ value = None
+ else:
+ LOG.debug("Looking for endpoint in lease file %s",
+ fallback_lease_file)
+ value = WALinuxAgentShim._get_value_from_leases_file(
+ fallback_lease_file)
+
+ if value is None:
+ raise ValueError('No endpoint found.')
+
endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
return endpoint_ip_address
@@ -271,8 +347,8 @@ class WALinuxAgentShim(object):
LOG.info('Reported ready to Azure fabric.')
-def get_metadata_from_fabric():
- shim = WALinuxAgentShim()
+def get_metadata_from_fabric(fallback_lease_file=None):
+ shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file)
try:
return shim.register_with_azure_and_fetch_data()
finally:
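The unknown-245 value is the Azure fabric endpoint encoded as colon-separated
hex bytes. Roughly what get_ip_from_lease_value() does with the sample lease
value shown above:

    import socket
    import struct

    value = 'a8:3f:81:10'  # as in the example lease line above
    hex_string = ''.join(pair if len(pair) == 2 else '0' + pair
                         for pair in value.replace('\\', '').split(':'))
    endpoint = socket.inet_ntoa(struct.pack('>L', int(hex_string, 16)))
    # endpoint == '168.63.129.16'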
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 2e7a1d47..a5a2a1d6 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -539,6 +539,10 @@ def convert_net_json(network_json=None, known_macs=None):
networks = network_json.get('networks', [])
services = network_json.get('services', [])
+ link_updates = []
+ link_id_info = {}
+ bond_name_fmt = "bond%d"
+ bond_number = 0
config = []
for link in links:
subnets = []
@@ -551,6 +555,13 @@ def convert_net_json(network_json=None, known_macs=None):
if 'name' in link:
cfg['name'] = link['name']
+ if link.get('ethernet_mac_address'):
+ link_id_info[link['id']] = link.get('ethernet_mac_address')
+
+ curinfo = {'name': cfg.get('name'),
+ 'mac': link.get('ethernet_mac_address'),
+ 'id': link['id'], 'type': link['type']}
+
for network in [n for n in networks
if n['link'] == link['id']]:
subnet = dict((k, v) for k, v in network.items()
@@ -571,7 +582,7 @@ def convert_net_json(network_json=None, known_macs=None):
subnet['ipv6'] = True
subnets.append(subnet)
cfg.update({'subnets': subnets})
- if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
+ if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge', 'tap']:
cfg.update({
'type': 'physical',
'mac_address': link['ethernet_mac_address']})
@@ -582,31 +593,56 @@ def convert_net_json(network_json=None, known_macs=None):
continue
elif k.startswith('bond'):
params.update({k: v})
- cfg.update({
- 'bond_interfaces': copy.deepcopy(link['bond_links']),
- 'params': params,
- })
+
+ # openstack does not provide a name for the bond.
+ # they do provide an 'id', but that is possibly nonsensical.
+ # so we just create our own name.
+ link_name = bond_name_fmt % bond_number
+ bond_number += 1
+
+ # bond_links reference links by their id, but we need to add
+ # them to the network config by their nic name.
+ # store them in link_updates and resolve the names later.
+ link_updates.append(
+ (cfg, 'bond_interfaces', '%s',
+ copy.deepcopy(link['bond_links']))
+ )
+ cfg.update({'params': params, 'name': link_name})
+
+ curinfo['name'] = link_name
elif link['type'] in ['vlan']:
+ name = "%s.%s" % (link['vlan_link'], link['vlan_id'])
cfg.update({
- 'name': "%s.%s" % (link['vlan_link'],
- link['vlan_id']),
- 'vlan_link': link['vlan_link'],
+ 'name': name,
'vlan_id': link['vlan_id'],
'mac_address': link['vlan_mac_address'],
})
+ link_updates.append((cfg, 'vlan_link', '%s', link['vlan_link']))
+ link_updates.append((cfg, 'name', "%%s.%s" % link['vlan_id'],
+ link['vlan_link']))
+ curinfo.update({'mac': link['vlan_mac_address'],
+ 'name': name})
else:
raise ValueError(
'Unknown network_data link type: %s' % link['type'])
config.append(cfg)
+ link_id_info[curinfo['id']] = curinfo
need_names = [d for d in config
if d.get('type') == 'physical' and 'name' not in d]
- if need_names:
+ if need_names or link_updates:
if known_macs is None:
known_macs = net.get_interfaces_by_mac()
+ # go through and fill out the link_id_info with names
+ for link_id, info in link_id_info.items():
+ if info.get('name'):
+ continue
+ if info.get('mac') in known_macs:
+ info['name'] = known_macs[info['mac']]
+
for d in need_names:
mac = d.get('mac_address')
if not mac:
@@ -615,34 +651,15 @@ def convert_net_json(network_json=None, known_macs=None):
raise ValueError("Unable to find a system nic for %s" % d)
d['name'] = known_macs[mac]
+ for cfg, key, fmt, target in link_updates:
+ if isinstance(target, (list, tuple)):
+ cfg[key] = [fmt % link_id_info[l]['name'] for l in target]
+ else:
+ cfg[key] = fmt % link_id_info[target]['name']
+
for service in services:
cfg = service
cfg.update({'type': 'nameserver'})
config.append(cfg)
return {'version': 1, 'config': config}
-
-
-def convert_vendordata_json(data, recurse=True):
- """data: a loaded json *object* (strings, arrays, dicts).
- return something suitable for cloudinit vendordata_raw.
-
- if data is:
- None: return None
- string: return string
- list: return data
- the list is then processed in UserDataProcessor
- dict: return convert_vendordata_json(data.get('cloud-init'))
- """
- if not data:
- return None
- if isinstance(data, six.string_types):
- return data
- if isinstance(data, list):
- return copy.deepcopy(data)
- if isinstance(data, dict):
- if recurse is True:
- return convert_vendordata_json(data.get('cloud-init'),
- recurse=False)
- raise ValueError("vendordata['cloud-init'] cannot be dict")
- raise ValueError("Unknown data type for vendordata: %s" % type(data))
diff --git a/cloudinit/util.py b/cloudinit/util.py
index e5dd61a0..7c37eb8f 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -61,6 +61,10 @@ from cloudinit import version
from cloudinit.settings import (CFG_BUILTIN)
+try:
+ string_types = (basestring,)
+except NameError:
+ string_types = (str,)
_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)
@@ -82,6 +86,71 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
PROC_CMDLINE = None
+_LSB_RELEASE = {}
+
+
+def get_architecture(target=None):
+ out, _ = subp(['dpkg', '--print-architecture'], capture=True,
+ target=target)
+ return out.strip()
+
+
+def _lsb_release(target=None):
+ fmap = {'Codename': 'codename', 'Description': 'description',
+ 'Distributor ID': 'id', 'Release': 'release'}
+
+ data = {}
+ try:
+ out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
+ for line in out.splitlines():
+ fname, _, val = line.partition(":")
+ if fname in fmap:
+ data[fmap[fname]] = val.strip()
+ missing = [k for k in fmap.values() if k not in data]
+ if len(missing):
+ LOG.warn("Missing fields in lsb_release --all output: %s",
+ ','.join(missing))
+
+ except ProcessExecutionError as err:
+ LOG.warn("Unable to get lsb_release --all: %s", err)
+ data = dict((v, "UNAVAILABLE") for v in fmap.values())
+
+ return data
+
+
+def lsb_release(target=None):
+ if target_path(target) != "/":
+ # do not use or update cache if target is provided
+ return _lsb_release(target)
+
+ global _LSB_RELEASE
+ if not _LSB_RELEASE:
+ data = _lsb_release()
+ _LSB_RELEASE.update(data)
+ return _LSB_RELEASE
+
+
+def target_path(target, path=None):
+ # return 'path' inside target, accepting target as None
+ if target in (None, ""):
+ target = "/"
+ elif not isinstance(target, string_types):
+ raise ValueError("Unexpected input for target: %s" % target)
+ else:
+ target = os.path.abspath(target)
+ # abspath("//") returns "//" specifically for 2 slashes.
+ if target.startswith("//"):
+ target = target[1:]
+
+ if not path:
+ return target
+
+ # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
+ while len(path) and path[0] == "/":
+ path = path[1:]
+
+ return os.path.join(target, path)
+
def decode_binary(blob, encoding='utf-8'):
# Converts a binary type into a text type using given encoding.
@@ -1570,6 +1639,11 @@ def get_builtin_cfg():
return obj_copy.deepcopy(CFG_BUILTIN)
+def is_link(path):
+ LOG.debug("Testing if a link exists for %s", path)
+ return os.path.islink(path)
+
+
def sym_link(source, link, force=False):
LOG.debug("Creating symbolic link from %r => %r", link, source)
if force and os.path.exists(link):
@@ -1688,10 +1762,20 @@ def delete_dir_contents(dirname):
def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
- logstring=False):
+ logstring=False, decode="replace", target=None):
+
+ # not supported in cloud-init (yet), for now kept in the call signature
+ # to ease maintaining code shared between cloud-init and curtin
+ if target is not None:
+ raise ValueError("target arg not supported by cloud-init")
+
if rcs is None:
rcs = [0]
+
+ devnull_fp = None
try:
+ if target_path(target) != "/":
+ args = ['chroot', target] + list(args)
if not logstring:
LOG.debug(("Running command %s with allowed return codes %s"
@@ -1700,33 +1784,52 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
LOG.debug(("Running hidden command to protect sensitive "
"input/output logstring: %s"), logstring)
- if not capture:
- stdout = None
- stderr = None
- else:
+ stdin = None
+ stdout = None
+ stderr = None
+ if capture:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
- stdin = subprocess.PIPE
- kws = dict(stdout=stdout, stderr=stderr, stdin=stdin,
- env=env, shell=shell)
- if six.PY3:
- # Use this so subprocess output will be (Python 3) str, not bytes.
- kws['universal_newlines'] = True
- sp = subprocess.Popen(args, **kws)
+ if data is None:
+ # using devnull ensures any reads hit EOF, rather
+ # than possibly waiting on input.
+ devnull_fp = open(os.devnull)
+ stdin = devnull_fp
+ else:
+ stdin = subprocess.PIPE
+ if not isinstance(data, bytes):
+ data = data.encode()
+
+ sp = subprocess.Popen(args, stdout=stdout,
+ stderr=stderr, stdin=stdin,
+ env=env, shell=shell)
(out, err) = sp.communicate(data)
+
+ # Just ensure blank instead of none.
+ if not out and capture:
+ out = b''
+ if not err and capture:
+ err = b''
+ if decode:
+ def ldecode(data, m='utf-8'):
+ if not isinstance(data, bytes):
+ return data
+ return data.decode(m, errors=decode)
+
+ out = ldecode(out)
+ err = ldecode(err)
except OSError as e:
raise ProcessExecutionError(cmd=args, reason=e,
errno=e.errno)
+ finally:
+ if devnull_fp:
+ devnull_fp.close()
+
rc = sp.returncode
if rc not in rcs:
raise ProcessExecutionError(stdout=out, stderr=err,
exit_code=rc,
cmd=args)
- # Just ensure blank instead of none?? (iff capturing)
- if not out and capture:
- out = ''
- if not err and capture:
- err = ''
return (out, err)
@@ -2227,10 +2330,17 @@ def read_dmi_data(key):
If all of the above fail to find a value, None will be returned.
"""
+
syspath_value = _read_dmi_syspath(key)
if syspath_value is not None:
return syspath_value
+ # running dmidecode can be problematic on some arches (LP: #1243287)
+ uname_arch = os.uname()[4]
+ if uname_arch.startswith("arm") or uname_arch == "aarch64":
+ LOG.debug("dmidata is not supported on %s", uname_arch)
+ return None
+
dmidecode_path = which('dmidecode')
if dmidecode_path:
return _call_dmidecode(key, dmidecode_path)
@@ -2244,3 +2354,18 @@ def message_from_string(string):
if sys.version_info[:2] < (2, 7):
return email.message_from_file(six.StringIO(string))
return email.message_from_string(string)
+
+
+def get_installed_packages(target=None):
+ (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
+
+ pkgs_inst = set()
+ for line in out.splitlines():
+ try:
+ (state, pkg, _) = line.split(None, 2)
+ except ValueError:
+ continue
+ if state.startswith("hi") or state.startswith("ii"):
+ pkgs_inst.add(re.sub(":.*", "", pkg))
+
+ return pkgs_inst
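target_path() above anchors the new target-aware helpers; a condensed copy
with a few behaviour checks (type checking omitted):

    import os

    def target_path(target, path=None):
        # condensed copy of the helper above (type checking omitted)
        target = '/' if target in (None, '') else os.path.abspath(target)
        if target.startswith('//'):
            target = target[1:]
        if not path:
            return target
        # chomp leading slashes so os.path.join stays inside the target
        while path.startswith('/'):
            path = path[1:]
        return os.path.join(target, path)

    assert target_path(None, '/etc/hosts') == '/etc/hosts'
    assert target_path('/mnt/root', '/etc/hosts') == '/mnt/root/etc/hosts'
    assert target_path('/mnt/root') == '/mnt/root'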
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 3d1d1d23..6acada84 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -16,12 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from distutils import version as vr
-
-
-def version():
- return vr.StrictVersion("0.7.7")
+__VERSION__ = "0.7.7"
def version_string():
- return str(version())
+ return __VERSION__
diff --git a/config/cloud.cfg b/config/cloud.cfg
index a6afcc83..7c94ec5c 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -45,28 +45,29 @@ cloud_config_modules:
- emit_upstart
- disk_setup
- mounts
+ - ntp
- ssh-import-id
- locale
- set-passwords
- - snappy
- grub-dpkg
- apt-pipelining
- apt-configure
+ - timezone
+ - disable-ec2-metadata
+ - runcmd
+ - byobu
+
+# The modules that run in the 'final' stage
+cloud_final_modules:
+ - snappy
- package-update-upgrade-install
- fan
- landscape
- - timezone
- lxd
- puppet
- chef
- salt-minion
- mcollective
- - disable-ec2-metadata
- - runcmd
- - byobu
-
-# The modules that run in the 'final' stage
-cloud_final_modules:
- rightscale_userdata
- scripts-vendor
- scripts-per-once
diff --git a/doc/examples/cloud-config-add-apt-repos.txt b/doc/examples/cloud-config-add-apt-repos.txt
index be9d5472..22ef7612 100644
--- a/doc/examples/cloud-config-add-apt-repos.txt
+++ b/doc/examples/cloud-config-add-apt-repos.txt
@@ -4,18 +4,21 @@
#
# Default: auto select based on cloud metadata
# in ec2, the default is <region>.archive.ubuntu.com
-# apt_mirror:
-# use the provided mirror
-# apt_mirror_search:
-# search the list for the first mirror.
-# this is currently very limited, only verifying that
-# the mirror is dns resolvable or an IP address
+# apt:
+# primary:
+# - arches [default]
+# uri:
+# use the provided mirror
+# search:
+# search the list for the first mirror.
+# this is currently very limited, only verifying that
+# the mirror is dns resolvable or an IP address
#
-# if neither apt_mirror nor apt_mirror search is set (the default)
+# if neither "uri" nor "search" is set (the default)
# then use the mirror provided by the DataSource found.
# In EC2, that means using <region>.ec2.archive.ubuntu.com
-#
-# if no mirror is provided by the DataSource, and 'apt_mirror_search_dns' is
+#
+# if no mirror is provided by the DataSource, but 'search_dns' is
# true, then search for dns names '<distro>-mirror' in each of
# - fqdn of this host per cloud metadata
# - localdomain
@@ -27,8 +30,19 @@
# up and expose them only by creating dns entries.
#
# if none of that is found, then the default distro mirror is used
-apt_mirror: http://us.archive.ubuntu.com/ubuntu/
-apt_mirror_search:
- - http://local-mirror.mydomain
- - http://archive.ubuntu.com
-apt_mirror_search_dns: False
+apt:
+ primary:
+ - arches: [default]
+ uri: http://us.archive.ubuntu.com/ubuntu/
+# or
+apt:
+ primary:
+ - arches: [default]
+ search:
+ - http://local-mirror.mydomain
+ - http://archive.ubuntu.com
+# or
+apt:
+ primary:
+ - arches: [default]
+ search_dns: True
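The "search" behaviour documented above only checks that a candidate's
hostname resolves and takes the first hit. A rough sketch of that rule (the
helper name and the use of socket.getaddrinfo are assumptions, not the actual
implementation):

    import socket

    def first_resolvable(candidates):
        # return the first mirror whose hostname resolves in DNS
        for url in candidates:
            host = url.split('://', 1)[1].split('/', 1)[0]
            try:
                socket.getaddrinfo(host, None)
                return url
            except socket.gaierror:
                continue
        return None

    # first_resolvable(['http://local-mirror.mydomain',
    #                   'http://archive.ubuntu.com'])
    # -> the local mirror if its name resolves, else the public archive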
diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt
new file mode 100644
index 00000000..1a0fc6f2
--- /dev/null
+++ b/doc/examples/cloud-config-apt.txt
@@ -0,0 +1,328 @@
+# apt_pipelining (configure Acquire::http::Pipeline-Depth)
+# Default: disables HTTP pipelining. Certain web servers, such
+# as S3, do not pipeline properly (LP: #948461).
+# Valid options:
+# False/default: Disables pipelining for APT
+# None/Unchanged: Use OS default
+# Number: Set pipelining to some number (not recommended)
+apt_pipelining: False
+
+## apt config via system_info:
+# under the 'system_info', you can customize cloud-init's interaction
+# with apt.
+# system_info:
+# apt_get_command: [command, argument, argument]
+# apt_get_upgrade_subcommand: dist-upgrade
+#
+# apt_get_command:
+# To specify a different 'apt-get' command, set 'apt_get_command'.
+# This must be a list, and the subcommand (update, upgrade) is appended to it.
+# default is:
+# ['apt-get', '--option=Dpkg::Options::=--force-confold',
+# '--option=Dpkg::options::=--force-unsafe-io', '--assume-yes', '--quiet']
+#
+# apt_get_upgrade_subcommand: "dist-upgrade"
+# Specify a different subcommand for 'upgrade'. The default is 'dist-upgrade'.
+# This is the subcommand that is invoked for package_upgrade.
+#
+# apt_get_wrapper:
+# command: eatmydata
+# enabled: [True, False, "auto"]
+#
+
+# Install additional packages on first boot
+#
+# Default: none
+#
+# if packages are specified, then apt_update will be set to true
+
+packages: ['pastebinit']
+
+apt:
+ # The apt config consists of two major "areas".
+ #
+ # On the one hand there is the global configuration for the apt feature.
+ #
+ # On the other hand (down in this file) there is the sources dictionary,
+ # which allows various entries to be defined for apt to consider.
+
+ ##############################################################################
+ # Section 1: global apt configuration
+ #
+ # The following examples number the top keys to ease identification in
+ # discussions.
+
+ # 1.1 preserve_sources_list
+ #
+ # Preserves the existing /etc/apt/sources.list
+ # Default: false - i.e. sources_list is overwritten. If set to true then any
+ # "mirrors" configuration will have no effect.
+ # Set to true to avoid affecting sources.list. In that case only
+ # "extra" source specifications will be written into
+ # /etc/apt/sources.list.d/*
+ preserve_sources_list: true
+
+ # 1.2 disable_suites
+ #
+ # This is an empty list by default, so nothing is disabled.
+ #
+ # If given, those suites are removed from sources.list after all other
+ # modifications have been made.
+ # Suites are even disabled if no other modification was made,
+ # but not if preserve_sources_list is active.
+ # There is a special alias "$RELEASE", as in the sources, that will be
+ # replaced by the matching release.
+ #
+ # To ease configuration and improve readability the following common ubuntu
+ # suites will be automatically mapped to their full definition.
+ # updates => $RELEASE-updates
+ # backports => $RELEASE-backports
+ # security => $RELEASE-security
+ # proposed => $RELEASE-proposed
+ # release => $RELEASE
+ #
+ # There is no harm in specifying a suite to be disabled that is not found in
+ # the sources.list file (it is just a no-op then)
+ #
+ # Note: lines don't get deleted; they are disabled by being converted to
+ # comments.
+ # The following example disables all usual defaults except $RELEASE-security.
+ # On top of that, it disables a custom suite called "mysuite".
+ disable_suites: [$RELEASE-updates, backports, $RELEASE, mysuite]
+
+ # 1.3 primary/security archives
+ #
+ # Default: none - instead it is auto-selected based on cloud metadata,
+ # so if none of "uri", "search", or "search_dns" is set (the default)
+ # then use the mirror provided by the DataSource found.
+ # In EC2, that means using <region>.ec2.archive.ubuntu.com
+ #
+ # define a custom (e.g. localized) mirror that will be used in sources.list
+ # and any custom sources entries for deb / deb-src lines.
+ #
+ # One can set the primary and security mirrors to different URIs;
+ # the child elements of the keys primary and security are equivalent
+ primary:
+ # arches is a list of architectures the following config applies to.
+ # The special keyword "default" applies to any architecture not explicitly
+ # listed.
+ - arches: [amd64, i386, default]
+ # uri just defines the target as-is
+ uri: http://us.archive.ubuntu.com/ubuntu
+ #
+ # via search one can define lists that are tried one by one.
+ # The first one with a working DNS resolution (or that is an IP) will be
+ # picked. That way one can keep a single configuration for multiple
+ # subenvironments, each selecting the mirror that works for it.
+ search:
+ - http://cool.but-sometimes-unreachable.com/ubuntu
+ - http://us.archive.ubuntu.com/ubuntu
+ # if no mirror is provided by uri or search but 'search_dns' is
+ # true, then search for dns names '<distro>-mirror' in each of
+ # - fqdn of this host per cloud metadata
+ # - localdomain
+ # - no domain (which would search domains listed in /etc/resolv.conf)
+ # If there is a dns entry for <distro>-mirror, then it is assumed that
+ # there is a distro mirror at http://<distro>-mirror.<domain>/<distro>
+ #
+ # That gives the cloud provider the opportunity to set mirrors of a distro
+ # up and expose them only by creating dns entries.
+ #
+ # if none of that is found, then the default distro mirror is used
+ search_dns: true
+ #
+ # If multiple of these are given
+ # 1. uri
+ # 2. search
+ # 3. search_dns
+ # the first one defining a valid mirror wins (in the order defined here,
+ # not the order listed in the config).
+ #
+ - arches: [s390x, arm64]
+ # as above, allowing one config to cover different per-arch mirrors
+ # security is optional; if not defined it is set to the same value as primary
+ security:
+ uri: http://security.ubuntu.com/ubuntu
+ # If search_dns is set for security the searched pattern is:
+ # <distro>-security-mirror
+
+ # if no mirrors are specified at all, or all lookups fail, cloud-init will
+ # try to get them from the cloud datasource; if that does not provide one
+ # either, it falls back to:
+ # primary: http://archive.ubuntu.com/ubuntu
+ # security: http://security.ubuntu.com/ubuntu
+
+ # 1.4 sources_list
+ #
+ # Provide a custom template for rendering sources.list
+ # If none is provided, cloud-init uses built-in templates for
+ # ubuntu and debian.
+ # Within these sources.list templates you can use the following replacement
+ # variables (all have sane Ubuntu defaults, but mirrors can be overwritten
+ # as needed (see above)):
+ # => $RELEASE, $MIRROR, $PRIMARY, $SECURITY
+ sources_list: | # written by cloud-init custom template
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+
+ # 1.5 conf
+ #
+ # Any apt config string that will be made available to apt
+ # see the apt.conf(5) man page for details on what can be specified
+ conf: | # APT config
+ APT {
+ Get {
+ Assume-Yes "true";
+ Fix-Broken "true";
+ };
+ };
+
+ # 1.6 (http_|ftp_|https_)proxy
+ #
+ # Proxies are the most common apt.conf option, so for simplified use
+ # there is a shortcut for them. They get automatically translated into the
+ # correct Acquire::*::Proxy statements.
+ #
+ # note: proxy is actually a short synonym for http_proxy
+ proxy: http://[[user][:pass]@]host[:port]/
+ http_proxy: http://[[user][:pass]@]host[:port]/
+ ftp_proxy: ftp://[[user][:pass]@]host[:port]/
+ https_proxy: https://[[user][:pass]@]host[:port]/
+
+ # 1.7 add_apt_repo_match
+ #
+ # 'source' entries in apt-sources that match this Python regex
+ # will be passed to add-apt-repository
+ # The following example is also the builtin default if nothing is specified
+ add_apt_repo_match: '^[\w-]+:\w'
+
+
+ ##############################################################################
+ # Section 2: source list entries
+ #
+ # This is a dictionary (unlike most other config blocks, which are lists)
+ #
+ # The key of each source entry is the filename and will be prepended by
+ # /etc/apt/sources.list.d/ if it doesn't start with a '/'.
+ # If it doesn't end with .list it will be appended so that apt picks up its
+ # configuration.
+ #
+ # Whenever there is no content to be written into such a file, the key is
+ # not used as a filename - yet it can still be used as an index for merging
+ # configuration.
+ #
+ # The values inside the entries consist of the following optional entries:
+ # 'source': a sources.list entry (some variable replacements apply)
+ # 'keyid': providing a key to import via shortid or fingerprint
+ # 'key': providing a raw PGP key
+ # 'keyserver': specify an alternate keyserver to pull keys from that
+ # were specified by keyid
+
+ # This allows better merging between multiple input files than a list like:
+ # cloud-config1
+ # sources:
+ # s1: {'key': 'key1', 'source': 'source1'}
+ # cloud-config2
+ # sources:
+ # s2: {'key': 'key2'}
+ # s1: {'keyserver': 'foo'}
+ # This would be merged to
+ # sources:
+ # s1:
+ # keyserver: foo
+ # key: key1
+ # source: source1
+ # s2:
+ # key: key2
+ #
+ # The following examples number the subfeatures per sources entry to ease
+ # identification in discussions.
+
+
+ sources:
+ curtin-dev-ppa.list:
+ # 2.1 source
+ #
+ # Creates a file in /etc/apt/sources.list.d/ for the sources list entry
+ # based on the key: "/etc/apt/sources.list.d/curtin-dev-ppa.list"
+ source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+
+ # 2.2 keyid
+ #
+ # Importing a gpg key for a given key id. Used keyserver defaults to
+ # keyserver.ubuntu.com
+ keyid: F430BBA5 # GPG key ID published on a key server
+
+ ignored1:
+ # 2.3 PPA shortcut
+ #
+ # Set up the correct apt sources.list line and auto-import the signing key
+ # from Launchpad (LP)
+ #
+ # See https://help.launchpad.net/Packaging/PPA for more information
+ # this requires 'add-apt-repository'. It will create a file in
+ # /etc/apt/sources.list.d automatically; therefore the key here is
+ # ignored as a filename in those cases.
+ source: "ppa:curtin-dev/test-archive" # Quote the string
+
+ my-repo2.list:
+ # 2.4 replacement variables
+ #
+ # sources can use $MIRROR, $PRIMARY, $SECURITY and $RELEASE replacement
+ # variables.
+ # They will be replaced with the default or specified mirrors and the
+ # running release.
+ # The entry below might, for example, be turned into:
+ # source: deb http://archive.ubuntu.com/ubuntu xenial multiverse
+ source: deb $MIRROR $RELEASE multiverse
+
+ my-repo3.list:
+ # this would have the same end effect as 'ppa:curtin-dev/test-archive'
+ source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+ keyid: F430BBA5 # GPG key ID published on the key server
+ filename: curtin-dev-ppa.list
+
+ ignored2:
+ # 2.5 key only
+ #
+ # this would only import the key without adding a ppa or other source spec
+ # since this doesn't generate a source.list file the filename key is ignored
+ keyid: F430BBA5 # GPG key ID published on a key server
+
+ ignored3:
+ # 2.6 key id alternatives
+ #
+ # Keyids can also be specified via their long fingerprints
+ keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77
+
+ ignored4:
+ # 2.7 alternative keyservers
+ #
+ # One can also specify alternative keyservers to fetch keys from.
+ keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77
+ keyserver: pgp.mit.edu
+
+
+ my-repo4.list:
+ # 2.8 raw key
+ #
+ # The apt signing key can also be specified by providing a pgp public key
+ # block. Providing the PGP key this way is the most robust method for
+ # specifying a key, as it removes dependency on a remote key server.
+ #
+ # As with keyids, this can be specified with or without some actual source
+ # content.
+ key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK-----
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: SKS 1.0.10
+
+ mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
+ qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
+ 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
+ IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
+ 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
+ t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
+ uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
+ =Y2oI
+ -----END PGP PUBLIC KEY BLOCK-----
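
Because 'sources' is keyed by filename rather than being a list,
entries with the same key coming from separate cloud-config parts can
be merged field-by-field as described above. A minimal sketch of that
merge semantics (an illustration, not cloud-init's actual merger):

    def merge_sources(base, update):
        # Later parts add new keys and fill in missing fields of
        # existing entries; fields already present are kept.
        merged = {k: dict(v) for k, v in base.items()}
        for name, entry in update.items():
            merged.setdefault(name, {})
            for field, value in entry.items():
                merged[name].setdefault(field, value)
        return merged

    cc1 = {"s1": {"key": "key1", "source": "source1"}}
    cc2 = {"s2": {"key": "key2"}, "s1": {"keyserver": "foo"}}
    # -> s1 gains 'keyserver' while keeping key/source; s2 is added.
    print(merge_sources(cc1, cc2))
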
diff --git a/doc/examples/cloud-config-chef-oneiric.txt b/doc/examples/cloud-config-chef-oneiric.txt
index 2e5f4b16..75c9aeed 100644
--- a/doc/examples/cloud-config-chef-oneiric.txt
+++ b/doc/examples/cloud-config-chef-oneiric.txt
@@ -11,39 +11,40 @@
# The default is to install from packages.
# Key from http://apt.opscode.com/packages@opscode.com.gpg.key
-apt_sources:
- - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.9 (GNU/Linux)
-
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
- lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
- DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
- wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
- EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
- w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
- AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
- QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
- Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
- 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
- Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
- zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
- DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
- 0GLl8EkfA8uhluM=
- =zKAm
- -----END PGP PUBLIC KEY BLOCK-----
+apt:
+ sources:
+ - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1.4.9 (GNU/Linux)
+
+ mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
+ twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
+ dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
+ JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
+ ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
+ XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
+ DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
+ sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
+ Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
+ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
+ CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+ +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
+ lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
+ DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
+ wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
+ EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
+ w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
+ AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
+ QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
+ Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
+ 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
+ Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
+ zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
+ DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
+ 0GLl8EkfA8uhluM=
+ =zKAm
+ -----END PGP PUBLIC KEY BLOCK-----
chef:
diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt
index b886cba2..75d78a15 100644
--- a/doc/examples/cloud-config-chef.txt
+++ b/doc/examples/cloud-config-chef.txt
@@ -11,39 +11,40 @@
# The default is to install from packages.
# Key from http://apt.opscode.com/packages@opscode.com.gpg.key
-apt_sources:
- - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.9 (GNU/Linux)
-
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
- lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
- DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
- wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
- EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
- w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
- AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
- QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
- Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
- 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
- Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
- zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
- DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
- 0GLl8EkfA8uhluM=
- =zKAm
- -----END PGP PUBLIC KEY BLOCK-----
+apt:
+ sources:
+ - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1.4.9 (GNU/Linux)
+
+ mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
+ twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
+ dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
+ JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
+ ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
+ XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
+ DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
+ sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
+ Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
+ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
+ CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+ +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
+ lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
+ DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
+ wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
+ EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
+ w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
+ AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
+ QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
+ Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
+ 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
+ Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
+ zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
+ DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
+ 0GLl8EkfA8uhluM=
+ =zKAm
+ -----END PGP PUBLIC KEY BLOCK-----
chef:
diff --git a/doc/examples/cloud-config-ntp.txt b/doc/examples/cloud-config-ntp.txt
new file mode 100644
index 00000000..2fc656e4
--- /dev/null
+++ b/doc/examples/cloud-config-ntp.txt
@@ -0,0 +1,27 @@
+#cloud-config
+
+# ntp: configure ntp services
+# servers: List of NTP servers with which to sync
+# pools: List of NTP pool servers with which to sync (pools are typically
+# DNS hostnames which resolve to different specific servers to load
+# balance a set of services)
+#
+# Each server in the list will be added in list-order in the following format:
+#
+# [pool|server] <server entry> iburst
+#
+#
+# If no servers or pools are defined but ntp is enabled, then cloud-init will
+# render the distro default list of pools
+#
+# pools = [
+# '0.{distro}.pool.ntp.org',
+# '1.{distro}.pool.ntp.org',
+# '2.{distro}.pool.ntp.org',
+# '3.{distro}.pool.ntp.org',
+# ]
+#
+
+ntp:
+ pools: ['0.company.pool.ntp.org', '1.company.pool.ntp.org', 'ntp.myorg.org']
+ servers: ['my.ntp.server.local', 'ntp.ubuntu.com', '192.168.23.2']
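
Each pool and server entry renders to one '<pool|server> <name> iburst'
line, in list order, via distro templates like the ntp.conf.*.tmpl
files added later in this diff. A small sketch of the resulting lines
for the example config above:

    ntp_cfg = {
        "pools": ["0.company.pool.ntp.org", "1.company.pool.ntp.org",
                  "ntp.myorg.org"],
        "servers": ["my.ntp.server.local", "ntp.ubuntu.com",
                    "192.168.23.2"],
    }

    # Mirrors the jinja loops in the templates: pools first, then servers.
    lines = ["pool %s iburst" % p for p in ntp_cfg["pools"]]
    lines += ["server %s iburst" % s for s in ntp_cfg["servers"]]
    print("\n".join(lines))
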
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 3cc9c055..190029e4 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -18,256 +18,7 @@ package_upgrade: true
# Aliases: apt_reboot_if_required
package_reboot_if_required: true
-# Add apt repositories
-#
-# Default: auto select based on cloud metadata
-# in ec2, the default is <region>.archive.ubuntu.com
-# apt_mirror:
-# use the provided mirror
-# apt_mirror_search:
-# search the list for the first mirror.
-# this is currently very limited, only verifying that
-# the mirror is dns resolvable or an IP address
-#
-# if neither apt_mirror nor apt_mirror search is set (the default)
-# then use the mirror provided by the DataSource found.
-# In EC2, that means using <region>.ec2.archive.ubuntu.com
-#
-# if no mirror is provided by the DataSource, and 'apt_mirror_search_dns' is
-# true, then search for dns names '<distro>-mirror' in each of
-# - fqdn of this host per cloud metadata
-# - localdomain
-# - no domain (which would search domains listed in /etc/resolv.conf)
-# If there is a dns entry for <distro>-mirror, then it is assumed that there
-# is a distro mirror at http://<distro>-mirror.<domain>/<distro>
-#
-# That gives the cloud provider the opportunity to set mirrors of a distro
-# up and expose them only by creating dns entries.
-#
-# if none of that is found, then the default distro mirror is used
-apt_mirror: http://us.archive.ubuntu.com/ubuntu/
-apt_mirror_search:
- - http://local-mirror.mydomain
- - http://archive.ubuntu.com
-
-apt_mirror_search_dns: False
-
-# apt_proxy (configure Acquire::HTTP::Proxy)
-# 'apt_http_proxy' is an alias for 'apt_proxy'.
-# Also, available are 'apt_ftp_proxy' and 'apt_https_proxy'.
-# These affect Acquire::FTP::Proxy and Acquire::HTTPS::Proxy respectively
-apt_proxy: http://my.apt.proxy:3128
-
-# apt_pipelining (configure Acquire::http::Pipeline-Depth)
-# Default: disables HTTP pipelining. Certain web servers, such
-# as S3 do not pipeline properly (LP: #948461).
-# Valid options:
-# False/default: Disables pipelining for APT
-# None/Unchanged: Use OS default
-# Number: Set pipelining to some number (not recommended)
-apt_pipelining: False
-
-# Preserve existing /etc/apt/sources.list
-# Default: overwrite sources_list with mirror. If this is true
-# then apt_mirror above will have no effect
-apt_preserve_sources_list: true
-
-# Provide a custom template for rendering sources.list
-# Default: a default template for Ubuntu/Debain will be used as packaged in
-# Ubuntu: /etc/cloud/templates/sources.list.ubuntu.tmpl
-# Debian: /etc/cloud/templates/sources.list.debian.tmpl
-# Others: n/a
-# This will follow the normal mirror/codename replacement rules before
-# being written to disk.
-apt_custom_sources_list: |
- ## template:jinja
- ## Note, this file is written by cloud-init on first boot of an instance
- ## modifications made here will not survive a re-bundle.
- ## if you wish to make changes you can:
- ## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
- ## or do the same in user-data
- ## b.) add sources in /etc/apt/sources.list.d
- ## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
- deb {{mirror}} {{codename}} main restricted
- deb-src {{mirror}} {{codename}} main restricted
-
- # could drop some of the usually used entries
-
- # could refer to other mirrors
- deb http://ddebs.ubuntu.com {{codename}} main restricted universe multiverse
- deb http://ddebs.ubuntu.com {{codename}}-updates main restricted universe multiverse
- deb http://ddebs.ubuntu.com {{codename}}-proposed main restricted universe multiverse
-
- # or even more uncommon examples like local or NFS mounted repos,
- # eventually whatever is compatible with sources.list syntax
- deb file:/home/apt/debian unstable main contrib non-free
-
-# 'source' entries in apt-sources that match this python regex
-# expression will be passed to add-apt-repository
-add_apt_repo_match: '^[\w-]+:\w'
-
-# 'apt_sources' is a dictionary
-# The key is the filename and will be prepended by /etc/apt/sources.list.d/ if
-# it doesn't start with a '/'.
-# There are certain cases - where no content is written into a source.list file
-# where the filename will be ignored - yet it can still be used as index for
-# merging.
-# The value it maps to is a dictionary with the following optional entries:
-# source: a sources.list entry (some variable replacements apply)
-# keyid: providing a key to import via shortid or fingerprint
-# key: providing a raw PGP key
-# keyserver: keyserver to fetch keys from, default is keyserver.ubuntu.com
-# filename: for compatibility with the older format (now the key to this
-# dictionary is the filename). If specified this overwrites the
-# filename given as key.
-
-# the new "filename: {specification-dictionary}, filename2: ..." format allows
-# better merging between multiple input files than a list like:
-# cloud-config1
-# sources:
-# s1: {'key': 'key1', 'source': 'source1'}
-# cloud-config2
-# sources:
-# s2: {'key': 'key2'}
-# s1: {filename: 'foo'}
-# this would be merged to
-#sources:
-# s1:
-# filename: foo
-# key: key1
-# source: source1
-# s2:
-# key: key2
-# Be aware that this style of merging is not the default (for backward
-# compatibility reasons). You should specify the following merge_how to get
-# this more complete and modern merging behaviour:
-# merge_how: "list()+dict()+str()"
-# This would then also be equivalent to the config merging used in curtin
-# (https://launchpad.net/curtin).
-
-# for more details see below in the various examples
-
-apt_sources:
- byobu-ppa.list:
- source: "deb http://ppa.launchpad.net/byobu/ppa/ubuntu karmic main"
- keyid: F430BBA5 # GPG key ID published on a key server
- # adding a source.list line, importing a gpg key for a given key id and
- # storing it in the file /etc/apt/sources.list.d/byobu-ppa.list
-
- # PPA shortcut:
- # * Setup correct apt sources.list line
- # * Import the signing key from LP
- #
- # See https://help.launchpad.net/Packaging/PPA for more information
- # this requires 'add-apt-repository'
- # due to that the filename key is ignored in this case
- ignored1:
- source: "ppa:smoser/ppa" # Quote the string
-
- # Custom apt repository:
- # * all that is required is 'source'
- # * Creates a file in /etc/apt/sources.list.d/ for the sources list entry
- # * [optional] Import the apt signing key from the keyserver
- # * Defaults:
- # + keyserver: keyserver.ubuntu.com
- #
- # See sources.list man page for more information about the format
- my-repo.list:
- source: deb http://archive.ubuntu.com/ubuntu karmic-backports main universe multiverse restricted
-
- # sources can use $MIRROR and $RELEASE and they will be replaced
- # with the local mirror for this cloud, and the running release
- # the entry below would be possibly turned into:
- # source: deb http://us-east-1.ec2.archive.ubuntu.com/ubuntu natty multiverse
- my-repo.list:
- source: deb $MIRROR $RELEASE multiverse
-
- # this would have the same end effect as 'ppa:byobu/ppa'
- my-repo.list:
- source: "deb http://ppa.launchpad.net/byobu/ppa/ubuntu karmic main"
- keyid: F430BBA5 # GPG key ID published on a key server
- filename: byobu-ppa.list
-
- # this would only import the key without adding a ppa or other source spec
- # since this doesn't generate a source.list file the filename key is ignored
- ignored2:
- keyid: F430BBA5 # GPG key ID published on a key server
-
- # In general keyid's can also be specified via their long fingerprints
- # since this doesn't generate a source.list file the filename key is ignored
- ignored3:
- keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77
-
- # Custom apt repository:
- # * The apt signing key can also be specified
- # by providing a pgp public key block
- # * Providing the PGP key here is the most robust method for
- # specifying a key, as it removes dependency on a remote key server
- my-repo.list:
- source: deb http://ppa.launchpad.net/alestic/ppa/ubuntu karmic main
- key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK-----
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: SKS 1.0.10
-
- mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
- qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
- 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
- IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
- 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
- t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
- uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
- =Y2oI
- -----END PGP PUBLIC KEY BLOCK-----
-
- # Custom gpg key:
- # * As with keyid, a key may also be specified without a related source.
- # * all other facts mentioned above still apply
- # since this doesn't generate a source.list file the filename key is ignored
- ignored4:
- key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK-----
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: SKS 1.0.10
-
- mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
- qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
- 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
- IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
- 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
- t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
- uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
- =Y2oI
- -----END PGP PUBLIC KEY BLOCK-----
-
-
-## apt config via system_info:
-# under the 'system_info', you can further customize cloud-init's interaction
-# with apt.
-# system_info:
-# apt_get_command: [command, argument, argument]
-# apt_get_upgrade_subcommand: dist-upgrade
-#
-# apt_get_command:
-# To specify a different 'apt-get' command, set 'apt_get_command'.
-# This must be a list, and the subcommand (update, upgrade) is appended to it.
-# default is:
-# ['apt-get', '--option=Dpkg::Options::=--force-confold',
-# '--option=Dpkg::options::=--force-unsafe-io', '--assume-yes', '--quiet']
-#
-# apt_get_upgrade_subcommand:
-# Specify a different subcommand for 'upgrade. The default is 'dist-upgrade'.
-# This is the subcommand that is invoked if package_upgrade is set to true above.
-#
-# apt_get_wrapper:
-# command: eatmydata
-# enabled: [True, False, "auto"]
-#
-
-# Install additional packages on first boot
-#
-# Default: none
-#
-# if packages are specified, this apt_update will be set to true
-#
+# For 'apt' specific config, see cloud-config-apt.txt
packages:
- pwgen
- pastebinit
diff --git a/doc/sources/azure/README.rst b/doc/sources/azure/README.rst
index 8239d1fa..ec7d9e84 100644
--- a/doc/sources/azure/README.rst
+++ b/doc/sources/azure/README.rst
@@ -9,10 +9,31 @@ Azure Platform
The azure cloud-platform provides initial data to an instance via an attached
CD formated in UDF. That CD contains a 'ovf-env.xml' file that provides some
information. Additional information is obtained via interaction with the
-"endpoint". The ip address of the endpoint is advertised to the instance
-inside of dhcp option 245. On ubuntu, that can be seen in
-/var/lib/dhcp/dhclient.eth0.leases as a colon delimited hex value (example:
-``option unknown-245 64:41:60:82;`` is 100.65.96.130)
+"endpoint".
+
+To find the endpoint, we now leverage the dhcp client's ability to log its
+known values on exit. The endpoint server address is carried in DHCP option 245.
+Depending on your networking stack, this can be done
+by calling a script in /etc/dhcp/dhclient-exit-hooks or a file in
+/etc/NetworkManager/dispatcher.d. Both of these call a sub-command
+'dhclient_hook' of cloud-init itself. This sub-command will write the client
+information in json format to /run/cloud-init/dhclient.hook/<interface>.json.
+
+In order for cloud-init to leverage this method to find the endpoint, the
+cloud.cfg file must contain::
+
+  datasource:
+    Azure:
+      set_hostname: False
+      agent_command: __builtin__
+
+If those files are not available, the fallback is to check the leases file
+for the endpoint server (again option 245).
+
+You can define the path to the lease file with the 'dhclient_lease_file'
+configuration option. The default value is::
+
+  dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
walinuxagent
------------
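
A hedged sketch of consuming the hook's JSON dump from Python; the
file path follows what is documented above, but the exact key that
carries option 245 is an assumption made for illustration:

    import json

    def read_endpoint(path="/run/cloud-init/dhclient.hook/eth0.json"):
        # The hook writes the dhclient variables as JSON. The key name
        # used for option 245 here ('unknown_245') is hypothetical.
        with open(path) as fp:
            data = json.load(fp)
        return data.get("unknown_245")
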
diff --git a/packages/bddeb b/packages/bddeb
index 3c77ce1d..abb7b607 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
-import glob
+import argparse
+import json
import os
import shutil
import sys
@@ -15,15 +16,13 @@ def find_root():
if os.path.isfile(os.path.join(top_dir, 'setup.py')):
return os.path.abspath(top_dir)
raise OSError(("Unable to determine where your cloud-init topdir is."
- " set CLOUD_INIT_TOP_D?"))
-
-# Use the util functions from cloudinit
-sys.path.insert(0, find_root())
+ " set CLOUD_INIT_TOP_D?"))
-from cloudinit import templater
-from cloudinit import util
-
-import argparse
+if "avoid-pep8-E402-import-not-top-of-file":
+ # Use the util functions from cloudinit
+ sys.path.insert(0, find_root())
+ from cloudinit import templater
+ from cloudinit import util
# Package names that will showup in requires to what we can actually
# use in our debian 'control' file, this is a translation of the 'requires'
@@ -58,27 +57,37 @@ NONSTD_NAMED_PACKAGES = {
DEBUILD_ARGS = ["-S", "-d"]
-def write_debian_folder(root, version, revno, pkgmap,
- pyver="3", append_requires=[]):
+def run_helper(helper, args=None, strip=True):
+ if args is None:
+ args = []
+ cmd = [util.abs_join(find_root(), 'tools', helper)] + args
+ (stdout, _stderr) = util.subp(cmd)
+ if strip:
+ stdout = stdout.strip()
+ return stdout
+
+
+def write_debian_folder(root, templ_data, pkgmap, pyver="3",
+ append_requires=[]):
deb_dir = util.abs_join(root, 'debian')
- os.makedirs(deb_dir)
+
+ # Just copy debian/ dir and then update files
+ pdeb_d = util.abs_join(find_root(), 'packages', 'debian')
+ util.subp(['cp', '-a', pdeb_d, deb_dir])
# Fill in the change log template
templater.render_to_file(util.abs_join(find_root(),
'packages', 'debian', 'changelog.in'),
util.abs_join(deb_dir, 'changelog'),
- params={
- 'version': version,
- 'revision': revno,
- })
+ params=templ_data)
# Write out the control file template
- cmd = [util.abs_join(find_root(), 'tools', 'read-dependencies')]
- (stdout, _stderr) = util.subp(cmd)
- pypi_pkgs = [p.lower().strip() for p in stdout.splitlines()]
+ reqs = run_helper('read-dependencies').splitlines()
+ test_reqs = run_helper(
+ 'read-dependencies', ['test-requirements.txt']).splitlines()
- (stdout, _stderr) = util.subp(cmd + ['test-requirements.txt'])
- pypi_test_pkgs = [p.lower().strip() for p in stdout.splitlines()]
+ pypi_pkgs = [p.lower().strip() for p in reqs]
+ pypi_test_pkgs = [p.lower().strip() for p in test_reqs]
# Map to known packages
requires = append_requires
@@ -109,11 +118,9 @@ def write_debian_folder(root, version, revno, pkgmap,
util.abs_join(deb_dir, 'rules'),
params={'python': python, 'pyver': pyver})
- # Just copy any other files directly (including .in)
- pdeb_d = util.abs_join(find_root(), 'packages', 'debian')
- for f in [os.path.join(pdeb_d, f) for f in os.listdir(pdeb_d)]:
- if os.path.isfile(f):
- shutil.copy(f, util.abs_join(deb_dir, os.path.basename(f)))
+
+def read_version():
+ return json.loads(run_helper('read-version', ['--json']))
def main():
@@ -140,11 +147,14 @@ def main():
default=os.environ.get("INIT_SYSTEM",
"upstart,systemd"))
+ parser.add_argument("--release", dest="release",
+ help=("build with changelog referencing RELEASE"),
+ default="UNRELEASED")
for ent in DEBUILD_ARGS:
parser.add_argument(ent, dest="debuild_args", action='append_const',
- const=ent, help=("pass through '%s' to debuild" % ent),
- default=[])
+ const=ent, default=[],
+ help=("pass through '%s' to debuild" % ent))
parser.add_argument("--sign", default=False, action='store_true',
help="sign result. do not pass -us -uc to debuild")
@@ -178,59 +188,35 @@ def main():
pkgmap[p] = "python3-" + p
pyver = "3"
+ templ_data = {'debian_release': args.release}
with util.tempdir() as tdir:
- cmd = [util.abs_join(find_root(), 'tools', 'read-version')]
- (sysout, _stderr) = util.subp(cmd)
- version = sysout.strip()
-
- cmd = ['bzr', 'revno']
- (sysout, _stderr) = util.subp(cmd)
- revno = sysout.strip()
+ # output like 0.7.6-1022-g36e92d3
+ ver_data = read_version()
# This is really only a temporary archive
# since we will extract it then add in the debian
# folder, then re-archive it for debian happiness
print("Creating a temporary tarball using the 'make-tarball' helper")
- cmd = [util.abs_join(find_root(), 'tools', 'make-tarball')]
- (sysout, _stderr) = util.subp(cmd)
- arch_fn = sysout.strip()
- tmp_arch_fn = util.abs_join(tdir, os.path.basename(arch_fn))
- shutil.move(arch_fn, tmp_arch_fn)
-
- print("Extracting temporary tarball %r" % (tmp_arch_fn))
- cmd = ['tar', '-xvzf', tmp_arch_fn, '-C', tdir]
+ tarball = "cloud-init_%s.orig.tar.gz" % ver_data['version_long']
+ tarball_fp = util.abs_join(tdir, tarball)
+ run_helper('make-tarball', ['--long', '--output=' + tarball_fp])
+
+ print("Extracting temporary tarball %r" % (tarball))
+ cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir]
util.subp(cmd, capture=capture)
- extracted_name = tmp_arch_fn[:-len('.tar.gz')]
- os.remove(tmp_arch_fn)
- xdir = util.abs_join(tdir, 'cloud-init')
- shutil.move(extracted_name, xdir)
+ xdir = util.abs_join(tdir, "cloud-init-%s" % ver_data['version_long'])
print("Creating a debian/ folder in %r" % (xdir))
if args.cloud_utils:
- append_requires=['cloud-utils | cloud-guest-utils']
+ append_requires = ['cloud-utils | cloud-guest-utils']
else:
- append_requires=[]
- write_debian_folder(xdir, version, revno, pkgmap,
- pyver=pyver, append_requires=append_requires)
-
- # The naming here seems to follow some debian standard
- # so it will whine if it is changed...
- tar_fn = "cloud-init_%s~bzr%s.orig.tar.gz" % (version, revno)
- print("Archiving the adjusted source into %r" %
- (util.abs_join(tdir, tar_fn)))
- cmd = ['tar', '-czvf',
- util.abs_join(tdir, tar_fn),
- '-C', xdir]
- cmd.extend(os.listdir(xdir))
- util.subp(cmd, capture=capture)
+ append_requires = []
- # Copy it locally for reference
- shutil.copy(util.abs_join(tdir, tar_fn),
- util.abs_join(os.getcwd(), tar_fn))
- print("Copied that archive to %r for local usage (if desired)." %
- (util.abs_join(os.getcwd(), tar_fn)))
+ templ_data.update(ver_data)
+ write_debian_folder(xdir, templ_data, pkgmap,
+ pyver=pyver, append_requires=append_requires)
print("Running 'debuild %s' in %r" % (' '.join(args.debuild_args),
xdir))
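
The new flow centralizes tool invocation in run_helper(); 'read-version
--json' returns a dict whose version_long field (e.g.
0.7.6-1022-g36e92d3) names both the changelog entry and the orig
tarball. A sketch of the same flow outside bddeb, assuming it runs from
a source checkout with the tools/ scripts present:

    import json
    import subprocess

    def run_helper(helper, args=()):
        # Same contract as bddeb's run_helper, via subprocess directly.
        out = subprocess.check_output(["tools/" + helper, *args])
        return out.decode().strip()

    ver_data = json.loads(run_helper("read-version", ["--json"]))
    tarball = "cloud-init_%s.orig.tar.gz" % ver_data["version_long"]
    print(tarball)
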
diff --git a/packages/brpm b/packages/brpm
index b41b675f..89696ab8 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -1,63 +1,41 @@
-#!/usr/bin/python
+#!/usr/bin/env python
import argparse
-import contextlib
import glob
+import json
import os
import shutil
-import subprocess
import sys
import tempfile
-import re
-
-from datetime import datetime
def find_root():
# expected path is in <top_dir>/packages/
top_dir = os.environ.get("CLOUD_INIT_TOP_D", None)
if top_dir is None:
- top_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
+ top_dir = os.path.dirname(
+ os.path.dirname(os.path.abspath(sys.argv[0])))
if os.path.isfile(os.path.join(top_dir, 'setup.py')):
return os.path.abspath(top_dir)
raise OSError(("Unable to determine where your cloud-init topdir is."
- " set CLOUD_INIT_TOP_D?"))
-
+ " set CLOUD_INIT_TOP_D?"))
-# Use the util functions from cloudinit
-sys.path.insert(0, find_root())
-from cloudinit import templater
-from cloudinit import util
+if "avoid-pep8-E402-import-not-top-of-file":
+ # Use the util functions from cloudinit
+ sys.path.insert(0, find_root())
+ from cloudinit import templater
+ from cloudinit import util
-# Mapping of expected packages to there full name...
-# this is a translation of the 'requires'
-# file pypi package name to a redhat/fedora package name.
-PKG_MP = {
+# Map python requirements to package names. If a match isn't found
+# here, we assume 'python-<pypi_name>'.
+PACKAGE_MAP = {
'redhat': {
- 'argparse': 'python-argparse',
- 'cheetah': 'python-cheetah',
- 'jinja2': 'python-jinja2',
- 'configobj': 'python-configobj',
- 'jsonpatch': 'python-jsonpatch',
- 'oauthlib': 'python-oauthlib',
- 'prettytable': 'python-prettytable',
'pyserial': 'pyserial',
'pyyaml': 'PyYAML',
- 'requests': 'python-requests',
- 'six': 'python-six',
},
'suse': {
- 'argparse': 'python-argparse',
- 'cheetah': 'python-cheetah',
- 'configobj': 'python-configobj',
- 'jsonpatch': 'python-jsonpatch',
- 'oauthlib': 'python-oauthlib',
- 'prettytable': 'python-prettytable',
- 'pyserial': 'python-pyserial',
'pyyaml': 'python-yaml',
- 'requests': 'python-requests',
- 'six': 'python-six',
}
}
@@ -65,110 +43,66 @@ PKG_MP = {
RPM_BUILD_SUBDIRS = ['BUILD', 'RPMS', 'SOURCES', 'SPECS', 'SRPMS']
-def get_log_header(version):
- # Try to find the version in the tags output
- cmd = ['bzr', 'tags']
- (stdout, _stderr) = util.subp(cmd)
- a_rev = None
- for t in stdout.splitlines():
- ver, rev = t.split(None)
- if ver == version:
- a_rev = rev
- break
- if not a_rev:
- return None
-
- # Extract who made that tag as the header
- cmd = ['bzr', 'log', '-r%s' % (a_rev), '--timezone=utc']
+def run_helper(helper, args=None, strip=True):
+ if args is None:
+ args = []
+ cmd = [util.abs_join(find_root(), 'tools', helper)] + args
(stdout, _stderr) = util.subp(cmd)
- kvs = {
- 'comment': version,
- }
+ if strip:
+ stdout = stdout.strip()
+ return stdout
- for line in stdout.splitlines():
- if line.startswith('committer:'):
- kvs['who'] = line[len('committer:'):].strip()
- if line.startswith('timestamp:'):
- ts = line[len('timestamp:'):]
- ts = ts.strip()
- # http://bugs.python.org/issue6641
- ts = ts.replace("+0000", '').strip()
- ds = datetime.strptime(ts, '%a %Y-%m-%d %H:%M:%S')
- kvs['ds'] = ds
- return format_change_line(**kvs)
+def read_dependencies():
+    '''Returns the Python dependencies from requirements.txt. This explicitly
+ removes 'argparse' from the list of requirements for python >= 2.7,
+ because with 2.7 argparse became part of the standard library.'''
+ stdout = run_helper('read-dependencies')
+ return [p.lower().strip() for p in stdout.splitlines()
+ if p != 'argparse' or (p == 'argparse' and
+ sys.version_info[0:2] < (2, 7))]
-def format_change_line(ds, who, comment=None):
- # Rpmbuild seems to be pretty strict about the date format
- d = ds.strftime("%a %b %d %Y")
- d += " - %s" % (who)
- if comment:
- d += " - %s" % (comment)
- return "* %s" % (d)
+def translate_dependencies(deps, distro):
+ '''Maps python requirements into package names. We assume
+ python-<pypi_name> for packages not listed explicitly in
+ PACKAGE_MAP.'''
+ return [PACKAGE_MAP[distro][req]
+ if req in PACKAGE_MAP[distro] else 'python-%s' % req
+ for req in deps]
-def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn):
+def read_version():
+ return json.loads(run_helper('read-version', ['--json']))
- # Figure out the version and revno
- cmd = [util.abs_join(find_root(), 'tools', 'read-version')]
- (stdout, _stderr) = util.subp(cmd)
- version = stdout.strip()
-
- cmd = ['bzr', 'revno']
- (stdout, _stderr) = util.subp(cmd)
- revno = stdout.strip()
+
+def generate_spec_contents(args, version_data, tmpl_fn, top_dir, arc_fn):
# Tmpl params
subs = {}
- subs['version'] = version
- subs['revno'] = revno
- subs['release'] = "bzr%s" % (revno)
+
if args.sub_release is not None:
- subs['subrelease'] = "." + str(args.sub_release)
+ subs['subrelease'] = str(args.sub_release)
else:
- subs['subrelease'] = ''
- subs['archive_name'] = arc_fn
+ subs['subrelease'] = ""
- cmd = [util.abs_join(find_root(), 'tools', 'read-dependencies')]
- (stdout, _stderr) = util.subp(cmd)
- pkgs = [p.lower().strip() for p in stdout.splitlines()]
+ subs['archive_name'] = arc_fn
+ subs['source_name'] = os.path.basename(arc_fn).replace('.tar.gz', '')
+ subs.update(version_data)
+
+ # rpm does not like '-' in the Version, so change
+ # X.Y.Z-N-gHASH to X.Y.Z+N.gHASH
+ if "-" in version_data.get('version'):
+ ver, commits, ghash = version_data['version'].split("-")
+ rpm_upstream_version = "%s+%s.%s" % (ver, commits, ghash)
+ else:
+ rpm_upstream_version = version_data['version']
+ subs['rpm_upstream_version'] = rpm_upstream_version
# Map to known packages
- requires = []
- for p in pkgs:
- tgt_pkg = PKG_MP[args.distro].get(p)
- if not tgt_pkg:
- raise RuntimeError(("Do not know how to translate pypi dependency"
- " %r to a known package") % (p))
- else:
- requires.append(tgt_pkg)
- subs['requires'] = requires
-
- # Format a nice changelog (as best as we can)
- changelog = util.load_file(util.abs_join(find_root(), 'ChangeLog'))
- changelog_lines = []
- missing_versions = 0
- for line in changelog.splitlines():
- if not line.strip():
- continue
- if re.match(r"^\s*[\d][.][\d][.][\d]:\s*", line):
- line = line.strip(":")
- header = get_log_header(line)
- if not header:
- missing_versions += 1
- if missing_versions == 1:
- # Must be using a new 'dev'/'trunk' release
- changelog_lines.append(format_change_line(datetime.now(),
- '??'))
- else:
- sys.stderr.write(("Changelog version line %s does not "
- "have a corresponding tag!\n") % (line))
- else:
- changelog_lines.append(header)
- else:
- changelog_lines.append(line)
- subs['changelog'] = "\n".join(changelog_lines)
+ python_deps = read_dependencies()
+ package_deps = translate_dependencies(python_deps, args.distro)
+ subs['requires'] = package_deps
if args.boot == 'sysvinit':
subs['sysvinit'] = True
@@ -180,21 +114,23 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn):
else:
subs['systemd'] = False
- subs['defines'] = ["_topdir %s" % (top_dir)]
subs['init_sys'] = args.boot
subs['patches'] = [os.path.basename(p) for p in args.patches]
return templater.render_from_file(tmpl_fn, params=subs)
def main():
-
+
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--distro", dest="distro",
help="select distro (default: %(default)s)",
metavar="DISTRO", default='redhat',
choices=('redhat', 'suse'))
+ parser.add_argument('--srpm',
+ help='Produce a source rpm',
+ action='store_true')
parser.add_argument("-b", "--boot", dest="boot",
- help="select boot type (default: %(default)s)",
+ help="select boot type (default: %(default)s)",
metavar="TYPE", default='sysvinit',
choices=('sysvinit', 'systemd'))
parser.add_argument("-v", "--verbose", dest="verbose",
@@ -218,57 +154,62 @@ def main():
if args.verbose:
capture = False
- # Clean out the root dir and make sure the dirs we want are in place
- root_dir = os.path.expanduser("~/rpmbuild")
- if os.path.isdir(root_dir):
- shutil.rmtree(root_dir)
-
- arc_dir = util.abs_join(root_dir, 'SOURCES')
- build_dirs = [root_dir, arc_dir]
- for dname in RPM_BUILD_SUBDIRS:
- build_dirs.append(util.abs_join(root_dir, dname))
- build_dirs.sort()
- util.ensure_dirs(build_dirs)
-
- # Archive the code
- cmd = [util.abs_join(find_root(), 'tools', 'make-tarball')]
- (stdout, _stderr) = util.subp(cmd)
- archive_fn = stdout.strip()
- real_archive_fn = os.path.join(arc_dir, os.path.basename(archive_fn))
- shutil.move(archive_fn, real_archive_fn)
- print("Archived the code in %r" % (real_archive_fn))
-
- # Form the spec file to be used
- tmpl_fn = util.abs_join(find_root(), 'packages',
- args.distro, 'cloud-init.spec.in')
- contents = generate_spec_contents(args, tmpl_fn, root_dir,
- os.path.basename(archive_fn))
- spec_fn = util.abs_join(root_dir, 'cloud-init.spec')
- util.write_file(spec_fn, contents)
- print("Created spec file at %r" % (spec_fn))
- print(contents)
- for p in args.patches:
- util.copy(p, util.abs_join(arc_dir, os.path.basename(p)))
-
- # Now build it!
- print("Running 'rpmbuild' in %r" % (root_dir))
- cmd = ['rpmbuild', '-ba', spec_fn]
- util.subp(cmd, capture=capture)
-
- # Copy the items built to our local dir
- globs = []
- globs.extend(glob.glob("%s/*.rpm" %
- (util.abs_join(root_dir, 'RPMS', 'noarch'))))
- globs.extend(glob.glob("%s/*.rpm" %
- (util.abs_join(root_dir, 'RPMS', 'x86_64'))))
- globs.extend(glob.glob("%s/*.rpm" %
- (util.abs_join(root_dir, 'RPMS'))))
- globs.extend(glob.glob("%s/*.rpm" %
- (util.abs_join(root_dir, 'SRPMS'))))
- for rpm_fn in globs:
- tgt_fn = util.abs_join(os.getcwd(), os.path.basename(rpm_fn))
- shutil.move(rpm_fn, tgt_fn)
- print("Wrote out %s package %r" % (args.distro, tgt_fn))
+ workdir = None
+ try:
+ workdir = tempfile.mkdtemp(prefix='rpmbuild')
+ os.environ['HOME'] = workdir
+ topdir = os.path.join(workdir, 'rpmbuild')
+ build_dirs = [os.path.join(topdir, dir)
+ for dir in RPM_BUILD_SUBDIRS]
+ util.ensure_dirs(build_dirs)
+
+ version_data = read_version()
+
+ # Archive the code
+ archive_fn = "cloud-init-%s.tar.gz" % version_data['version_long']
+ real_archive_fn = os.path.join(topdir, 'SOURCES', archive_fn)
+ archive_fn = run_helper(
+ 'make-tarball', ['--long', '--output=' + real_archive_fn])
+ print("Archived the code in %r" % (real_archive_fn))
+
+ # Form the spec file to be used
+ tmpl_fn = util.abs_join(find_root(), 'packages',
+ args.distro, 'cloud-init.spec.in')
+ contents = generate_spec_contents(args, version_data, tmpl_fn, topdir,
+ os.path.basename(archive_fn))
+ spec_fn = util.abs_join(topdir, 'SPECS', 'cloud-init.spec')
+ util.write_file(spec_fn, contents)
+ print("Created spec file at %r" % (spec_fn))
+ for p in args.patches:
+ util.copy(p, util.abs_join(topdir, 'SOURCES', os.path.basename(p)))
+
+ # Now build it!
+ print("Running 'rpmbuild' in %r" % (topdir))
+
+ if args.srpm:
+ cmd = ['rpmbuild', '-bs', '--nodeps', spec_fn]
+ else:
+ cmd = ['rpmbuild', '-ba', spec_fn]
+
+ util.subp(cmd, capture=capture)
+
+ # Copy the items built to our local dir
+ globs = []
+ globs.extend(glob.glob("%s/*.rpm" %
+ (util.abs_join(topdir, 'RPMS', 'noarch'))))
+ globs.extend(glob.glob("%s/*.rpm" %
+ (util.abs_join(topdir, 'RPMS', 'x86_64'))))
+ globs.extend(glob.glob("%s/*.rpm" %
+ (util.abs_join(topdir, 'RPMS'))))
+ globs.extend(glob.glob("%s/*.rpm" %
+ (util.abs_join(topdir, 'SRPMS'))))
+ for rpm_fn in globs:
+ tgt_fn = util.abs_join(os.getcwd(), os.path.basename(rpm_fn))
+ shutil.move(rpm_fn, tgt_fn)
+ print("Wrote out %s package %r" % (args.distro, tgt_fn))
+ finally:
+ if workdir is not None:
+ shutil.rmtree(workdir)
return 0
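
The version mangling in generate_spec_contents exists because RPM
forbids '-' in the Version tag, so a git-describe style X.Y.Z-N-gHASH
becomes X.Y.Z+N.gHASH. The transform in isolation:

    def rpm_upstream_version(version):
        # '0.7.7-10-gabc123' -> '0.7.7+10.gabc123'; a plain 'X.Y.Z'
        # release string passes through untouched.
        if "-" not in version:
            return version
        ver, commits, ghash = version.split("-")
        return "%s+%s.%s" % (ver, commits, ghash)

    assert rpm_upstream_version("0.7.7-10-gabc123") == "0.7.7+10.gabc123"
    assert rpm_upstream_version("0.7.7") == "0.7.7"
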
diff --git a/packages/debian/changelog.in b/packages/debian/changelog.in
index c9affe47..bdf8d56f 100644
--- a/packages/debian/changelog.in
+++ b/packages/debian/changelog.in
@@ -1,5 +1,5 @@
## template:basic
-cloud-init (${version}~bzr${revision}-1) UNRELEASED; urgency=low
+cloud-init (${version_long}-1~bddeb) ${debian_release}; urgency=low
* build
diff --git a/packages/debian/rules.in b/packages/debian/rules.in
index cf2dd405..9b004357 100755
--- a/packages/debian/rules.in
+++ b/packages/debian/rules.in
@@ -14,7 +14,7 @@ override_dh_install:
override_dh_auto_test:
ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
- http_proxy= make PYVER=${pyver} check
+ http_proxy= make PYVER=python${pyver} check
else
@echo check disabled by DEB_BUILD_OPTIONS=$(DEB_BUILD_OPTIONS)
endif
diff --git a/packages/debian/source/format b/packages/debian/source/format
new file mode 100644
index 00000000..163aaf8d
--- /dev/null
+++ b/packages/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 254d209b..d0ae048f 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -1,17 +1,13 @@
-## This is a cheetah template
+## template: cheetah
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
# See: http://www.zarb.org/~jasonc/macros.php
# Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets
# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
-#for $d in $defines
-%define ${d}
-#end for
-
Name: cloud-init
-Version: ${version}
-Release: ${release}${subrelease}%{?dist}
+Version: ${rpm_upstream_version}
+Release: 1${subrelease}%{?dist}
Summary: Cloud instance init scripts
Group: System Environment/Base
@@ -22,9 +18,9 @@ Source0: ${archive_name}
BuildArch: noarch
BuildRoot: %{_tmppath}
-BuildRequires: python-devel
-BuildRequires: python-setuptools
-BuildRequires: python-cheetah
+BuildRequires: python-devel
+BuildRequires: python-setuptools
+BuildRequires: python-cheetah
# System util packages needed
Requires: shadow-utils
@@ -68,7 +64,7 @@ need special scripts to run during initialization to retrieve and install
ssh keys and to let the user run various scripts.
%prep
-%setup -q -n %{name}-%{version}~${release}
+%setup -q -n ${source_name}
# Custom patches activation
#set $size = 0
@@ -198,7 +194,3 @@ fi
# Python code is here...
%{python_sitelib}/*
-
-%changelog
-
-${changelog}
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
index 53e6ad13..f994a0cf 100644
--- a/packages/suse/cloud-init.spec.in
+++ b/packages/suse/cloud-init.spec.in
@@ -1,16 +1,12 @@
-## This is a cheetah template
+## template: cheetah
# See: http://www.zarb.org/~jasonc/macros.php
# Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets
# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
-#for $d in $defines
-%define ${d}
-#end for
-
Name: cloud-init
Version: ${version}
-Release: ${release}${subrelease}%{?dist}
+Release: 1${subrelease}%{?dist}
Summary: Cloud instance init scripts
Group: System/Management
@@ -63,7 +59,7 @@ need special scripts to run during initialization to retrieve and install
ssh keys and to let the user run various scripts.
%prep
-%setup -q -n %{name}-%{version}~${release}
+%setup -q -n ${source_name}
# Custom patches activation
#set $size = 0
@@ -157,7 +153,3 @@ mkdir -p %{buildroot}/var/lib/cloud
%{python_sitelib}/*
/var/lib/cloud
-
-%changelog
-
-${changelog}
diff --git a/requirements.txt b/requirements.txt
index cc1dc05f..0c4951f5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -22,7 +22,7 @@ oauthlib
# that the built-in config parser is not sufficent (ie
# when we need to preserve comments, or do not have a top-level
# section)...
-configobj
+configobj>=5.0.2
# All new style configurations are in the yaml format
pyyaml
diff --git a/setup.py b/setup.py
index 0af576a9..8ff667d5 100755
--- a/setup.py
+++ b/setup.py
@@ -74,6 +74,7 @@ INITSYS_FILES = {
'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)],
'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],
+ 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)],
'systemd': [f for f in (glob('systemd/*.service') +
glob('systemd/*.target')) if is_f(f)],
'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)],
@@ -83,6 +84,7 @@ INITSYS_ROOTS = {
'sysvinit': '/etc/rc.d/init.d',
'sysvinit_freebsd': '/usr/local/etc/rc.d',
'sysvinit_deb': '/etc/init.d',
+ 'sysvinit_openrc': '/etc/init.d',
'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'),
'systemd.generators': pkg_config_read('systemd',
'systemdsystemgeneratordir'),
@@ -116,13 +118,13 @@ def in_virtualenv():
def get_version():
- cmd = ['tools/read-version']
+ cmd = [sys.executable, 'tools/read-version']
(ver, _e) = tiny_p(cmd)
return str(ver).strip()
def read_requires():
- cmd = ['tools/read-dependencies']
+ cmd = [sys.executable, 'tools/read-dependencies']
(deps, _e) = tiny_p(cmd)
return str(deps).splitlines()
@@ -176,6 +178,8 @@ else:
(ETC + '/cloud', glob('config/*.cfg')),
(ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
(ETC + '/cloud/templates', glob('templates/*')),
+ (ETC + '/NetworkManager/dispatcher.d/', ['tools/hook-network-manager']),
+ (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']),
(USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init',
'tools/write-ssh-key-fingerprints']),
(USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]),
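For context on the read-version/read-dependencies change above: prefixing the command with sys.executable runs the helper under the same interpreter that is executing setup.py, instead of relying on each script's shebang and executable bit (which matters inside virtualenvs and on filesystems without +x). A minimal sketch of the pattern, with illustrative error handling:

    import subprocess
    import sys

    # Run a repo helper under the interpreter currently executing setup.py.
    out = subprocess.check_output([sys.executable, 'tools/read-version'])
    version = out.decode('utf-8').strip()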
diff --git a/systemd/cloud-final.service b/systemd/cloud-final.service
index 3927710f..b8f69b78 100644
--- a/systemd/cloud-final.service
+++ b/systemd/cloud-final.service
@@ -1,6 +1,6 @@
[Unit]
Description=Execute cloud user/final scripts
-After=network-online.target cloud-config.service rc-local.service
+After=network-online.target cloud-config.service rc-local.service multi-user.target
Wants=network-online.target cloud-config.service
[Service]
diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator
index 2d319695..fedb6309 100755
--- a/systemd/cloud-init-generator
+++ b/systemd/cloud-init-generator
@@ -6,6 +6,7 @@ DEBUG_LEVEL=1
LOG_D="/run/cloud-init"
ENABLE="enabled"
DISABLE="disabled"
+RUN_ENABLED_FILE="$LOG_D/$ENABLE"
CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target"
CLOUD_TARGET_NAME="cloud-init.target"
# lxc sets 'container', but lets make that explicitly a global
@@ -107,6 +108,7 @@ main() {
"ln $CLOUD_SYSTEM_TARGET $link_path"
fi
fi
+ : > "$RUN_ENABLED_FILE"
elif [ "$result" = "$DISABLE" ]; then
if [ -f "$link_path" ]; then
if rm -f "$link_path"; then
@@ -118,6 +120,9 @@ main() {
else
debug 1 "already disabled: no change needed [no $link_path]"
fi
+ if [ -e "$RUN_ENABLED_FILE" ]; then
+ rm -f "$RUN_ENABLED_FILE"
+ fi
else
debug 0 "unexpected result '$result'"
ret=3
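The generator change above also records its decision in /run/cloud-init/enabled: the marker file is created (': >') when cloud-init is enabled and removed when it is disabled, so later boot stages can tell what the generator decided. A hedged sketch of how a consumer might check it (the path matches RUN_ENABLED_FILE above; the function name is illustrative):

    import os

    RUN_ENABLED_FILE = '/run/cloud-init/enabled'

    def generator_enabled():
        # True if cloud-init-generator enabled cloud-init this boot.
        return os.path.exists(RUN_ENABLED_FILE)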
diff --git a/sysvinit/gentoo/cloud-config b/sysvinit/gentoo/cloud-config
index b0fa786d..5618472b 100644
--- a/sysvinit/gentoo/cloud-config
+++ b/sysvinit/gentoo/cloud-config
@@ -1,4 +1,4 @@
-#!/sbin/runscript
+#!/sbin/openrc-run
depend() {
after cloud-init-local
diff --git a/sysvinit/gentoo/cloud-final b/sysvinit/gentoo/cloud-final
index b457a354..a9bf01fb 100644
--- a/sysvinit/gentoo/cloud-final
+++ b/sysvinit/gentoo/cloud-final
@@ -1,4 +1,4 @@
-#!/sbin/runscript
+#!/sbin/openrc-run
depend() {
after cloud-config
diff --git a/sysvinit/gentoo/cloud-init b/sysvinit/gentoo/cloud-init
index 9ab64ad8..5afc0f2e 100644
--- a/sysvinit/gentoo/cloud-init
+++ b/sysvinit/gentoo/cloud-init
@@ -1,4 +1,4 @@
-#!/sbin/runscript
+#!/sbin/openrc-run
# add depends for network, dns, fs etc
depend() {
after cloud-init-local
diff --git a/sysvinit/gentoo/cloud-init-local b/sysvinit/gentoo/cloud-init-local
index 9d47263e..9bd0b569 100644
--- a/sysvinit/gentoo/cloud-init-local
+++ b/sysvinit/gentoo/cloud-init-local
@@ -1,4 +1,4 @@
-#!/sbin/runscript
+#!/sbin/openrc-run
depend() {
after localmount
diff --git a/templates/ntp.conf.debian.tmpl b/templates/ntp.conf.debian.tmpl
new file mode 100644
index 00000000..3f07eeaa
--- /dev/null
+++ b/templates/ntp.conf.debian.tmpl
@@ -0,0 +1,63 @@
+## template:jinja
+
+# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
+
+driftfile /var/lib/ntp/ntp.drift
+
+# Enable this if you want statistics to be logged.
+#statsdir /var/log/ntpstats/
+
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+
+
+# You do need to talk to an NTP server or two (or three).
+#server ntp.your-provider.example
+
+# pool.ntp.org maps to about 1000 low-stratum NTP servers. Your server will
+# pick a different set every time it starts up. Please consider joining the
+# pool: <http://www.pool.ntp.org/join.html>
+{% if pools -%}# pools{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
+# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default kod notrap nomodify nopeer noquery limited
+restrict -6 default kod notrap nomodify nopeer noquery limited
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Needed for adding pool entries
+restrict source notrap nomodify noquery
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+
+# If you want to provide time to your local subnet, change the next line.
+# (Again, the address is an example only.)
+#broadcast 192.168.123.255
+
+# If you want to listen to time broadcasts on your local subnet, de-comment the
+# next lines. Please do this only if you trust everybody on the network!
+#disable auth
+#broadcastclient
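All of the new ntp.conf templates share the same jinja skeleton: a '# pools' or '# servers' comment is emitted only when the corresponding list is non-empty, followed by one 'pool'/'server' line per entry with iburst. A rough sketch of rendering such a template outside cloud-init (cloud-init itself strips the '## template:jinja' header and renders through its own templater; direct jinja2 use here is illustrative):

    from jinja2 import Template

    with open('templates/ntp.conf.debian.tmpl') as fp:
        # Drop the '## template:jinja' header line before rendering.
        body = fp.read().split('\n', 1)[1]

    rendered = Template(body).render(
        pools=['0.pool.ntp.org', '1.pool.ntp.org'],
        servers=['time.example.com'])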
diff --git a/templates/ntp.conf.fedora.tmpl b/templates/ntp.conf.fedora.tmpl
new file mode 100644
index 00000000..af7b1b09
--- /dev/null
+++ b/templates/ntp.conf.fedora.tmpl
@@ -0,0 +1,66 @@
+## template:jinja
+
+# For more information about this file, see the man pages
+# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
+
+driftfile /var/lib/ntp/drift
+
+# Permit time synchronization with our time source, but do not
+# permit the source to query or modify the service on this system.
+restrict default nomodify notrap nopeer noquery
+
+# Permit all access over the loopback interface. This could
+# be tightened as well, but to do so would effect some of
+# the administrative functions.
+restrict 127.0.0.1
+restrict ::1
+
+# Hosts on local network are less restricted.
+#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
+
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+#broadcast 192.168.1.255 autokey # broadcast server
+#broadcastclient # broadcast client
+#broadcast 224.0.1.1 autokey # multicast server
+#multicastclient 224.0.1.1 # multicast client
+#manycastserver 239.255.254.254 # manycast server
+#manycastclient 239.255.254.254 autokey # manycast client
+
+# Enable public key cryptography.
+#crypto
+
+includefile /etc/ntp/crypto/pw
+
+# Key file containing the keys and key identifiers used when operating
+# with symmetric key cryptography.
+keys /etc/ntp/keys
+
+# Specify the key identifiers which are trusted.
+#trustedkey 4 8 42
+
+# Specify the key identifier to use with the ntpdc utility.
+#requestkey 8
+
+# Specify the key identifier to use with the ntpq utility.
+#controlkey 8
+
+# Enable writing of statistics records.
+#statistics clockstats cryptostats loopstats peerstats
+
+# Disable the monitoring facility to prevent amplification attacks using ntpdc
+# monlist command when default restrict does not include the noquery flag. See
+# CVE-2013-5211 for more details.
+# Note: Monitoring will not be disabled with the limited restriction flag.
+disable monitor
diff --git a/templates/ntp.conf.rhel.tmpl b/templates/ntp.conf.rhel.tmpl
new file mode 100644
index 00000000..62b47764
--- /dev/null
+++ b/templates/ntp.conf.rhel.tmpl
@@ -0,0 +1,61 @@
+## template:jinja
+
+# For more information about this file, see the man pages
+# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
+
+driftfile /var/lib/ntp/drift
+
+# Permit time synchronization with our time source, but do not
+# permit the source to query or modify the service on this system.
+restrict default kod nomodify notrap nopeer noquery
+restrict -6 default kod nomodify notrap nopeer noquery
+
+# Permit all access over the loopback interface. This could
+# be tightened as well, but to do so would effect some of
+# the administrative functions.
+restrict 127.0.0.1
+restrict -6 ::1
+
+# Hosts on local network are less restricted.
+#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
+
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+#broadcast 192.168.1.255 autokey # broadcast server
+#broadcastclient # broadcast client
+#broadcast 224.0.1.1 autokey # multicast server
+#multicastclient 224.0.1.1 # multicast client
+#manycastserver 239.255.254.254 # manycast server
+#manycastclient 239.255.254.254 autokey # manycast client
+
+# Enable public key cryptography.
+#crypto
+
+includefile /etc/ntp/crypto/pw
+
+# Key file containing the keys and key identifiers used when operating
+# with symmetric key cryptography.
+keys /etc/ntp/keys
+
+# Specify the key identifiers which are trusted.
+#trustedkey 4 8 42
+
+# Specify the key identifier to use with the ntpdc utility.
+#requestkey 8
+
+# Specify the key identifier to use with the ntpq utility.
+#controlkey 8
+
+# Enable writing of statistics records.
+#statistics clockstats cryptostats loopstats peerstats
diff --git a/templates/ntp.conf.sles.tmpl b/templates/ntp.conf.sles.tmpl
new file mode 100644
index 00000000..5c5fc4db
--- /dev/null
+++ b/templates/ntp.conf.sles.tmpl
@@ -0,0 +1,100 @@
+## template:jinja
+
+################################################################################
+## /etc/ntp.conf
+##
+## Sample NTP configuration file.
+## See package 'ntp-doc' for documentation, Mini-HOWTO and FAQ.
+## Copyright (c) 1998 S.u.S.E. GmbH Fuerth, Germany.
+##
+## Author: Michael Andres, <ma@suse.de>
+## Michael Skibbe, <mskibbe@suse.de>
+##
+################################################################################
+
+##
+## Radio and modem clocks by convention have addresses in the
+## form 127.127.t.u, where t is the clock type and u is a unit
+## number in the range 0-3.
+##
+## Most of these clocks require support in the form of a
+## serial port or special bus peripheral. The particular
+## device is normally specified by adding a soft link
+## /dev/device-u to the particular hardware device involved,
+## where u corresponds to the unit number above.
+##
+## Generic DCF77 clock on serial port (Conrad DCF77)
+## Address: 127.127.8.u
+## Serial Port: /dev/refclock-u
+##
+## (create soft link /dev/refclock-0 to the particular ttyS?)
+##
+# server 127.127.8.0 mode 5 prefer
+
+##
+## Undisciplined Local Clock. This is a fake driver intended for backup
+## and when no outside source of synchronized time is available.
+##
+# server 127.127.1.0 # local clock (LCL)
+# fudge 127.127.1.0 stratum 10 # LCL is unsynchronized
+
+##
+## Add external Servers using
+## # rcntpd addserver <yourserver>
+## The servers will only be added to the currently running instance, not
+## to /etc/ntp.conf.
+##
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Access control configuration; see /usr/share/doc/packages/ntp/html/accopt.html for
+# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default notrap nomodify nopeer noquery
+restrict -6 default notrap nomodify nopeer noquery
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+##
+## Miscellaneous stuff
+##
+
+driftfile /var/lib/ntp/drift/ntp.drift # path for drift file
+
+logfile /var/log/ntp # alternate log file
+# logconfig =syncstatus + sysevents
+# logconfig =all
+
+# statsdir /tmp/ # directory for statistics files
+# filegen peerstats file peerstats type day enable
+# filegen loopstats file loopstats type day enable
+# filegen clockstats file clockstats type day enable
+
+#
+# Authentication stuff
+#
+keys /etc/ntp.keys # path for keys file
+trustedkey 1 # define trusted keys
+requestkey 1 # key (7) for accessing server variables
+controlkey 1 # key (6) for accessing server variables
+
diff --git a/templates/ntp.conf.ubuntu.tmpl b/templates/ntp.conf.ubuntu.tmpl
new file mode 100644
index 00000000..862a4fbd
--- /dev/null
+++ b/templates/ntp.conf.ubuntu.tmpl
@@ -0,0 +1,75 @@
+## template:jinja
+
+# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
+
+driftfile /var/lib/ntp/ntp.drift
+
+# Enable this if you want statistics to be logged.
+#statsdir /var/log/ntpstats/
+
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+
+# Specify one or more NTP servers.
+
+# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
+# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
+# more information.
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Use Ubuntu's ntp server as a fallback.
+# pool ntp.ubuntu.com
+
+# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
+# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default kod notrap nomodify nopeer noquery limited
+restrict -6 default kod notrap nomodify nopeer noquery limited
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Needed for adding pool entries
+restrict source notrap nomodify noquery
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+
+# If you want to provide time to your local subnet, change the next line.
+# (Again, the address is an example only.)
+#broadcast 192.168.123.255
+
+# If you want to listen to time broadcasts on your local subnet, de-comment the
+# next lines. Please do this only if you trust everybody on the network!
+#disable auth
+#broadcastclient
+
+# Changes required to use PPS synchronisation as explained in documentation:
+#http://www.ntp.org/ntpfaq/NTP-s-config-adv.htm#AEN3918
+
+#server 127.127.8.1 mode 135 prefer # Meinberg GPS167 with PPS
+#fudge 127.127.8.1 time1 0.0042 # relative to PPS for my hardware
+
+#server 127.127.22.1 # ATOM(PPS)
+#fudge 127.127.22.1 flag3 1 # enable PPS API
+
diff --git a/tests/configs/sample1.yaml b/tests/configs/sample1.yaml
index 6231f293..ae935cc0 100644
--- a/tests/configs/sample1.yaml
+++ b/tests/configs/sample1.yaml
@@ -3,9 +3,6 @@
#apt_upgrade: true
packages: [ bzr, pastebinit, ubuntu-dev-tools, ccache, bzr-builddeb, vim-nox, git-core, lftp ]
-#apt_sources:
-# - source: ppa:smoser/ppa
-
#disable_root: False
# mounts:
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 972245df..1cdc05a1 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -252,11 +252,27 @@ class HttprettyTestCase(TestCase):
super(HttprettyTestCase, self).tearDown()
+class TempDirTestCase(TestCase):
+ # provide a tempdir per test, cleaned up automatically.
+ def setUp(self):
+ super(TempDirTestCase, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def tmp_path(self, path):
+ if path.startswith(os.path.sep):
+ path = "." + path
+
+ return os.path.normpath(os.path.join(self.tmp, path))
+
+
def populate_dir(path, files):
if not os.path.exists(path):
os.makedirs(path)
for (name, content) in files.items():
- with open(os.path.join(path, name), "wb") as fp:
+ p = os.path.join(path, name)
+ util.ensure_dir(os.path.dirname(p))
+ with open(p, "wb") as fp:
if isinstance(content, six.binary_type):
fp.write(content)
else:
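A quick sketch of the new TempDirTestCase in use (test names are illustrative): tmp_path() re-roots even absolute-looking paths under the per-test directory, and addCleanup removes the whole tree when the test finishes:

    class ExampleTest(TempDirTestCase):
        def test_write_under_tmp(self):
            # '/etc/example.conf' is re-rooted under self.tmp, never /etc.
            path = self.tmp_path('/etc/example.conf')
            self.assertTrue(path.startswith(self.tmp))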
diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py
new file mode 100644
index 00000000..feb81551
--- /dev/null
+++ b/tests/unittests/test_atomic_helper.py
@@ -0,0 +1,54 @@
+import json
+import os
+import stat
+
+from cloudinit import atomic_helper
+
+from . import helpers
+
+
+class TestAtomicHelper(helpers.TempDirTestCase):
+ def test_basic_usage(self):
+ """write_file takes bytes if no omode."""
+ path = self.tmp_path("test_basic_usage")
+ contents = b"Hey there\n"
+ atomic_helper.write_file(path, contents)
+ self.check_file(path, contents)
+
+ def test_string(self):
+ """write_file can take a string with mode w."""
+ path = self.tmp_path("test_string")
+ contents = "Hey there\n"
+ atomic_helper.write_file(path, contents, omode="w")
+ self.check_file(path, contents, omode="r")
+
+ def test_file_permissions(self):
+ """write_file with mode 400 works correctly."""
+ path = self.tmp_path("test_file_permissions")
+ contents = b"test_file_perms"
+ atomic_helper.write_file(path, contents, mode=0o400)
+ self.check_file(path, contents, perms=0o400)
+
+ def test_write_json(self):
+ """write_json output is readable json."""
+ path = self.tmp_path("test_write_json")
+ data = {'key1': 'value1', 'key2': ['i1', 'i2']}
+ atomic_helper.write_json(path, data)
+ with open(path, "r") as fp:
+ found = json.load(fp)
+ self.assertEqual(data, found)
+ self.check_perms(path, 0o644)
+
+ def check_file(self, path, content, omode=None, perms=0o644):
+ if omode is None:
+ omode = "rb"
+ self.assertTrue(os.path.exists(path))
+ self.assertTrue(os.path.isfile(path))
+ with open(path, omode) as fp:
+ found = fp.read()
+ self.assertEqual(content, found)
+ self.check_perms(path, perms)
+
+ def check_perms(self, path, perms):
+ file_stat = os.stat(path)
+ self.assertEqual(perms, stat.S_IMODE(file_stat.st_mode))
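For background, these tests exercise the usual write-to-temp-then-rename idiom. A condensed sketch of what atomic_helper.write_file is expected to do (argument names match the tests; the internals are illustrative, not the module's actual code):

    import os
    import tempfile

    def write_file(filename, content, mode=0o644, omode='wb'):
        # Write to a temp file in the same directory, then rename into
        # place; rename is an atomic replace on POSIX filesystems.
        tf = tempfile.NamedTemporaryFile(
            dir=os.path.dirname(os.path.abspath(filename)),
            delete=False, mode=omode)
        try:
            tf.write(content)
            tf.close()
            os.chmod(tf.name, mode)
            os.rename(tf.name, filename)
        except Exception:
            os.unlink(tf.name)
            raise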
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 65202ff0..64523e16 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -54,13 +54,17 @@ class TestFindEndpoint(TestCase):
self.load_file = patches.enter_context(
mock.patch.object(azure_helper.util, 'load_file'))
+ self.dhcp_options = patches.enter_context(
+ mock.patch.object(azure_helper.WALinuxAgentShim,
+ '_load_dhclient_json'))
+
def test_missing_file(self):
- self.load_file.side_effect = IOError
- self.assertRaises(IOError,
+ self.assertRaises(ValueError,
azure_helper.WALinuxAgentShim.find_endpoint)
def test_missing_special_azure_line(self):
self.load_file.return_value = ''
+ self.dhcp_options.return_value = {'eth0': {'key': 'value'}}
self.assertRaises(ValueError,
azure_helper.WALinuxAgentShim.find_endpoint)
@@ -72,13 +76,18 @@ class TestFindEndpoint(TestCase):
' option unknown-245 {0};'.format(encoded_address),
'}'])
+ def test_from_dhcp_client(self):
+ self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}
+ self.assertEqual('5.4.3.2',
+ azure_helper.WALinuxAgentShim.find_endpoint(None))
+
def test_latest_lease_used(self):
encoded_addresses = ['5:4:3:2', '4:3:2:1']
file_content = '\n'.join([self._build_lease_content(encoded_address)
for encoded_address in encoded_addresses])
self.load_file.return_value = file_content
self.assertEqual(encoded_addresses[-1].replace(':', '.'),
- azure_helper.WALinuxAgentShim.find_endpoint())
+ azure_helper.WALinuxAgentShim.find_endpoint("foobar"))
class TestExtractIpAddressFromLeaseValue(TestCase):
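The new test_from_dhcp_client case above reflects that the shim can now obtain the WireServer endpoint from dhclient's reported unknown-245 option rather than only from lease files. One plausible decoding of that value, consistent with the '5:4:3:2' -> '5.4.3.2' expectation (the real helper also copes with other lease-value encodings):

    def unknown_245_to_ip(value):
        # dhclient reports option 245 as colon-separated hex octets.
        return '.'.join(str(int(octet, 16)) for octet in value.split(':'))

    assert unknown_245_to_ip('5:4:3:2') == '5.4.3.2'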
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 18551b92..98ff97a7 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -101,6 +101,98 @@ NETWORK_DATA_2 = {
"type": "vif", "id": "eth1", "vif_id": "vif-foo2"}]
}
+# This network data has a 'tap' type for a link.
+NETWORK_DATA_3 = {
+ "services": [{"type": "dns", "address": "172.16.36.11"},
+ {"type": "dns", "address": "172.16.36.12"}],
+ "networks": [
+ {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+ "type": "ipv4", "netmask": "255.255.255.128",
+ "link": "tap77a0dc5b-72", "ip_address": "172.17.48.18",
+ "id": "network0",
+ "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
+ "gateway": "172.17.48.1"}]},
+ {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+ "type": "ipv6", "netmask": "ffff:ffff:ffff:ffff::",
+ "link": "tap77a0dc5b-72",
+ "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d",
+ "id": "network1",
+ "routes": [{"netmask": "::", "network": "::",
+ "gateway": "fdb8:52d0:9d14::1"}]},
+ {"network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54",
+ "type": "ipv4", "netmask": "255.255.255.128",
+ "link": "tap7d6b7bec-93", "ip_address": "172.16.48.13",
+ "id": "network2",
+ "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
+ "gateway": "172.16.48.1"},
+ {"netmask": "255.255.0.0", "network": "172.16.0.0",
+ "gateway": "172.16.48.1"}]}],
+ "links": [
+ {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": None,
+ "type": "tap", "id": "tap77a0dc5b-72",
+ "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48"},
+ {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": None,
+ "type": "tap", "id": "tap7d6b7bec-93",
+ "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5"}
+ ]
+}
+
+NETWORK_DATA_BOND = {
+ "services": [
+ {"type": "dns", "address": "1.1.1.191"},
+ {"type": "dns", "address": "1.1.1.4"},
+ ],
+ "networks": [
+ {"id": "network2-ipv4", "ip_address": "2.2.2.13",
+ "link": "vlan2", "netmask": "255.255.255.248",
+ "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117",
+ "type": "ipv4",
+ "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
+ "gateway": "2.2.2.9"}]},
+ {"id": "network3-ipv4", "ip_address": "10.0.1.5",
+ "link": "vlan3", "netmask": "255.255.255.248",
+ "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
+ "type": "ipv4",
+ "routes": [{"netmask": "255.255.255.255",
+ "network": "192.168.1.0", "gateway": "10.0.1.1"}]}
+ ],
+ "links": [
+ {"ethernet_mac_address": "0c:c4:7a:34:6e:3c",
+ "id": "eth0", "mtu": 1500, "type": "phy"},
+ {"ethernet_mac_address": "0c:c4:7a:34:6e:3d",
+ "id": "eth1", "mtu": 1500, "type": "phy"},
+ {"bond_links": ["eth0", "eth1"],
+ "bond_miimon": 100, "bond_mode": "4",
+ "bond_xmit_hash_policy": "layer3+4",
+ "ethernet_mac_address": "0c:c4:7a:34:6e:3c",
+ "id": "bond0", "type": "bond"},
+ {"ethernet_mac_address": "fa:16:3e:b3:72:30",
+ "id": "vlan2", "type": "vlan", "vlan_id": 602,
+ "vlan_link": "bond0", "vlan_mac_address": "fa:16:3e:b3:72:30"},
+ {"ethernet_mac_address": "fa:16:3e:66:ab:a6",
+ "id": "vlan3", "type": "vlan", "vlan_id": 612, "vlan_link": "bond0",
+ "vlan_mac_address": "fa:16:3e:66:ab:a6"}
+ ]
+}
+
+NETWORK_DATA_VLAN = {
+ "services": [{"type": "dns", "address": "1.1.1.191"}],
+ "networks": [
+ {"id": "network1-ipv4", "ip_address": "10.0.1.5",
+ "link": "vlan1", "netmask": "255.255.255.248",
+ "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
+ "type": "ipv4",
+ "routes": [{"netmask": "255.255.255.255",
+ "network": "192.168.1.0", "gateway": "10.0.1.1"}]}
+ ],
+ "links": [
+ {"ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "id": "eth0", "mtu": 1500, "type": "phy"},
+ {"ethernet_mac_address": "fa:16:3e:b3:72:30",
+ "id": "vlan1", "type": "vlan", "vlan_id": 602,
+ "vlan_link": "eth0", "vlan_mac_address": "fa:16:3e:b3:72:30"},
+ ]
+}
KNOWN_MACS = {
'fa:16:3e:69:b0:58': 'enp0s1',
@@ -108,6 +200,8 @@ KNOWN_MACS = {
'fa:16:3e:dd:50:9a': 'foo1',
'fa:16:3e:a8:14:69': 'foo2',
'fa:16:3e:ed:9a:59': 'foo3',
+ '0c:c4:7a:34:6e:3d': 'oeth1',
+ '0c:c4:7a:34:6e:3c': 'oeth0',
}
CFG_DRIVE_FILES_V2 = {
@@ -555,6 +649,61 @@ class TestConvertNetworkData(TestCase):
eni_rendering = f.read()
self.assertIn("route add default gw 2.2.2.9", eni_rendering)
+ def test_conversion_with_tap(self):
+ ncfg = openstack.convert_net_json(NETWORK_DATA_3,
+ known_macs=KNOWN_MACS)
+ physicals = set()
+ for i in ncfg['config']:
+ if i.get('type') == "physical":
+ physicals.add(i['name'])
+ self.assertEqual(physicals, set(('foo1', 'foo2')))
+
+ def test_bond_conversion(self):
+ # light testing of bond conversion and eni rendering of bond
+ ncfg = openstack.convert_net_json(NETWORK_DATA_BOND,
+ known_macs=KNOWN_MACS)
+ eni_renderer = eni.Renderer()
+ eni_renderer.render_network_state(
+ self.tmp, network_state.parse_net_config_data(ncfg))
+ with open(os.path.join(self.tmp, "etc",
+ "network", "interfaces"), 'r') as f:
+ eni_rendering = f.read()
+
+ # Verify there are expected interfaces in the net config.
+ interfaces = sorted(
+ [i['name'] for i in ncfg['config']
+ if i['type'] in ('vlan', 'bond', 'physical')])
+ self.assertEqual(
+ sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]),
+ interfaces)
+
+ words = eni_rendering.split()
+ # 'eth0' and 'eth1' are the ids; because their mac addresses
+ # map to other names, we should not see them in the ENI
+ self.assertNotIn('eth0', words)
+ self.assertNotIn('eth1', words)
+
+ # oeth0 and oeth1 are the interface names for eni.
+ # bond0 will be generated for the bond. Each should be auto.
+ self.assertIn("auto oeth0", eni_rendering)
+ self.assertIn("auto oeth1", eni_rendering)
+ self.assertIn("auto bond0", eni_rendering)
+
+ def test_vlan(self):
+ # light testing of vlan config conversion and eni rendering
+ ncfg = openstack.convert_net_json(NETWORK_DATA_VLAN,
+ known_macs=KNOWN_MACS)
+ eni_renderer = eni.Renderer()
+ eni_renderer.render_network_state(
+ self.tmp, network_state.parse_net_config_data(ncfg))
+ with open(os.path.join(self.tmp, "etc",
+ "network", "interfaces"), 'r') as f:
+ eni_rendering = f.read()
+
+ self.assertIn("iface enp0s1", eni_rendering)
+ self.assertIn("address 10.0.1.5", eni_rendering)
+ self.assertIn("auto enp0s1.602", eni_rendering)
+
def cfg_ds_from_dir(seed_d):
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None,
diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py
index 8936a1e3..f5d2ef35 100644
--- a/tests/unittests/test_datasource/test_digitalocean.py
+++ b/tests/unittests/test_datasource/test_digitalocean.py
@@ -15,68 +15,58 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import re
-
-from six.moves.urllib_parse import urlparse
+import json
from cloudinit import helpers
from cloudinit import settings
from cloudinit.sources import DataSourceDigitalOcean
from .. import helpers as test_helpers
+from ..helpers import HttprettyTestCase
httpretty = test_helpers.import_httpretty()
-# Abbreviated for the test
-DO_INDEX = """id
- hostname
- user-data
- vendor-data
- public-keys
- region"""
-
-DO_MULTIPLE_KEYS = """ssh-rsa AAAAB3NzaC1yc2EAAAA... neal@digitalocean.com
- ssh-rsa AAAAB3NzaC1yc2EAAAA... neal2@digitalocean.com"""
-DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... neal@digitalocean.com"
+DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"]
+DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co"
DO_META = {
- '': DO_INDEX,
- 'user-data': '#!/bin/bash\necho "user-data"',
- 'vendor-data': '#!/bin/bash\necho "vendor-data"',
- 'public-keys': DO_SINGLE_KEY,
+ 'user_data': 'user_data_here',
+ 'vendor_data': 'vendor_data_here',
+ 'public_keys': DO_SINGLE_KEY,
'region': 'nyc3',
'id': '2000000',
'hostname': 'cloudinit-test',
}
-MD_URL_RE = re.compile(r'http://169.254.169.254/metadata/v1/.*')
+MD_URL = 'http://169.254.169.254/metadata/v1.json'
+
+
+def _mock_dmi():
+ return (True, DO_META.get('id'))
def _request_callback(method, uri, headers):
- url_path = urlparse(uri).path
- if url_path.startswith('/metadata/v1/'):
- path = url_path.split('/metadata/v1/')[1:][0]
- else:
- path = None
- if path in DO_META:
- return (200, headers, DO_META.get(path))
- else:
- return (404, headers, '')
+ return (200, headers, json.dumps(DO_META))
-class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase):
+class TestDataSourceDigitalOcean(HttprettyTestCase):
+ """
+ Test reading the meta-data
+ """
def setUp(self):
self.ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
settings.CFG_BUILTIN, None,
helpers.Paths({}))
+ self.ds._get_sysinfo = _mock_dmi
super(TestDataSourceDigitalOcean, self).setUp()
@httpretty.activate
def test_connection(self):
httpretty.register_uri(
- httpretty.GET, MD_URL_RE,
- body=_request_callback)
+ httpretty.GET, MD_URL,
+ body=json.dumps(DO_META))
success = self.ds.get_data()
self.assertTrue(success)
@@ -84,14 +74,14 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase):
@httpretty.activate
def test_metadata(self):
httpretty.register_uri(
- httpretty.GET, MD_URL_RE,
+ httpretty.GET, MD_URL,
body=_request_callback)
self.ds.get_data()
- self.assertEqual(DO_META.get('user-data'),
+ self.assertEqual(DO_META.get('user_data'),
self.ds.get_userdata_raw())
- self.assertEqual(DO_META.get('vendor-data'),
+ self.assertEqual(DO_META.get('vendor_data'),
self.ds.get_vendordata_raw())
self.assertEqual(DO_META.get('region'),
@@ -103,11 +93,8 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase):
self.assertEqual(DO_META.get('hostname'),
self.ds.get_hostname())
- self.assertEqual('http://mirrors.digitalocean.com/',
- self.ds.get_package_mirror_info())
-
# Single key
- self.assertEqual([DO_META.get('public-keys')],
+ self.assertEqual([DO_META.get('public_keys')],
self.ds.get_public_ssh_keys())
self.assertIsInstance(self.ds.get_public_ssh_keys(), list)
@@ -116,12 +103,12 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase):
def test_multiple_ssh_keys(self):
DO_META['public_keys'] = DO_MULTIPLE_KEYS
httpretty.register_uri(
- httpretty.GET, MD_URL_RE,
+ httpretty.GET, MD_URL,
body=_request_callback)
self.ds.get_data()
# Multiple keys
- self.assertEqual(DO_META.get('public-keys').splitlines(),
+ self.assertEqual(DO_META.get('public_keys'),
self.ds.get_public_ssh_keys())
self.assertIsInstance(self.ds.get_public_ssh_keys(), list)
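The rewrite above tracks DigitalOcean's move from one-path-per-key metadata to a single JSON document: keys are now underscore-style (user_data, vendor_data, public_keys), and public_keys may be a single string or a list. A bare-bones sketch of the new fetch (the datasource really goes through cloudinit.url_helper with retries; urlopen here is only illustrative):

    import json
    from six.moves.urllib.request import urlopen

    md = json.loads(urlopen('http://169.254.169.254/metadata/v1.json')
                    .read().decode('utf-8'))
    user_data = md.get('user_data')
    public_keys = md.get('public_keys')  # one string or a list of strings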
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index f66f1c6d..0126c883 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -2,6 +2,7 @@ from copy import copy
import os
import shutil
import tempfile
+import yaml
from cloudinit.sources import DataSourceMAAS
from cloudinit import url_helper
@@ -24,41 +25,44 @@ class TestMAASDataSource(TestCase):
def test_seed_dir_valid(self):
"""Verify a valid seeddir is read as such."""
- data = {'instance-id': 'i-valid01',
- 'local-hostname': 'valid01-hostname',
- 'user-data': b'valid01-userdata',
+ userdata = b'valid01-userdata'
+ data = {'meta-data/instance-id': 'i-valid01',
+ 'meta-data/local-hostname': 'valid01-hostname',
+ 'user-data': userdata,
'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
my_d = os.path.join(self.tmp, "valid")
populate_dir(my_d, data)
- (userdata, metadata) = DataSourceMAAS.read_maas_seed_dir(my_d)
+ ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)
- self.assertEqual(userdata, data['user-data'])
+ self.assertEqual(userdata, ud)
for key in ('instance-id', 'local-hostname'):
- self.assertEqual(data[key], metadata[key])
+ self.assertEqual(data["meta-data/" + key], md[key])
# verify that 'userdata' is not returned as part of the metadata
- self.assertFalse(('user-data' in metadata))
+ self.assertFalse(('user-data' in md))
+ self.assertEqual(vd, None)
def test_seed_dir_valid_extra(self):
"""Verify extra files do not affect seed_dir validity."""
- data = {'instance-id': 'i-valid-extra',
- 'local-hostname': 'valid-extra-hostname',
- 'user-data': b'valid-extra-userdata', 'foo': 'bar'}
+ userdata = b'valid-extra-userdata'
+ data = {'meta-data/instance-id': 'i-valid-extra',
+ 'meta-data/local-hostname': 'valid-extra-hostname',
+ 'user-data': userdata, 'foo': 'bar'}
my_d = os.path.join(self.tmp, "valid_extra")
populate_dir(my_d, data)
- (userdata, metadata) = DataSourceMAAS.read_maas_seed_dir(my_d)
+ ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)
- self.assertEqual(userdata, data['user-data'])
+ self.assertEqual(userdata, ud)
for key in ('instance-id', 'local-hostname'):
- self.assertEqual(data[key], metadata[key])
+ self.assertEqual(data['meta-data/' + key], md[key])
# additional files should not just appear as keys in metadata atm
- self.assertFalse(('foo' in metadata))
+ self.assertFalse(('foo' in md))
def test_seed_dir_invalid(self):
"""Verify that invalid seed_dir raises MAASSeedDirMalformed."""
@@ -97,67 +101,60 @@ class TestMAASDataSource(TestCase):
DataSourceMAAS.read_maas_seed_dir,
os.path.join(self.tmp, "nonexistantdirectory"))
+ def mock_read_maas_seed_url(self, data, seed, version="19991231"):
+ """mock up readurl to appear as a web server at seed has provided data.
+ return what read_maas_seed_url returns."""
+ def my_readurl(*args, **kwargs):
+ if len(args):
+ url = args[0]
+ else:
+ url = kwargs['url']
+ prefix = "%s/%s/" % (seed, version)
+ if not url.startswith(prefix):
+ raise ValueError("unexpected call %s" % url)
+
+ short = url[len(prefix):]
+ if short not in data:
+ raise url_helper.UrlError("not found", code=404, url=url)
+ return url_helper.StringResponse(data[short])
+
+ # Now do the actual call of the code under test.
+ with mock.patch("cloudinit.url_helper.readurl") as mock_readurl:
+ mock_readurl.side_effect = my_readurl
+ return DataSourceMAAS.read_maas_seed_url(seed, version=version)
+
def test_seed_url_valid(self):
"""Verify that valid seed_url is read as such."""
valid = {
'meta-data/instance-id': 'i-instanceid',
'meta-data/local-hostname': 'test-hostname',
'meta-data/public-keys': 'test-hostname',
+ 'meta-data/vendor-data': b'my-vendordata',
'user-data': b'foodata',
}
- valid_order = [
- 'meta-data/local-hostname',
- 'meta-data/instance-id',
- 'meta-data/public-keys',
- 'user-data',
- ]
my_seed = "http://example.com/xmeta"
my_ver = "1999-99-99"
- my_headers = {'header1': 'value1', 'header2': 'value2'}
-
- def my_headers_cb(url):
- return my_headers
-
- # Each time url_helper.readurl() is called, something different is
- # returned based on the canned data above. We need to build up a list
- # of side effect return values, which the mock will return. At the
- # same time, we'll build up a list of expected call arguments for
- # asserting after the code under test is run.
- calls = []
-
- def side_effect():
- for key in valid_order:
- resp = valid.get(key)
- url = "%s/%s/%s" % (my_seed, my_ver, key)
- calls.append(
- mock.call(url, headers=None, timeout=mock.ANY,
- data=mock.ANY, sec_between=mock.ANY,
- ssl_details=mock.ANY, retries=mock.ANY,
- headers_cb=my_headers_cb,
- exception_cb=mock.ANY))
- yield url_helper.StringResponse(resp)
-
- # Now do the actual call of the code under test.
- with mock.patch.object(url_helper, 'readurl',
- side_effect=side_effect()) as mockobj:
- userdata, metadata = DataSourceMAAS.read_maas_seed_url(
- my_seed, version=my_ver)
-
- self.assertEqual(b"foodata", userdata)
- self.assertEqual(metadata['instance-id'],
- valid['meta-data/instance-id'])
- self.assertEqual(metadata['local-hostname'],
- valid['meta-data/local-hostname'])
-
- mockobj.has_calls(calls)
-
- def test_seed_url_invalid(self):
- """Verify that invalid seed_url raises MAASSeedDirMalformed."""
- pass
-
- def test_seed_url_missing(self):
- """Verify seed_url with no found entries raises MAASSeedDirNone."""
- pass
+ ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver)
+
+ self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
+ self.assertEqual(
+ valid['meta-data/local-hostname'], md['local-hostname'])
+ self.assertEqual(valid['meta-data/public-keys'], md['public-keys'])
+ self.assertEqual(valid['user-data'], ud)
+ # vendor-data is yaml, which decodes a string
+ self.assertEqual(valid['meta-data/vendor-data'].decode(), vd)
+
+ def test_seed_url_vendor_data_dict(self):
+ expected_vd = {'key1': 'value1'}
+ valid = {
+ 'meta-data/instance-id': 'i-instanceid',
+ 'meta-data/local-hostname': 'test-hostname',
+ 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(),
+ }
+ ud, md, vd = self.mock_read_maas_seed_url(
+ valid, "http://example.com/foo")
+ self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
+ self.assertEqual(expected_vd, vd)
# vi: ts=4 expandtab
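As the vendor-data assertions above imply, read_maas_seed_url now returns a (userdata, metadata, vendordata) triple, with vendor-data passed through a YAML load: a mapping document becomes a dict while a plain scalar stays a string. A two-line sketch of that decoding:

    import yaml

    yaml.safe_load(b'key1: value1\n')   # -> {'key1': 'value1'}
    yaml.safe_load(b'my-vendordata\n')  # -> 'my-vendordata'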
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index b0fa1130..f6a46ce9 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -6,7 +6,7 @@ from ..helpers import TestCase, populate_dir, mock, ExitStack
import os
import shutil
import tempfile
-
+import textwrap
import yaml
@@ -129,6 +129,89 @@ class TestNoCloudDataSource(TestCase):
self.assertFalse(dsrc.vendordata)
self.assertTrue(ret)
+ def test_metadata_network_interfaces(self):
+ gateway = "103.225.10.1"
+ md = {
+ 'instance-id': 'i-abcd',
+ 'local-hostname': 'hostname1',
+ 'network-interfaces': textwrap.dedent("""\
+ auto eth0
+ iface eth0 inet static
+ hwaddr 00:16:3e:70:e1:04
+ address 103.225.10.12
+ netmask 255.255.255.0
+ gateway """ + gateway + """
+ dns-servers 8.8.8.8""")}
+
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {'user-data': b"ud",
+ 'meta-data': yaml.dump(md) + "\n"})
+
+ sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+
+ ds = DataSourceNoCloud.DataSourceNoCloud
+
+ dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ # very simple check just for the strings above
+ self.assertIn(gateway, str(dsrc.network_config))
+
+ def test_metadata_network_config(self):
+ # network-config needs to get into network_config
+ netconf = {'version': 1,
+ 'config': [{'type': 'physical', 'name': 'interface0',
+ 'subnets': [{'type': 'dhcp'}]}]}
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {'user-data': b"ud",
+ 'meta-data': "instance-id: IID\n",
+ 'network-config': yaml.dump(netconf) + "\n"})
+
+ sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+
+ ds = DataSourceNoCloud.DataSourceNoCloud
+
+ dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(netconf, dsrc.network_config)
+
+ def test_metadata_network_config_over_interfaces(self):
+ # network-config should override meta-data/network-interfaces
+ gateway = "103.225.10.1"
+ md = {
+ 'instance-id': 'i-abcd',
+ 'local-hostname': 'hostname1',
+ 'network-interfaces': textwrap.dedent("""\
+ auto eth0
+ iface eth0 inet static
+ hwaddr 00:16:3e:70:e1:04
+ address 103.225.10.12
+ netmask 255.255.255.0
+ gateway """ + gateway + """
+ dns-servers 8.8.8.8""")}
+
+ netconf = {'version': 1,
+ 'config': [{'type': 'physical', 'name': 'interface0',
+ 'subnets': [{'type': 'dhcp'}]}]}
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {'user-data': b"ud",
+ 'meta-data': yaml.dump(md) + "\n",
+ 'network-config': yaml.dump(netconf) + "\n"})
+
+ sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+
+ ds = DataSourceNoCloud.DataSourceNoCloud
+
+ dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(netconf, dsrc.network_config)
+ self.assertNotIn(gateway, str(dsrc.network_config))
+
class TestParseCommandLineData(TestCase):
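Taken together, the three new NoCloud tests pin down the precedence the datasource applies: a dedicated network-config seed file wins over the legacy network-interfaces key inside meta-data. A sketch of the seed files the last test builds (file names are the real seed names; contents abbreviated):

    import yaml

    netconf = {'version': 1,
               'config': [{'type': 'physical', 'name': 'interface0',
                           'subnets': [{'type': 'dhcp'}]}]}
    seed_files = {
        'user-data': b'ud',
        'meta-data': 'instance-id: IID\n',       # legacy key would live here
        'network-config': yaml.dump(netconf),    # wins when both are present
    }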
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 5c8592c5..97b99a18 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -27,6 +27,7 @@ from six import StringIO
from cloudinit import helpers
from cloudinit import settings
+from cloudinit.sources import convert_vendordata
from cloudinit.sources import DataSourceOpenStack as ds
from cloudinit.sources.helpers import openstack
from cloudinit import util
@@ -318,7 +319,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
class TestVendorDataLoading(test_helpers.TestCase):
def cvj(self, data):
- return openstack.convert_vendordata_json(data)
+ return convert_vendordata(data)
def test_vd_load_none(self):
# non-existent vendor-data should return None
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 9c6c8768..0532f986 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -36,6 +36,8 @@ import uuid
from cloudinit import serial
from cloudinit.sources import DataSourceSmartOS
+from cloudinit.sources.DataSourceSmartOS import (
+ convert_smartos_network_data as convert_net)
import six
@@ -86,6 +88,229 @@ SDC_NICS = json.loads("""
]
""")
+
+SDC_NICS_ALT = json.loads("""
+[
+ {
+ "interface": "net0",
+ "mac": "90:b8:d0:ae:64:51",
+ "vlan_id": 324,
+ "nic_tag": "external",
+ "gateway": "8.12.42.1",
+ "gateways": [
+ "8.12.42.1"
+ ],
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.51",
+ "ips": [
+ "8.12.42.51/24"
+ ],
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model": "virtio",
+ "mtu": 1500,
+ "primary": true
+ },
+ {
+ "interface": "net1",
+ "mac": "90:b8:d0:bd:4f:9c",
+ "vlan_id": 600,
+ "nic_tag": "internal",
+ "netmask": "255.255.255.0",
+ "ip": "10.210.1.217",
+ "ips": [
+ "10.210.1.217/24"
+ ],
+ "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model": "virtio",
+ "mtu": 1500
+ }
+]
+""")
+
+SDC_NICS_DHCP = json.loads("""
+[
+ {
+ "interface": "net0",
+ "mac": "90:b8:d0:ae:64:51",
+ "vlan_id": 324,
+ "nic_tag": "external",
+ "gateway": "8.12.42.1",
+ "gateways": [
+ "8.12.42.1"
+ ],
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.51",
+ "ips": [
+ "8.12.42.51/24"
+ ],
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model": "virtio",
+ "mtu": 1500,
+ "primary": true
+ },
+ {
+ "interface": "net1",
+ "mac": "90:b8:d0:bd:4f:9c",
+ "vlan_id": 600,
+ "nic_tag": "internal",
+ "netmask": "255.255.255.0",
+ "ip": "10.210.1.217",
+ "ips": [
+ "dhcp"
+ ],
+ "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model": "virtio",
+ "mtu": 1500
+ }
+]
+""")
+
+SDC_NICS_MIP = json.loads("""
+[
+ {
+ "interface": "net0",
+ "mac": "90:b8:d0:ae:64:51",
+ "vlan_id": 324,
+ "nic_tag": "external",
+ "gateway": "8.12.42.1",
+ "gateways": [
+ "8.12.42.1"
+ ],
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.51",
+ "ips": [
+ "8.12.42.51/24",
+ "8.12.42.52/24"
+ ],
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model": "virtio",
+ "mtu": 1500,
+ "primary": true
+ },
+ {
+ "interface": "net1",
+ "mac": "90:b8:d0:bd:4f:9c",
+ "vlan_id": 600,
+ "nic_tag": "internal",
+ "netmask": "255.255.255.0",
+ "ip": "10.210.1.217",
+ "ips": [
+ "10.210.1.217/24",
+ "10.210.1.151/24"
+ ],
+ "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model": "virtio",
+ "mtu": 1500
+ }
+]
+""")
+
+SDC_NICS_MIP_IPV6 = json.loads("""
+[
+ {
+ "interface": "net0",
+ "mac": "90:b8:d0:ae:64:51",
+ "vlan_id": 324,
+ "nic_tag": "external",
+ "gateway": "8.12.42.1",
+ "gateways": [
+ "8.12.42.1"
+ ],
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.51",
+ "ips": [
+ "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64",
+ "8.12.42.51/24"
+ ],
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model": "virtio",
+ "mtu": 1500,
+ "primary": true
+ },
+ {
+ "interface": "net1",
+ "mac": "90:b8:d0:bd:4f:9c",
+ "vlan_id": 600,
+ "nic_tag": "internal",
+ "netmask": "255.255.255.0",
+ "ip": "10.210.1.217",
+ "ips": [
+ "10.210.1.217/24"
+ ],
+ "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model": "virtio",
+ "mtu": 1500
+ }
+]
+""")
+
+SDC_NICS_IPV4_IPV6 = json.loads("""
+[
+ {
+ "interface": "net0",
+ "mac": "90:b8:d0:ae:64:51",
+ "vlan_id": 324,
+ "nic_tag": "external",
+ "gateway": "8.12.42.1",
+ "gateways": ["8.12.42.1", "2001::1", "2001::2"],
+ "netmask": "255.255.255.0",
+ "ip": "8.12.42.51",
+ "ips": ["2001::10/64", "8.12.42.51/24", "2001::11/64",
+ "8.12.42.52/32"],
+ "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model": "virtio",
+ "mtu": 1500,
+ "primary": true
+ },
+ {
+ "interface": "net1",
+ "mac": "90:b8:d0:bd:4f:9c",
+ "vlan_id": 600,
+ "nic_tag": "internal",
+ "netmask": "255.255.255.0",
+ "ip": "10.210.1.217",
+ "ips": ["10.210.1.217/24"],
+ "gateways": ["10.210.1.210"],
+ "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model": "virtio",
+ "mtu": 1500
+ }
+]
+""")
+
+SDC_NICS_SINGLE_GATEWAY = json.loads("""
+[
+ {
+ "interface":"net0",
+ "mac":"90:b8:d0:d8:82:b4",
+ "vlan_id":324,
+ "nic_tag":"external",
+ "gateway":"8.12.42.1",
+ "gateways":["8.12.42.1"],
+ "netmask":"255.255.255.0",
+ "ip":"8.12.42.26",
+ "ips":["8.12.42.26/24"],
+ "network_uuid":"992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+ "model":"virtio",
+ "mtu":1500,
+ "primary":true
+ },
+ {
+ "interface":"net1",
+ "mac":"90:b8:d0:0a:51:31",
+ "vlan_id":600,
+ "nic_tag":"internal",
+ "netmask":"255.255.255.0",
+ "ip":"10.210.1.27",
+ "ips":["10.210.1.27/24"],
+ "network_uuid":"98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
+ "model":"virtio",
+ "mtu":1500
+ }
+]
+""")
+
+
MOCK_RETURNS = {
'hostname': 'test-host',
'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname',
@@ -524,20 +749,135 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
class TestNetworkConversion(TestCase):
-
def test_convert_simple(self):
expected = {
'version': 1,
'config': [
{'name': 'net0', 'type': 'physical',
'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'netmask': '255.255.255.0',
'address': '8.12.42.102/24'}],
'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'},
{'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '192.168.128.1',
- 'netmask': '255.255.252.0',
+ 'subnets': [{'type': 'static',
'address': '192.168.128.93/22'}],
'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]}
- found = DataSourceSmartOS.convert_smartos_network_data(SDC_NICS)
+ found = convert_net(SDC_NICS)
+ self.assertEqual(expected, found)
+
+ def test_convert_simple_alt(self):
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'name': 'net0', 'type': 'physical',
+ 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
+ 'address': '8.12.42.51/24'}],
+ 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
+ {'name': 'net1', 'type': 'physical',
+ 'subnets': [{'type': 'static',
+ 'address': '10.210.1.217/24'}],
+ 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ found = convert_net(SDC_NICS_ALT)
+ self.assertEqual(expected, found)
+
+ def test_convert_simple_dhcp(self):
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'name': 'net0', 'type': 'physical',
+ 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
+ 'address': '8.12.42.51/24'}],
+ 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
+ {'name': 'net1', 'type': 'physical',
+ 'subnets': [{'type': 'dhcp4'}],
+ 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ found = convert_net(SDC_NICS_DHCP)
+ self.assertEqual(expected, found)
+
+ def test_convert_simple_multi_ip(self):
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'name': 'net0', 'type': 'physical',
+ 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
+ 'address': '8.12.42.51/24'},
+ {'type': 'static',
+ 'address': '8.12.42.52/24'}],
+ 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
+ {'name': 'net1', 'type': 'physical',
+ 'subnets': [{'type': 'static',
+ 'address': '10.210.1.217/24'},
+ {'type': 'static',
+ 'address': '10.210.1.151/24'}],
+ 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ found = convert_net(SDC_NICS_MIP)
+ self.assertEqual(expected, found)
+
+ def test_convert_with_dns(self):
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'name': 'net0', 'type': 'physical',
+ 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
+ 'address': '8.12.42.51/24'}],
+ 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
+ {'name': 'net1', 'type': 'physical',
+ 'subnets': [{'type': 'dhcp4'}],
+ 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'},
+ {'type': 'nameserver',
+ 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]}
+ found = convert_net(
+ network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'],
+ dns_domain="local")
+ self.assertEqual(expected, found)
+
+ def test_convert_simple_multi_ipv6(self):
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'name': 'net0', 'type': 'physical',
+ 'subnets': [{'type': 'static', 'address':
+ '2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'},
+ {'type': 'static', 'gateway': '8.12.42.1',
+ 'address': '8.12.42.51/24'}],
+ 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
+ {'name': 'net1', 'type': 'physical',
+ 'subnets': [{'type': 'static',
+ 'address': '10.210.1.217/24'}],
+ 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ found = convert_net(SDC_NICS_MIP_IPV6)
+ self.assertEqual(expected, found)
+
+ def test_convert_simple_both_ipv4_ipv6(self):
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500,
+ 'name': 'net0', 'type': 'physical',
+ 'subnets': [{'address': '2001::10/64', 'gateway': '2001::1',
+ 'type': 'static'},
+ {'address': '8.12.42.51/24',
+ 'gateway': '8.12.42.1',
+ 'type': 'static'},
+ {'address': '2001::11/64', 'type': 'static'},
+ {'address': '8.12.42.52/32', 'type': 'static'}]},
+ {'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500,
+ 'name': 'net1', 'type': 'physical',
+ 'subnets': [{'address': '10.210.1.217/24',
+ 'type': 'static'}]}]}
+ found = convert_net(SDC_NICS_IPV4_IPV6)
+ self.assertEqual(expected, found)
+
+ def test_gateways_not_on_all_nics(self):
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500,
+ 'name': 'net0', 'type': 'physical',
+ 'subnets': [{'address': '8.12.42.26/24',
+ 'gateway': '8.12.42.1', 'type': 'static'}]},
+ {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500,
+ 'name': 'net1', 'type': 'physical',
+ 'subnets': [{'address': '10.210.1.27/24',
+ 'type': 'static'}]}]}
+ found = convert_net(SDC_NICS_SINGLE_GATEWAY)
self.assertEqual(expected, found)
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
index 96fa0811..24ad115f 100644
--- a/tests/unittests/test_distros/test_generic.py
+++ b/tests/unittests/test_distros/test_generic.py
@@ -226,8 +226,5 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
os.symlink('/', '/run/systemd/system')
self.assertFalse(d.uses_systemd())
-# def _get_package_mirror_info(mirror_info, availability_zone=None,
-# mirror_filter=util.search_for_mirror):
-
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_conf_v1.py
index d1dca2c4..45714efd 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure.py
+++ b/tests/unittests/test_handler/test_handler_apt_conf_v1.py
@@ -3,6 +3,7 @@ from cloudinit import util
from ..helpers import TestCase
+import copy
import os
import re
import shutil
@@ -27,7 +28,7 @@ class TestAptProxyConfig(TestCase):
contents, flags=re.IGNORECASE)
def test_apt_proxy_written(self):
- cfg = {'apt_proxy': 'myproxy'}
+ cfg = {'proxy': 'myproxy'}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
@@ -37,7 +38,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_http_proxy_written(self):
- cfg = {'apt_http_proxy': 'myproxy'}
+ cfg = {'http_proxy': 'myproxy'}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
@@ -47,13 +48,13 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_all_proxy_written(self):
- cfg = {'apt_http_proxy': 'myproxy_http_proxy',
- 'apt_https_proxy': 'myproxy_https_proxy',
- 'apt_ftp_proxy': 'myproxy_ftp_proxy'}
+ cfg = {'http_proxy': 'myproxy_http_proxy',
+ 'https_proxy': 'myproxy_https_proxy',
+ 'ftp_proxy': 'myproxy_ftp_proxy'}
- values = {'http': cfg['apt_http_proxy'],
- 'https': cfg['apt_https_proxy'],
- 'ftp': cfg['apt_ftp_proxy'],
+ values = {'http': cfg['http_proxy'],
+ 'https': cfg['https_proxy'],
+ 'ftp': cfg['ftp_proxy'],
}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
@@ -74,7 +75,7 @@ class TestAptProxyConfig(TestCase):
def test_proxy_replaced(self):
util.write_file(self.cfile, "content doesnt matter")
- cc_apt_configure.apply_apt_config({'apt_proxy': "foo"},
+ cc_apt_configure.apply_apt_config({'proxy': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
contents = load_tfile_or_url(self.pfile)
@@ -82,7 +83,7 @@ class TestAptProxyConfig(TestCase):
def test_config_written(self):
payload = 'this is my apt config'
- cfg = {'apt_config': payload}
+ cfg = {'conf': payload}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
@@ -93,17 +94,38 @@ class TestAptProxyConfig(TestCase):
def test_config_replaced(self):
util.write_file(self.pfile, "content doesnt matter")
- cc_apt_configure.apply_apt_config({'apt_config': "foo"},
+ cc_apt_configure.apply_apt_config({'conf': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.cfile))
self.assertEqual(load_tfile_or_url(self.cfile), "foo")
def test_config_deleted(self):
- # if no 'apt_config' is provided, delete any previously written file
+ # if no 'conf' is provided, delete any previously written file
util.write_file(self.pfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
self.assertFalse(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
+class TestConversion(TestCase):
+ def test_convert_with_apt_mirror_as_empty_string(self):
+ # an empty apt_mirror is the same as no apt_mirror
+ empty_m_found = cc_apt_configure.convert_to_v3_apt_format(
+ {'apt_mirror': ''})
+ default_found = cc_apt_configure.convert_to_v3_apt_format({})
+ self.assertEqual(default_found, empty_m_found)
+
+ def test_convert_with_apt_mirror(self):
+ mirror = 'http://my.mirror/ubuntu'
+ f = cc_apt_configure.convert_to_v3_apt_format({'apt_mirror': mirror})
+ self.assertIn(mirror, {m['uri'] for m in f['apt']['primary']})
+
+ def test_no_old_content(self):
+ mirror = 'http://my.mirror/ubuntu'
+ mydata = {'apt': {'primary': {'arches': ['default'], 'uri': mirror}}}
+ expected = copy.deepcopy(mydata)
+ self.assertEqual(expected,
+ cc_apt_configure.convert_to_v3_apt_format(mydata))
+
+
# vi: ts=4 expandtab
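Shape of the v1-to-v3 conversion those tests pin down, as inferred from the assertions (convert_to_v3_apt_format may emit additional fields; already-v3 input passes through unchanged):

    v1 = {'apt_mirror': 'http://my.mirror/ubuntu'}
    v3 = {'apt': {'primary': [{'arches': ['default'],
                               'uri': 'http://my.mirror/ubuntu'}]}}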
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
index acde0863..f4411869 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
@@ -79,6 +79,15 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
self.new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.new_root)
+ rpatcher = mock.patch("cloudinit.util.lsb_release")
+ get_rel = rpatcher.start()
+ get_rel.return_value = {'codename': "fakerelease"}
+ self.addCleanup(rpatcher.stop)
+ apatcher = mock.patch("cloudinit.util.get_architecture")
+ get_arch = apatcher.start()
+ get_arch.return_value = 'amd64'
+ self.addCleanup(apatcher.stop)
+
def _get_cloud(self, distro, metadata=None):
self.patchUtils(self.new_root)
paths = helpers.Paths({})
@@ -102,25 +111,38 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
cfg = {'apt_mirror': mirror}
mycloud = self._get_cloud(distro)
- with mock.patch.object(templater, 'render_to_file') as mocktmpl:
- with mock.patch.object(os.path, 'isfile',
- return_value=True) as mockisfile:
- with mock.patch.object(util, 'rename'):
- cc_apt_configure.handle("notimportant", cfg, mycloud,
- LOG, None)
+ with mock.patch.object(util, 'write_file') as mockwf:
+ with mock.patch.object(util, 'load_file',
+ return_value="faketmpl") as mocklf:
+ with mock.patch.object(os.path, 'isfile',
+ return_value=True) as mockisfile:
+ with mock.patch.object(templater, 'render_string',
+ return_value="fake") as mockrnd:
+ with mock.patch.object(util, 'rename'):
+ cc_apt_configure.handle("test", cfg, mycloud,
+ LOG, None)
mockisfile.assert_any_call(
('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
- mocktmpl.assert_called_once_with(
- ('/etc/cloud/templates/sources.list.%s.tmpl' % distro),
- '/etc/apt/sources.list',
- {'codename': '', 'primary': mirrorcheck, 'mirror': mirrorcheck})
-
- def test_apt_source_list_debian(self):
+ mocklf.assert_any_call(
+ ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
+ mockrnd.assert_called_once_with('faketmpl',
+ {'RELEASE': 'fakerelease',
+ 'PRIMARY': mirrorcheck,
+ 'MIRROR': mirrorcheck,
+ 'SECURITY': mirrorcheck,
+ 'codename': 'fakerelease',
+ 'primary': mirrorcheck,
+ 'mirror': mirrorcheck,
+ 'security': mirrorcheck})
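+ # (upper- and lower-case keys are both passed so that old- and
+ # new-style templates keep rendering)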
+ mockwf.assert_called_once_with('/etc/apt/sources.list', 'fake',
+ mode=0o644)
+
+ def test_apt_v1_source_list_debian(self):
"""Test rendering of a source.list from template for debian"""
self.apt_source_list('debian', 'http://httpredir.debian.org/debian')
- def test_apt_source_list_ubuntu(self):
+ def test_apt_v1_source_list_ubuntu(self):
"""Test rendering of a source.list from template for ubuntu"""
self.apt_source_list('ubuntu', 'http://archive.ubuntu.com/ubuntu/')
@@ -134,7 +156,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
print("Faking SUCCESS for '%s'" % name)
return True
- def test_apt_srcl_debian_mirrorfail(self):
+ def test_apt_v1_srcl_debian_mirrorfail(self):
"""Test rendering of a source.list from template for debian"""
with mock.patch.object(util, 'is_resolvable',
side_effect=self.myresolve) as mockresolve:
@@ -145,7 +167,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
mockresolve.assert_any_call("does.not.exist")
mockresolve.assert_any_call("httpredir.debian.org")
- def test_apt_srcl_ubuntu_mirrorfail(self):
+ def test_apt_v1_srcl_ubuntu_mirrorfail(self):
"""Test rendering of a source.list from template for ubuntu"""
with mock.patch.object(util, 'is_resolvable',
side_effect=self.myresolve) as mockresolve:
@@ -156,7 +178,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
mockresolve.assert_any_call("does.not.exist")
mockresolve.assert_any_call("archive.ubuntu.com")
- def test_apt_srcl_custom(self):
+ def test_apt_v1_srcl_custom(self):
"""Test rendering from a custom source.list template"""
cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
mycloud = self._get_cloud('ubuntu')
@@ -164,12 +186,10 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
# the second mock restores the original subp
with mock.patch.object(util, 'write_file') as mockwrite:
with mock.patch.object(util, 'subp', self.subp):
- with mock.patch.object(cc_apt_configure, 'get_release',
- return_value='fakerelease'):
- with mock.patch.object(Distro, 'get_primary_arch',
- return_value='amd64'):
- cc_apt_configure.handle("notimportant", cfg, mycloud,
- LOG, None)
+ with mock.patch.object(Distro, 'get_primary_arch',
+ return_value='amd64'):
+ cc_apt_configure.handle("notimportant", cfg, mycloud,
+ LOG, None)
mockwrite.assert_called_once_with(
'/etc/apt/sources.list',
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
new file mode 100644
index 00000000..e53b0450
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
@@ -0,0 +1,187 @@
+""" test_apt_custom_sources_list
+Test templating of custom sources list
+"""
+import logging
+import os
+import shutil
+import tempfile
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from mock import call
+
+from cloudinit import cloud
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import util
+
+from cloudinit.config import cc_apt_configure
+from cloudinit.sources import DataSourceNone
+
+from cloudinit.distros.debian import Distro
+
+from .. import helpers as t_help
+
+LOG = logging.getLogger(__name__)
+
+TARGET = "/"
+
+# Input and expected output for the custom template
+YAML_TEXT_CUSTOM_SL = """
+apt:
+ primary:
+ - arches: [default]
+ uri: http://test.ubuntu.com/ubuntu/
+ security:
+ - arches: [default]
+ uri: http://testsec.ubuntu.com/ubuntu/
+ sources_list: |
+
+ # Note, this file is written by cloud-init at install time. It should not
+ # end up on the installed system itself.
+ # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+ # newer versions of the distribution.
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+ # FIND_SOMETHING_SPECIAL
+"""
+
+EXPECTED_CONVERTED_CONTENT = """
+# Note, this file is written by cloud-init at install time. It should not
+# end up on the installed system itself.
+# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+# newer versions of the distribution.
+deb http://test.ubuntu.com/ubuntu/ fakerel main restricted
+deb-src http://test.ubuntu.com/ubuntu/ fakerel main restricted
+deb http://test.ubuntu.com/ubuntu/ fakerel universe restricted
+deb http://testsec.ubuntu.com/ubuntu/ fakerel-security multiverse
+# FIND_SOMETHING_SPECIAL
+"""
+
+# mocked to be independent of the unittest system
+MOCKED_APT_SRC_LIST = """
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
+"""
+
+EXPECTED_BASE_CONTENT = ("""
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
+""")
+
+EXPECTED_MIRROR_CONTENT = ("""
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-security main restricted
+""")
+
+EXPECTED_PRIMSEC_CONTENT = ("""
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
+""")
+
+
+class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
+ """TestAptSourceConfigSourceList - Class to test sources list rendering"""
+ def setUp(self):
+ super(TestAptSourceConfigSourceList, self).setUp()
+ self.subp = util.subp
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.new_root)
+
+ rpatcher = mock.patch("cloudinit.util.lsb_release")
+ get_rel = rpatcher.start()
+ get_rel.return_value = {'codename': "fakerel"}
+ self.addCleanup(rpatcher.stop)
+ apatcher = mock.patch("cloudinit.util.get_architecture")
+ get_arch = apatcher.start()
+ get_arch.return_value = 'amd64'
+ self.addCleanup(apatcher.stop)
+
+ def _get_cloud(self, distro, metadata=None):
+ self.patchUtils(self.new_root)
+ paths = helpers.Paths({})
+ cls = distros.fetch(distro)
+ mydist = cls(distro, {}, paths)
+ myds = DataSourceNone.DataSourceNone({}, mydist, paths)
+ if metadata:
+ myds.metadata.update(metadata)
+ return cloud.Cloud(myds, paths, {}, mydist, None)
+
+ def _apt_source_list(self, cfg, expected, distro):
+ "_apt_source_list - Test rendering from template (generic)"
+
+ # test cfg is given without the top-level key; wrap it under 'apt'
+ cfg = {'apt': cfg}
+ mycloud = self._get_cloud(distro)
+ with mock.patch.object(util, 'write_file') as mockwf:
+ with mock.patch.object(util, 'load_file',
+ return_value=MOCKED_APT_SRC_LIST) as mocklf:
+ with mock.patch.object(os.path, 'isfile',
+ return_value=True) as mockisfile:
+ with mock.patch.object(util, 'rename'):
+ cc_apt_configure.handle("test", cfg, mycloud,
+ LOG, None)
+
+ # check if it would have loaded the distro template
+ mockisfile.assert_any_call(
+ ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
+ mocklf.assert_any_call(
+ ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
+ # check expected content in result
+ mockwf.assert_called_once_with('/etc/apt/sources.list', expected,
+ mode=0o644)
+
+ def test_apt_v3_source_list_debian(self):
+ """test_apt_v3_source_list_debian - without custom sources or parms"""
+ cfg = {}
+ self._apt_source_list(cfg, EXPECTED_BASE_CONTENT, 'debian')
+
+ def test_apt_v3_source_list_ubuntu(self):
+ """test_apt_v3_source_list_ubuntu - without custom sources or parms"""
+ cfg = {}
+ self._apt_source_list(cfg, EXPECTED_BASE_CONTENT, 'ubuntu')
+
+ def test_apt_v3_source_list_psm(self):
+ """test_apt_v3_source_list_psm - Test specifying prim+sec mirrors"""
+ pm = 'http://test.ubuntu.com/ubuntu/'
+ sm = 'http://testsec.ubuntu.com/ubuntu/'
+ cfg = {'preserve_sources_list': False,
+ 'primary': [{'arches': ["default"],
+ 'uri': pm}],
+ 'security': [{'arches': ["default"],
+ 'uri': sm}]}
+
+ self._apt_source_list(cfg, EXPECTED_PRIMSEC_CONTENT, 'ubuntu')
+
+ def test_apt_v3_srcl_custom(self):
+ """test_apt_v3_srcl_custom - Test rendering a custom source template"""
+ cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
+ mycloud = self._get_cloud('ubuntu')
+
+ # the second mock restores the original subp
+ with mock.patch.object(util, 'write_file') as mockwrite:
+ with mock.patch.object(util, 'subp', self.subp):
+ with mock.patch.object(Distro, 'get_primary_arch',
+ return_value='amd64'):
+ cc_apt_configure.handle("notimportant", cfg, mycloud,
+ LOG, None)
+
+ calls = [call('/etc/apt/sources.list',
+ EXPECTED_CONVERTED_CONTENT,
+ mode=0o644)]
+ mockwrite.assert_has_calls(calls)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_source.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py
index 99a4d860..ddff4341 100644
--- a/tests/unittests/test_handler/test_handler_apt_source.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py
@@ -1,5 +1,7 @@
-""" test_handler_apt_source
+""" test_handler_apt_source_v1
Testing various config variations of the apt_source config
+Everything is called in the v1 format to exercise the conversion code on top
+of the code actually under test.
"""
import os
import re
@@ -32,6 +34,8 @@ S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
=ACB2
-----END PGP PUBLIC KEY BLOCK-----"""
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
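+# matches "name:value" style specs (e.g. ppa:user/repo) for add-apt-repository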
+
def load_tfile_or_url(*args, **kwargs):
"""load_tfile_or_url
@@ -40,6 +44,19 @@ def load_tfile_or_url(*args, **kwargs):
return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)
+class FakeDistro(object):
+ """Fake Distro helper object"""
+ def update_package_sources(self):
+ """Fake update_package_sources helper method"""
+ return
+
+
+class FakeCloud(object):
+ """Fake Cloud helper object"""
+ def __init__(self):
+ self.distro = FakeDistro()
+
+
class TestAptSourceConfig(TestCase):
"""TestAptSourceConfig
Main Class to test apt_source configs
@@ -54,25 +71,39 @@ class TestAptSourceConfig(TestCase):
self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
self.join = os.path.join
+ self.matcher = re.compile(ADD_APT_REPO_MATCH).search
# mock fallback filename into writable tmp dir
self.fallbackfn = os.path.join(self.tmp, "etc/apt/sources.list.d/",
"cloud_config_sources.list")
- patcher = mock.patch("cloudinit.config.cc_apt_configure.get_release")
- get_rel = patcher.start()
- get_rel.return_value = self.release
- self.addCleanup(patcher.stop)
+ self.fakecloud = FakeCloud()
+
+ rpatcher = mock.patch("cloudinit.util.lsb_release")
+ get_rel = rpatcher.start()
+ get_rel.return_value = {'codename': self.release}
+ self.addCleanup(rpatcher.stop)
+ apatcher = mock.patch("cloudinit.util.get_architecture")
+ get_arch = apatcher.start()
+ get_arch.return_value = 'amd64'
+ self.addCleanup(apatcher.stop)
- @staticmethod
- def _get_default_params():
+ def _get_default_params(self):
"""get_default_params
Get the most basic default mirror and release info to be used in tests
"""
params = {}
- params['RELEASE'] = cc_apt_configure.get_release()
+ params['RELEASE'] = self.release
params['MIRROR'] = "http://archive.ubuntu.com/ubuntu"
return params
+ def wrapv1conf(self, cfg):
+ params = self._get_default_params()
+ # old v1 list format under old keys, but callable by the main handler
+ # disable source.list rendering and set mirror to avoid other code
+ return {'apt_preserve_sources_list': True,
+ 'apt_mirror': params['MIRROR'],
+ 'apt_sources': cfg}
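+ # sketch of the wrapped result for a hypothetical cfg list:
+ # {'apt_preserve_sources_list': True,
+ # 'apt_mirror': 'http://archive.ubuntu.com/ubuntu',
+ # 'apt_sources': cfg}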
+
def myjoin(self, *args, **kwargs):
"""myjoin - redir into writable tmpdir"""
if (args[0] == "/etc/apt/sources.list.d/" and
@@ -86,9 +117,9 @@ class TestAptSourceConfig(TestCase):
"""apt_src_basic
Test a fixed deb source string; it has to overwrite the mirror conf in params
"""
- params = self._get_default_params()
+ cfg = self.wrapv1conf(cfg)
- cc_apt_configure.add_apt_sources(cfg, params)
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
self.assertTrue(os.path.isfile(filename))
@@ -181,8 +212,9 @@ class TestAptSourceConfig(TestCase):
"""apt_src_replace
Test Autoreplacement of MIRROR and RELEASE in source specs
"""
+ cfg = self.wrapv1conf(cfg)
params = self._get_default_params()
- cc_apt_configure.add_apt_sources(cfg, params)
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
self.assertTrue(os.path.isfile(filename))
@@ -246,16 +278,18 @@ class TestAptSourceConfig(TestCase):
"""apt_src_keyid
Test specification of a source + keyid
"""
- params = self._get_default_params()
+ cfg = self.wrapv1conf(cfg)
with mock.patch.object(util, 'subp',
return_value=('fakekey 1234', '')) as mockobj:
- cc_apt_configure.add_apt_sources(cfg, params)
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
# check if it added the right amount of keys
calls = []
for _ in range(keynum):
- calls.append(call(('apt-key', 'add', '-'), 'fakekey 1234'))
+ calls.append(call(['apt-key', 'add', '-'],
+ data=b'fakekey 1234',
+ target=None))
mockobj.assert_has_calls(calls, any_order=True)
self.assertTrue(os.path.isfile(filename))
@@ -329,12 +363,13 @@ class TestAptSourceConfig(TestCase):
"""apt_src_key
Test specification of a source + key
"""
- params = self._get_default_params()
+ cfg = self.wrapv1conf([cfg])
with mock.patch.object(util, 'subp') as mockobj:
- cc_apt_configure.add_apt_sources([cfg], params)
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
- mockobj.assert_called_with(('apt-key', 'add', '-'), 'fakekey 4321')
+ mockobj.assert_called_with(['apt-key', 'add', '-'],
+ data=b'fakekey 4321', target=None)
self.assertTrue(os.path.isfile(filename))
@@ -368,30 +403,31 @@ class TestAptSourceConfig(TestCase):
def test_apt_src_keyonly(self):
"""Test specifying key without source"""
- params = self._get_default_params()
cfg = {'key': "fakekey 4242",
'filename': self.aptlistfile}
+ cfg = self.wrapv1conf([cfg])
with mock.patch.object(util, 'subp') as mockobj:
- cc_apt_configure.add_apt_sources([cfg], params)
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
- mockobj.assert_called_once_with(('apt-key', 'add', '-'),
- 'fakekey 4242')
+ mockobj.assert_called_once_with(['apt-key', 'add', '-'],
+ data=b'fakekey 4242', target=None)
# filename should be ignored on key only
self.assertFalse(os.path.isfile(self.aptlistfile))
def test_apt_src_keyidonly(self):
"""Test specification of a keyid without source"""
- params = self._get_default_params()
cfg = {'keyid': "03683F77",
'filename': self.aptlistfile}
+ cfg = self.wrapv1conf([cfg])
with mock.patch.object(util, 'subp',
return_value=('fakekey 1212', '')) as mockobj:
- cc_apt_configure.add_apt_sources([cfg], params)
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
- mockobj.assert_called_with(('apt-key', 'add', '-'), 'fakekey 1212')
+ mockobj.assert_called_with(['apt-key', 'add', '-'],
+ data=b'fakekey 1212', target=None)
# filename should be ignored on key only
self.assertFalse(os.path.isfile(self.aptlistfile))
@@ -402,17 +438,18 @@ class TestAptSourceConfig(TestCase):
up to addition of the key (add_apt_key_raw mocked to keep the
environment as is)
"""
- params = self._get_default_params()
+ key = cfg['keyid']
+ keyserver = cfg.get('keyserver', 'keyserver.ubuntu.com')
+ cfg = self.wrapv1conf([cfg])
with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey:
- with mock.patch.object(gpg, 'get_key_by_id',
+ with mock.patch.object(gpg, 'getkeybyid',
return_value=expectedkey) as mockgetkey:
- cc_apt_configure.add_apt_sources([cfg], params)
+ cc_apt_configure.handle("test", cfg, self.fakecloud,
+ None, None)
- mockgetkey.assert_called_with(cfg['keyid'],
- cfg.get('keyserver',
- 'keyserver.ubuntu.com'))
- mockkey.assert_called_with(expectedkey)
+ mockgetkey.assert_called_with(key, keyserver)
+ mockkey.assert_called_with(expectedkey, None)
# filename should be ignored on key only
self.assertFalse(os.path.isfile(self.aptlistfile))
@@ -444,41 +481,38 @@ class TestAptSourceConfig(TestCase):
def test_apt_src_ppa(self):
"""Test adding a ppa"""
- params = self._get_default_params()
cfg = {'source': 'ppa:smoser/cloud-init-test',
'filename': self.aptlistfile}
-
- # default matcher needed for ppa
- matcher = re.compile(r'^[\w-]+:\w').search
+ cfg = self.wrapv1conf([cfg])
with mock.patch.object(util, 'subp') as mockobj:
- cc_apt_configure.add_apt_sources([cfg], params,
- aa_repo_match=matcher)
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
mockobj.assert_called_once_with(['add-apt-repository',
- 'ppa:smoser/cloud-init-test'])
+ 'ppa:smoser/cloud-init-test'],
+ target=None)
# adding ppa should ignore filename (uses add-apt-repository)
self.assertFalse(os.path.isfile(self.aptlistfile))
def test_apt_src_ppa_tri(self):
"""Test adding three ppa's"""
- params = self._get_default_params()
cfg1 = {'source': 'ppa:smoser/cloud-init-test',
'filename': self.aptlistfile}
cfg2 = {'source': 'ppa:smoser/cloud-init-test2',
'filename': self.aptlistfile2}
cfg3 = {'source': 'ppa:smoser/cloud-init-test3',
'filename': self.aptlistfile3}
-
- # default matcher needed for ppa
- matcher = re.compile(r'^[\w-]+:\w').search
+ cfg = self.wrapv1conf([cfg1, cfg2, cfg3])
with mock.patch.object(util, 'subp') as mockobj:
- cc_apt_configure.add_apt_sources([cfg1, cfg2, cfg3], params,
- aa_repo_match=matcher)
- calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test']),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test2']),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'])]
+ cc_apt_configure.handle("test", cfg, self.fakecloud,
+ None, None)
+ calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
+ target=None),
+ call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'],
+ target=None),
+ call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'],
+ target=None)]
mockobj.assert_has_calls(calls, any_order=True)
# adding ppa should ignore all filenames (uses add-apt-repository)
@@ -494,6 +528,7 @@ class TestAptSourceConfig(TestCase):
'filename': self.aptlistfile2}
cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
'filename': self.aptlistfile3}
+ cfg = {'apt_sources': [cfg1, cfg2, cfg3]}
checkcfg = {self.aptlistfile: {'filename': self.aptlistfile,
'source': 'deb $MIRROR $RELEASE '
'multiverse'},
@@ -503,14 +538,89 @@ class TestAptSourceConfig(TestCase):
'source': 'deb $MIRROR $RELEASE '
'universe'}}
- newcfg = cc_apt_configure.convert_to_new_format([cfg1, cfg2, cfg3])
- self.assertEqual(newcfg, checkcfg)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg)
+ self.assertEqual(newcfg['apt']['sources'], checkcfg)
+
+ # convert again, should stay the same
+ newcfg2 = cc_apt_configure.convert_to_v3_apt_format(newcfg)
+ self.assertEqual(newcfg2['apt']['sources'], checkcfg)
- newcfg2 = cc_apt_configure.convert_to_new_format(newcfg)
- self.assertEqual(newcfg2, checkcfg)
+ # should work without raising an exception
+ cc_apt_configure.convert_to_v3_apt_format({})
+
+ with self.assertRaises(ValueError):
+ cc_apt_configure.convert_to_v3_apt_format({'apt_sources': 5})
+
+ def test_convert_to_new_format_collision(self):
+ """Test the conversion of old to new format with collisions
+ That matches e.g. the MAAS case specifying old and new config"""
+ cfg_1_and_3 = {'apt': {'proxy': 'http://192.168.122.1:8000/'},
+ 'apt_proxy': 'http://192.168.122.1:8000/'}
+ cfg_3_only = {'apt': {'proxy': 'http://192.168.122.1:8000/'}}
+ cfgconflict = {'apt': {'proxy': 'http://192.168.122.1:8000/'},
+ 'apt_proxy': 'ftp://192.168.122.1:8000/'}
+
+ # collision (equal)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
+ self.assertEqual(newcfg, cfg_3_only)
+ # no old spec (same result)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
+ self.assertEqual(newcfg, cfg_3_only)
+ # collision (unequal)
+ with self.assertRaises(ValueError):
+ cc_apt_configure.convert_to_v3_apt_format(cfgconflict)
+ def test_convert_to_new_format_dict_collision(self):
+ cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
+ 'filename': self.aptlistfile}
+ cfg2 = {'source': 'deb $MIRROR $RELEASE main',
+ 'filename': self.aptlistfile2}
+ cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
+ 'filename': self.aptlistfile3}
+ fullv3 = {self.aptlistfile: {'filename': self.aptlistfile,
+ 'source': 'deb $MIRROR $RELEASE '
+ 'multiverse'},
+ self.aptlistfile2: {'filename': self.aptlistfile2,
+ 'source': 'deb $MIRROR $RELEASE main'},
+ self.aptlistfile3: {'filename': self.aptlistfile3,
+ 'source': 'deb $MIRROR $RELEASE '
+ 'universe'}}
+ cfg_3_only = {'apt': {'sources': fullv3}}
+ cfg_1_and_3 = {'apt_sources': [cfg1, cfg2, cfg3]}
+ cfg_1_and_3.update(cfg_3_only)
+
+ # collision (equal, so ok to remove)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
+ self.assertEqual(newcfg, cfg_3_only)
+ # no old spec (same result)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
+ self.assertEqual(newcfg, cfg_3_only)
+
+ diff = {self.aptlistfile: {'filename': self.aptlistfile,
+ 'source': 'deb $MIRROR $RELEASE '
+ 'DIFFERENTVERSE'},
+ self.aptlistfile2: {'filename': self.aptlistfile2,
+ 'source': 'deb $MIRROR $RELEASE main'},
+ self.aptlistfile3: {'filename': self.aptlistfile3,
+ 'source': 'deb $MIRROR $RELEASE '
+ 'universe'}}
+ cfg_3_only = {'apt': {'sources': diff}}
+ cfg_1_and_3_different = {'apt_sources': [cfg1, cfg2, cfg3]}
+ cfg_1_and_3_different.update(cfg_3_only)
+
+ # collision (unequal by dict having a different entry)
+ with self.assertRaises(ValueError):
+ cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_different)
+
+ missing = {self.aptlistfile: {'filename': self.aptlistfile,
+ 'source': 'deb $MIRROR $RELEASE '
+ 'multiverse'}}
+ cfg_3_only = {'apt': {'sources': missing}}
+ cfg_1_and_3_missing = {'apt_sources': [cfg1, cfg2, cfg3]}
+ cfg_1_and_3_missing.update(cfg_3_only)
+ # collision (unequal by dict missing an entry)
with self.assertRaises(ValueError):
- cc_apt_configure.convert_to_new_format(5)
+ cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_missing)
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py
new file mode 100644
index 00000000..b92a50d7
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py
@@ -0,0 +1,1104 @@
+"""test_handler_apt_source_v3
+Testing various config variations of the apt_source custom config
+This calls everything in the new v3 format and covers the new features.
+"""
+import glob
+import os
+import re
+import shutil
+import socket
+import tempfile
+
+from unittest import TestCase
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from mock import call
+
+from cloudinit import cloud
+from cloudinit import distros
+from cloudinit import gpg
+from cloudinit import helpers
+from cloudinit import util
+
+from cloudinit.config import cc_apt_configure
+from cloudinit.sources import DataSourceNone
+
+from .. import helpers as t_help
+
+EXPECTEDKEY = u"""-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
+NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
+8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
+HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
+CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
+OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
+FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
+S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
+=ACB2
+-----END PGP PUBLIC KEY BLOCK-----"""
+
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+
+TARGET = None
+
+
+def load_tfile(*args, **kwargs):
+ """load_tfile_or_url
+ load file and return content after decoding
+ """
+ return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)
+
+
+class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
+ """TestAptSourceConfig
+ Main Class to test apt configs
+ """
+ def setUp(self):
+ super(TestAptSourceConfig, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+ self.addCleanup(shutil.rmtree, self.new_root)
+ self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
+ self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
+ self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
+ self.join = os.path.join
+ self.matcher = re.compile(ADD_APT_REPO_MATCH).search
+
+ @staticmethod
+ def _add_apt_sources(*args, **kwargs):
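+ # update_packages is patched out so the tests never trigger an
+ # actual package index update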
+ with mock.patch.object(cc_apt_configure, 'update_packages'):
+ cc_apt_configure.add_apt_sources(*args, **kwargs)
+
+ @staticmethod
+ def _get_default_params():
+ """get_default_params
+ Get the most basic default mirror and release info to be used in tests
+ """
+ params = {}
+ params['RELEASE'] = util.lsb_release()['codename']
+ arch = 'amd64'
+ params['MIRROR'] = cc_apt_configure.\
+ get_default_mirrors(arch)["PRIMARY"]
+ return params
+
+ def _myjoin(self, *args, **kwargs):
+ """_myjoin - redir into writable tmpdir"""
+ if (args[0] == "/etc/apt/sources.list.d/" and
+ args[1] == "cloud_config_sources.list" and
+ len(args) == 2):
+ return self.join(self.tmp, args[0].lstrip("/"), args[1])
+ else:
+ return self.join(*args, **kwargs)
+
+ def _get_cloud(self, distro, metadata=None):
+ self.patchUtils(self.new_root)
+ paths = helpers.Paths({})
+ cls = distros.fetch(distro)
+ mydist = cls(distro, {}, paths)
+ myds = DataSourceNone.DataSourceNone({}, mydist, paths)
+ if metadata:
+ myds.metadata.update(metadata)
+ return cloud.Cloud(myds, paths, {}, mydist, None)
+
+ def _apt_src_basic(self, filename, cfg):
+ """_apt_src_basic
+ Test a fixed deb source string; it has to overwrite the mirror conf in params
+ """
+ params = self._get_default_params()
+
+ self._add_apt_sources(cfg, TARGET, template_params=params,
+ aa_repo_match=self.matcher)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = load_tfile(filename)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", "http://test.ubuntu.com/ubuntu",
+ "karmic-backports",
+ "main universe multiverse restricted"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_v3_src_basic(self):
+ """test_apt_v3_src_basic - Test fix deb source string"""
+ cfg = {self.aptlistfile: {'source':
+ ('deb http://test.ubuntu.com/ubuntu'
+ ' karmic-backports'
+ ' main universe multiverse restricted')}}
+ self._apt_src_basic(self.aptlistfile, cfg)
+
+ def test_apt_v3_src_basic_tri(self):
+ """test_apt_v3_src_basic_tri - Test multiple fix deb source strings"""
+ cfg = {self.aptlistfile: {'source':
+ ('deb http://test.ubuntu.com/ubuntu'
+ ' karmic-backports'
+ ' main universe multiverse restricted')},
+ self.aptlistfile2: {'source':
+ ('deb http://test.ubuntu.com/ubuntu'
+ ' precise-backports'
+ ' main universe multiverse restricted')},
+ self.aptlistfile3: {'source':
+ ('deb http://test.ubuntu.com/ubuntu'
+ ' lucid-backports'
+ ' main universe multiverse restricted')}}
+ self._apt_src_basic(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ contents = load_tfile(self.aptlistfile2)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", "http://test.ubuntu.com/ubuntu",
+ "precise-backports",
+ "main universe multiverse restricted"),
+ contents, flags=re.IGNORECASE))
+ contents = load_tfile(self.aptlistfile3)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", "http://test.ubuntu.com/ubuntu",
+ "lucid-backports",
+ "main universe multiverse restricted"),
+ contents, flags=re.IGNORECASE))
+
+ def _apt_src_replacement(self, filename, cfg):
+ """apt_src_replace
+ Test Autoreplacement of MIRROR and RELEASE in source specs
+ """
+ params = self._get_default_params()
+ self._add_apt_sources(cfg, TARGET, template_params=params,
+ aa_repo_match=self.matcher)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = load_tfile(filename)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", params['MIRROR'], params['RELEASE'],
+ "multiverse"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_v3_src_replace(self):
+ """test_apt_v3_src_replace - Test replacement of MIRROR & RELEASE"""
+ cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}}
+ self._apt_src_replacement(self.aptlistfile, cfg)
+
+ def test_apt_v3_src_replace_fn(self):
+ """test_apt_v3_src_replace_fn - Test filename overwritten in dict"""
+ cfg = {'ignored': {'source': 'deb $MIRROR $RELEASE multiverse',
+ 'filename': self.aptlistfile}}
+ # second file should overwrite the dict key
+ self._apt_src_replacement(self.aptlistfile, cfg)
+
+ def _apt_src_replace_tri(self, cfg):
+ """_apt_src_replace_tri
+ Test three autoreplacements of MIRROR and RELEASE in source specs with
+ generic part
+ """
+ self._apt_src_replacement(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ params = self._get_default_params()
+ contents = load_tfile(self.aptlistfile2)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", params['MIRROR'], params['RELEASE'],
+ "main"),
+ contents, flags=re.IGNORECASE))
+ contents = load_tfile(self.aptlistfile3)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb", params['MIRROR'], params['RELEASE'],
+ "universe"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_v3_src_replace_tri(self):
+ """test_apt_v3_src_replace_tri - Test multiple replace/overwrites"""
+ cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'},
+ 'notused': {'source': 'deb $MIRROR $RELEASE main',
+ 'filename': self.aptlistfile2},
+ self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}}
+ self._apt_src_replace_tri(cfg)
+
+ def _apt_src_keyid(self, filename, cfg, keynum):
+ """_apt_src_keyid
+ Test specification of a source + keyid
+ """
+ params = self._get_default_params()
+
+ with mock.patch("cloudinit.util.subp",
+ return_value=('fakekey 1234', '')) as mockobj:
+ self._add_apt_sources(cfg, TARGET, template_params=params,
+ aa_repo_match=self.matcher)
+
+ # check if it added the right amount of keys
+ calls = []
+ for _ in range(keynum):
+ calls.append(call(['apt-key', 'add', '-'], data=b'fakekey 1234',
+ target=TARGET))
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = load_tfile(filename)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb",
+ ('http://ppa.launchpad.net/smoser/'
+ 'cloud-init-test/ubuntu'),
+ "xenial", "main"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_v3_src_keyid(self):
+ """test_apt_v3_src_keyid - Test source + keyid with filename"""
+ cfg = {self.aptlistfile: {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial main'),
+ 'keyid': "03683F77"}}
+ self._apt_src_keyid(self.aptlistfile, cfg, 1)
+
+ def test_apt_v3_src_keyid_tri(self):
+ """test_apt_v3_src_keyid_tri - Test multiple src+key+filen writes"""
+ cfg = {self.aptlistfile: {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial main'),
+ 'keyid': "03683F77"},
+ 'ignored': {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial universe'),
+ 'keyid': "03683F77",
+ 'filename': self.aptlistfile2},
+ self.aptlistfile3: {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial multiverse'),
+ 'keyid': "03683F77"}}
+
+ self._apt_src_keyid(self.aptlistfile, cfg, 3)
+ contents = load_tfile(self.aptlistfile2)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb",
+ ('http://ppa.launchpad.net/smoser/'
+ 'cloud-init-test/ubuntu'),
+ "xenial", "universe"),
+ contents, flags=re.IGNORECASE))
+ contents = load_tfile(self.aptlistfile3)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb",
+ ('http://ppa.launchpad.net/smoser/'
+ 'cloud-init-test/ubuntu'),
+ "xenial", "multiverse"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_v3_src_key(self):
+ """test_apt_v3_src_key - Test source + key"""
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {'source': ('deb '
+ 'http://ppa.launchpad.net/'
+ 'smoser/cloud-init-test/ubuntu'
+ ' xenial main'),
+ 'key': "fakekey 4321"}}
+
+ with mock.patch.object(util, 'subp') as mockobj:
+ self._add_apt_sources(cfg, TARGET, template_params=params,
+ aa_repo_match=self.matcher)
+
+ mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4321',
+ target=TARGET)
+
+ self.assertTrue(os.path.isfile(self.aptlistfile))
+
+ contents = load_tfile(self.aptlistfile)
+ self.assertTrue(re.search(r"%s %s %s %s\n" %
+ ("deb",
+ ('http://ppa.launchpad.net/smoser/'
+ 'cloud-init-test/ubuntu'),
+ "xenial", "main"),
+ contents, flags=re.IGNORECASE))
+
+ def test_apt_v3_src_keyonly(self):
+ """test_apt_v3_src_keyonly - Test key without source"""
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {'key': "fakekey 4242"}}
+
+ with mock.patch.object(util, 'subp') as mockobj:
+ self._add_apt_sources(cfg, TARGET, template_params=params,
+ aa_repo_match=self.matcher)
+
+ mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4242',
+ target=TARGET)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_keyidonly(self):
+ """test_apt_v3_src_keyidonly - Test keyid without source"""
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {'keyid': "03683F77"}}
+
+ with mock.patch.object(util, 'subp',
+ return_value=('fakekey 1212', '')) as mockobj:
+ self._add_apt_sources(cfg, TARGET, template_params=params,
+ aa_repo_match=self.matcher)
+
+ mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 1212',
+ target=TARGET)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def apt_src_keyid_real(self, cfg, expectedkey):
+ """apt_src_keyid_real
+ Test specification of a keyid without source including
+ up to addition of the key (add_apt_key_raw mocked to keep the
+ environment as is)
+ """
+ params = self._get_default_params()
+
+ with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey:
+ with mock.patch.object(gpg, 'getkeybyid',
+ return_value=expectedkey) as mockgetkey:
+ self._add_apt_sources(cfg, TARGET, template_params=params,
+ aa_repo_match=self.matcher)
+
+ keycfg = cfg[self.aptlistfile]
+ mockgetkey.assert_called_with(keycfg['keyid'],
+ keycfg.get('keyserver',
+ 'keyserver.ubuntu.com'))
+ mockkey.assert_called_with(expectedkey, TARGET)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_keyid_real(self):
+ """test_apt_v3_src_keyid_real - Test keyid including key add"""
+ keyid = "03683F77"
+ cfg = {self.aptlistfile: {'keyid': keyid}}
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+
+ def test_apt_v3_src_longkeyid_real(self):
+ """test_apt_v3_src_longkeyid_real Test long keyid including key add"""
+ keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+ cfg = {self.aptlistfile: {'keyid': keyid}}
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+
+ def test_apt_v3_src_longkeyid_ks_real(self):
+ """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks"""
+ keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+ cfg = {self.aptlistfile: {'keyid': keyid,
+ 'keyserver': 'keys.gnupg.net'}}
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+
+ def test_apt_v3_src_keyid_keyserver(self):
+ """test_apt_v3_src_keyid_keyserver - Test custom keyserver"""
+ keyid = "03683F77"
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {'keyid': keyid,
+ 'keyserver': 'test.random.com'}}
+
+ # in some test environments only *.ubuntu.com is reachable
+ # so mock the call and check if the config got there
+ with mock.patch.object(gpg, 'getkeybyid',
+ return_value="fakekey") as mockgetkey:
+ with mock.patch.object(cc_apt_configure,
+ 'add_apt_key_raw') as mockadd:
+ self._add_apt_sources(cfg, TARGET, template_params=params,
+ aa_repo_match=self.matcher)
+
+ mockgetkey.assert_called_with('03683F77', 'test.random.com')
+ mockadd.assert_called_with('fakekey', TARGET)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_ppa(self):
+ """test_apt_v3_src_ppa - Test specification of a ppa"""
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}}
+
+ with mock.patch("cloudinit.util.subp") as mockobj:
+ self._add_apt_sources(cfg, TARGET, template_params=params,
+ aa_repo_match=self.matcher)
+ mockobj.assert_any_call(['add-apt-repository',
+ 'ppa:smoser/cloud-init-test'], target=TARGET)
+
+ # adding ppa should ignore filename (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_ppa_tri(self):
+ """test_apt_v3_src_ppa_tri - Test specification of multiple ppa's"""
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'},
+ self.aptlistfile2: {'source': 'ppa:smoser/cloud-init-test2'},
+ self.aptlistfile3: {'source': 'ppa:smoser/cloud-init-test3'}}
+
+ with mock.patch("cloudinit.util.subp") as mockobj:
+ self._add_apt_sources(cfg, TARGET, template_params=params,
+ aa_repo_match=self.matcher)
+ calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
+ target=TARGET),
+ call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'],
+ target=TARGET),
+ call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'],
+ target=TARGET)]
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # adding ppa should ignore all filenames (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+ self.assertFalse(os.path.isfile(self.aptlistfile2))
+ self.assertFalse(os.path.isfile(self.aptlistfile3))
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_architecture")
+ def test_apt_v3_list_rename(self, m_get_architecture):
+ """test_apt_v3_list_rename - Test find mirror and apt list renaming"""
+ pre = "/var/lib/apt/lists"
+ # filenames are archive dependent
+
+ arch = 's390x'
+ m_get_architecture.return_value = arch
+ component = "ubuntu-ports"
+ archive = "ports.ubuntu.com"
+
+ cfg = {'primary': [{'arches': ["default"],
+ 'uri':
+ 'http://test.ubuntu.com/%s/' % component}],
+ 'security': [{'arches': ["default"],
+ 'uri':
+ 'http://testsec.ubuntu.com/%s/' % component}]}
+ post = ("%s_dists_%s-updates_InRelease" %
+ (component, util.lsb_release()['codename']))
+ fromfn = ("%s/%s_%s" % (pre, archive, post))
+ tofn = ("%s/test.ubuntu.com_%s" % (pre, post))
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, arch)
+
+ self.assertEqual(mirrors['MIRROR'],
+ "http://test.ubuntu.com/%s/" % component)
+ self.assertEqual(mirrors['PRIMARY'],
+ "http://test.ubuntu.com/%s/" % component)
+ self.assertEqual(mirrors['SECURITY'],
+ "http://testsec.ubuntu.com/%s/" % component)
+
+ with mock.patch.object(os, 'rename') as mockren:
+ with mock.patch.object(glob, 'glob',
+ return_value=[fromfn]):
+ cc_apt_configure.rename_apt_lists(mirrors, TARGET)
+
+ mockren.assert_any_call(fromfn, tofn)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_architecture")
+ def test_apt_v3_list_rename_non_slash(self, m_get_architecture):
+ target = os.path.join(self.tmp, "rename_non_slash")
+ apt_lists_d = os.path.join(target, "./" + cc_apt_configure.APT_LISTS)
+
+ m_get_architecture.return_value = 'amd64'
+
+ mirror_path = "some/random/path/"
+ primary = "http://test.ubuntu.com/" + mirror_path
+ security = "http://test-security.ubuntu.com/" + mirror_path
+ mirrors = {'PRIMARY': primary, 'SECURITY': security}
+
+ # these match default archive prefixes
+ opri_pre = "archive.ubuntu.com_ubuntu_dists_xenial"
+ osec_pre = "security.ubuntu.com_ubuntu_dists_xenial"
+ # this one matches neither default and should not be renamed
+ other_pre = "dl.google.com_linux_chrome_deb_dists_stable"
+ # these are our new expected prefixes
+ npri_pre = "test.ubuntu.com_some_random_path_dists_xenial"
+ nsec_pre = "test-security.ubuntu.com_some_random_path_dists_xenial"
+
+ files = [
+ # orig prefix, new prefix, suffix
+ (opri_pre, npri_pre, "_main_binary-amd64_Packages"),
+ (opri_pre, npri_pre, "_main_binary-amd64_InRelease"),
+ (opri_pre, npri_pre, "-updates_main_binary-amd64_Packages"),
+ (opri_pre, npri_pre, "-updates_main_binary-amd64_InRelease"),
+ (other_pre, other_pre, "_main_binary-amd64_Packages"),
+ (other_pre, other_pre, "_Release"),
+ (other_pre, other_pre, "_Release.gpg"),
+ (osec_pre, nsec_pre, "_InRelease"),
+ (osec_pre, nsec_pre, "_main_binary-amd64_Packages"),
+ (osec_pre, nsec_pre, "_universe_binary-amd64_Packages"),
+ ]
+
+ expected = sorted([npre + suff for opre, npre, suff in files])
+ # create files
+ for (opre, npre, suff) in files:
+ fpath = os.path.join(apt_lists_d, opre + suff)
+ util.write_file(fpath, content=fpath)
+
+ cc_apt_configure.rename_apt_lists(mirrors, target)
+ found = sorted(os.listdir(apt_lists_d))
+ self.assertEqual(expected, found)
+
+ @staticmethod
+ def test_apt_v3_proxy():
+ """test_apt_v3_proxy - Test apt_*proxy configuration"""
+ cfg = {"proxy": "foobar1",
+ "http_proxy": "foobar2",
+ "ftp_proxy": "foobar3",
+ "https_proxy": "foobar4"}
+
+ with mock.patch.object(util, 'write_file') as mockobj:
+ cc_apt_configure.apply_apt_config(cfg, "proxyfn", "notused")
+
+ mockobj.assert_called_with('proxyfn',
+ ('Acquire::http::Proxy "foobar1";\n'
+ 'Acquire::http::Proxy "foobar2";\n'
+ 'Acquire::ftp::Proxy "foobar3";\n'
+ 'Acquire::https::Proxy "foobar4";\n'))
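+ # note: 'proxy' and 'http_proxy' both render as Acquire::http::Proxy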
+
+ def test_apt_v3_mirror(self):
+ """test_apt_v3_mirror - Test defining a mirror"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {"primary": [{'arches': ["default"],
+ "uri": pmir}],
+ "security": [{'arches': ["default"],
+ "uri": smir}]}
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, 'amd64')
+
+ self.assertEqual(mirrors['MIRROR'],
+ pmir)
+ self.assertEqual(mirrors['PRIMARY'],
+ pmir)
+ self.assertEqual(mirrors['SECURITY'],
+ smir)
+
+ def test_apt_v3_mirror_default(self):
+ """test_apt_v3_mirror_default - Test without defining a mirror"""
+ arch = 'amd64'
+ default_mirrors = cc_apt_configure.get_default_mirrors(arch)
+ pmir = default_mirrors["PRIMARY"]
+ smir = default_mirrors["SECURITY"]
+ mycloud = self._get_cloud('ubuntu')
+ mirrors = cc_apt_configure.find_apt_mirror_info({}, mycloud, arch)
+
+ self.assertEqual(mirrors['MIRROR'],
+ pmir)
+ self.assertEqual(mirrors['PRIMARY'],
+ pmir)
+ self.assertEqual(mirrors['SECURITY'],
+ smir)
+
+ def test_apt_v3_mirror_arches(self):
+ """test_apt_v3_mirror_arches - Test arches selection of mirror"""
+ pmir = "http://my-primary.ubuntu.com/ubuntu/"
+ smir = "http://my-security.ubuntu.com/ubuntu/"
+ arch = 'ppc64el'
+ cfg = {"primary": [{'arches': ["default"], "uri": "notthis-primary"},
+ {'arches': [arch], "uri": pmir}],
+ "security": [{'arches': ["default"], "uri": "nothis-security"},
+ {'arches': [arch], "uri": smir}]}
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, arch)
+
+ self.assertEqual(mirrors['PRIMARY'], pmir)
+ self.assertEqual(mirrors['MIRROR'], pmir)
+ self.assertEqual(mirrors['SECURITY'], smir)
+
+ def test_apt_v3_mirror_arches_default(self):
+ """test_apt_v3_mirror_arches - Test falling back to default arch"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {"primary": [{'arches': ["default"],
+ "uri": pmir},
+ {'arches': ["thisarchdoesntexist"],
+ "uri": "notthis"}],
+ "security": [{'arches': ["thisarchdoesntexist"],
+ "uri": "nothat"},
+ {'arches': ["default"],
+ "uri": smir}]}
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, 'amd64')
+
+ self.assertEqual(mirrors['MIRROR'],
+ pmir)
+ self.assertEqual(mirrors['PRIMARY'],
+ pmir)
+ self.assertEqual(mirrors['SECURITY'],
+ smir)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_architecture")
+ def test_apt_v3_get_def_mir_non_intel_no_arch(self, m_get_architecture):
+ arch = 'ppc64el'
+ m_get_architecture.return_value = arch
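+ # called without an arch argument, get_default_mirrors falls back
+ # to util.get_architecture (mocked above)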
+ expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports',
+ 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'}
+ self.assertEqual(expected, cc_apt_configure.get_default_mirrors())
+
+ def test_apt_v3_get_default_mirrors_non_intel_with_arch(self):
+ found = cc_apt_configure.get_default_mirrors('ppc64el')
+
+ expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports',
+ 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'}
+ self.assertEqual(expected, found)
+
+ def test_apt_v3_mirror_arches_sysdefault(self):
+ """test_apt_v3_mirror_arches - Test arches fallback to sys default"""
+ arch = 'amd64'
+ default_mirrors = cc_apt_configure.get_default_mirrors(arch)
+ pmir = default_mirrors["PRIMARY"]
+ smir = default_mirrors["SECURITY"]
+ mycloud = self._get_cloud('ubuntu')
+ cfg = {"primary": [{'arches': ["thisarchdoesntexist_64"],
+ "uri": "notthis"},
+ {'arches': ["thisarchdoesntexist"],
+ "uri": "notthiseither"}],
+ "security": [{'arches': ["thisarchdoesntexist"],
+ "uri": "nothat"},
+ {'arches': ["thisarchdoesntexist_64"],
+ "uri": "nothateither"}]}
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+
+ self.assertEqual(mirrors['MIRROR'], pmir)
+ self.assertEqual(mirrors['PRIMARY'], pmir)
+ self.assertEqual(mirrors['SECURITY'], smir)
+
+ def test_apt_v3_mirror_search(self):
+ """test_apt_v3_mirror_search - Test searching mirrors in a list
+ mock checks to avoid relying on network connectivity"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {"primary": [{'arches': ["default"],
+ "search": ["pfailme", pmir]}],
+ "security": [{'arches': ["default"],
+ "search": ["sfailme", smir]}]}
+
+ with mock.patch.object(cc_apt_configure, 'search_for_mirror',
+ side_effect=[pmir, smir]) as mocksearch:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None,
+ 'amd64')
+
+ calls = [call(["pfailme", pmir]),
+ call(["sfailme", smir])]
+ mocksearch.assert_has_calls(calls)
+
+ self.assertEqual(mirrors['MIRROR'],
+ pmir)
+ self.assertEqual(mirrors['PRIMARY'],
+ pmir)
+ self.assertEqual(mirrors['SECURITY'],
+ smir)
+
+ def test_apt_v3_mirror_search_many2(self):
+ """test_apt_v3_mirror_search_many3 - Test both mirrors specs at once"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {"primary": [{'arches': ["default"],
+ "uri": pmir,
+ "search": ["pfailme", "foo"]}],
+ "security": [{'arches': ["default"],
+ "uri": smir,
+ "search": ["sfailme", "bar"]}]}
+
+ arch = 'amd64'
+
+ # get_mirror should be called only once per type, despite both a uri
+ # and a search list being configured
+ mycloud = None
+ with mock.patch.object(cc_apt_configure, 'get_mirror',
+ return_value="http://mocked/foo") as mockgm:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+ calls = [call(cfg, 'primary', arch, mycloud),
+ call(cfg, 'security', arch, mycloud)]
+ mockgm.assert_has_calls(calls)
+
+ # search_for_mirror should not be called, since a uri is specified
+ with mock.patch.object(cc_apt_configure,
+ 'search_for_mirror') as mockse:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, arch)
+ mockse.assert_not_called()
+
+ self.assertEqual(mirrors['MIRROR'],
+ pmir)
+ self.assertEqual(mirrors['PRIMARY'],
+ pmir)
+ self.assertEqual(mirrors['SECURITY'],
+ smir)
+
+ def test_apt_v3_url_resolvable(self):
+ """test_apt_v3_url_resolvable - Test resolving urls"""
+
+ with mock.patch.object(util, 'is_resolvable') as mockresolve:
+ util.is_resolvable_url("http://1.2.3.4/ubuntu")
+ mockresolve.assert_called_with("1.2.3.4")
+
+ with mock.patch.object(util, 'is_resolvable') as mockresolve:
+ util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
+ mockresolve.assert_called_with("us.archive.ubuntu.com")
+
+ # earlier tests can leave this set (or not, if the test is run directly)
+ # do a hard reset to ensure a stable result
+ util._DNS_REDIRECT_IP = None
+ bad = [(None, None, None, "badname", ["10.3.2.1"])]
+ good = [(None, None, None, "goodname", ["10.2.3.4"])]
+ with mock.patch.object(socket, 'getaddrinfo',
+ side_effect=[bad, bad, bad, good,
+ good]) as mocksock:
+ ret = util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
+ ret2 = util.is_resolvable_url("http://1.2.3.4/ubuntu")
+ mocksock.assert_any_call('does-not-exist.example.com.', None,
+ 0, 0, 1, 2)
+ mocksock.assert_any_call('example.invalid.', None, 0, 0, 1, 2)
+ mocksock.assert_any_call('us.archive.ubuntu.com', None)
+ mocksock.assert_any_call('1.2.3.4', None)
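+ # (the example.com/example.invalid lookups above are the probes
+ # is_resolvable issues once to detect wildcard-DNS redirection)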
+
+ self.assertTrue(ret)
+ self.assertTrue(ret2)
+
+ # after the initial calls, side_effect needs only the single bad result
+ with mock.patch.object(socket, 'getaddrinfo',
+ side_effect=[bad]) as mocksock:
+ ret3 = util.is_resolvable_url("http://failme.com/ubuntu")
+ calls = [call('failme.com', None)]
+ mocksock.assert_has_calls(calls)
+ self.assertFalse(ret3)
+
+ def test_apt_v3_disable_suites(self):
+ """test_disable_suites - disable_suites with many configurations"""
+ release = "xenial"
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+
+ # disable nothing
+ disabled = []
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable release suite
+ disabled = ["$RELEASE"]
+ expect = """\
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable other suite
+ disabled = ["$RELEASE-updates"]
+ expect = ("""deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu"""
+ """ xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # multi disable
+ disabled = ["$RELEASE-updates", "$RELEASE-security"]
+ expect = ("""deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # multi line disable (same suite multiple times in input)
+ disabled = ["$RELEASE-updates", "$RELEASE-security"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://UBUNTU.com//ubuntu xenial-updates main
+deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = ("""deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+# suite disabled by cloud-init: deb http://UBUNTU.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
+ """xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # comment in input
+ disabled = ["$RELEASE-updates", "$RELEASE-security"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+#foo
+#deb http://UBUNTU.com//ubuntu xenial-updates main
+deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = ("""deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+#foo
+#deb http://UBUNTU.com//ubuntu xenial-updates main
+# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
+ """xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable custom suite
+ disabled = ["foobar"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ foobar main"""
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+# suite disabled by cloud-init: deb http://ubuntu.com/ubuntu/ foobar main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable non existing suite
+ disabled = ["foobar"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ notfoobar main"""
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ notfoobar main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable suite with option
+ disabled = ["$RELEASE-updates"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [a=b] http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = ("""deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb [a=b] http://ubu.com//ubu """
+ """xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable suite with more options and auto $RELEASE expansion
+ disabled = ["updates"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [a=b c=d] http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb [a=b c=d] \
+http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable suite while options at others
+ disabled = ["$RELEASE-security"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = ("""deb http://ubuntu.com//ubuntu xenial main
+deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ def test_disable_suites_blank_lines(self):
+ """test_disable_suites_blank_lines - ensure blank lines allowed"""
+ lines = ["deb %(repo)s %(rel)s main universe",
+ "",
+ "deb %(repo)s %(rel)s-updates main universe",
+ " # random comment",
+ "#comment here",
+ ""]
+ rel = "trusty"
+ repo = 'http://example.com/mirrors/ubuntu'
+ orig = "\n".join(lines) % {'repo': repo, 'rel': rel}
+ self.assertEqual(
+ orig, cc_apt_configure.disable_suites(["proposed"], orig, rel))
+
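The cases above pin down the matching rule: comment lines are preserved, bracketed options are skipped over when locating the suite field, and a bare name such as "updates" also matches "<release>-updates". A minimal sketch of that behaviour (cc_apt_configure.disable_suites is the reference implementation; this stand-in only assumes the rule the expectations encode):

    def disable_suites_sketch(disabled, src, release):
        out = []
        for line in src.splitlines():
            cols = line.split()
            if (not line.lstrip().startswith('#') and len(cols) > 2
                    and cols[0] in ('deb', 'deb-src')):
                pos = 2
                if cols[1].startswith('['):
                    # options such as [a=b c=d] may span several columns
                    i = 1
                    while not cols[i].endswith(']'):
                        i += 1
                    pos = i + 2
                for suite in disabled:
                    suite = suite.replace('$RELEASE', release)
                    if len(cols) > pos and cols[pos] in (
                            suite, '%s-%s' % (release, suite)):
                        line = '# suite disabled by cloud-init: %s' % line
                        break
            out.append(line)
        return '\n'.join(out)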
+ def test_apt_v3_mirror_search_dns(self):
+ """test_apt_v3_mirror_search_dns - Test searching dns patterns"""
+ pmir = "phit"
+ smir = "shit"
+ arch = 'amd64'
+ mycloud = self._get_cloud('ubuntu')
+ cfg = {"primary": [{'arches': ["default"],
+ "search_dns": True}],
+ "security": [{'arches': ["default"],
+ "search_dns": True}]}
+
+ with mock.patch.object(cc_apt_configure, 'get_mirror',
+ return_value="http://mocked/foo") as mockgm:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+ calls = [call(cfg, 'primary', arch, mycloud),
+ call(cfg, 'security', arch, mycloud)]
+ mockgm.assert_has_calls(calls)
+
+ with mock.patch.object(cc_apt_configure, 'search_for_mirror_dns',
+ return_value="http://mocked/foo") as mocksdns:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+ calls = [call(True, 'primary', cfg, mycloud),
+ call(True, 'security', cfg, mycloud)]
+ mocksdns.assert_has_calls(calls)
+
+ # the first and third side_effect values feed the plain (non-DNS)
+ # search calls that happen before each DNS search
+ with mock.patch.object(cc_apt_configure, 'search_for_mirror',
+ side_effect=[None, pmir, None, smir]) as mockse:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+
+ calls = [call(None),
+ call(['http://ubuntu-mirror.localdomain/ubuntu',
+ 'http://ubuntu-mirror/ubuntu']),
+ call(None),
+ call(['http://ubuntu-security-mirror.localdomain/ubuntu',
+ 'http://ubuntu-security-mirror/ubuntu'])]
+ mockse.assert_has_calls(calls)
+
+ self.assertEqual(mirrors['MIRROR'], pmir)
+ self.assertEqual(mirrors['PRIMARY'], pmir)
+ self.assertEqual(mirrors['SECURITY'], smir)
+
+
+class TestDebconfSelections(TestCase):
+
+ @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
+ def test_no_set_sel_if_none_to_set(self, m_set_sel):
+ cc_apt_configure.apply_debconf_selections({'foo': 'bar'})
+ m_set_sel.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_apt_configure."
+ "debconf_set_selections")
+ @mock.patch("cloudinit.config.cc_apt_configure."
+ "util.get_installed_packages")
+ def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel):
+ data = {
+ 'set1': 'pkga pkga/q1 mybool false',
+ 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n'
+ 'pkgc\tpkgc/ip\tstring\t10.0.0.1')}
+ lines = '\n'.join(data.values()).split('\n')
+
+ m_get_inst.return_value = ["adduser", "apparmor"]
+ m_set_sel.return_value = None
+
+ cc_apt_configure.apply_debconf_selections({'debconf_selections': data})
+ self.assertTrue(m_get_inst.called)
+ self.assertEqual(m_set_sel.call_count, 1)
+
+ # assumes the selections blob was passed as the first positional arg
+ selections = m_set_sel.call_args_list[0][0][0].decode()
+
+ missing = [l for l in lines if l not in selections.splitlines()]
+ self.assertEqual([], missing)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure")
+ @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
+ @mock.patch("cloudinit.config.cc_apt_configure."
+ "util.get_installed_packages")
+ def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel,
+ m_dpkg_r):
+ data = {
+ 'set1': 'pkga pkga/q1 mybool false',
+ 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n'
+ 'pkgc\tpkgc/ip\tstring\t10.0.0.1'),
+ 'cloud-init': ('cloud-init cloud-init/datasources '
+ 'multiselect MAAS')}
+
+ m_set_sel.return_value = None
+ m_get_inst.return_value = ["adduser", "apparmor", "pkgb",
+ "cloud-init", 'zdog']
+
+ cc_apt_configure.apply_debconf_selections({'debconf_selections': data})
+
+ # reconfigure should be called with the intersection
+ # of (packages in config, packages installed)
+ self.assertEqual(m_dpkg_r.call_count, 1)
+ # assumes called with *args (dpkg_reconfigure([a,b,c], target=))
+ packages = m_dpkg_r.call_args_list[0][0][0]
+ self.assertEqual(set(['cloud-init', 'pkgb']), set(packages))
+
+ @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure")
+ @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
+ @mock.patch("cloudinit.config.cc_apt_configure."
+ "util.get_installed_packages")
+ def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel,
+ m_dpkg_r):
+ data = {'set1': 'pkga pkga/q1 mybool false'}
+
+ m_get_inst.return_value = ["adduser", "apparmor", "pkgb",
+ "cloud-init", 'zdog']
+ m_set_sel.return_value = None
+
+ cc_apt_configure.apply_debconf_selections({'debconf_selections': data})
+
+ self.assertTrue(m_get_inst.called)
+ self.assertEqual(m_dpkg_r.call_count, 0)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.subp")
+ def test_dpkg_reconfigure_does_reconfigure(self, m_subp):
+ target = "/foo-target"
+
+ # due to the way the cleaners are called (via dictionary reference)
+ # mocking clean_cloud_init directly does not work. So we mock
+ # the CONFIG_CLEANERS dictionary and assert our cleaner is called.
+ ci_cleaner = mock.MagicMock()
+ with mock.patch.dict(("cloudinit.config.cc_apt_configure."
+ "CONFIG_CLEANERS"),
+ values={'cloud-init': ci_cleaner}, clear=True):
+ cc_apt_configure.dpkg_reconfigure(['pkga', 'cloud-init'],
+ target=target)
+ # cloud-init is actually the only package we have a cleaner for,
+ # so for now it's the only one that should be reconfigured
+ self.assertTrue(m_subp.called)
+ ci_cleaner.assert_called_with(target)
+ self.assertEqual(m_subp.call_count, 1)
+ found = m_subp.call_args_list[0][0][0]
+ expected = ['dpkg-reconfigure', '--frontend=noninteractive',
+ 'cloud-init']
+ self.assertEqual(expected, found)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.subp")
+ def test_dpkg_reconfigure_not_done_on_no_data(self, m_subp):
+ cc_apt_configure.dpkg_reconfigure([])
+ m_subp.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.subp")
+ def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp):
+ cc_apt_configure.dpkg_reconfigure(['pkgfoo', 'pkgbar'])
+ m_subp.assert_not_called()
+
+#
+# vi: ts=4 expandtab
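The debconf tests above assert a simple contract: every selection line is fed to debconf-set-selections, but dpkg-reconfigure only runs for packages that are both named in the selections and currently installed. A hypothetical helper (not the module's API) showing just that intersection step:

    def to_reconfigure(selections, installed):
        # package name is the first field of each selection line
        named = set(line.split()[0] for line in selections.splitlines()
                    if line.split())
        return sorted(named.intersection(installed))

    # to_reconfigure("pkga pkga/q1 mybool false\npkgb pkgb/b1 str v",
    #                ["pkgb", "zdog"]) -> ['pkgb']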
diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py
index f9448d80..c3a5a634 100644
--- a/tests/unittests/test_handler/test_handler_mcollective.py
+++ b/tests/unittests/test_handler/test_handler_mcollective.py
@@ -1,10 +1,12 @@
+from cloudinit import (cloud, distros, helpers, util)
from cloudinit.config import cc_mcollective
-from cloudinit import util
+from cloudinit.sources import DataSourceNoCloud
-from .. import helpers
+from .. import helpers as t_help
import configobj
import logging
+import os
import shutil
from six import BytesIO
import tempfile
@@ -12,11 +14,43 @@ import tempfile
LOG = logging.getLogger(__name__)
-class TestConfig(helpers.FilesystemMockingTestCase):
+STOCK_CONFIG = """\
+main_collective = mcollective
+collectives = mcollective
+libdir = /usr/share/mcollective/plugins
+logfile = /var/log/mcollective.log
+loglevel = info
+daemonize = 1
+
+# Plugins
+securityprovider = psk
+plugin.psk = unset
+
+connector = activemq
+plugin.activemq.pool.size = 1
+plugin.activemq.pool.1.host = stomp1
+plugin.activemq.pool.1.port = 61613
+plugin.activemq.pool.1.user = mcollective
+plugin.activemq.pool.1.password = marionette
+
+# Facts
+factsource = yaml
+plugin.yaml = /etc/mcollective/facts.yaml
+"""
+
+
+class TestConfig(t_help.FilesystemMockingTestCase):
def setUp(self):
super(TestConfig, self).setUp()
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
+ # "./": make os.path.join behave correctly with abs path as second arg
+ self.server_cfg = os.path.join(
+ self.tmp, "./" + cc_mcollective.SERVER_CFG)
+ self.pubcert_file = os.path.join(
+ self.tmp, "./" + cc_mcollective.PUBCERT_FILE)
+ self.pricert_file = os.path.join(
+ self.tmp, "./" + cc_mcollective.PRICERT_FILE)
def test_basic_config(self):
cfg = {
@@ -38,23 +72,79 @@ class TestConfig(helpers.FilesystemMockingTestCase):
},
},
}
+ expected = cfg['mcollective']['conf']
+
self.patchUtils(self.tmp)
cc_mcollective.configure(cfg['mcollective']['conf'])
- contents = util.load_file("/etc/mcollective/server.cfg", decode=False)
+ contents = util.load_file(cc_mcollective.SERVER_CFG, decode=False)
contents = configobj.ConfigObj(BytesIO(contents))
- expected = {
- 'loglevel': 'debug',
- 'connector': 'rabbitmq',
- 'logfile': '/var/log/mcollective.log',
- 'ttl': '4294957',
- 'collectives': 'mcollective',
- 'main_collective': 'mcollective',
- 'securityprovider': 'psk',
- 'daemonize': '1',
- 'factsource': 'yaml',
- 'direct_addressing': '1',
- 'plugin.psk': 'unset',
- 'libdir': '/usr/share/mcollective/plugins',
- 'identity': '1',
- }
self.assertEqual(expected, dict(contents))
+
+ def test_existing_config_is_saved(self):
+ cfg = {'loglevel': 'warn'}
+ util.write_file(self.server_cfg, STOCK_CONFIG)
+ cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
+ self.assertTrue(os.path.exists(self.server_cfg))
+ self.assertTrue(os.path.exists(self.server_cfg + ".old"))
+ self.assertEqual(util.load_file(self.server_cfg + ".old"),
+ STOCK_CONFIG)
+
+ def test_existing_updated(self):
+ cfg = {'loglevel': 'warn'}
+ util.write_file(self.server_cfg, STOCK_CONFIG)
+ cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
+ cfgobj = configobj.ConfigObj(self.server_cfg)
+ self.assertEqual(cfg['loglevel'], cfgobj['loglevel'])
+
+ def test_certificates_written(self):
+ # check public-cert and private-cert keys in config get written
+ cfg = {'loglevel': 'debug',
+ 'public-cert': "this is my public-certificate",
+ 'private-cert': "secret private certificate"}
+
+ cc_mcollective.configure(
+ config=cfg, server_cfg=self.server_cfg,
+ pricert_file=self.pricert_file, pubcert_file=self.pubcert_file)
+
+ found = configobj.ConfigObj(self.server_cfg)
+
+ # make sure these didn't get written into the config
+ self.assertFalse('public-cert' in found)
+ self.assertFalse('private-cert' in found)
+
+ # these need updating to the specified paths
+ self.assertEqual(found['plugin.ssl_server_public'], self.pubcert_file)
+ self.assertEqual(found['plugin.ssl_server_private'], self.pricert_file)
+
+ # and the security provider should be ssl
+ self.assertEqual(found['securityprovider'], 'ssl')
+
+ self.assertEqual(
+ util.load_file(self.pricert_file), cfg['private-cert'])
+ self.assertEqual(
+ util.load_file(self.pubcert_file), cfg['public-cert'])
+
+
+class TestHandler(t_help.TestCase):
+ def _get_cloud(self, distro):
+ cls = distros.fetch(distro)
+ paths = helpers.Paths({})
+ d = cls(distro, {}, paths)
+ ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
+ cc = cloud.Cloud(ds, paths, {}, d, None)
+ return cc
+
+ @t_help.mock.patch("cloudinit.config.cc_mcollective.util")
+ def test_mcollective_install(self, mock_util):
+ cc = self._get_cloud('ubuntu')
+ cc.distro = t_help.mock.MagicMock()
+ mock_util.load_file.return_value = b""
+ mycfg = {'mcollective': {'conf': {'loglevel': 'debug'}}}
+ cc_mcollective.handle('cc_mcollective', mycfg, cc, LOG, [])
+ self.assertTrue(cc.distro.install_packages.called)
+ install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
+ self.assertEqual(install_pkg, ('mcollective',))
+
+ self.assertTrue(mock_util.subp.called)
+ self.assertEqual(mock_util.subp.call_args_list[0][0][0],
+ ['service', 'mcollective', 'restart'])
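test_certificates_written above encodes the expected shape of the certificate handling: cert material must never land in server.cfg itself; it is written to separate files, the config gains only the paths, and the security provider flips to ssl. A sketch of that shape (cc_mcollective.configure is the real implementation):

    def apply_certs_sketch(cfg, pubcert_file, pricert_file):
        if 'public-cert' in cfg:
            with open(pubcert_file, 'w') as fh:
                fh.write(cfg.pop('public-cert'))
            cfg['plugin.ssl_server_public'] = pubcert_file
            cfg['securityprovider'] = 'ssl'
        if 'private-cert' in cfg:
            with open(pricert_file, 'w') as fh:
                fh.write(cfg.pop('private-cert'))
            cfg['plugin.ssl_server_private'] = pricert_file
            cfg['securityprovider'] = 'ssl'
        return cfg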
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
new file mode 100644
index 00000000..1c7bb06a
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -0,0 +1,274 @@
+from cloudinit.config import cc_ntp
+from cloudinit.sources import DataSourceNone
+from cloudinit import templater
+from cloudinit import (distros, helpers, cloud, util)
+from ..helpers import FilesystemMockingTestCase, mock
+
+import logging
+import os
+import shutil
+import tempfile
+
+LOG = logging.getLogger(__name__)
+
+NTP_TEMPLATE = """
+## template: jinja
+
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+"""
+
+
+NTP_EXPECTED_UBUNTU = """
+# pools
+pool 0.mycompany.pool.ntp.org iburst
+# servers
+server 192.168.23.3 iburst
+
+"""
+
+
+class TestNtp(FilesystemMockingTestCase):
+
+ def setUp(self):
+ super(TestNtp, self).setUp()
+ self.subp = util.subp
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.new_root)
+
+ def _get_cloud(self, distro, metadata=None):
+ self.patchUtils(self.new_root)
+ paths = helpers.Paths({})
+ cls = distros.fetch(distro)
+ mydist = cls(distro, {}, paths)
+ myds = DataSourceNone.DataSourceNone({}, mydist, paths)
+ if metadata:
+ myds.metadata.update(metadata)
+ return cloud.Cloud(myds, paths, {}, mydist, None)
+
+ @mock.patch("cloudinit.config.cc_ntp.util")
+ def test_ntp_install(self, mock_util):
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ cc.distro.name = 'ubuntu'
+ mock_util.which.return_value = None
+ install_func = mock.MagicMock()
+
+ cc_ntp.install_ntp(install_func, packages=['ntpx'], check_exe='ntpdx')
+
+ self.assertTrue(install_func.called)
+ mock_util.which.assert_called_with('ntpdx')
+ install_pkg = install_func.call_args_list[0][0][0]
+ self.assertEqual(sorted(install_pkg), ['ntpx'])
+
+ @mock.patch("cloudinit.config.cc_ntp.util")
+ def test_ntp_install_not_needed(self, mock_util):
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ cc.distro.name = 'ubuntu'
+ mock_util.which.return_value = ["/usr/sbin/ntpd"]
+ cc_ntp.install_ntp(cc.distro.install_packages)
+ self.assertFalse(cc.distro.install_packages.called)
+
+ def test_ntp_rename_ntp_conf(self):
+ with mock.patch.object(os.path, 'exists',
+ return_value=True) as mockpath:
+ with mock.patch.object(util, 'rename') as mockrename:
+ cc_ntp.rename_ntp_conf()
+
+ mockpath.assert_called_with('/etc/ntp.conf')
+ mockrename.assert_called_with('/etc/ntp.conf', '/etc/ntp.conf.dist')
+
+ def test_ntp_rename_ntp_conf_skip_missing(self):
+ with mock.patch.object(os.path, 'exists',
+ return_value=False) as mockpath:
+ with mock.patch.object(util, 'rename') as mockrename:
+ cc_ntp.rename_ntp_conf()
+
+ mockpath.assert_called_with('/etc/ntp.conf')
+ mockrename.assert_not_called()
+
+ def ntp_conf_render(self, distro):
+ """ntp_conf_render
+ Test rendering of a ntp.conf from template for a given distro
+ """
+
+ cfg = {'ntp': {}}
+ mycloud = self._get_cloud(distro)
+ distro_names = cc_ntp.generate_server_names(distro)
+
+ with mock.patch.object(templater, 'render_to_file') as mocktmpl:
+ with mock.patch.object(os.path, 'isfile', return_value=True):
+ with mock.patch.object(util, 'rename'):
+ cc_ntp.write_ntp_config_template(cfg, mycloud)
+
+ mocktmpl.assert_called_once_with(
+ ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro),
+ '/etc/ntp.conf',
+ {'servers': [], 'pools': distro_names})
+
+ def test_ntp_conf_render_rhel(self):
+ """Test templater.render_to_file() for rhel"""
+ self.ntp_conf_render('rhel')
+
+ def test_ntp_conf_render_debian(self):
+ """Test templater.render_to_file() for debian"""
+ self.ntp_conf_render('debian')
+
+ def test_ntp_conf_render_fedora(self):
+ """Test templater.render_to_file() for fedora"""
+ self.ntp_conf_render('fedora')
+
+ def test_ntp_conf_render_sles(self):
+ """Test templater.render_to_file() for sles"""
+ self.ntp_conf_render('sles')
+
+ def test_ntp_conf_render_ubuntu(self):
+ """Test templater.render_to_file() for ubuntu"""
+ self.ntp_conf_render('ubuntu')
+
+ def test_ntp_conf_servers_no_pools(self):
+ distro = 'ubuntu'
+ pools = []
+ servers = ['192.168.2.1']
+ cfg = {
+ 'ntp': {
+ 'pools': pools,
+ 'servers': servers,
+ }
+ }
+ mycloud = self._get_cloud(distro)
+
+ with mock.patch.object(templater, 'render_to_file') as mocktmpl:
+ with mock.patch.object(os.path, 'isfile', return_value=True):
+ with mock.patch.object(util, 'rename'):
+ cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud)
+
+ mocktmpl.assert_called_once_with(
+ ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro),
+ '/etc/ntp.conf',
+ {'servers': servers, 'pools': pools})
+
+ def test_ntp_conf_custom_pools_no_server(self):
+ distro = 'ubuntu'
+ pools = ['0.mycompany.pool.ntp.org']
+ servers = []
+ cfg = {
+ 'ntp': {
+ 'pools': pools,
+ 'servers': servers,
+ }
+ }
+ mycloud = self._get_cloud(distro)
+
+ with mock.patch.object(templater, 'render_to_file') as mocktmpl:
+ with mock.patch.object(os.path, 'isfile', return_value=True):
+ with mock.patch.object(util, 'rename'):
+ cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud)
+
+ mocktmpl.assert_called_once_with(
+ ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro),
+ '/etc/ntp.conf',
+ {'servers': servers, 'pools': pools})
+
+ def test_ntp_conf_custom_pools_and_server(self):
+ distro = 'ubuntu'
+ pools = ['0.mycompany.pool.ntp.org']
+ servers = ['192.168.23.3']
+ cfg = {
+ 'ntp': {
+ 'pools': pools,
+ 'servers': servers,
+ }
+ }
+ mycloud = self._get_cloud(distro)
+
+ with mock.patch.object(templater, 'render_to_file') as mocktmpl:
+ with mock.patch.object(os.path, 'isfile', return_value=True):
+ with mock.patch.object(util, 'rename'):
+ cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud)
+
+ mocktmpl.assert_called_once_with(
+ ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro),
+ '/etc/ntp.conf',
+ {'servers': servers, 'pools': pools})
+
+ def test_ntp_conf_contents_match(self):
+ """Test rendered contents of /etc/ntp.conf for ubuntu"""
+ pools = ['0.mycompany.pool.ntp.org']
+ servers = ['192.168.23.3']
+ cfg = {
+ 'ntp': {
+ 'pools': pools,
+ 'servers': servers,
+ }
+ }
+ mycloud = self._get_cloud('ubuntu')
+ side_effect = [NTP_TEMPLATE.lstrip()]
+
+ # work backwards from util.write_file and mock out call path
+ # write_ntp_config_template()
+ # cloud.get_template_filename()
+ # os.path.isfile()
+ # templater.render_to_file()
+ # templater.render_from_file()
+ # util.load_file()
+ # util.write_file()
+ #
+ with mock.patch.object(util, 'write_file') as mockwrite:
+ with mock.patch.object(util, 'load_file', side_effect=side_effect):
+ with mock.patch.object(os.path, 'isfile', return_value=True):
+ with mock.patch.object(util, 'rename'):
+ cc_ntp.write_ntp_config_template(cfg.get('ntp'),
+ mycloud)
+
+ mockwrite.assert_called_once_with(
+ '/etc/ntp.conf',
+ NTP_EXPECTED_UBUNTU,
+ mode=420)
+
+ def test_ntp_handler(self):
+ """Test ntp handler renders ubuntu ntp.conf template"""
+ pools = ['0.mycompany.pool.ntp.org']
+ servers = ['192.168.23.3']
+ cfg = {
+ 'ntp': {
+ 'pools': pools,
+ 'servers': servers,
+ }
+ }
+ mycloud = self._get_cloud('ubuntu')
+ side_effect = [NTP_TEMPLATE.lstrip()]
+
+ with mock.patch.object(util, 'which', return_value=None):
+ with mock.patch.object(os.path, 'exists'):
+ with mock.patch.object(util, 'write_file') as mockwrite:
+ with mock.patch.object(util, 'load_file',
+ side_effect=side_effect):
+ with mock.patch.object(os.path, 'isfile',
+ return_value=True):
+ with mock.patch.object(util, 'rename'):
+ cc_ntp.handle("notimportant", cfg,
+ mycloud, LOG, None)
+
+ mockwrite.assert_called_once_with(
+ '/etc/ntp.conf',
+ NTP_EXPECTED_UBUNTU,
+ mode=420)
+
+ @mock.patch("cloudinit.config.cc_ntp.util")
+ def test_no_ntpcfg_does_nothing(self, mock_util):
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ cc_ntp.handle('cc_ntp', {}, cc, LOG, [])
+ self.assertFalse(cc.distro.install_packages.called)
+ self.assertFalse(mock_util.subp.called)
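NTP_EXPECTED_UBUNTU above is what NTP_TEMPLATE renders to for the configured pool and server. Since cloud-init's templater delegates "## template: jinja" templates to jinja, the expectation can be sanity-checked with plain jinja2 (assuming jinja2 is installed; whitespace may differ in minor ways):

    import jinja2
    body = NTP_TEMPLATE.split('\n', 2)[2]  # drop the "## template:" header
    print(jinja2.Template(body).render(
        pools=['0.mycompany.pool.ntp.org'], servers=['192.168.23.3']))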
diff --git a/tests/unittests/test_handler/test_handler_spacewalk.py b/tests/unittests/test_handler/test_handler_spacewalk.py
new file mode 100644
index 00000000..44f95e4c
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_spacewalk.py
@@ -0,0 +1,42 @@
+from cloudinit.config import cc_spacewalk
+from cloudinit import util
+
+from .. import helpers
+
+import logging
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+LOG = logging.getLogger(__name__)
+
+
+class TestSpacewalk(helpers.TestCase):
+ space_cfg = {
+ 'spacewalk': {
+ 'server': 'localhost',
+ 'profile_name': 'test',
+ }
+ }
+
+ @mock.patch("cloudinit.config.cc_spacewalk.util.subp")
+ def test_not_is_registered(self, mock_util_subp):
+ mock_util_subp.side_effect = util.ProcessExecutionError(exit_code=1)
+ self.assertFalse(cc_spacewalk.is_registered())
+
+ @mock.patch("cloudinit.config.cc_spacewalk.util.subp")
+ def test_is_registered(self, mock_util_subp):
+ mock_util_subp.side_effect = None
+ self.assertTrue(cc_spacewalk.is_registered())
+
+ @mock.patch("cloudinit.config.cc_spacewalk.util.subp")
+ def test_do_register(self, mock_util_subp):
+ cc_spacewalk.do_register(**self.space_cfg['spacewalk'])
+ mock_util_subp.assert_called_with([
+ 'rhnreg_ks',
+ '--serverUrl', 'https://localhost/XMLRPC',
+ '--profilename', 'test',
+ '--sslCACert', cc_spacewalk.def_ca_cert_path,
+ ], capture=False)
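The two is_registered tests above encode its whole contract: a probe subprocess exiting non-zero (surfacing as util.ProcessExecutionError) means "not registered", any clean exit means "registered". Schematically (a hypothetical stand-in; the probe command itself is cc_spacewalk's detail):

    def is_registered_sketch(probe):
        try:
            probe()  # e.g. util.subp([...], capture=False)
            return True
        except util.ProcessExecutionError:
            return False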
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 37a984ac..d2031f59 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -371,8 +371,30 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
self._create_sysfs_parent_directory()
expected_dmi_value = 'dmidecode-used'
self._configure_dmidecode_return('use-dmidecode', expected_dmi_value)
- self.assertEqual(expected_dmi_value,
- util.read_dmi_data('use-dmidecode'))
+ with mock.patch("cloudinit.util.os.uname") as m_uname:
+ m_uname.return_value = ('x-sysname', 'x-nodename',
+ 'x-release', 'x-version', 'x86_64')
+ self.assertEqual(expected_dmi_value,
+ util.read_dmi_data('use-dmidecode'))
+
+ def test_dmidecode_not_used_on_arm(self):
+ self.patch_mapping({})
+ self._create_sysfs_parent_directory()
+ dmi_val = 'from-dmidecode'
+ dmi_name = 'use-dmidecode'
+ self._configure_dmidecode_return(dmi_name, dmi_val)
+
+ expected = {'armel': None, 'aarch64': None, 'x86_64': dmi_val}
+ found = {}
+ # we do not run the 'dmidecode' binary on some arches;
+ # verify that anything requested that is not in the sysfs dir
+ # returns None on those arches.
+ with mock.patch("cloudinit.util.os.uname") as m_uname:
+ for arch in expected:
+ m_uname.return_value = ('x-sysname', 'x-nodename',
+ 'x-release', 'x-version', arch)
+ found[arch] = util.read_dmi_data(dmi_name)
+ self.assertEqual(expected, found)
def test_none_returned_if_neither_source_has_data(self):
self.patch_mapping({})
@@ -486,4 +508,73 @@ class TestReadSeeded(helpers.TestCase):
self.assertEqual(found_md, {'key1': 'val1'})
self.assertEqual(found_ud, ud)
+
+class TestSubp(helpers.TestCase):
+
+ stdin2err = ['bash', '-c', 'cat >&2']
+ stdin2out = ['cat']
+ utf8_invalid = b'ab\xaadef'
+ utf8_valid = b'start \xc3\xa9 end'
+ utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
+
+ def printf_cmd(self, *args):
+ # bash's printf supports \xaa, as does /usr/bin/printf, but
+ # using bash avoids a dependency on another program.
+ return ['bash', '-c', 'printf "$@"', 'printf'] + list(args)
+
+ def test_subp_handles_utf8(self):
+ # The given bytes contain utf-8 accented characters as seen in e.g.
+ # the "deja dup" package in Ubuntu.
+ cmd = self.printf_cmd(self.utf8_valid_2)
+ (out, _err) = util.subp(cmd, capture=True)
+ self.assertEqual(out, self.utf8_valid_2.decode('utf-8'))
+
+ def test_subp_respects_decode_false(self):
+ (out, err) = util.subp(self.stdin2out, capture=True, decode=False,
+ data=self.utf8_valid)
+ self.assertTrue(isinstance(out, bytes))
+ self.assertTrue(isinstance(err, bytes))
+ self.assertEqual(out, self.utf8_valid)
+
+ def test_subp_decode_ignore(self):
+ # this executes a string that writes invalid utf-8 to stdout
+ (out, _err) = util.subp(self.printf_cmd('abc\\xaadef'),
+ capture=True, decode='ignore')
+ self.assertEqual(out, 'abcdef')
+
+ def test_subp_decode_strict_valid_utf8(self):
+ (out, _err) = util.subp(self.stdin2out, capture=True,
+ decode='strict', data=self.utf8_valid)
+ self.assertEqual(out, self.utf8_valid.decode('utf-8'))
+
+ def test_subp_decode_invalid_utf8_replaces(self):
+ (out, _err) = util.subp(self.stdin2out, capture=True,
+ data=self.utf8_invalid)
+ expected = self.utf8_invalid.decode('utf-8', errors='replace')
+ self.assertEqual(out, expected)
+
+ def test_subp_decode_strict_raises(self):
+ args = []
+ kwargs = {'args': self.stdin2out, 'capture': True,
+ 'decode': 'strict', 'data': self.utf8_invalid}
+ self.assertRaises(UnicodeDecodeError, util.subp, *args, **kwargs)
+
+ def test_subp_capture_stderr(self):
+ data = b'hello world'
+ (out, err) = util.subp(self.stdin2err, capture=True,
+ decode=False, data=data)
+ self.assertEqual(err, data)
+ self.assertEqual(out, b'')
+
+ def test_returns_none_if_no_capture(self):
+ (out, err) = util.subp(self.stdin2out, data=b'', capture=False)
+ self.assertEqual(err, None)
+ self.assertEqual(out, None)
+
+ def test_bunch_of_slashes_in_path(self):
+ self.assertEqual("/target/my/path/",
+ util.target_path("/target/", "//my/path/"))
+ self.assertEqual("/target/my/path/",
+ util.target_path("/target/", "///my/path/"))
+
# vi: ts=4 expandtab
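The TestSubp cases above map subp's decode parameter onto the bytes.decode error handlers: False returns raw bytes, the default replaces undecodable bytes, and 'strict'/'ignore' behave as their names suggest. As a conceptual sketch (not util.subp itself):

    def decode_sketch(data, decode=True):
        if decode is False:
            return data                      # raw bytes, untouched
        errors = 'replace' if decode is True else decode
        return data.decode('utf-8', errors=errors)

    # decode_sketch(b'ab\xaadef')           -> 'ab\ufffddef'
    # decode_sketch(b'ab\xaadef', 'ignore') -> 'abdef'
    # decode_sketch(b'ab\xaadef', 'strict') raises UnicodeDecodeError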
diff --git a/tools/hook-dhclient b/tools/hook-dhclient
new file mode 100755
index 00000000..6a4626c6
--- /dev/null
+++ b/tools/hook-dhclient
@@ -0,0 +1,24 @@
+#!/bin/sh
+# This script writes DHCP lease information into the cloud-init run directory.
+# It is sourced, not executed. For more information see dhclient-script(8).
+is_azure() {
+ local dmi_path="/sys/class/dmi/id/board_vendor" vendor=""
+ if [ -e "$dmi_path" ] && read vendor < "$dmi_path"; then
+ [ "$vendor" = "Microsoft Corporation" ] && return 0
+ fi
+ return 1
+}
+
+is_enabled() {
+ # only execute hooks if cloud-init is enabled and on azure
+ [ -e /run/cloud-init/enabled ] || return 1
+ is_azure
+}
+
+if is_enabled; then
+ case "$reason" in
+ BOUND) cloud-init dhclient-hook up "$interface";;
+ DOWN|RELEASE|REBOOT|STOP|EXPIRE)
+ cloud-init dhclient-hook down "$interface";;
+ esac
+fi
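Because the hook is sourced with $reason and $interface set by dhclient-script, it can be smoke-tested by hand (hypothetical interface name; it only fires when /run/cloud-init/enabled exists and the DMI data says Azure):

    reason=BOUND interface=eth0 sh -c '. ./tools/hook-dhclient'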
diff --git a/tools/hook-network-manager b/tools/hook-network-manager
new file mode 100755
index 00000000..98a36c8a
--- /dev/null
+++ b/tools/hook-network-manager
@@ -0,0 +1,24 @@
+#!/bin/sh
+# This script hooks into NetworkManager(8) via its dispatcher scripts;
+# the arguments are 'interface-name' and 'action'.
+#
+is_azure() {
+ local dmi_path="/sys/class/dmi/id/board_vendor" vendor=""
+ if [ -e "$dmi_path" ] && read vendor < "$dmi_path"; then
+ [ "$vendor" = "Microsoft Corporation" ] && return 0
+ fi
+ return 1
+}
+
+is_enabled() {
+ # only execute hooks if cloud-init is enabled and on azure
+ [ -e /run/cloud-init/enabled ] || return 1
+ is_azure
+}
+
+if is_enabled; then
+ case "$1:$2" in
+ *:up) exec cloud-init dhclient-hook up "$1";;
+ *:down) exec cloud-init dhclient-hook down "$1";;
+ esac
+fi
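NetworkManager dispatcher scripts receive the interface name and action as positional arguments, so the manual smoke test is direct (hypothetical interface name):

    sh tools/hook-network-manager eth0 up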
diff --git a/tools/hook-rhel.sh b/tools/hook-rhel.sh
new file mode 100755
index 00000000..8232414c
--- /dev/null
+++ b/tools/hook-rhel.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+# Current versions of RHEL and CentOS do not honor the directory
+# /etc/dhcp/dhclient-exit-hooks.d, so this file can be placed in
+# /etc/dhcp/dhclient.d instead.
+is_azure() {
+ local dmi_path="/sys/class/dmi/id/board_vendor" vendor=""
+ if [ -e "$dmi_path" ] && read vendor < "$dmi_path"; then
+ [ "$vendor" = "Microsoft Corporation" ] && return 0
+ fi
+ return 1
+}
+
+is_enabled() {
+ # only execute hooks if cloud-init is enabled and on azure
+ [ -e /run/cloud-init/enabled ] || return 1
+ is_azure
+}
+
+hook-rhel_config(){
+ is_enabled || return 0
+ cloud-init dhclient-hook up "$interface"
+}
+
+hook-rhel_restore(){
+ is_enabled || return 0
+ cloud-init dhclient-hook down "$interface"
+}
diff --git a/tools/make-dist-tarball b/tools/make-dist-tarball
deleted file mode 100755
index 5b078515..00000000
--- a/tools/make-dist-tarball
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-
-Usage() {
- cat <<EOF
-Usage: ${0##*/} version
- make a tarball of 'version'
- must be in a bzr directory, and 'version' must be a tag
-
-EOF
-}
-
-topdir="$PWD"
-tag="$1"
-
-[ -n "$tag" ] || { Usage 1>&2 ; exit 1; }
-
-out="${topdir}/cloud-init-${tag}.tar.gz"
-
-bzr export --format=tgz --root="cloud-init-$tag" \
- "--revision=tag:${tag}" "$out" "$topdir" &&
- echo "Wrote ${out}"
diff --git a/tools/make-tarball b/tools/make-tarball
index b7039150..c150dd2f 100755
--- a/tools/make-tarball
+++ b/tools/make-tarball
@@ -1,39 +1,63 @@
#!/bin/sh
set -e
-find_root() {
- local topd
- if [ -z "${CLOUD_INIT_TOP_D}" ]; then
- topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
- else
- topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
- fi
- [ $? -eq 0 -a -f "${topd}/setup.py" ] || return
- ROOT_DIR="$topd"
+TEMP_D=""
+cleanup() {
+ [ -z "$TEMP_D" ] || rm -Rf "${TEMP_D}"
}
+trap cleanup EXIT
-if ! find_root; then
- echo "Unable to locate 'setup.py' file that should" \
- "exist in the cloud-init root directory." 1>&2
- exit 1;
-fi
+Usage() {
+ cat <<EOF
+Usage: ${0##*/} [revision]
+ create a tarball of revision (default HEAD)
-REVNO=$(bzr revno "$ROOT_DIR")
+ options:
+ -o | --output FILE write to file
+EOF
+}
-if [ ! -z "$1" ]; then
- ARCHIVE_FN="$1"
-else
- VERSION=$("$ROOT_DIR/tools/read-version")
- ARCHIVE_FN="$PWD/cloud-init-$VERSION~bzr$REVNO.tar.gz"
-fi
+short_opts="ho:v"
+long_opts="help,output:,long,verbose"
+getopt_out=$(getopt --name "${0##*/}" \
+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+ eval set -- "${getopt_out}" || { Usage 1>&2; exit 1; }
-export_uncommitted=""
-if [ "${UNCOMMITTED:-0}" != "0" ]; then
- export_uncommitted="--uncommitted"
+long_opt=""
+while [ $# -ne 0 ]; do
+ cur=$1; next=$2
+ case "$cur" in
+ -o|--output) output=$next; shift;;
+ --long) long_opt="--long";;
+ --) shift; break;;
+ esac
+ shift;
+done
+
+rev=${1:-HEAD}
+version=$(git describe ${long_opt} $rev)
+
+archive_base="cloud-init-$version"
+if [ -z "$output" ]; then
+ output="$archive_base.tar.gz"
fi
-bzr export ${export_uncommitted} \
- --format=tgz --root="cloud-init-$VERSION~bzr$REVNO" \
- "--revision=${REVNO}" "${ARCHIVE_FN}" "$ROOT_DIR"
+# when building an archive from HEAD, ensure that there aren't any
+# uncommitted changes in the working directory (because these would not
+# end up in the archive).
+if [ "$rev" = HEAD ] && ! git diff-index --quiet HEAD --; then
+ if [ -z "$SKIP_UNCOMITTED_CHANGES_CHECK" ]; then
+ echo "ERROR: There are uncommitted changes in your working directory." >&2
+ exit 1
+ else
+ echo "WARNING: There are uncommitted changes in your working directory." >&2
+ echo " This changes will not be included in the archive." >&2
+ fi
+fi
-echo "$ARCHIVE_FN"
+TEMP_D=$(mktemp -d)
+tar=${output##*/}
+tar="$TEMP_D/${tar%.gz}"
+git archive --format=tar --prefix="$archive_base/" "$rev" > "$tar"
+gzip -9 -c "$tar" > "$output"
+echo "$output"
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 6a6f3e12..9fc503eb 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -1,8 +1,13 @@
#!/usr/bin/env python
+# You might be tempted to rewrite this as a shell script, but you
+# would be surprised to discover that things like 'egrep' or 'sed' may
+# differ between Linux and *BSD.
+
import os
import re
import sys
+import subprocess
if 'CLOUD_INIT_TOP_D' in os.environ:
topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
@@ -16,14 +21,21 @@ for fname in ("setup.py", "requirements.txt"):
sys.exit(1)
if len(sys.argv) > 1:
- reqfile = sys.argv[1]
+ reqfile = sys.argv[1]
else:
- reqfile = "requirements.txt"
-
+ reqfile = "requirements.txt"
+
with open(os.path.join(topd, reqfile), "r") as fp:
for line in fp:
- if not line.strip() or line.startswith("#"):
+ line = line.strip()
+ if not line or line.startswith("#"):
continue
- sys.stdout.write(re.split("[>=.<]*", line)[0].strip() + "\n")
+
+ # remove pip-style markers
+ dep = line.split(';')[0]
+
+ # remove version requirements
+ dep = re.split("[>=.<]*", dep)[0].strip()
+ print(dep)
sys.exit(0)
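The parsing above is two strips per line; a behaviourally equivalent demo (note the script's own "[>=.<]*" split pattern depends on Python 2's re.split skipping empty matches, so this variant uses a pattern that cannot match the empty string):

    import re
    line = 'requests >= 2.4.2 ; python_version < "3.4"'
    dep = line.split(';')[0]                 # drop pip-style marker
    dep = re.split('[>=<]', dep)[0].strip()  # drop version requirement
    print(dep)                               # -> requests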
diff --git a/tools/read-version b/tools/read-version
index d02651e9..c10f9b46 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -1,26 +1,101 @@
#!/usr/bin/env python
import os
-import re
+import json
+import subprocess
import sys
-if 'CLOUD_INIT_TOP_D' in os.environ:
- topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
-else:
- topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+if "avoid-pep8-E402-import-not-top-of-file":
+ _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ sys.path.insert(0, _tdir)
+ from cloudinit import version as ci_version
+
+
+def tiny_p(cmd, capture=True):
+ # python 2.6 doesn't have check_output
+ stdout = subprocess.PIPE
+ stderr = subprocess.PIPE
+ sp = subprocess.Popen(cmd, stdout=stdout,
+ stderr=stderr, stdin=None,
+ universal_newlines=True)
+ (out, err) = sp.communicate()
+ ret = sp.returncode
+ if ret not in [0]:
+ raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" %
+ (cmd, ret, out, err))
+ return out
+
+
+def which(program):
+ # Return path of program for execution if found in path
+ def is_exe(fpath):
+ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+ _fpath, _ = os.path.split(program)
+ if _fpath:
+ if is_exe(program):
+ return program
+ else:
+ for path in os.environ.get("PATH", "").split(os.pathsep):
+ path = path.strip('"')
+ exe_file = os.path.join(path, program)
+ if is_exe(exe_file):
+ return exe_file
+
+ return None
+
+
+use_long = '--long' in sys.argv or os.environ.get('CI_RV_LONG')
+use_tags = '--tags' in sys.argv or os.environ.get('CI_RV_TAGS')
+output_json = '--json' in sys.argv
-for fname in ("setup.py", "ChangeLog"):
- if not os.path.isfile(os.path.join(topd, fname)):
- sys.stderr.write("Unable to locate '%s' file that should "
- "exist in cloud-init root directory." % fname)
+src_version = ci_version.version_string()
+version_long = None
+
+if os.path.isdir(os.path.join(_tdir, ".git")) and which("git"):
+ flags = []
+ if use_tags:
+ flags = ['--tags']
+ cmd = ['git', 'describe'] + flags
+
+ version = tiny_p(cmd).strip()
+
+ if not version.startswith(src_version):
+ sys.stderr.write("git describe version (%s) differs from "
+ "cloudinit.version (%s)\n" % (version, src_version))
sys.exit(1)
-vermatch = re.compile(r"^[0-9]+[.][0-9]+[.][0-9]+:$")
+ version_long = tiny_p(cmd + ["--long"]).strip()
+else:
+ version = src_version
+ version_long = None
+
+# version is X.Y.Z or X.Y.Z-xxx-gHASH (git describe output)
+# version_long is None or X.Y.Z-xxx-gHASH
+release = version.partition("-")[0]
+extra = None
+commit = None
+distance = None
+
+if version_long:
+ info = version_long.partition("-")[2]
+ extra = "-" + info
+ distance, commit = info.split("-")
+ # remove the 'g' from gHASH
+ commit = commit[1:]
-with open(os.path.join(topd, "ChangeLog"), "r") as fp:
- for line in fp:
- if vermatch.match(line):
- sys.stdout.write(line.strip()[:-1] + "\n")
- break
+data = {
+ 'release': release,
+ 'version': version,
+ 'version_long': version_long,
+ 'extra': extra,
+ 'commit': commit,
+ 'distance': distance,
+}
+
+if output_json:
+ sys.stdout.write(json.dumps(data, indent=1) + "\n")
+else:
+ sys.stdout.write(release + "\n")
sys.exit(0)
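Typical invocations (output values depend on the checkout):

    ./tools/read-version          # prints the release, e.g. 0.7.7
    ./tools/read-version --json   # full dict: release, version,
                                  # version_long, extra, commit, distance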