-rw-r--r--  MANIFEST.in | 11
-rw-r--r--  Makefile | 35
-rw-r--r--  cloudinit/cmd/main.py | 24
-rw-r--r--  cloudinit/config/cc_chef.py | 2
-rw-r--r--  cloudinit/config/cc_growpart.py | 2
-rw-r--r--  cloudinit/config/cc_ntp.py | 69
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 2
-rw-r--r--  cloudinit/config/cc_write_files.py | 33
-rw-r--r--  cloudinit/config/schema.py | 222
-rwxr-xr-x  cloudinit/distros/__init__.py | 2
-rw-r--r--  cloudinit/distros/arch.py | 2
-rw-r--r--  cloudinit/distros/centos.py | 12
-rw-r--r--  cloudinit/distros/debian.py | 48
-rw-r--r--  cloudinit/distros/parsers/networkmanager_conf.py | 23
-rw-r--r--  cloudinit/net/__init__.py | 315
-rw-r--r--  cloudinit/net/eni.py | 46
-rw-r--r--  cloudinit/net/netplan.py | 17
-rw-r--r--  cloudinit/net/network_state.py | 244
-rw-r--r--  cloudinit/net/renderer.py | 8
-rw-r--r--  cloudinit/net/sysconfig.py | 155
-rw-r--r--  cloudinit/net/tests/__init__.py | 0
-rw-r--r--  cloudinit/net/tests/test_init.py | 522
-rw-r--r--  cloudinit/net/udev.py | 7
-rw-r--r--  cloudinit/netinfo.py | 7
-rw-r--r--  cloudinit/settings.py | 2
-rw-r--r--  cloudinit/sources/DataSourceAliYun.py | 14
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 151
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 21
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 12
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py | 234
-rw-r--r--  cloudinit/sources/__init__.py | 15
-rw-r--r--  cloudinit/stages.py | 5
-rw-r--r--  cloudinit/url_helper.py | 10
-rw-r--r--  cloudinit/util.py | 52
-rw-r--r--  config/cloud.cfg-freebsd | 88
-rw-r--r--  config/cloud.cfg.tmpl (renamed from config/cloud.cfg) | 83
-rw-r--r--  doc/examples/cloud-config-chef.txt | 12
-rw-r--r--  doc/examples/cloud-config-disk-setup.txt | 8
-rw-r--r--  doc/rtd/conf.py | 10
-rw-r--r--  doc/rtd/topics/datasources.rst | 26
-rw-r--r--  doc/rtd/topics/datasources/azure.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/nocloud.rst | 32
-rw-r--r--  doc/rtd/topics/dir_layout.rst | 14
-rw-r--r--  doc/rtd/topics/merging.rst | 4
-rw-r--r--  doc/rtd/topics/network-config-format-v1.rst | 4
-rw-r--r--  doc/rtd/topics/network-config.rst | 4
-rw-r--r--  doc/rtd/topics/tests.rst | 631
-rw-r--r--  doc/rtd/topics/vendordata.rst | 4
-rwxr-xr-x  packages/bddeb | 45
-rwxr-xr-x  packages/brpm | 45
-rw-r--r--  packages/debian/control.in | 11
-rw-r--r--  packages/pkg-deps.json | 88
-rw-r--r--  packages/redhat/cloud-init.spec.in | 178
-rw-r--r--  packages/suse/cloud-init.spec.in | 52
-rw-r--r--  requirements.txt | 6
-rwxr-xr-x  setup.py | 183
-rw-r--r--  snapcraft.yaml | 3
-rw-r--r--  systemd/cloud-config.service.tmpl (renamed from systemd/cloud-config.service) | 1
-rw-r--r--  systemd/cloud-final.service.tmpl (renamed from systemd/cloud-final.service) | 7
-rw-r--r--  systemd/cloud-init-local.service.tmpl (renamed from systemd/cloud-init-local.service) | 5
-rw-r--r--  systemd/cloud-init.service.tmpl (renamed from systemd/cloud-init.service) | 10
-rw-r--r--  systemd/cloud-init.target | 2
-rw-r--r--  systemd/systemd-fsck@.service.d/cloud-init.conf | 2
-rwxr-xr-x  sysvinit/freebsd/cloudinitlocal | 2
-rw-r--r--  templates/hosts.debian.tmpl | 4
-rw-r--r--  templates/hosts.suse.tmpl | 3
-rw-r--r--  tests/cloud_tests/__init__.py | 7
-rw-r--r--  tests/cloud_tests/__main__.py | 45
-rw-r--r--  tests/cloud_tests/args.py | 150
-rw-r--r--  tests/cloud_tests/bddeb.py | 118
-rw-r--r--  tests/cloud_tests/collect.py | 114
-rw-r--r--  tests/cloud_tests/config.py | 139
-rw-r--r--  tests/cloud_tests/configs/bugs/lp1628337.yaml | 3
-rw-r--r--  tests/cloud_tests/configs/examples/add_apt_repositories.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/apt_configure_conf.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/apt_configure_disable_suites.yaml | 3
-rw-r--r--  tests/cloud_tests/configs/modules/apt_configure_primary.yaml | 7
-rw-r--r--  tests/cloud_tests/configs/modules/apt_configure_proxy.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/apt_configure_security.yaml | 3
-rw-r--r--  tests/cloud_tests/configs/modules/apt_configure_sources_key.yaml | 3
-rw-r--r--  tests/cloud_tests/configs/modules/apt_configure_sources_keyserver.yaml | 5
-rw-r--r--  tests/cloud_tests/configs/modules/apt_configure_sources_list.yaml | 3
-rw-r--r--  tests/cloud_tests/configs/modules/apt_configure_sources_ppa.yaml | 9
-rw-r--r--  tests/cloud_tests/configs/modules/apt_pipelining_disable.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/apt_pipelining_os.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/byobu.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/keys_to_console.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/landscape.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/locale.yaml | 3
-rw-r--r--  tests/cloud_tests/configs/modules/lxd_bridge.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/lxd_dir.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/ntp.yaml | 9
-rw-r--r--  tests/cloud_tests/configs/modules/ntp_pools.yaml | 8
-rw-r--r--  tests/cloud_tests/configs/modules/ntp_servers.yaml | 5
-rw-r--r--  tests/cloud_tests/configs/modules/package_update_upgrade_install.yaml | 11
-rw-r--r--  tests/cloud_tests/configs/modules/set_hostname.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/set_hostname_fqdn.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/set_password.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/set_password_expire.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/snappy.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/ssh_auth_key_fingerprints_disable.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/ssh_auth_key_fingerprints_enable.yaml | 5
-rw-r--r--  tests/cloud_tests/configs/modules/ssh_import_id.yaml | 3
-rw-r--r--  tests/cloud_tests/configs/modules/ssh_keys_generate.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/ssh_keys_provided.yaml | 3
-rw-r--r--  tests/cloud_tests/configs/modules/timezone.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/user_groups.yaml | 2
-rw-r--r--  tests/cloud_tests/configs/modules/write_files.yaml | 4
-rw-r--r--  tests/cloud_tests/images/__init__.py | 7
-rw-r--r--  tests/cloud_tests/images/base.py | 68
-rw-r--r--  tests/cloud_tests/images/lxd.py | 176
-rw-r--r--  tests/cloud_tests/instances/__init__.py | 6
-rw-r--r--  tests/cloud_tests/instances/base.py | 162
-rw-r--r--  tests/cloud_tests/instances/lxd.py | 132
-rw-r--r--  tests/cloud_tests/manage.py | 29
-rw-r--r--  tests/cloud_tests/platforms.yaml | 50
-rw-r--r--  tests/cloud_tests/platforms/__init__.py | 6
-rw-r--r--  tests/cloud_tests/platforms/base.py | 44
-rw-r--r--  tests/cloud_tests/platforms/lxd.py | 97
-rw-r--r--  tests/cloud_tests/releases.yaml | 306
-rw-r--r--  tests/cloud_tests/run_funcs.py | 75
-rw-r--r--  tests/cloud_tests/setup_image.py | 196
-rw-r--r--  tests/cloud_tests/snapshots/__init__.py | 6
-rw-r--r--  tests/cloud_tests/snapshots/base.py | 43
-rw-r--r--  tests/cloud_tests/snapshots/lxd.py | 51
-rw-r--r--  tests/cloud_tests/stage.py | 52
-rw-r--r--  tests/cloud_tests/testcases.yaml | 1
-rw-r--r--  tests/cloud_tests/testcases/__init__.py | 16
-rw-r--r--  tests/cloud_tests/testcases/base.py | 51
-rw-r--r--  tests/cloud_tests/testcases/bugs/__init__.py | 4
-rw-r--r--  tests/cloud_tests/testcases/bugs/lp1511485.py | 6
-rw-r--r--  tests/cloud_tests/testcases/bugs/lp1628337.py | 8
-rw-r--r--  tests/cloud_tests/testcases/examples/__init__.py | 4
-rw-r--r--  tests/cloud_tests/testcases/examples/add_apt_repositories.py | 8
-rw-r--r--  tests/cloud_tests/testcases/examples/alter_completion_message.py | 23
-rw-r--r--  tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py | 10
-rw-r--r--  tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py | 12
-rw-r--r--  tests/cloud_tests/testcases/examples/including_user_groups.py | 16
-rw-r--r--  tests/cloud_tests/testcases/examples/install_arbitrary_packages.py | 8
-rw-r--r--  tests/cloud_tests/testcases/examples/install_run_chef_recipes.py | 6
-rw-r--r--  tests/cloud_tests/testcases/examples/run_apt_upgrade.py | 6
-rw-r--r--  tests/cloud_tests/testcases/examples/run_commands.py | 6
-rw-r--r--  tests/cloud_tests/testcases/examples/run_commands_first_boot.py | 6
-rw-r--r--  tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py | 12
-rw-r--r--  tests/cloud_tests/testcases/main/__init__.py | 4
-rw-r--r--  tests/cloud_tests/testcases/main/command_output_simple.py | 9
-rw-r--r--  tests/cloud_tests/testcases/modules/__init__.py | 4
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_conf.py | 8
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_primary.py | 8
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_proxy.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_security.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_key.py | 8
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py | 12
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_list.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py | 8
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_pipelining_disable.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_pipelining_os.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/bootcmd.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/byobu.py | 10
-rw-r--r--  tests/cloud_tests/testcases/modules/ca_certs.py | 8
-rw-r--r--  tests/cloud_tests/testcases/modules/debug_disable.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/debug_enable.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/final_message.py | 23
-rw-r--r--  tests/cloud_tests/testcases/modules/keys_to_console.py | 8
-rw-r--r--  tests/cloud_tests/testcases/modules/locale.py | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/lxd_bridge.py | 10
-rw-r--r--  tests/cloud_tests/testcases/modules/lxd_dir.py | 8
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp.py | 13
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_pools.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_servers.py | 2
-rw-r--r--  tests/cloud_tests/testcases/modules/package_update_upgrade_install.py | 12
-rw-r--r--  tests/cloud_tests/testcases/modules/runcmd.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/salt_minion.py | 10
-rw-r--r--  tests/cloud_tests/testcases/modules/seed_random_data.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/set_hostname.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/set_hostname_fqdn.py | 10
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password.py | 8
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password_expire.py | 8
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password_list.py | 5
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password_list_string.py | 5
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py | 16
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py | 14
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_import_id.py | 19
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_keys_generate.py | 22
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_keys_provided.py | 24
-rw-r--r--  tests/cloud_tests/testcases/modules/timezone.py | 6
-rw-r--r--  tests/cloud_tests/testcases/modules/user_groups.py | 16
-rw-r--r--  tests/cloud_tests/testcases/modules/write_files.py | 12
-rw-r--r--  tests/cloud_tests/util.py | 235
-rw-r--r--  tests/cloud_tests/verify.py | 22
-rw-r--r--  tests/unittests/helpers.py | 47
-rw-r--r--  tests/unittests/test_datasource/test_aliyun.py | 51
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 250
-rw-r--r--  tests/unittests/test_datasource/test_common.py | 5
-rw-r--r--  tests/unittests/test_datasource/test_ec2.py | 202
-rw-r--r--  tests/unittests/test_datasource/test_gce.py | 17
-rw-r--r--  tests/unittests/test_datasource/test_scaleway.py | 262
-rw-r--r--  tests/unittests/test_distros/test_create_users.py | 30
-rw-r--r--  tests/unittests/test_distros/test_debian.py | 82
-rw-r--r--  tests/unittests/test_distros/test_netconfig.py | 9
-rw-r--r--  tests/unittests/test_ds_identify.py | 57
-rw-r--r--  tests/unittests/test_handler/test_handler_disk_setup.py | 32
-rw-r--r--  tests/unittests/test_handler/test_handler_ntp.py | 127
-rw-r--r--  tests/unittests/test_handler/test_handler_write_files.py | 37
-rw-r--r--  tests/unittests/test_handler/test_schema.py | 232
-rw-r--r--  tests/unittests/test_net.py | 1262
-rw-r--r--  tests/unittests/test_runs/test_simple_run.py | 18
-rw-r--r--  tests/unittests/test_util.py | 48
-rwxr-xr-x  tools/build-on-freebsd | 6
-rwxr-xr-x  tools/cloudconfig-schema | 35
-rwxr-xr-x  tools/ds-identify | 64
-rwxr-xr-x  tools/mock-meta.py | 4
-rwxr-xr-x  tools/net-convert.py | 2
-rwxr-xr-x  tools/read-dependencies | 244
-rwxr-xr-x  tools/render-cloudcfg | 43
-rwxr-xr-x  tools/run-centos | 271
-rw-r--r--  tox.ini | 16
218 files changed, 8342 insertions, 2091 deletions
diff --git a/MANIFEST.in b/MANIFEST.in
index 94264640..1a4d7711 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,15 @@
-include *.py MANIFEST.in ChangeLog
+include *.py MANIFEST.in LICENSE* ChangeLog
global-include *.txt *.rst *.ini *.in *.conf *.cfg *.sh
+graft config
+graft doc
+graft packages
+graft systemd
+graft sysvinit
+graft templates
+graft tests
graft tools
+graft udev
+graft upstart
prune build
prune dist
prune .tox
diff --git a/Makefile b/Makefile
index 09cd1479..f280911f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
CWD=$(shell pwd)
PYVER ?= $(shell for p in python3 python2; do \
- out=$(which $$p 2>&1) && echo $$p && exit; done; \
- exit 1)
+ out=$$(command -v $$p 2>&1) && echo $$p && exit; done; exit 1)
+
noseopts ?= -v
YAML_FILES=$(shell find cloudinit bin tests tools -name "*.yaml" -type f )
@@ -46,13 +46,19 @@ pyflakes:
pyflakes3:
@$(CWD)/tools/run-pyflakes3
-
+
unittest: clean_pyc
nosetests $(noseopts) tests/unittests
unittest3: clean_pyc
nosetests3 $(noseopts) tests/unittests
+ci-deps-ubuntu:
+ @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro
+
+ci-deps-centos:
+ @$(PYVER) $(CWD)/tools/read-dependencies --distro centos --test-distro
+
pip-requirements:
@echo "Installing cloud-init dependencies..."
$(PIP_INSTALL) -r "$@.txt" -q
@@ -69,6 +75,9 @@ check_version:
"not equal to code version '$(CODE_VERSION)'"; exit 2; \
else true; fi
+config/cloud.cfg:
+ $(PYVER) ./tools/render-cloudcfg config/cloud.cfg.tmpl config/cloud.cfg
+
clean_pyc:
@find . -type f -name "*.pyc" -delete
@@ -79,15 +88,25 @@ yaml:
@$(PYVER) $(CWD)/tools/validate-yaml.py $(YAML_FILES)
rpm:
- ./packages/brpm --distro $(distro)
+ $(PYVER) ./packages/brpm --distro=$(distro)
+
+srpm:
+ $(PYVER) ./packages/brpm --srpm --distro=$(distro)
deb:
@which debuild || \
{ echo "Missing devscripts dependency. Install with:"; \
echo sudo apt-get install devscripts; exit 1; }
- ./packages/bddeb
+ $(PYVER) ./packages/bddeb
+
+deb-src:
+ @which debuild || \
+ { echo "Missing devscripts dependency. Install with:"; \
+ echo sudo apt-get install devscripts; exit 1; }
+ $(PYVER) ./packages/bddeb -S -d
+
-.PHONY: test pyflakes pyflakes3 clean pep8 rpm deb yaml check_version
-.PHONY: pip-test-requirements pip-requirements clean_pyc unittest unittest3
-.PHONY: style-check
+.PHONY: test pyflakes pyflakes3 clean pep8 rpm srpm deb deb-src yaml
+.PHONY: check_version pip-test-requirements pip-requirements clean_pyc
+.PHONY: unittest unittest3 style-check
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 26cc2654..139e03b3 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -3,10 +3,12 @@
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
+# Copyright (C) 2017 Amazon.com, Inc. or its affiliates
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Andrew Jorgensen <ajorgens@amazon.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -25,7 +27,6 @@ from cloudinit import netinfo
from cloudinit import signal_handler
from cloudinit import sources
from cloudinit import stages
-from cloudinit import templater
from cloudinit import url_helper
from cloudinit import util
from cloudinit import version
@@ -42,9 +43,9 @@ from cloudinit import atomic_helper
from cloudinit.dhclient_hook import LogDhclient
-# Pretty little cheetah formatted welcome message template
-WELCOME_MSG_TPL = ("Cloud-init v. ${version} running '${action}' at "
- "${timestamp}. Up ${uptime} seconds.")
+# Welcome message template
+WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at "
+ "{timestamp}. Up {uptime} seconds.")
# Module section template
MOD_SECTION_TPL = "cloud_%s_modules"
@@ -88,13 +89,11 @@ def welcome(action, msg=None):
def welcome_format(action):
- tpl_params = {
- 'version': version.version_string(),
- 'uptime': util.uptime(),
- 'timestamp': util.time_rfc2822(),
- 'action': action,
- }
- return templater.render_string(WELCOME_MSG_TPL, tpl_params)
+ return WELCOME_MSG_TPL.format(
+ version=version.version_string(),
+ uptime=util.uptime(),
+ timestamp=util.time_rfc2822(),
+ action=action)
def extract_fns(args):
@@ -373,6 +372,9 @@ def main_init(name, args):
LOG.debug("[%s] %s is in local mode, will apply init modules now.",
mode, init.datasource)
+ # Give the datasource a chance to use network resources.
+ # This is used on Azure to communicate with the fabric over network.
+ init.setup_datasource()
# update fully realizes user-data (pulling in #include if necessary)
init.update()
# Stage 7
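
The welcome banner in this file now renders with plain str.format() instead of the Cheetah-based templater. A minimal sketch of the new substitution path (the version, timestamp and uptime values below are illustrative, not real output):

    WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at "
                       "{timestamp}. Up {uptime} seconds.")
    print(WELCOME_MSG_TPL.format(
        version='0.7.9', action='init',
        timestamp='Mon, 01 May 2017 12:00:00 +0000', uptime='3.14'))
    # Cloud-init v. 0.7.9 running 'init' at Mon, 01 May 2017 12:00:00 +0000. Up 3.14 seconds.
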
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 2be2532c..02c70b10 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -92,7 +92,7 @@ REQUIRED_CHEF_DIRS = tuple([
])
# Used if fetching chef from a omnibus style package
-OMNIBUS_URL = "https://www.getchef.com/chef/install.sh"
+OMNIBUS_URL = "https://www.chef.io/chef/install.sh"
OMNIBUS_URL_RETRIES = 5
CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index d2bc6e6c..bafca9d8 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -214,7 +214,7 @@ def device_part_info(devpath):
# FreeBSD doesn't know of sysfs so just get everything we need from
# the device, like /dev/vtbd0p2.
- if util.system_info()["platform"].startswith('FreeBSD'):
+ if util.is_FreeBSD():
m = re.search('^(/dev/.+)p([0-9])$', devpath)
return (m.group(1), m.group(2))
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 5cc54536..31ed64e3 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -36,6 +36,7 @@ servers or pools are provided, 4 pools will be used in the format
- 192.168.23.2
"""
+from cloudinit.config.schema import validate_cloudconfig_schema
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import templater
@@ -43,6 +44,7 @@ from cloudinit import type_utils
from cloudinit import util
import os
+from textwrap import dedent
LOG = logging.getLogger(__name__)
@@ -52,21 +54,84 @@ NR_POOL_SERVERS = 4
distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
+# The schema definition for each cloud-config module is a strict contract for
+# describing supported configuration parameters for each cloud-config section.
+# It allows cloud-config to validate and alert users to invalid or ignored
+# configuration options before actually attempting to deploy with said
+# configuration.
+
+schema = {
+ 'id': 'cc_ntp',
+ 'name': 'NTP',
+ 'title': 'enable and configure ntp',
+ 'description': dedent("""\
+ Handle ntp configuration. If ntp is not installed on the system and
+ ntp configuration is specified, ntp will be installed. If there is a
+ default ntp config file in the image or one is present in the
+ distro's ntp package, it will be copied to ``/etc/ntp.conf.dist``
+ before any changes are made. A list of ntp pools and ntp servers can
+ be provided under the ``ntp`` config key. If no ntp ``servers`` or
+ ``pools`` are provided, 4 pools will be used in the format
+ ``{0-3}.{distro}.pool.ntp.org``."""),
+ 'distros': distros,
+ 'examples': [
+ {'ntp': {'pools': ['0.company.pool.ntp.org', '1.company.pool.ntp.org',
+ 'ntp.myorg.org'],
+ 'servers': ['my.ntp.server.local', 'ntp.ubuntu.com',
+ '192.168.23.2']}}],
+ 'frequency': PER_INSTANCE,
+ 'type': 'object',
+ 'properties': {
+ 'ntp': {
+ 'type': ['object', 'null'],
+ 'properties': {
+ 'pools': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ 'format': 'hostname'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ List of ntp pools. If both pools and servers are
+ empty, 4 default pool servers will be provided of
+ the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ },
+ 'servers': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ 'format': 'hostname'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ List of ntp servers. If both pools and servers are
+ empty, 4 default pool servers will be provided with
+ the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ }
+ },
+ 'required': [],
+ 'additionalProperties': False
+ }
+ }
+}
+
+
def handle(name, cfg, cloud, log, _args):
"""Enable and configure ntp."""
-
if 'ntp' not in cfg:
LOG.debug(
"Skipping module named %s, not present or disabled by cfg", name)
return
-
ntp_cfg = cfg.get('ntp', {})
+ # TODO drop this when validate_cloudconfig_schema is strict=True
if not isinstance(ntp_cfg, (dict)):
raise RuntimeError(("'ntp' key existed in config,"
" but not a dictionary type,"
" is a %s %instead"), type_utils.obj_name(ntp_cfg))
+ validate_cloudconfig_schema(cfg, schema)
rename_ntp_conf()
# ensure when ntp is installed it has a configuration file
# to use instead of starting up with packaged defaults
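
The new schema hook can be exercised directly. A minimal sketch, assuming python-jsonschema is installed; the invalid config is made up for illustration:

    from cloudinit.config.cc_ntp import schema
    from cloudinit.config.schema import (
        SchemaValidationError, validate_cloudconfig_schema)

    # 'ntp.pools' must be an array of hostnames; a bare string is invalid.
    try:
        validate_cloudconfig_schema(
            {'ntp': {'pools': '0.mypool.org'}}, schema, strict=True)
    except SchemaValidationError as e:
        print(e)  # ntp.pools: '0.mypool.org' is not of type 'array'

With strict=False (the default, as called from handle() above), the same problem is logged as a warning instead of raised.
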
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index c1c6fe7e..eba58b02 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -71,7 +71,7 @@ def givecmdline(pid):
# Example output from procstat -c 1
# PID COMM ARGS
# 1 init /bin/init --
- if util.system_info()["platform"].startswith('FreeBSD'):
+ if util.is_FreeBSD():
(output, _err) = util.subp(['procstat', '-c', str(pid)])
line = output.splitlines()[1]
m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 72e1cdd6..54ae3a68 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -50,15 +50,19 @@ import base64
import os
import six
+from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
+
frequency = PER_INSTANCE
DEFAULT_OWNER = "root:root"
DEFAULT_PERMS = 0o644
UNKNOWN_ENC = 'text/plain'
+LOG = logging.getLogger(__name__)
+
def handle(name, cfg, _cloud, log, _args):
files = cfg.get('write_files')
@@ -66,10 +70,10 @@ def handle(name, cfg, _cloud, log, _args):
log.debug(("Skipping module named %s,"
" no/empty 'write_files' key in configuration"), name)
return
- write_files(name, files, log)
+ write_files(name, files)
-def canonicalize_extraction(encoding_type, log):
+def canonicalize_extraction(encoding_type):
if not encoding_type:
encoding_type = ''
encoding_type = encoding_type.lower().strip()
@@ -84,31 +88,31 @@ def canonicalize_extraction(encoding_type, log):
if encoding_type in ['b64', 'base64']:
return ['application/base64']
if encoding_type:
- log.warn("Unknown encoding type %s, assuming %s",
- encoding_type, UNKNOWN_ENC)
+ LOG.warning("Unknown encoding type %s, assuming %s",
+ encoding_type, UNKNOWN_ENC)
return [UNKNOWN_ENC]
-def write_files(name, files, log):
+def write_files(name, files):
if not files:
return
for (i, f_info) in enumerate(files):
path = f_info.get('path')
if not path:
- log.warn("No path provided to write for entry %s in module %s",
- i + 1, name)
+ LOG.warning("No path provided to write for entry %s in module %s",
+ i + 1, name)
continue
path = os.path.abspath(path)
- extractions = canonicalize_extraction(f_info.get('encoding'), log)
+ extractions = canonicalize_extraction(f_info.get('encoding'))
contents = extract_contents(f_info.get('content', ''), extractions)
(u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER))
- perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS, log)
+ perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS)
util.write_file(path, contents, mode=perms)
util.chownbyname(path, u, g)
-def decode_perms(perm, default, log):
+def decode_perms(perm, default):
if perm is None:
return default
try:
@@ -119,7 +123,14 @@ def decode_perms(perm, default, log):
# Force to string and try octal conversion
return int(str(perm), 8)
except (TypeError, ValueError):
- log.warn("Undecodable permissions %s, assuming %s", perm, default)
+ reps = []
+ for r in (perm, default):
+ try:
+ reps.append("%o" % r)
+ except TypeError:
+ reps.append("%r" % r)
+ LOG.warning(
+ "Undecodable permissions %s, returning default %s", *reps)
return default
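
With the module-level logger in place, decode_perms no longer needs a log object threaded through it. A quick sketch of its behavior (DEFAULT_PERMS is 0o644, i.e. 420):

    from cloudinit.config.cc_write_files import DEFAULT_PERMS, decode_perms

    decode_perms(None, DEFAULT_PERMS)     # -> 420, the default
    decode_perms('0600', DEFAULT_PERMS)   # octal string -> 384 (0o600)
    decode_perms('bogus', DEFAULT_PERMS)  # warns, returns the default
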
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
new file mode 100644
index 00000000..6400f005
--- /dev/null
+++ b/cloudinit/config/schema.py
@@ -0,0 +1,222 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""schema.py: Set of module functions for processing cloud-config schema."""
+
+from __future__ import print_function
+
+from cloudinit.util import read_file_or_url
+
+import argparse
+import logging
+import os
+import sys
+import yaml
+
+SCHEMA_UNDEFINED = b'UNDEFINED'
+CLOUD_CONFIG_HEADER = b'#cloud-config'
+SCHEMA_DOC_TMPL = """
+{name}
+---
+**Summary:** {title}
+
+{description}
+
+**Internal name:** ``{id}``
+
+**Module frequency:** {frequency}
+
+**Supported distros:** {distros}
+
+**Config schema**:
+{property_doc}
+{examples}
+"""
+SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'
+
+
+class SchemaValidationError(ValueError):
+ """Raised when validating a cloud-config file against a schema."""
+
+ def __init__(self, schema_errors=()):
+ """Init the exception an n-tuple of schema errors.
+
+ @param schema_errors: An n-tuple of the format:
+ ((flat.config.key, msg),)
+ """
+ self.schema_errors = schema_errors
+ error_messages = [
+ '{0}: {1}'.format(config_key, message)
+ for config_key, message in schema_errors]
+ message = "Cloud config schema errors: {0}".format(
+ ', '.join(error_messages))
+ super(SchemaValidationError, self).__init__(message)
+
+
+def validate_cloudconfig_schema(config, schema, strict=False):
+ """Validate provided config meets the schema definition.
+
+ @param config: Dict of cloud configuration settings validated against
+ schema.
+ @param schema: jsonschema dict describing the supported schema definition
+ for the cloud config module (config.cc_*).
+ @param strict: Boolean, when True raise SchemaValidationErrors instead of
+ logging warnings.
+
+ @raises: SchemaValidationError when provided config does not validate
+ against the provided schema.
+ """
+ try:
+ from jsonschema import Draft4Validator, FormatChecker
+ except ImportError:
+ logging.warning(
+ 'Ignoring schema validation. python-jsonschema is not present')
+ return
+ validator = Draft4Validator(schema, format_checker=FormatChecker())
+ errors = ()
+ for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
+ path = '.'.join([str(p) for p in error.path])
+ errors += ((path, error.message),)
+ if errors:
+ if strict:
+ raise SchemaValidationError(errors)
+ else:
+ messages = ['{0}: {1}'.format(k, msg) for k, msg in errors]
+ logging.warning('Invalid config:\n%s', '\n'.join(messages))
+
+
+def validate_cloudconfig_file(config_path, schema):
+ """Validate cloudconfig file adheres to a specific jsonschema.
+
+ @param config_path: Path to the yaml cloud-config file to parse.
+ @param schema: Dict describing a valid jsonschema to validate against.
+
+ @raises SchemaValidationError containing any of schema_errors encountered.
+ @raises RuntimeError when config_path does not exist.
+ """
+ if not os.path.exists(config_path):
+ raise RuntimeError('Configfile {0} does not exist'.format(config_path))
+ content = read_file_or_url('file://{0}'.format(config_path)).contents
+ if not content.startswith(CLOUD_CONFIG_HEADER):
+ errors = (
+ ('header', 'File {0} needs to begin with "{1}"'.format(
+ config_path, CLOUD_CONFIG_HEADER.decode())),)
+ raise SchemaValidationError(errors)
+
+ try:
+ cloudconfig = yaml.safe_load(content)
+ except yaml.parser.ParserError as e:
+ errors = (
+ ('format', 'File {0} is not valid yaml. {1}'.format(
+ config_path, str(e))),)
+ raise SchemaValidationError(errors)
+ validate_cloudconfig_schema(
+ cloudconfig, schema, strict=True)
+
+
+def _get_property_type(property_dict):
+ """Return a string representing a property type from a given jsonschema."""
+ property_type = property_dict.get('type', SCHEMA_UNDEFINED)
+ if isinstance(property_type, list):
+ property_type = '/'.join(property_type)
+ item_type = property_dict.get('items', {}).get('type')
+ if item_type:
+ property_type = '{0} of {1}'.format(property_type, item_type)
+ return property_type
+
+
+def _get_property_doc(schema, prefix=' '):
+ """Return restructured text describing the supported schema properties."""
+ new_prefix = prefix + ' '
+ properties = []
+ for prop_key, prop_config in schema.get('properties', {}).items():
+        # Define prop_name and description for SCHEMA_PROPERTY_TMPL
+ description = prop_config.get('description', '')
+ properties.append(SCHEMA_PROPERTY_TMPL.format(
+ prefix=prefix,
+ prop_name=prop_key,
+ type=_get_property_type(prop_config),
+ description=description.replace('\n', '')))
+ if 'properties' in prop_config:
+ properties.append(
+ _get_property_doc(prop_config, prefix=new_prefix))
+ return '\n\n'.join(properties)
+
+
+def _get_schema_examples(schema, prefix=''):
+ """Return restructured text describing the schema examples if present."""
+ examples = schema.get('examples')
+ if not examples:
+ return ''
+ rst_content = '\n**Examples**::\n\n'
+ for example in examples:
+ example_yaml = yaml.dump(example, default_flow_style=False)
+        # Python 2.6 is missing textwrap.indent
+ lines = example_yaml.split('\n')
+ indented_lines = [' {0}'.format(line) for line in lines]
+ rst_content += '\n'.join(indented_lines)
+ return rst_content
+
+
+def get_schema_doc(schema):
+ """Return reStructured text rendering the provided jsonschema.
+
+ @param schema: Dict of jsonschema to render.
+ @raise KeyError: If schema lacks an expected key.
+ """
+ schema['property_doc'] = _get_property_doc(schema)
+ schema['examples'] = _get_schema_examples(schema)
+ schema['distros'] = ', '.join(schema['distros'])
+ return SCHEMA_DOC_TMPL.format(**schema)
+
+
+def get_schema(section_key=None):
+ """Return a dict of jsonschema defined in any cc_* module.
+
+ @param: section_key: Optionally limit schema to a specific top-level key.
+ """
+ # TODO use util.find_modules in subsequent branch
+ from cloudinit.config.cc_ntp import schema
+ return schema
+
+
+def error(message):
+ print(message, file=sys.stderr)
+ return 1
+
+
+def get_parser():
+ """Return a parser for supported cmdline arguments."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-c', '--config-file',
+ help='Path of the cloud-config yaml file to validate')
+ parser.add_argument('-d', '--doc', action="store_true", default=False,
+ help='Print schema documentation')
+ parser.add_argument('-k', '--key',
+ help='Limit validation or docs to a section key')
+ return parser
+
+
+def main():
+ """Tool to validate schema of a cloud-config file or print schema docs."""
+ parser = get_parser()
+ args = parser.parse_args()
+ exclusive_args = [args.config_file, args.doc]
+ if not any(exclusive_args) or all(exclusive_args):
+ return error('Expected either --config-file argument or --doc')
+
+ schema = get_schema()
+ if args.config_file:
+ try:
+ validate_cloudconfig_file(args.config_file, schema)
+ except (SchemaValidationError, RuntimeError) as e:
+ return error(str(e))
+ print("Valid cloud-config file {0}".format(args.config_file))
+ if args.doc:
+ print(get_schema_doc(schema))
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
+
+
+# vi: ts=4 expandtab
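
Because the module defines main() behind an __main__ guard, it doubles as a command-line tool; tools/cloudconfig-schema in this patch wraps the same entry point. A sketch of both uses (the user-data path is illustrative):

    # Shell:
    #   python -m cloudinit.config.schema --config-file my-user-data.yaml
    #   python -m cloudinit.config.schema --doc

    # Python:
    from cloudinit.config.schema import get_schema, get_schema_doc
    print(get_schema_doc(get_schema()))  # rst docs for the cc_ntp schema
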
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index f56c0cf7..1fd48a7b 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -32,7 +32,7 @@ from cloudinit.distros.parsers import hosts
OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
- 'redhat': ['fedora', 'rhel'],
+ 'redhat': ['centos', 'fedora', 'rhel'],
'gentoo': ['gentoo'],
'freebsd': ['freebsd'],
'suse': ['sles'],
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 75d46201..b4c0ba72 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -119,7 +119,7 @@ class Distro(distros.Distro):
if not conf:
conf = HostnameConf('')
conf.set_hostname(your_hostname)
- util.write_file(out_fn, conf, 0o644)
+ util.write_file(out_fn, str(conf), omode="w", mode=0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/centos.py b/cloudinit/distros/centos.py
new file mode 100644
index 00000000..4b803d2e
--- /dev/null
+++ b/cloudinit/distros/centos.py
@@ -0,0 +1,12 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+from cloudinit import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(rhel.Distro):
+ pass
+
+# vi: ts=4 expandtab
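
With this shim in place, the existing distro loader can resolve the 'centos' name directly; a minimal sketch:

    from cloudinit import distros

    cls = distros.fetch('centos')  # -> cloudinit.distros.centos.Distro
    # Behavior is inherited wholesale from the rhel Distro class.
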
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index d06d46a6..abfb81f4 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -37,11 +37,11 @@ ENI_HEADER = """# This file is generated from information provided by
"""
NETWORK_CONF_FN = "/etc/network/interfaces.d/50-cloud-init.cfg"
+LOCALE_CONF_FN = "/etc/default/locale"
class Distro(distros.Distro):
hostname_conf_fn = "/etc/hostname"
- locale_conf_fn = "/etc/default/locale"
network_conf_fn = {
"eni": "/etc/network/interfaces.d/50-cloud-init.cfg",
"netplan": "/etc/netplan/50-cloud-init.yaml"
@@ -64,16 +64,8 @@ class Distro(distros.Distro):
def apply_locale(self, locale, out_fn=None):
if not out_fn:
- out_fn = self.locale_conf_fn
- util.subp(['locale-gen', locale], capture=False)
- util.subp(['update-locale', locale], capture=False)
- # "" provides trailing newline during join
- lines = [
- util.make_header(),
- 'LANG="%s"' % (locale),
- "",
- ]
- util.write_file(out_fn, "\n".join(lines))
+ out_fn = LOCALE_CONF_FN
+ apply_locale(locale, out_fn)
def install_packages(self, pkglist):
self.update_package_sources()
@@ -225,4 +217,38 @@ def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
LOG.warning(msg)
+
+def apply_locale(locale, sys_path=LOCALE_CONF_FN, keyname='LANG'):
+ """Apply the locale.
+
+ Run locale-gen for the provided locale and set the default
+ system variable `keyname` appropriately in the provided `sys_path`.
+
+ If sys_path indicates that `keyname` is already set to `locale`
+ then no changes will be made and locale-gen not called.
+ This allows images built with a locale already generated to not re-run
+ locale-gen which can be very heavy.
+ """
+ if not locale:
+ raise ValueError('Failed to provide locale value.')
+
+ if not sys_path:
+ raise ValueError('Invalid path: %s' % sys_path)
+
+ if os.path.exists(sys_path):
+ locale_content = util.load_file(sys_path)
+ # if LANG isn't present, regen
+ sys_defaults = util.load_shell_content(locale_content)
+ sys_val = sys_defaults.get(keyname, "")
+ if sys_val.lower() == locale.lower():
+ LOG.debug(
+ "System has '%s=%s' requested '%s', skipping regeneration.",
+ keyname, sys_val, locale)
+ return
+
+ util.subp(['locale-gen', locale], capture=False)
+ util.subp(
+ ['update-locale', '--locale-file=' + sys_path,
+ '%s=%s' % (keyname, locale)], capture=False)
+
# vi: ts=4 expandtab
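
The skip path relies on util.load_shell_content to parse the existing defaults file. A minimal sketch of the check and the call (the non-skip path needs a Debian-family system with locale-gen available):

    from cloudinit import util
    from cloudinit.distros.debian import apply_locale

    # The guard parses /etc/default/locale as shell assignments:
    util.load_shell_content('LANG="en_US.UTF-8"\n')  # {'LANG': 'en_US.UTF-8'}

    # No-op if the system default already matches; otherwise runs
    # locale-gen and update-locale.
    apply_locale('en_US.UTF-8')
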
diff --git a/cloudinit/distros/parsers/networkmanager_conf.py b/cloudinit/distros/parsers/networkmanager_conf.py
new file mode 100644
index 00000000..ac51f122
--- /dev/null
+++ b/cloudinit/distros/parsers/networkmanager_conf.py
@@ -0,0 +1,23 @@
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# Author: Ryan McCabe <rmccabe@redhat.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import configobj
+
+# This module is used to set additional NetworkManager configuration
+# in /etc/NetworkManager/conf.d
+#
+
+
+class NetworkManagerConf(configobj.ConfigObj):
+ def __init__(self, contents):
+ configobj.ConfigObj.__init__(self, contents,
+ interpolation=False,
+ write_empty_values=False)
+
+ def set_section_keypair(self, section_name, key, value):
+ if section_name not in self.sections:
+ self.main[section_name] = {}
+        self.main[section_name][key] = value
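
A minimal usage sketch, assuming configobj is installed (the section and key are illustrative):

    from cloudinit.distros.parsers.networkmanager_conf import (
        NetworkManagerConf)

    conf = NetworkManagerConf([])  # start from empty contents
    conf.set_section_keypair('main', 'dns', 'none')
    print('\n'.join(conf.write()))
    # [main]
    # dns = none
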
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 8c6cd057..46cb9c85 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -10,6 +10,7 @@ import logging
import os
import re
+from cloudinit.net.network_state import mask_to_net_prefix
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -17,8 +18,24 @@ SYS_CLASS_NET = "/sys/class/net/"
DEFAULT_PRIMARY_INTERFACE = 'eth0'
+def _natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
+ """Sorting for Humans: natural sort order. Can be use as the key to sort
+ functions.
+ This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as
+ ['ens0', 'ens3', 'ens8', 'ens10', 'ens12', 'eth0'] instead of the simple
+ python way which will produce ['ens0', 'ens10', 'ens12', 'ens3', 'ens8',
+ 'eth0']."""
+ return [int(text) if text.isdigit() else text.lower()
+ for text in re.split(_nsre, s)]
+
+
+def get_sys_class_path():
+ """Simple function to return the global SYS_CLASS_NET."""
+ return SYS_CLASS_NET
+
+
def sys_dev_path(devname, path=""):
- return SYS_CLASS_NET + devname + "/" + path
+ return get_sys_class_path() + devname + "/" + path
def read_sys_net(devname, path, translate=None,
@@ -66,7 +83,7 @@ def read_sys_net_int(iface, field):
return None
try:
return int(val)
- except TypeError:
+ except ValueError:
return None
@@ -86,6 +103,10 @@ def is_bridge(devname):
return os.path.exists(sys_dev_path(devname, "bridge"))
+def is_bond(devname):
+ return os.path.exists(sys_dev_path(devname, "bonding"))
+
+
def is_vlan(devname):
uevent = str(read_sys_net_safe(devname, "uevent"))
return 'DEVTYPE=vlan' in uevent.splitlines()
@@ -113,8 +134,35 @@ def is_present(devname):
return os.path.exists(sys_dev_path(devname))
+def device_driver(devname):
+ """Return the device driver for net device named 'devname'."""
+ driver = None
+ driver_path = sys_dev_path(devname, "device/driver")
+ # driver is a symlink to the driver *dir*
+ if os.path.islink(driver_path):
+ driver = os.path.basename(os.readlink(driver_path))
+
+ return driver
+
+
+def device_devid(devname):
+ """Return the device id string for net device named 'devname'."""
+ dev_id = read_sys_net_safe(devname, "device/device")
+ if dev_id is False:
+ return None
+
+ return dev_id
+
+
def get_devicelist():
- return os.listdir(SYS_CLASS_NET)
+ try:
+ devs = os.listdir(get_sys_class_path())
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ devs = []
+ else:
+ raise
+ return devs
class ParserError(Exception):
@@ -127,12 +175,21 @@ def is_disabled_cfg(cfg):
return cfg.get('config') == "disabled"
-def generate_fallback_config():
+def generate_fallback_config(blacklist_drivers=None, config_driver=None):
"""Determine which attached net dev is most likely to have a connection and
generate network state to run dhcp on that interface"""
+
+ if not config_driver:
+ config_driver = False
+
+ if not blacklist_drivers:
+ blacklist_drivers = []
+
# get list of interfaces that could have connections
invalid_interfaces = set(['lo'])
- potential_interfaces = set(get_devicelist())
+ potential_interfaces = set([device for device in get_devicelist()
+ if device_driver(device) not in
+ blacklist_drivers])
potential_interfaces = potential_interfaces.difference(invalid_interfaces)
# sort into interfaces with carrier, interfaces which could have carrier,
# and ignore interfaces that are definitely disconnected
@@ -144,6 +201,9 @@ def generate_fallback_config():
if is_bridge(interface):
# skip any bridges
continue
+ if is_bond(interface):
+ # skip any bonds
+ continue
carrier = read_sys_net_int(interface, 'carrier')
if carrier:
connected.append(interface)
@@ -169,7 +229,7 @@ def generate_fallback_config():
# if eth0 exists use it above anything else, otherwise get the interface
    # that we can read 'first' (using the sorted definition of first).
- names = list(sorted(potential_interfaces))
+ names = list(sorted(potential_interfaces, key=_natural_sort_key))
if DEFAULT_PRIMARY_INTERFACE in names:
names.remove(DEFAULT_PRIMARY_INTERFACE)
names.insert(0, DEFAULT_PRIMARY_INTERFACE)
@@ -183,9 +243,18 @@ def generate_fallback_config():
break
if target_mac and target_name:
nconf = {'config': [], 'version': 1}
- nconf['config'].append(
- {'type': 'physical', 'name': target_name,
- 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]})
+ cfg = {'type': 'physical', 'name': target_name,
+ 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}
+ # inject the device driver name, dev_id into config if enabled and
+ # device has a valid device driver value
+ if config_driver:
+ driver = device_driver(target_name)
+ if driver:
+ cfg['params'] = {
+ 'driver': driver,
+ 'device_id': device_devid(target_name),
+ }
+ nconf['config'].append(cfg)
return nconf
else:
# can't read any interfaces addresses (or there are none); give up
@@ -206,10 +275,16 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
if ent.get('type') != 'physical':
continue
mac = ent.get('mac_address')
- name = ent.get('name')
if not mac:
continue
- renames.append([mac, name])
+ name = ent.get('name')
+ driver = ent.get('params', {}).get('driver')
+ device_id = ent.get('params', {}).get('device_id')
+ if not driver:
+ driver = device_driver(name)
+ if not device_id:
+ device_id = device_devid(name)
+ renames.append([mac, name, driver, device_id])
return _rename_interfaces(renames)
@@ -234,15 +309,27 @@ def _get_current_rename_info(check_downable=True):
"""Collect information necessary for rename_interfaces.
returns a dictionary by mac address like:
- {mac:
- {'name': name
- 'up': boolean: is_up(name),
+ {name:
+ {
'downable': None or boolean indicating that the
- device has only automatically assigned ip addrs.}}
+ device has only automatically assigned ip addrs.
+ 'device_id': Device id value (if it has one)
+ 'driver': Device driver (if it has one)
+ 'mac': mac address (in lower case)
+ 'name': name
+ 'up': boolean: is_up(name)
+ }}
"""
- bymac = {}
- for mac, name in get_interfaces_by_mac().items():
- bymac[mac] = {'name': name, 'up': is_up(name), 'downable': None}
+ cur_info = {}
+ for (name, mac, driver, device_id) in get_interfaces():
+ cur_info[name] = {
+ 'downable': None,
+ 'device_id': device_id,
+ 'driver': driver,
+ 'mac': mac.lower(),
+ 'name': name,
+ 'up': is_up(name),
+ }
if check_downable:
nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
@@ -254,11 +341,11 @@ def _get_current_rename_info(check_downable=True):
for bytes_out in (ipv6, ipv4):
nics_with_addresses.update(nmatch.findall(bytes_out))
- for d in bymac.values():
+ for d in cur_info.values():
d['downable'] = (d['up'] is False or
d['name'] not in nics_with_addresses)
- return bymac
+ return cur_info
def _rename_interfaces(renames, strict_present=True, strict_busy=True,
@@ -271,15 +358,17 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
if current_info is None:
current_info = _get_current_rename_info()
- cur_bymac = {}
- for mac, data in current_info.items():
+ cur_info = {}
+ for name, data in current_info.items():
cur = data.copy()
- cur['mac'] = mac
- cur_bymac[mac] = cur
+ if cur.get('mac'):
+ cur['mac'] = cur['mac'].lower()
+ cur['name'] = name
+ cur_info[name] = cur
def update_byname(bymac):
return dict((data['name'], data)
- for data in bymac.values())
+ for data in cur_info.values())
def rename(cur, new):
util.subp(["ip", "link", "set", cur, "name", new], capture=True)
@@ -293,14 +382,50 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
ops = []
errors = []
ups = []
- cur_byname = update_byname(cur_bymac)
+ cur_byname = update_byname(cur_info)
tmpname_fmt = "cirename%d"
tmpi = -1
- for mac, new_name in renames:
- cur = cur_bymac.get(mac, {})
- cur_name = cur.get('name')
+ def entry_match(data, mac, driver, device_id):
+ """match if set and in data"""
+ if mac and driver and device_id:
+ return (data['mac'] == mac and
+ data['driver'] == driver and
+ data['device_id'] == device_id)
+ elif mac and driver:
+ return (data['mac'] == mac and
+ data['driver'] == driver)
+ elif mac:
+ return (data['mac'] == mac)
+
+ return False
+
+ def find_entry(mac, driver, device_id):
+ match = [data for data in cur_info.values()
+ if entry_match(data, mac, driver, device_id)]
+ if len(match):
+ if len(match) > 1:
+ msg = ('Failed to match a single device. Matched devices "%s"'
+ ' with search values "(mac:%s driver:%s device_id:%s)"'
+ % (match, mac, driver, device_id))
+ raise ValueError(msg)
+ return match[0]
+
+ return None
+
+ for mac, new_name, driver, device_id in renames:
+ if mac:
+ mac = mac.lower()
cur_ops = []
+ cur = find_entry(mac, driver, device_id)
+ if not cur:
+ if strict_present:
+ errors.append(
+ "[nic not present] Cannot rename mac=%s to %s"
+ ", not available." % (mac, new_name))
+ continue
+
+ cur_name = cur.get('name')
if cur_name == new_name:
# nothing to do
continue
@@ -340,13 +465,13 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
cur_ops.append(("rename", mac, new_name, (new_name, tmp_name)))
target['name'] = tmp_name
- cur_byname = update_byname(cur_bymac)
+ cur_byname = update_byname(cur_info)
if target['up']:
ups.append(("up", mac, new_name, (tmp_name,)))
cur_ops.append(("rename", mac, new_name, (cur['name'], new_name)))
cur['name'] = new_name
- cur_byname = update_byname(cur_bymac)
+ cur_byname = update_byname(cur_info)
ops += cur_ops
opmap = {'rename': rename, 'down': down, 'up': up}
@@ -385,14 +510,8 @@ def get_interfaces_by_mac():
"""Build a dictionary of tuples {mac: name}.
Bridges and any devices that have a 'stolen' mac are excluded."""
- try:
- devs = get_devicelist()
- except OSError as e:
- if e.errno == errno.ENOENT:
- devs = []
- else:
- raise
ret = {}
+ devs = get_devicelist()
empty_mac = '00:00:00:00:00:00'
for name in devs:
if not interface_has_own_mac(name):
@@ -415,6 +534,126 @@ def get_interfaces_by_mac():
return ret
+def get_interfaces():
+ """Return list of interface tuples (name, mac, driver, device_id)
+
+ Bridges and any devices that have a 'stolen' mac are excluded."""
+ ret = []
+ devs = get_devicelist()
+ empty_mac = '00:00:00:00:00:00'
+ for name in devs:
+ if not interface_has_own_mac(name):
+ continue
+ if is_bridge(name):
+ continue
+ if is_vlan(name):
+ continue
+ mac = get_interface_mac(name)
+ # some devices may not have a mac (tun0)
+ if not mac:
+ continue
+ if mac == empty_mac and name != 'lo':
+ continue
+ ret.append((name, mac, device_driver(name), device_devid(name)))
+ return ret
+
+
+class EphemeralIPv4Network(object):
+ """Context manager which sets up temporary static network configuration.
+
+ No operations are performed if the provided interface is already connected.
+ If unconnected, bring up the interface with valid ip, prefix and broadcast.
+ If router is provided setup a default route for that interface. Upon
+ context exit, clean up the interface leaving no configuration behind.
+ """
+
+ def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None):
+ """Setup context manager and validate call signature.
+
+ @param interface: Name of the network interface to bring up.
+ @param ip: IP address to assign to the interface.
+ @param prefix_or_mask: Either netmask of the format X.X.X.X or an int
+ prefix.
+ @param broadcast: Broadcast address for the IPv4 network.
+ @param router: Optionally the default gateway IP.
+ """
+ if not all([interface, ip, prefix_or_mask, broadcast]):
+ raise ValueError(
+ 'Cannot init network on {0} with {1}/{2} and bcast {3}'.format(
+ interface, ip, prefix_or_mask, broadcast))
+ try:
+ self.prefix = mask_to_net_prefix(prefix_or_mask)
+ except ValueError as e:
+ raise ValueError(
+ 'Cannot setup network: {0}'.format(e))
+ self.interface = interface
+ self.ip = ip
+ self.broadcast = broadcast
+ self.router = router
+ self.cleanup_cmds = [] # List of commands to run to cleanup state.
+
+ def __enter__(self):
+ """Perform ephemeral network setup if interface is not connected."""
+ self._bringup_device()
+ if self.router:
+ self._bringup_router()
+
+ def __exit__(self, excp_type, excp_value, excp_traceback):
+ for cmd in self.cleanup_cmds:
+ util.subp(cmd, capture=True)
+
+ def _delete_address(self, address, prefix):
+ """Perform the ip command to remove the specified address."""
+ util.subp(
+ ['ip', '-family', 'inet', 'addr', 'del',
+ '%s/%s' % (address, prefix), 'dev', self.interface],
+ capture=True)
+
+ def _bringup_device(self):
+ """Perform the ip comands to fully setup the device."""
+ cidr = '{0}/{1}'.format(self.ip, self.prefix)
+ LOG.debug(
+ 'Attempting setup of ephemeral network on %s with %s brd %s',
+ self.interface, cidr, self.broadcast)
+ try:
+ util.subp(
+ ['ip', '-family', 'inet', 'addr', 'add', cidr, 'broadcast',
+ self.broadcast, 'dev', self.interface],
+ capture=True, update_env={'LANG': 'C'})
+ except util.ProcessExecutionError as e:
+ if "File exists" not in e.stderr:
+ raise
+ LOG.debug(
+ 'Skip ephemeral network setup, %s already has address %s',
+ self.interface, self.ip)
+ else:
+ # Address creation success, bring up device and queue cleanup
+ util.subp(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface,
+ 'up'], capture=True)
+ self.cleanup_cmds.append(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface,
+ 'down'])
+ self.cleanup_cmds.append(
+ ['ip', '-family', 'inet', 'addr', 'del', cidr, 'dev',
+ self.interface])
+
+ def _bringup_router(self):
+ """Perform the ip commands to fully setup the router if needed."""
+ # Check if a default route exists and exit if it does
+ out, _ = util.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True)
+ if 'default' in out:
+ LOG.debug(
+ 'Skip ephemeral route setup. %s already has default route: %s',
+ self.interface, out.strip())
+ return
+ util.subp(
+ ['ip', '-4', 'route', 'add', 'default', 'via', self.router,
+ 'dev', self.interface], capture=True)
+ self.cleanup_cmds.insert(
+ 0, ['ip', '-4', 'route', 'del', 'default', 'dev', self.interface])
+
+
class RendererNotFoundError(RuntimeError):
pass
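
A usage sketch for the new EphemeralIPv4Network context manager (requires root and the ip tool; all addresses are illustrative):

    from cloudinit.net import EphemeralIPv4Network

    # Bring up eth0 just long enough to reach a metadata service; the
    # temporary address and route are torn down on context exit.
    with EphemeralIPv4Network('eth0', ip='10.0.0.5',
                              prefix_or_mask='255.255.255.0',
                              broadcast='10.0.0.255', router='10.0.0.1'):
        pass  # e.g. fetch metadata over the ephemeral network
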
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 9819d4f5..bb80ec02 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -46,6 +46,10 @@ def _iface_add_subnet(iface, subnet):
'dns_nameservers',
]
for key, value in subnet.items():
+ if key == 'netmask':
+ continue
+ if key == 'address':
+ value = "%s/%s" % (subnet['address'], subnet['prefix'])
if value and key in valid_map:
if type(value) == list:
value = " ".join(value)
@@ -68,6 +72,8 @@ def _iface_add_attrs(iface, index):
content = []
ignore_map = [
'control',
+ 'device_id',
+ 'driver',
'index',
'inet',
'mode',
@@ -75,6 +81,15 @@ def _iface_add_attrs(iface, index):
'subnets',
'type',
]
+
+ # The following parameters require repetitive entries of the key for
+ # each of the values
+ multiline_keys = [
+ 'bridge_pathcost',
+ 'bridge_portprio',
+ 'bridge_waitport',
+ ]
+
renames = {'mac_address': 'hwaddress'}
if iface['type'] not in ['bond', 'bridge', 'vlan']:
ignore_map.append('mac_address')
@@ -82,6 +97,10 @@ def _iface_add_attrs(iface, index):
for key, value in iface.items():
if not value or key in ignore_map:
continue
+ if key in multiline_keys:
+ for v in value:
+ content.append(" {0} {1}".format(renames.get(key, key), v))
+ continue
if type(value) == list:
value = " ".join(value)
content.append(" {0} {1}".format(renames.get(key, key), value))
@@ -304,8 +323,6 @@ class Renderer(renderer.Renderer):
config = {}
self.eni_path = config.get('eni_path', 'etc/network/interfaces')
self.eni_header = config.get('eni_header', None)
- self.links_path_prefix = config.get(
- 'links_path_prefix', 'etc/systemd/network/50-cloud-init-')
self.netrules_path = config.get(
'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
@@ -338,7 +355,7 @@ class Renderer(renderer.Renderer):
default_gw = " default gw %s" % route['gateway']
content.append(up + default_gw + or_true)
content.append(down + default_gw + or_true)
- elif route['network'] == '::' and route['netmask'] == 0:
+ elif route['network'] == '::' and route['prefix'] == 0:
# ipv6!
default_gw = " -A inet6 default gw %s" % route['gateway']
content.append(up + default_gw + or_true)
@@ -451,28 +468,6 @@ class Renderer(renderer.Renderer):
util.write_file(netrules,
self._render_persistent_net(network_state))
- if self.links_path_prefix:
- self._render_systemd_links(target, network_state,
- links_prefix=self.links_path_prefix)
-
- def _render_systemd_links(self, target, network_state, links_prefix):
- fp_prefix = util.target_path(target, links_prefix)
- for f in glob.glob(fp_prefix + "*"):
- os.unlink(f)
- for iface in network_state.iter_interfaces():
- if (iface['type'] == 'physical' and 'name' in iface and
- iface.get('mac_address')):
- fname = fp_prefix + iface['name'] + ".link"
- content = "\n".join([
- "[Match]",
- "MACAddress=" + iface['mac_address'],
- "",
- "[Link]",
- "Name=" + iface['name'],
- ""
- ])
- util.write_file(fname, content)
-
def network_state_to_eni(network_state, header=None, render_hwaddress=False):
# render the provided network state, return a string of equivalent eni
@@ -480,7 +475,6 @@ def network_state_to_eni(network_state, header=None, render_hwaddress=False):
renderer = Renderer(config={
'eni_path': eni_path,
'eni_header': header,
- 'links_path_prefix': None,
'netrules_path': None,
})
if not header:
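
The effect of the new multiline_keys handling, sketched against the private helper (the bridge values are illustrative):

    from cloudinit.net.eni import _iface_add_attrs

    iface = {'type': 'bridge', 'name': 'br0',
             'bridge_waitport': ['1 eth0', '1 eth1']}
    print('\n'.join(_iface_add_attrs(iface, index=0)))
    #     bridge_waitport 1 eth0
    #     bridge_waitport 1 eth1

Previously list values were space-joined onto one stanza line; per-port options like bridge_pathcost need one line per port.
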
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index a715f3b0..9f35b72b 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -4,7 +4,7 @@ import copy
import os
from . import renderer
-from .network_state import mask2cidr, subnet_is_ipv6
+from .network_state import subnet_is_ipv6
from cloudinit import log as logging
from cloudinit import util
@@ -118,10 +118,9 @@ def _extract_addresses(config, entry):
sn_type += '4'
entry.update({sn_type: True})
elif sn_type in ['static']:
- addr = '%s' % subnet.get('address')
- netmask = subnet.get('netmask')
- if netmask and '/' not in addr:
- addr += '/%s' % mask2cidr(netmask)
+ addr = "%s" % subnet.get('address')
+ if 'prefix' in subnet:
+ addr += "/%d" % subnet.get('prefix')
if 'gateway' in subnet and subnet.get('gateway'):
gateway = subnet.get('gateway')
if ":" in gateway:
@@ -138,9 +137,8 @@ def _extract_addresses(config, entry):
mtukey += '6'
entry.update({mtukey: subnet.get('mtu')})
for route in subnet.get('routes', []):
- network = route.get('network')
- netmask = route.get('netmask')
- to_net = '%s/%s' % (network, mask2cidr(netmask))
+ to_net = "%s/%s" % (route.get('network'),
+ route.get('prefix'))
route = {
'via': route.get('gateway'),
'to': to_net,
@@ -211,7 +209,8 @@ class Renderer(renderer.Renderer):
# check network state for version
# if v2, then extract network_state.config
# else render_v2_from_state
- fpnplan = os.path.join(target, self.netplan_path)
+ fpnplan = os.path.join(util.target_path(target), self.netplan_path)
+
util.ensure_dir(os.path.dirname(fpnplan))
header = self.netplan_header if self.netplan_header else ""
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 9e9c05a0..87a7222d 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -289,19 +289,15 @@ class NetworkStateInterpreter(object):
iface.update({param: val})
# convert subnet ipv6 netmask to cidr as needed
- subnets = command.get('subnets')
- if subnets:
+ subnets = _normalize_subnets(command.get('subnets'))
+
+ # automatically set 'use_ipv6' if any addresses are ipv6
+ if not self.use_ipv6:
for subnet in subnets:
- if subnet['type'] == 'static':
- if ':' in subnet['address']:
- self.use_ipv6 = True
- if 'netmask' in subnet and ':' in subnet['address']:
- subnet['netmask'] = mask2cidr(subnet['netmask'])
- for route in subnet.get('routes', []):
- if 'netmask' in route:
- route['netmask'] = mask2cidr(route['netmask'])
- elif subnet['type'].endswith('6'):
+ if (subnet.get('type').endswith('6') or
+ is_ipv6_addr(subnet.get('address'))):
self.use_ipv6 = True
+ break
iface.update({
'name': command.get('name'),
@@ -456,16 +452,7 @@ class NetworkStateInterpreter(object):
@ensure_command_keys(['destination'])
def handle_route(self, command):
- routes = self._network_state.get('routes', [])
- network, cidr = command['destination'].split("/")
- netmask = cidr2mask(int(cidr))
- route = {
- 'network': network,
- 'netmask': netmask,
- 'gateway': command.get('gateway'),
- 'metric': command.get('metric'),
- }
- routes.append(route)
+ self._network_state['routes'].append(_normalize_route(command))
# V2 handlers
def handle_bonds(self, command):
@@ -666,18 +653,9 @@ class NetworkStateInterpreter(object):
routes = []
for route in cfg.get('routes', []):
- route_addr = route.get('to')
- if "/" in route_addr:
- route_addr, route_cidr = route_addr.split("/")
- route_netmask = cidr2mask(route_cidr)
- subnet_route = {
- 'address': route_addr,
- 'netmask': route_netmask,
- 'gateway': route.get('via')
- }
- routes.append(subnet_route)
- if len(routes) > 0:
- subnet.update({'routes': routes})
+ routes.append(_normalize_route(
+ {'address': route.get('to'), 'gateway': route.get('via')}))
+ subnet['routes'] = routes
if ":" in address:
if 'gateway6' in cfg and gateway6 is None:
@@ -692,53 +670,219 @@ class NetworkStateInterpreter(object):
return subnets
+def _normalize_subnet(subnet):
+    # Prune all keys with falsy values.
+ subnet = copy.deepcopy(subnet)
+ normal_subnet = dict((k, v) for k, v in subnet.items() if v)
+
+ if subnet.get('type') in ('static', 'static6'):
+ normal_subnet.update(
+ _normalize_net_keys(normal_subnet, address_keys=('address',)))
+ normal_subnet['routes'] = [_normalize_route(r)
+ for r in subnet.get('routes', [])]
+ return normal_subnet
+
+
+def _normalize_net_keys(network, address_keys=()):
+ """Normalize dictionary network keys returning prefix and address keys.
+
+ @param network: A dict of network-related definition containing prefix,
+ netmask and address_keys.
+ @param address_keys: A tuple of keys to search for representing the address
+ or cidr. The first address_key discovered will be used for
+ normalization.
+
+ @returns: A dict containing normalized prefix and matching addr_key.
+ """
+ net = dict((k, v) for k, v in network.items() if v)
+ addr_key = None
+ for key in address_keys:
+ if net.get(key):
+ addr_key = key
+ break
+ if not addr_key:
+ message = (
+ 'No config network address keys [%s] found in %s' %
+ (','.join(address_keys), network))
+ LOG.error(message)
+ raise ValueError(message)
+
+ addr = net.get(addr_key)
+ ipv6 = is_ipv6_addr(addr)
+ netmask = net.get('netmask')
+ if "/" in addr:
+ addr_part, _, maybe_prefix = addr.partition("/")
+ net[addr_key] = addr_part
+ try:
+ prefix = int(maybe_prefix)
+ except ValueError:
+ # this supports input of <address>/255.255.255.0
+ prefix = mask_to_net_prefix(maybe_prefix)
+ elif netmask:
+ prefix = mask_to_net_prefix(netmask)
+ elif 'prefix' in net:
+        prefix = int(net['prefix'])
+ else:
+ prefix = 64 if ipv6 else 24
+
+ if 'prefix' in net and str(net['prefix']) != str(prefix):
+ LOG.warning("Overwriting existing 'prefix' with '%s' in "
+ "network info: %s", prefix, net)
+ net['prefix'] = prefix
+
+ if ipv6:
+ # TODO: we could/maybe should add this back with the very uncommon
+ # 'netmask' for ipv6. We need a 'net_prefix_to_ipv6_mask' for that.
+ if 'netmask' in net:
+ del net['netmask']
+ else:
+ net['netmask'] = net_prefix_to_ipv4_mask(net['prefix'])
+
+ return net
+
+
+def _normalize_route(route):
+ """normalize a route.
+ return a dictionary with only:
+ 'type': 'route' (only present if it was present in input)
+ 'network': the network portion of the route as a string.
+ 'prefix': the network prefix for address as an integer.
+ 'metric': integer metric (only if present in input).
+ 'netmask': netmask (string) equivalent to prefix iff network is ipv4.
+ """
+ # Prune None-value keys. Specifically allow 0 (a valid metric).
+ normal_route = dict((k, v) for k, v in route.items()
+ if v not in ("", None))
+ if 'destination' in normal_route:
+ normal_route['network'] = normal_route['destination']
+ del normal_route['destination']
+
+ normal_route.update(
+ _normalize_net_keys(
+ normal_route, address_keys=('network', 'destination')))
+
+ metric = normal_route.get('metric')
+ if metric:
+ try:
+ normal_route['metric'] = int(metric)
+ except ValueError:
+ raise TypeError(
+ 'Route config metric {} is not an integer'.format(metric))
+ return normal_route
+
+
+def _normalize_subnets(subnets):
+ if not subnets:
+ subnets = []
+ return [_normalize_subnet(s) for s in subnets]
+
+
+def is_ipv6_addr(address):
+ if not address:
+ return False
+ return ":" in str(address)
+
+
def subnet_is_ipv6(subnet):
"""Common helper for checking network_state subnets for ipv6."""
# 'static6' or 'dhcp6'
if subnet['type'].endswith('6'):
# This is a request for DHCPv6.
return True
- elif subnet['type'] == 'static' and ":" in subnet['address']:
+ elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')):
return True
return False
-def cidr2mask(cidr):
+def net_prefix_to_ipv4_mask(prefix):
+ """Convert a network prefix to an ipv4 netmask.
+
+ This is the inverse of ipv4_mask_to_net_prefix.
+ 24 -> "255.255.255.0"
+ Also supports input as a string."""
+
mask = [0, 0, 0, 0]
- for i in list(range(0, cidr)):
+ for i in list(range(0, int(prefix))):
idx = int(i / 8)
mask[idx] = mask[idx] + (1 << (7 - i % 8))
return ".".join([str(x) for x in mask])
-def ipv4mask2cidr(mask):
- if '.' not in mask:
+def ipv4_mask_to_net_prefix(mask):
+ """Convert an ipv4 netmask into a network prefix length.
+
+ If the input is already an integer or a string representation of
+ an integer, then int(mask) will be returned.
+ "255.255.255.0" => 24
+ str(24) => 24
+ "24" => 24
+ """
+ if isinstance(mask, int):
return mask
- return sum([bin(int(x)).count('1') for x in mask.split('.')])
+ if isinstance(mask, six.string_types):
+ try:
+ return int(mask)
+ except ValueError:
+ pass
+ else:
+        raise TypeError("mask '%s' is not a string or int" % mask)
+ if '.' not in mask:
+ raise ValueError("netmask '%s' does not contain a '.'" % mask)
-def ipv6mask2cidr(mask):
- if ':' not in mask:
+ toks = mask.split(".")
+ if len(toks) != 4:
+ raise ValueError("netmask '%s' had only %d parts" % (mask, len(toks)))
+
+ return sum([bin(int(x)).count('1') for x in toks])
+
+
+def ipv6_mask_to_net_prefix(mask):
+ """Convert an ipv6 netmask (very uncommon) or prefix (64) to prefix.
+
+ If 'mask' is an integer or string representation of one then
+ int(mask) will be returned.
+ """
+
+ if isinstance(mask, int):
return mask
+ if isinstance(mask, six.string_types):
+ try:
+ return int(mask)
+ except ValueError:
+ pass
+ else:
+        raise TypeError("mask '%s' is not a string or int" % mask)
+
+ if ':' not in mask:
+        raise ValueError("mask '%s' does not have a ':'" % mask)
bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00,
0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc,
0xfffe, 0xffff]
- cidr = 0
+ prefix = 0
for word in mask.split(':'):
if not word or int(word, 16) == 0:
break
- cidr += bitCount.index(int(word, 16))
+ prefix += bitCount.index(int(word, 16))
+
+ return prefix
- return cidr
+def mask_to_net_prefix(mask):
+ """Return the network prefix for the netmask provided.
-def mask2cidr(mask):
- if ':' in str(mask):
- return ipv6mask2cidr(mask)
- elif '.' in str(mask):
- return ipv4mask2cidr(mask)
+ Supports ipv4 or ipv6 netmasks."""
+ try:
+ # if 'mask' is a prefix that is an integer.
+ # then just return it.
+ return int(mask)
+ except ValueError:
+ pass
+ if is_ipv6_addr(mask):
+ return ipv6_mask_to_net_prefix(mask)
else:
- return mask
+ return ipv4_mask_to_net_prefix(mask)
+
# vi: ts=4 expandtab
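The renamed helpers keep mask/prefix conversion explicit in both directions. A quick round-trip sketch using only the functions defined above:

    from cloudinit.net.network_state import (
        ipv6_mask_to_net_prefix, mask_to_net_prefix, net_prefix_to_ipv4_mask)

    assert mask_to_net_prefix('255.255.255.0') == 24       # dotted-quad netmask
    assert mask_to_net_prefix('24') == 24                  # prefix as a string
    assert net_prefix_to_ipv4_mask(24) == '255.255.255.0'  # inverse direction
    assert ipv6_mask_to_net_prefix('ffff:ffff:ffff:ffff::') == 64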
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index c68658dc..57652e27 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -20,6 +20,10 @@ def filter_by_name(match_name):
return lambda iface: match_name == iface['name']
+def filter_by_attr(match_name):
+ return lambda iface: (match_name in iface and iface[match_name])
+
+
filter_by_physical = filter_by_type('physical')
@@ -34,8 +38,10 @@ class Renderer(object):
for iface in network_state.iter_interfaces(filter_by_physical):
# for physical interfaces write out a persist net udev rule
if 'name' in iface and iface.get('mac_address'):
+ driver = iface.get('driver', None)
content.write(generate_udev_rule(iface['name'],
- iface['mac_address']))
+ iface['mac_address'],
+ driver=driver))
return content.getvalue()
@abc.abstractmethod
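filter_by_attr complements the existing filter_by_type/filter_by_name predicates and is used below by the sysconfig bond rendering. A toy illustration of the predicate it returns:

    from cloudinit.net.renderer import filter_by_attr

    slave_filter = filter_by_attr('bond-master')
    ifaces = [{'name': 'eth0', 'bond-master': 'bond0'}, {'name': 'eth1'}]
    assert [i['name'] for i in ifaces if slave_filter(i)] == ['eth0']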
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 58c5713f..a550f97c 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -5,11 +5,13 @@ import re
import six
+from cloudinit.distros.parsers import networkmanager_conf
from cloudinit.distros.parsers import resolv_conf
from cloudinit import util
from . import renderer
-from .network_state import subnet_is_ipv6
+from .network_state import (
+ is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6)
def _make_header(sep='#'):
@@ -26,11 +28,8 @@ def _make_header(sep='#'):
def _is_default_route(route):
- if route['network'] == '::' and route['netmask'] == 0:
- return True
- if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
- return True
- return False
+ default_nets = ('::', '0.0.0.0')
+ return route['prefix'] == 0 and route['network'] in default_nets
def _quote_value(value):
@@ -62,6 +61,9 @@ class ConfigMap(object):
def __getitem__(self, key):
return self._conf[key]
+ def __contains__(self, key):
+ return key in self._conf
+
def drop(self, key):
self._conf.pop(key, None)
@@ -153,9 +155,10 @@ class Route(ConfigMap):
elif proto == "ipv6" and self.is_ipv6_route(address_value):
netmask_value = str(self._conf['NETMASK' + index])
gateway_value = str(self._conf['GATEWAY' + index])
- buf.write("%s/%s via %s\n" % (address_value,
- netmask_value,
- gateway_value))
+ buf.write("%s/%s via %s dev %s\n" % (address_value,
+ netmask_value,
+ gateway_value,
+ self._route_name))
return buf.getvalue()
@@ -252,6 +255,9 @@ class Renderer(renderer.Renderer):
self.netrules_path = config.get(
'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
self.dns_path = config.get('dns_path', 'etc/resolv.conf')
+ nm_conf_path = 'etc/NetworkManager/conf.d/99-cloud-init.conf'
+ self.networkmanager_conf_path = config.get('networkmanager_conf_path',
+ nm_conf_path)
@classmethod
def _render_iface_shared(cls, iface, iface_cfg):
@@ -261,6 +267,9 @@ class Renderer(renderer.Renderer):
for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]:
old_value = iface.get(old_key)
if old_value is not None:
+ # only set HWADDR on physical interfaces
+ if old_key == 'mac_address' and iface['type'] != 'physical':
+ continue
iface_cfg[new_key] = old_value
@classmethod
@@ -270,6 +279,7 @@ class Renderer(renderer.Renderer):
# modifying base values according to subnets
for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
+ mtu_key = 'MTU'
subnet_type = subnet.get('type')
if subnet_type == 'dhcp6':
iface_cfg['IPV6INIT'] = True
@@ -289,11 +299,20 @@ class Renderer(renderer.Renderer):
# if iface_cfg['BOOTPROTO'] == 'none':
# iface_cfg['BOOTPROTO'] = 'static'
if subnet_is_ipv6(subnet):
+ mtu_key = 'IPV6_MTU'
iface_cfg['IPV6INIT'] = True
+ if 'mtu' in subnet:
+ iface_cfg[mtu_key] = subnet['mtu']
+ elif subnet_type == 'manual':
+ # If the subnet has an MTU setting, then ONBOOT=True
+ # to apply the setting
+ iface_cfg['ONBOOT'] = mtu_key in iface_cfg
else:
raise ValueError("Unknown subnet type '%s' found"
" for interface '%s'" % (subnet_type,
iface_cfg.name))
+ if subnet.get('control') == 'manual':
+ iface_cfg['ONBOOT'] = False
# set IPv4 and IPv6 static addresses
ipv4_index = -1
@@ -307,38 +326,32 @@ class Renderer(renderer.Renderer):
elif subnet_type == 'static':
if subnet_is_ipv6(subnet):
ipv6_index = ipv6_index + 1
- if 'netmask' in subnet and str(subnet['netmask']) != "":
- ipv6_cidr = (subnet['address'] +
- '/' +
- str(subnet['netmask']))
- else:
- ipv6_cidr = subnet['address']
+ ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix'])
if ipv6_index == 0:
iface_cfg['IPV6ADDR'] = ipv6_cidr
elif ipv6_index == 1:
iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
else:
- iface_cfg['IPV6ADDR_SECONDARIES'] = (
- iface_cfg['IPV6ADDR_SECONDARIES'] +
- " " + ipv6_cidr)
+ iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr
else:
ipv4_index = ipv4_index + 1
- if ipv4_index == 0:
- iface_cfg['IPADDR'] = subnet['address']
- if 'netmask' in subnet:
- iface_cfg['NETMASK'] = subnet['netmask']
+ suff = "" if ipv4_index == 0 else str(ipv4_index)
+ iface_cfg['IPADDR' + suff] = subnet['address']
+ iface_cfg['NETMASK' + suff] = \
+ net_prefix_to_ipv4_mask(subnet['prefix'])
+
+ if 'gateway' in subnet:
+ iface_cfg['DEFROUTE'] = True
+ if is_ipv6_addr(subnet['gateway']):
+ iface_cfg['IPV6_DEFAULTGW'] = subnet['gateway']
else:
- iface_cfg['IPADDR' + str(ipv4_index)] = \
- subnet['address']
- if 'netmask' in subnet:
- iface_cfg['NETMASK' + str(ipv4_index)] = \
- subnet['netmask']
+ iface_cfg['GATEWAY'] = subnet['gateway']
@classmethod
def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets):
for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
for route in subnet.get('routes', []):
- is_ipv6 = subnet.get('ipv6')
+ is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway'])
if _is_default_route(route):
if (
@@ -360,7 +373,7 @@ class Renderer(renderer.Renderer):
# also provided the default route?
iface_cfg['DEFROUTE'] = True
if 'gateway' in route:
- if is_ipv6:
+ if is_ipv6 or is_ipv6_addr(route['gateway']):
iface_cfg['IPV6_DEFAULTGW'] = route['gateway']
route_cfg.has_set_default_ipv6 = True
else:
@@ -372,11 +385,13 @@ class Renderer(renderer.Renderer):
nm_key = 'NETMASK%s' % route_cfg.last_idx
addr_key = 'ADDRESS%s' % route_cfg.last_idx
route_cfg.last_idx += 1
- for (old_key, new_key) in [('gateway', gw_key),
- ('netmask', nm_key),
- ('network', addr_key)]:
- if old_key in route:
- route_cfg[new_key] = route[old_key]
+ # add default routes only to ifcfg files, not
+ # to route-* or route6-*
+ for (old_key, new_key) in [('gateway', gw_key),
+ ('netmask', nm_key),
+ ('network', addr_key)]:
+ if old_key in route:
+ route_cfg[new_key] = route[old_key]
@classmethod
def _render_bonding_opts(cls, iface_cfg, iface):
@@ -409,24 +424,45 @@ class Renderer(renderer.Renderer):
@classmethod
def _render_bond_interfaces(cls, network_state, iface_contents):
bond_filter = renderer.filter_by_type('bond')
+ slave_filter = renderer.filter_by_attr('bond-master')
for iface in network_state.iter_interfaces(bond_filter):
iface_name = iface['name']
iface_cfg = iface_contents[iface_name]
cls._render_bonding_opts(iface_cfg, iface)
- iface_master_name = iface['bond-master']
- iface_cfg['MASTER'] = iface_master_name
- iface_cfg['SLAVE'] = True
+
# Ensure that the master interface (and any of its children)
# are actually marked as being bond types...
- master_cfg = iface_contents[iface_master_name]
- master_cfgs = [master_cfg]
- master_cfgs.extend(master_cfg.children)
+ master_cfgs = [iface_cfg]
+ master_cfgs.extend(iface_cfg.children)
for master_cfg in master_cfgs:
master_cfg['BONDING_MASTER'] = True
master_cfg.kind = 'bond'
- @staticmethod
- def _render_vlan_interfaces(network_state, iface_contents):
+ if iface.get('mac_address'):
+ iface_cfg['MACADDR'] = iface.get('mac_address')
+
+ iface_subnets = iface.get("subnets", [])
+ route_cfg = iface_cfg.routes
+ cls._render_subnets(iface_cfg, iface_subnets)
+ cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+
+ # iter_interfaces on network-state is not sorted to produce
+ # consistent numbers we need to sort.
+ bond_slaves = sorted(
+ [slave_iface['name'] for slave_iface in
+ network_state.iter_interfaces(slave_filter)
+ if slave_iface['bond-master'] == iface_name])
+
+ for index, bond_slave in enumerate(bond_slaves):
+ slavestr = 'BONDING_SLAVE%s' % index
+ iface_cfg[slavestr] = bond_slave
+
+ slave_cfg = iface_contents[bond_slave]
+ slave_cfg['MASTER'] = iface_name
+ slave_cfg['SLAVE'] = True
+
+ @classmethod
+ def _render_vlan_interfaces(cls, network_state, iface_contents):
vlan_filter = renderer.filter_by_type('vlan')
for iface in network_state.iter_interfaces(vlan_filter):
iface_name = iface['name']
@@ -434,6 +470,11 @@ class Renderer(renderer.Renderer):
iface_cfg['VLAN'] = True
iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')]
+ iface_subnets = iface.get("subnets", [])
+ route_cfg = iface_cfg.routes
+ cls._render_subnets(iface_cfg, iface_subnets)
+ cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+
@staticmethod
def _render_dns(network_state, existing_dns_path=None):
content = resolv_conf.ResolvConf("")
@@ -445,6 +486,21 @@ class Renderer(renderer.Renderer):
content.add_search_domain(searchdomain)
return "\n".join([_make_header(';'), str(content)])
+ @staticmethod
+ def _render_networkmanager_conf(network_state):
+ content = networkmanager_conf.NetworkManagerConf("")
+
+ # If DNS server information is provided, configure
+ # NetworkManager to not manage dns, so that /etc/resolv.conf
+ # does not get clobbered.
+ if network_state.dns_nameservers:
+ content.set_section_keypair('main', 'dns', 'none')
+
+ if len(content) == 0:
+ return None
+ out = "".join([_make_header(), "\n", "\n".join(content.write()), "\n"])
+ return out
+
@classmethod
def _render_bridge_interfaces(cls, network_state, iface_contents):
bridge_filter = renderer.filter_by_type('bridge')
@@ -455,6 +511,10 @@ class Renderer(renderer.Renderer):
for old_key, new_key in cls.bridge_opts_keys:
if old_key in iface:
iface_cfg[new_key] = iface[old_key]
+
+ if iface.get('mac_address'):
+ iface_cfg['MACADDR'] = iface.get('mac_address')
+
# Is this the right key to get all the connected interfaces?
for bridged_iface_name in iface.get('bridge_ports', []):
# Ensure all bridged interfaces are correctly tagged
@@ -465,6 +525,11 @@ class Renderer(renderer.Renderer):
for bridge_cfg in bridged_cfgs:
bridge_cfg['BRIDGE'] = iface_name
+ iface_subnets = iface.get("subnets", [])
+ route_cfg = iface_cfg.routes
+ cls._render_subnets(iface_cfg, iface_subnets)
+ cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+
@classmethod
def _render_sysconfig(cls, base_sysconf_dir, network_state):
'''Given state, return /etc/sysconfig files + contents'''
@@ -505,6 +570,12 @@ class Renderer(renderer.Renderer):
resolv_content = self._render_dns(network_state,
existing_dns_path=dns_path)
util.write_file(dns_path, resolv_content, file_mode)
+ if self.networkmanager_conf_path:
+ nm_conf_path = util.target_path(target,
+ self.networkmanager_conf_path)
+ nm_conf_content = self._render_networkmanager_conf(network_state)
+ if nm_conf_content:
+ util.write_file(nm_conf_path, nm_conf_content, file_mode)
if self.netrules_path:
netrules_content = self._render_persistent_net(network_state)
netrules_path = util.target_path(target, self.netrules_path)
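When the rendered network state carries DNS servers, the renderer now also writes a small NetworkManager drop-in so NetworkManager stops managing DNS and leaves the rendered /etc/resolv.conf alone. Assuming NetworkManagerConf emits standard INI, the written 99-cloud-init.conf is expected to look roughly like:

    # Created by cloud-init ...
    [main]
    dns=none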
diff --git a/cloudinit/net/tests/__init__.py b/cloudinit/net/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/net/tests/__init__.py
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
new file mode 100644
index 00000000..272a6ebd
--- /dev/null
+++ b/cloudinit/net/tests/test_init.py
@@ -0,0 +1,522 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import errno
+import mock
+import os
+
+import cloudinit.net as net
+from cloudinit.util import ensure_file, write_file, ProcessExecutionError
+from tests.unittests.helpers import CiTestCase
+
+
+class TestSysDevPath(CiTestCase):
+
+ def test_sys_dev_path(self):
+ """sys_dev_path returns a path under SYS_CLASS_NET for a device."""
+ dev = 'something'
+ path = 'attribute'
+ expected = net.SYS_CLASS_NET + dev + '/' + path
+ self.assertEqual(expected, net.sys_dev_path(dev, path))
+
+ def test_sys_dev_path_without_path(self):
+ """When path param isn't provided it defaults to empty string."""
+ dev = 'something'
+ expected = net.SYS_CLASS_NET + dev + '/'
+ self.assertEqual(expected, net.sys_dev_path(dev))
+
+
+class TestReadSysNet(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(TestReadSysNet, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_read_sys_net_strips_contents_of_sys_path(self):
+ """read_sys_net strips whitespace from the contents of a sys file."""
+ content = 'some stuff with trailing whitespace\t\r\n'
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
+ self.assertEqual(content.strip(), net.read_sys_net('dev', 'attr'))
+
+ def test_read_sys_net_reraises_oserror(self):
+ """read_sys_net raises OSError/IOError when file doesn't exist."""
+        # Use a generic Exception: py2 raises IOError where py3 raises OSError.
+ with self.assertRaises(Exception) as context_manager: # noqa: H202
+ net.read_sys_net('dev', 'attr')
+ error = context_manager.exception
+ self.assertIn('No such file or directory', str(error))
+
+ def test_read_sys_net_handles_error_with_on_enoent(self):
+ """read_sys_net handles OSError/IOError with on_enoent if provided."""
+ handled_errors = []
+
+ def on_enoent(e):
+ handled_errors.append(e)
+
+ net.read_sys_net('dev', 'attr', on_enoent=on_enoent)
+ error = handled_errors[0]
+ self.assertIsInstance(error, Exception)
+ self.assertIn('No such file or directory', str(error))
+
+ def test_read_sys_net_translates_content(self):
+ """read_sys_net translates content when translate dict is provided."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
+ translate = {"you're welcome": 'de nada'}
+ self.assertEqual(
+ 'de nada',
+ net.read_sys_net('dev', 'attr', translate=translate))
+
+ def test_read_sys_net_errors_on_translation_failures(self):
+ """read_sys_net raises a KeyError and logs details on failure."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
+ with self.assertRaises(KeyError) as context_manager:
+ net.read_sys_net('dev', 'attr', translate={})
+ error = context_manager.exception
+ self.assertEqual('"you\'re welcome"', str(error))
+ self.assertIn(
+ "Found unexpected (not translatable) value 'you're welcome' in "
+ "'{0}dev/attr".format(self.sysdir),
+ self.logs.getvalue())
+
+ def test_read_sys_net_handles_handles_with_onkeyerror(self):
+ """read_sys_net handles translation errors calling on_keyerror."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
+ handled_errors = []
+
+ def on_keyerror(e):
+ handled_errors.append(e)
+
+ net.read_sys_net('dev', 'attr', translate={}, on_keyerror=on_keyerror)
+ error = handled_errors[0]
+ self.assertIsInstance(error, KeyError)
+ self.assertEqual('"you\'re welcome"', str(error))
+
+ def test_read_sys_net_safe_false_on_translate_failure(self):
+ """read_sys_net_safe returns False on translation failures."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
+ self.assertFalse(net.read_sys_net_safe('dev', 'attr', translate={}))
+
+ def test_read_sys_net_safe_returns_false_on_noent_failure(self):
+ """read_sys_net_safe returns False on file not found failures."""
+ self.assertFalse(net.read_sys_net_safe('dev', 'attr'))
+
+ def test_read_sys_net_int_returns_none_on_error(self):
+ """read_sys_net_safe returns None on failures."""
+ self.assertFalse(net.read_sys_net_int('dev', 'attr'))
+
+ def test_read_sys_net_int_returns_none_on_valueerror(self):
+ """read_sys_net_safe returns None when content is not an int."""
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), 'NOTINT\n')
+ self.assertFalse(net.read_sys_net_int('dev', 'attr'))
+
+ def test_read_sys_net_int_returns_integer_from_content(self):
+ """read_sys_net_safe returns None on failures."""
+ write_file(os.path.join(self.sysdir, 'dev', 'attr'), '1\n')
+ self.assertEqual(1, net.read_sys_net_int('dev', 'attr'))
+
+ def test_is_up_true(self):
+ """is_up is True if sys/net/devname/operstate is 'up' or 'unknown'."""
+ for state in ['up', 'unknown']:
+ write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
+ self.assertTrue(net.is_up('eth0'))
+
+ def test_is_up_false(self):
+ """is_up is False if sys/net/devname/operstate is 'down' or invalid."""
+ for state in ['down', 'incomprehensible']:
+ write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
+ self.assertFalse(net.is_up('eth0'))
+
+ def test_is_wireless(self):
+ """is_wireless is True when /sys/net/devname/wireless exists."""
+ self.assertFalse(net.is_wireless('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless'))
+ self.assertTrue(net.is_wireless('eth0'))
+
+ def test_is_bridge(self):
+ """is_bridge is True when /sys/net/devname/bridge exists."""
+ self.assertFalse(net.is_bridge('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge'))
+ self.assertTrue(net.is_bridge('eth0'))
+
+ def test_is_bond(self):
+ """is_bond is True when /sys/net/devname/bonding exists."""
+ self.assertFalse(net.is_bond('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
+ self.assertTrue(net.is_bond('eth0'))
+
+ def test_is_vlan(self):
+ """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan."""
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'uevent'))
+ self.assertFalse(net.is_vlan('eth0'))
+ content = 'junk\nDEVTYPE=vlan\njunk\n'
+ write_file(os.path.join(self.sysdir, 'eth0', 'uevent'), content)
+ self.assertTrue(net.is_vlan('eth0'))
+
+ def test_is_connected_when_physically_connected(self):
+ """is_connected is True when /sys/net/devname/iflink reports 2."""
+ self.assertFalse(net.is_connected('eth0'))
+ write_file(os.path.join(self.sysdir, 'eth0', 'iflink'), "2")
+ self.assertTrue(net.is_connected('eth0'))
+
+ def test_is_connected_when_wireless_and_carrier_active(self):
+ """is_connected is True if wireless /sys/net/devname/carrier is 1."""
+ self.assertFalse(net.is_connected('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless'))
+ self.assertFalse(net.is_connected('eth0'))
+ write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), "1")
+ self.assertTrue(net.is_connected('eth0'))
+
+ def test_is_physical(self):
+ """is_physical is True when /sys/net/devname/device exists."""
+ self.assertFalse(net.is_physical('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'device'))
+ self.assertTrue(net.is_physical('eth0'))
+
+ def test_is_present(self):
+ """is_present is True when /sys/net/devname exists."""
+ self.assertFalse(net.is_present('eth0'))
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'device'))
+ self.assertTrue(net.is_present('eth0'))
+
+
+class TestGenerateFallbackConfig(CiTestCase):
+
+ def setUp(self):
+ super(TestGenerateFallbackConfig, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_generate_fallback_finds_connected_eth_with_mac(self):
+ """generate_fallback_config finds any connected device with a mac."""
+ write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
+ write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1')
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
+ expected = {
+ 'config': [{'type': 'physical', 'mac_address': mac,
+ 'name': 'eth1', 'subnets': [{'type': 'dhcp'}]}],
+ 'version': 1}
+ self.assertEqual(expected, net.generate_fallback_config())
+
+ def test_generate_fallback_finds_dormant_eth_with_mac(self):
+ """generate_fallback_config finds any dormant device with a mac."""
+ write_file(os.path.join(self.sysdir, 'eth0', 'dormant'), '1')
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+ expected = {
+ 'config': [{'type': 'physical', 'mac_address': mac,
+ 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}],
+ 'version': 1}
+ self.assertEqual(expected, net.generate_fallback_config())
+
+ def test_generate_fallback_finds_eth_by_operstate(self):
+ """generate_fallback_config finds any dormant device with a mac."""
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+ expected = {
+ 'config': [{'type': 'physical', 'mac_address': mac,
+ 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}],
+ 'version': 1}
+ valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown']
+ for state in valid_operstates:
+ write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
+ self.assertEqual(expected, net.generate_fallback_config())
+ write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'noworky')
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_veth(self):
+ """generate_fallback_config will skip any veth interfaces."""
+ # A connected veth which gets ignored
+ write_file(os.path.join(self.sysdir, 'veth0', 'carrier'), '1')
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_bridges(self):
+ """generate_fallback_config will skip any bridges interfaces."""
+ # A connected veth which gets ignored
+ write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge'))
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_bonds(self):
+ """generate_fallback_config will skip any bonded interfaces."""
+        # A connected bond which gets ignored
+ write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
+ self.assertIsNone(net.generate_fallback_config())
+
+
+class TestGetDeviceList(CiTestCase):
+
+ def setUp(self):
+ super(TestGetDeviceList, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_get_devicelist_raise_oserror(self):
+ """get_devicelist raise any non-ENOENT OSerror."""
+ error = OSError('Can not do it')
+ error.errno = errno.EPERM # Set non-ENOENT
+ self.m_sys_path.side_effect = error
+ with self.assertRaises(OSError) as context_manager:
+ net.get_devicelist()
+ exception = context_manager.exception
+ self.assertEqual('Can not do it', str(exception))
+
+ def test_get_devicelist_empty_without_sys_net(self):
+ """get_devicelist returns empty list when missing SYS_CLASS_NET."""
+ self.m_sys_path.return_value = 'idontexist'
+ self.assertEqual([], net.get_devicelist())
+
+ def test_get_devicelist_empty_with_no_devices_in_sys_net(self):
+ """get_devicelist returns empty directoty listing for SYS_CLASS_NET."""
+ self.assertEqual([], net.get_devicelist())
+
+ def test_get_devicelist_lists_any_subdirectories_in_sys_net(self):
+ """get_devicelist returns a directory listing for SYS_CLASS_NET."""
+ write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'up')
+ write_file(os.path.join(self.sysdir, 'eth1', 'operstate'), 'up')
+ self.assertItemsEqual(['eth0', 'eth1'], net.get_devicelist())
+
+
+class TestGetInterfaceMAC(CiTestCase):
+
+ def setUp(self):
+ super(TestGetInterfaceMAC, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_get_interface_mac_false_with_no_mac(self):
+ """get_device_list returns False when no mac is reported."""
+ ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
+ mac_path = os.path.join(self.sysdir, 'eth0', 'address')
+ self.assertFalse(os.path.exists(mac_path))
+ self.assertFalse(net.get_interface_mac('eth0'))
+
+ def test_get_interface_mac(self):
+ """get_interfaces returns the mac from SYS_CLASS_NET/dev/address."""
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
+ self.assertEqual(mac, net.get_interface_mac('eth1'))
+
+ def test_get_interface_mac_grabs_bonding_address(self):
+ """get_interfaces returns the source device mac for bonded devices."""
+ source_dev_mac = 'aa:bb:cc:aa:bb:cc'
+ bonded_mac = 'dd:ee:ff:dd:ee:ff'
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), bonded_mac)
+ write_file(
+ os.path.join(self.sysdir, 'eth1', 'bonding_slave', 'perm_hwaddr'),
+ source_dev_mac)
+ self.assertEqual(source_dev_mac, net.get_interface_mac('eth1'))
+
+ def test_get_interfaces_empty_list_without_sys_net(self):
+ """get_interfaces returns an empty list when missing SYS_CLASS_NET."""
+ self.m_sys_path.return_value = 'idontexist'
+ self.assertEqual([], net.get_interfaces())
+
+ def test_get_interfaces_by_mac_skips_empty_mac(self):
+ """Ignore 00:00:00:00:00:00 addresses from get_interfaces_by_mac."""
+ empty_mac = '00:00:00:00:00:00'
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), empty_mac)
+ write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
+ write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
+ write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac)
+ expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ def test_get_interfaces_by_mac_skips_missing_mac(self):
+ """Ignore interfaces without an address from get_interfaces_by_mac."""
+ write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
+ address_path = os.path.join(self.sysdir, 'eth1', 'address')
+ self.assertFalse(os.path.exists(address_path))
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
+ write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac)
+ expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+
+class TestInterfaceHasOwnMAC(CiTestCase):
+
+ def setUp(self):
+ super(TestInterfaceHasOwnMAC, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_interface_has_own_mac_false_when_stolen(self):
+ """Return False from interface_has_own_mac when address is stolen."""
+ write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '2')
+ self.assertFalse(net.interface_has_own_mac('eth1'))
+
+ def test_interface_has_own_mac_true_when_not_stolen(self):
+ """Return False from interface_has_own_mac when mac isn't stolen."""
+ valid_assign_types = ['0', '1', '3']
+ assign_path = os.path.join(self.sysdir, 'eth1', 'addr_assign_type')
+ for _type in valid_assign_types:
+ write_file(assign_path, _type)
+ self.assertTrue(net.interface_has_own_mac('eth1'))
+
+ def test_interface_has_own_mac_strict_errors_on_absent_assign_type(self):
+ """When addr_assign_type is absent, interface_has_own_mac errors."""
+ with self.assertRaises(ValueError):
+ net.interface_has_own_mac('eth1', strict=True)
+
+
+@mock.patch('cloudinit.net.util.subp')
+class TestEphemeralIPV4Network(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestEphemeralIPV4Network, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_ephemeral_ipv4_network_errors_on_missing_params(self, m_subp):
+ """No required params for EphemeralIPv4Network can be None."""
+ required_params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
+ for key in required_params.keys():
+ params = copy.deepcopy(required_params)
+ params[key] = None
+ with self.assertRaises(ValueError) as context_manager:
+ net.EphemeralIPv4Network(**params)
+ error = context_manager.exception
+ self.assertIn('Cannot init network on', str(error))
+ self.assertEqual(0, m_subp.call_count)
+
+ def test_ephemeral_ipv4_network_errors_invalid_mask(self, m_subp):
+ """Raise an error when prefix_or_mask is not a netmask or prefix."""
+ params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'broadcast': '192.168.2.255'}
+ invalid_masks = ('invalid', 'invalid.', '123.123.123')
+ for error_val in invalid_masks:
+ params['prefix_or_mask'] = error_val
+ with self.assertRaises(ValueError) as context_manager:
+ with net.EphemeralIPv4Network(**params):
+ pass
+ error = context_manager.exception
+ self.assertIn('Cannot setup network: netmask', str(error))
+ self.assertEqual(0, m_subp.call_count)
+
+ def test_ephemeral_ipv4_network_performs_teardown(self, m_subp):
+ """EphemeralIPv4Network performs teardown on the device if setup."""
+ expected_setup_calls = [
+ mock.call(
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
+ 'broadcast', '192.168.2.255', 'dev', 'eth0'],
+ capture=True, update_env={'LANG': 'C'}),
+ mock.call(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
+ capture=True)]
+ expected_teardown_calls = [
+ mock.call(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0',
+ 'down'], capture=True),
+ mock.call(
+ ['ip', '-family', 'inet', 'addr', 'del', '192.168.2.2/24',
+ 'dev', 'eth0'], capture=True)]
+ params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(expected_setup_calls, m_subp.call_args_list)
+ m_subp.assert_has_calls(expected_teardown_calls)
+
+ def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp):
+ """EphemeralIPv4Network handles exception when address is setup.
+
+ It performs no cleanup as the interface was already setup.
+ """
+ params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
+ m_subp.side_effect = ProcessExecutionError(
+ '', 'RTNETLINK answers: File exists', 2)
+ expected_calls = [
+ mock.call(
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
+ 'broadcast', '192.168.2.255', 'dev', 'eth0'],
+ capture=True, update_env={'LANG': 'C'})]
+ with net.EphemeralIPv4Network(**params):
+ pass
+ self.assertEqual(expected_calls, m_subp.call_args_list)
+ self.assertIn(
+ 'Skip ephemeral network setup, eth0 already has address',
+ self.logs.getvalue())
+
+ def test_ephemeral_ipv4_network_with_prefix(self, m_subp):
+ """EphemeralIPv4Network takes a valid prefix to setup the network."""
+ params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'prefix_or_mask': '24', 'broadcast': '192.168.2.255'}
+ for prefix_val in ['24', 16]: # prefix can be int or string
+ params['prefix_or_mask'] = prefix_val
+ with net.EphemeralIPv4Network(**params):
+ pass
+ m_subp.assert_has_calls([mock.call(
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
+ 'broadcast', '192.168.2.255', 'dev', 'eth0'],
+ capture=True, update_env={'LANG': 'C'})])
+ m_subp.assert_has_calls([mock.call(
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/16',
+ 'broadcast', '192.168.2.255', 'dev', 'eth0'],
+ capture=True, update_env={'LANG': 'C'})])
+
+ def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp):
+ """Add the route when router is set and no default route exists."""
+ params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255',
+ 'router': '192.168.2.1'}
+ m_subp.return_value = '', '' # Empty response from ip route gw check
+ expected_setup_calls = [
+ mock.call(
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
+ 'broadcast', '192.168.2.255', 'dev', 'eth0'],
+ capture=True, update_env={'LANG': 'C'}),
+ mock.call(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
+ capture=True),
+ mock.call(
+ ['ip', 'route', 'show', '0.0.0.0/0'], capture=True),
+ mock.call(
+ ['ip', '-4', 'route', 'add', 'default', 'via',
+ '192.168.2.1', 'dev', 'eth0'], capture=True)]
+ expected_teardown_calls = [mock.call(
+ ['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'],
+ capture=True)]
+
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(expected_setup_calls, m_subp.call_args_list)
+ m_subp.assert_has_calls(expected_teardown_calls)
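Taken together, these tests pin down the context-manager contract: the address and any added default route are torn down on exit, and nothing is touched when the address already exists. A usage sketch with the same parameters the tests exercise:

    from cloudinit.net import EphemeralIPv4Network

    # Temporarily configure eth0; the address (and the default route, when
    # 'router' is given and none exists) is removed again on exit.
    with EphemeralIPv4Network(interface='eth0', ip='192.168.2.2',
                              prefix_or_mask='255.255.255.0',
                              broadcast='192.168.2.255',
                              router='192.168.2.1'):
        pass  # e.g. fetch metadata over the ephemeral address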
diff --git a/cloudinit/net/udev.py b/cloudinit/net/udev.py
index fd2fd8c7..58c0a708 100644
--- a/cloudinit/net/udev.py
+++ b/cloudinit/net/udev.py
@@ -23,7 +23,7 @@ def compose_udev_setting(key, value):
return '%s="%s"' % (key, value)
-def generate_udev_rule(interface, mac):
+def generate_udev_rule(interface, mac, driver=None):
"""Return a udev rule to set the name of network interface with `mac`.
The rule ends up as a single line looking something like:
@@ -31,10 +31,13 @@ def generate_udev_rule(interface, mac):
SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*",
ATTR{address}="ff:ee:dd:cc:bb:aa", NAME="eth0"
"""
+ if not driver:
+ driver = '?*'
+
rule = ', '.join([
compose_udev_equality('SUBSYSTEM', 'net'),
compose_udev_equality('ACTION', 'add'),
- compose_udev_equality('DRIVERS', '?*'),
+ compose_udev_equality('DRIVERS', driver),
compose_udev_attr_equality('address', mac),
compose_udev_setting('NAME', interface),
])
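A sketch of the effect: without a driver the rule keeps the '?*' wildcard; with one, DRIVERS pins the match (the driver name below is only an example):

    from cloudinit.net.udev import generate_udev_rule

    generate_udev_rule('eth0', 'ff:ee:dd:cc:bb:aa')
    # ... DRIVERS=="?*" ... NAME="eth0"
    generate_udev_rule('eth0', 'ff:ee:dd:cc:bb:aa', driver='hv_netvsc')
    # ... DRIVERS=="hv_netvsc" ... NAME="eth0"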
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index ed374a36..39c79dee 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -20,7 +20,7 @@ LOG = logging.getLogger()
def netdev_info(empty=""):
fields = ("hwaddr", "addr", "bcast", "mask")
- (ifcfg_out, _err) = util.subp(["ifconfig", "-a"])
+ (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
devs = {}
for line in str(ifcfg_out).splitlines():
if len(line) == 0:
@@ -85,7 +85,7 @@ def netdev_info(empty=""):
def route_info():
- (route_out, _err) = util.subp(["netstat", "-rn"])
+ (route_out, _err) = util.subp(["netstat", "-rn"], rcs=[0, 1])
routes = {}
routes['ipv4'] = []
@@ -125,7 +125,8 @@ def route_info():
routes['ipv4'].append(entry)
try:
- (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"])
+ (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"],
+ rcs=[0, 1])
except util.ProcessExecutionError:
pass
else:
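rcs widens the set of exit codes util.subp accepts before raising; here exit status 1 is tolerated since these tools can exit non-zero while still printing usable output (the motivation is inferred, not stated in the change):

    (route_out, _err) = util.subp(["netstat", "-rn"], rcs=[0, 1])
    # rcs=[0, 1]: exit codes 0 and 1 are both treated as success; any
    # other status still raises ProcessExecutionError.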
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 411960d8..c120498f 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -29,11 +29,13 @@ CFG_BUILTIN = {
'MAAS',
'GCE',
'OpenStack',
+ 'AliYun',
'Ec2',
'CloudSigma',
'CloudStack',
'SmartOS',
'Bigstep',
+ 'Scaleway',
# At the end to act as a 'catch' when none of the above work...
'None',
],
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 9debe947..380e27cb 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -4,8 +4,10 @@ import os
from cloudinit import sources
from cloudinit.sources import DataSourceEc2 as EC2
+from cloudinit import util
DEF_MD_VERSION = "2016-01-01"
+ALIYUN_PRODUCT = "Alibaba Cloud ECS"
class DataSourceAliYun(EC2.DataSourceEc2):
@@ -24,7 +26,17 @@ class DataSourceAliYun(EC2.DataSourceEc2):
@property
def cloud_platform(self):
- return EC2.Platforms.ALIYUN
+ if self._cloud_platform is None:
+ if _is_aliyun():
+ self._cloud_platform = EC2.Platforms.ALIYUN
+ else:
+ self._cloud_platform = EC2.Platforms.NO_EC2_METADATA
+
+ return self._cloud_platform
+
+
+def _is_aliyun():
+ return util.read_dmi_data('system-product-name') == ALIYUN_PRODUCT
def parse_public_keys(public_keys):
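The DMI probe lets AliYun identify itself locally before any Ec2 metadata traffic; the whole check is a single read:

    # True only when the firmware reports Alibaba Cloud ECS:
    util.read_dmi_data('system-product-name') == ALIYUN_PRODUCT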
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index b9458ffa..b5a95a1f 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -16,6 +16,7 @@ from xml.dom import minidom
import xml.etree.ElementTree as ET
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import sources
from cloudinit.sources.helpers.azure import get_metadata_from_fabric
from cloudinit import util
@@ -36,6 +37,8 @@ RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource'
DEFAULT_PRIMARY_NIC = 'eth0'
LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases'
DEFAULT_FS = 'ext4'
+# DMI chassis-asset-tag is static across all Azure instances
+AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
@@ -99,7 +102,7 @@ def get_dev_storvsc_sysctl():
sysctl_out, err = util.subp(['sysctl', 'dev.storvsc'])
except util.ProcessExecutionError:
LOG.debug("Fail to execute sysctl dev.storvsc")
- return None
+ sysctl_out = ""
return sysctl_out
@@ -175,6 +178,11 @@ if util.is_FreeBSD():
RESOURCE_DISK_PATH = "/dev/" + res_disk
else:
LOG.debug("resource disk is None")
+ BOUNCE_COMMAND = [
+ 'sh', '-xc',
+ ("i=$interface; x=0; ifconfig down $i || x=$?; "
+ "ifconfig up $i || x=$?; exit $x")
+ ]
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START_BUILTIN,
@@ -238,7 +246,9 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
set_hostname(previous_hostname, hostname_command)
-class DataSourceAzureNet(sources.DataSource):
+class DataSourceAzure(sources.DataSource):
+ _negotiated = False
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'azure')
@@ -248,6 +258,7 @@ class DataSourceAzureNet(sources.DataSource):
util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
BUILTIN_DS_CONFIG])
self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
+ self._network_config = None
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -320,6 +331,11 @@ class DataSourceAzureNet(sources.DataSource):
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ if asset_tag != AZURE_CHASSIS_ASSET_TAG:
+ LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
+ return False
+
ddir = self.ds_cfg['data_dir']
candidates = [self.seed_dir]
@@ -364,13 +380,14 @@ class DataSourceAzureNet(sources.DataSource):
LOG.debug("using files cached in %s", ddir)
# azure / hyper-v provides random data here
+ # TODO. find the seed on FreeBSD platform
+        # now update ds_cfg to reflect contents passed in config
if not util.is_FreeBSD():
seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
quiet=True, decode=False)
if seed:
self.metadata['random_seed'] = seed
- # TODO. find the seed on FreeBSD platform
- # now update ds_cfg to reflect contents pass in config
+
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
@@ -378,6 +395,40 @@ class DataSourceAzureNet(sources.DataSource):
# the directory to be protected.
write_files(ddir, files, dirmode=0o700)
+ self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
+
+ return True
+
+ def device_name_to_device(self, name):
+ return self.ds_cfg['disk_aliases'].get(name)
+
+ def get_config_obj(self):
+ return self.cfg
+
+ def check_instance_id(self, sys_cfg):
+        # quickly check (local only) whether self.instance_id is still valid
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
+ def setup(self, is_new_instance):
+ if self._negotiated is False:
+ LOG.debug("negotiating for %s (new_instance=%s)",
+ self.get_instance_id(), is_new_instance)
+ fabric_data = self._negotiate()
+ LOG.debug("negotiating returned %s", fabric_data)
+ if fabric_data:
+ self.metadata.update(fabric_data)
+ self._negotiated = True
+ else:
+ LOG.debug("negotiating already done for %s",
+ self.get_instance_id())
+
+ def _negotiate(self):
+ """Negotiate with fabric and return data from it.
+
+ On success, returns a dictionary including 'public_keys'.
+ On failure, returns False.
+ """
+
if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
self.bounce_network_with_azure_hostname()
@@ -387,31 +438,64 @@ class DataSourceAzureNet(sources.DataSource):
else:
metadata_func = self.get_metadata_from_agent
+ LOG.debug("negotiating with fabric via agent command %s",
+ self.ds_cfg['agent_command'])
try:
fabric_data = metadata_func()
except Exception as exc:
- LOG.info("Error communicating with Azure fabric; assume we aren't"
- " on Azure.", exc_info=True)
+ LOG.warning(
+ "Error communicating with Azure fabric; You may experience."
+ "connectivity issues.", exc_info=True)
return False
- self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
- self.metadata.update(fabric_data)
-
- return True
-
- def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
- def get_config_obj(self):
- return self.cfg
-
- def check_instance_id(self, sys_cfg):
- # quickly (local check only) if self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
+ return fabric_data
def activate(self, cfg, is_new_instance):
address_ephemeral_resize(is_new_instance=is_new_instance)
return
+ @property
+ def network_config(self):
+ """Generate a network config like net.generate_fallback_network() with
+ the following execptions.
+
+ 1. Probe the drivers of the net-devices present and inject them in
+ the network configuration under params: driver: <driver> value
+ 2. If the driver value is 'mlx4_core', the control mode should be
+ set to manual. The device will be later used to build a bond,
+ for now we want to ensure the device gets named but does not
+ break any network configuration
+ """
+ blacklist = ['mlx4_core']
+ if not self._network_config:
+ LOG.debug('Azure: generating fallback configuration')
+ # generate a network config, blacklist picking any mlx4_core devs
+ netconfig = net.generate_fallback_config(
+ blacklist_drivers=blacklist, config_driver=True)
+
+ # if we have any blacklisted devices, update the network_config to
+ # include the device, mac, and driver values, but with no ip
+ # config; this ensures udev rules are generated but won't affect
+ # ip configuration
+ bl_found = 0
+ for bl_dev in [dev for dev in net.get_devicelist()
+ if net.device_driver(dev) in blacklist]:
+ bl_found += 1
+ cfg = {
+ 'type': 'physical',
+ 'name': 'vf%d' % bl_found,
+ 'mac_address': net.get_interface_mac(bl_dev),
+ 'params': {
+ 'driver': net.device_driver(bl_dev),
+ 'device_id': net.device_devid(bl_dev),
+ },
+ }
+ netconfig['config'].append(cfg)
+
+ self._network_config = netconfig
+
+ return self._network_config
+
def _partitions_on_device(devpath, maxnum=16):
# return a list of tuples (ptnum, path) for each part on devpath
@@ -694,7 +778,7 @@ def read_azure_ovf(contents):
try:
dom = minidom.parseString(contents)
except Exception as e:
- raise BrokenAzureDataSource("invalid xml: %s" % e)
+ raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e)
results = find_child(dom.documentElement,
lambda n: n.localName == "ProvisioningSection")
@@ -792,19 +876,23 @@ def encrypt_pass(password, salt_id="$6$"):
return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
+def _check_freebsd_cdrom(cdrom_dev):
+ """Return boolean indicating path to cdrom device has content."""
+ try:
+ with open(cdrom_dev) as fp:
+ fp.read(1024)
+ return True
+ except IOError:
+ LOG.debug("cdrom (%s) is not configured", cdrom_dev)
+ return False
+
+
def list_possible_azure_ds_devs():
- # return a sorted list of devices that might have a azure datasource
devlist = []
if util.is_FreeBSD():
cdrom_dev = "/dev/cd0"
- try:
- util.subp(["mount", "-o", "ro", "-t", "udf", cdrom_dev,
- "/mnt/cdrom/secure"])
- except util.ProcessExecutionError:
- LOG.debug("Fail to mount cd")
- return devlist
- util.subp(["umount", "/mnt/cdrom/secure"])
- devlist.append(cdrom_dev)
+ if _check_freebsd_cdrom(cdrom_dev):
+ return [cdrom_dev]
else:
for fstype in ("iso9660", "udf"):
devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
@@ -834,9 +922,12 @@ class NonAzureDataSource(Exception):
pass
+# Legacy: Must be present in case we load an old pkl object
+DataSourceAzureNet = DataSourceAzure
+
# Used to match classes to dependencies
datasources = [
- (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceAzure, (sources.DEP_FILESYSTEM, )),
]
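With the mlx4_core blacklist applied, the generated fallback config names the virtual-function device without giving it any ip configuration. An illustrative result (mac, driver, and device_id values are made up):

    {'version': 1, 'config': [
        {'type': 'physical', 'name': 'eth0', 'subnets': [{'type': 'dhcp'}],
         'mac_address': '00:11:22:33:44:55', 'params': {'driver': 'hv_netvsc'}},
        {'type': 'physical', 'name': 'vf1',
         'mac_address': '00:11:22:33:44:66',
         'params': {'driver': 'mlx4_core', 'device_id': '0002:00:02.0'}}]}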
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 2f9c7edf..4ec9592f 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -32,7 +32,12 @@ class Platforms(object):
AWS = "AWS"
BRIGHTBOX = "Brightbox"
SEEDED = "Seeded"
+ # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false',
+ # then an attempt at the Ec2 Metadata service will be made.
UNKNOWN = "Unknown"
+ # NO_EC2_METADATA indicates this platform does not have a Ec2 metadata
+ # service available. No attempt at the Ec2 Metadata service will be made.
+ NO_EC2_METADATA = "No-EC2-Metadata"
class DataSourceEc2(sources.DataSource):
@@ -65,6 +70,8 @@ class DataSourceEc2(sources.DataSource):
strict_mode, self.cloud_platform)
if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN:
return False
+ elif self.cloud_platform == Platforms.NO_EC2_METADATA:
+ return False
try:
if not self.wait_for_metadata_service():
@@ -309,10 +316,16 @@ def identify_platform():
def _collect_platform_data():
- # returns a dictionary with all lower case values:
- # uuid: system-uuid from dmi or /sys/hypervisor
- # uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
- # serial: dmi 'system-serial-number' (/sys/.../product_serial)
+ """Returns a dictionary of platform info from dmi or /sys/hypervisor.
+
+ Keys in the dictionary are as follows:
+ uuid: system-uuid from dmi or /sys/hypervisor
+ uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
+ serial: dmi 'system-serial-number' (/sys/.../product_serial)
+
+    On Ec2 instances, experimentation shows product_serial is upper case,
+ and product_uuid is lower case. This returns lower case values for both.
+ """
data = {}
try:
uuid = util.load_file("/sys/hypervisor/uuid").strip()
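The two new constants split "platform not identified" from "platform known to lack a metadata service". A condensed sketch of the resulting decision in get_data:

    def _should_try_ec2_metadata(platform, strict_mode):
        # Condensed from the checks in DataSourceEc2.get_data() above.
        if platform == Platforms.NO_EC2_METADATA:
            return False   # never attempt the service
        if platform == Platforms.UNKNOWN and strict_mode == "true":
            return False   # strict_id forbids unidentified platforms
        return True        # identified platform, or warn/false mode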
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index c68f6b8c..e641244d 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -43,6 +43,18 @@ class DataSourceNoCloud(sources.DataSource):
'network-config': None}
try:
+ # Parse the system serial label from dmi. If not empty, try parsing
+ # like the commandline
+ md = {}
+ serial = util.read_dmi_data('system-serial-number')
+ if serial and load_cmdline_data(md, serial):
+ found.append("dmi")
+ mydata = _merge_new_seed(mydata, {'meta-data': md})
+ except Exception:
+ util.logexc(LOG, "Unable to parse dmi data")
+ return False
+
+ try:
# Parse the kernel command line, getting data passed in
md = {}
if load_cmdline_data(md):
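NoCloud now also reads its seed specification from the DMI system-serial-number, parsed with the same syntax as the kernel command line; for example (the value shown is illustrative):

    # A hypervisor can set the serial to a NoCloud spec such as:
    #   system-serial-number: ds=nocloud;s=http://10.0.4.1:8000/
    # load_cmdline_data() then parses it exactly like a cmdline entry.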
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
new file mode 100644
index 00000000..3a8a8e8f
--- /dev/null
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -0,0 +1,234 @@
+# Author: Julien Castets <castets.j@gmail.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Scaleway API:
+# https://developer.scaleway.com/#metadata
+
+import json
+import os
+import socket
+import time
+
+import requests
+
+# pylint fails to import the two modules below.
+# These are imported via requests.packages rather than urllib3 because:
+# a.) the provider of the requests package should ensure that urllib3
+# contained in it is consistent/correct.
+# b.) cloud-init does not specifically have a dependency on urllib3
+#
+# For future reference, see:
+# https://github.com/kennethreitz/requests/pull/2375
+# https://github.com/requests/requests/issues/4104
+# pylint: disable=E0401
+from requests.packages.urllib3.connection import HTTPConnection
+from requests.packages.urllib3.poolmanager import PoolManager
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import url_helper
+from cloudinit import util
+
+
+LOG = logging.getLogger(__name__)
+
+DS_BASE_URL = 'http://169.254.42.42'
+
+BUILTIN_DS_CONFIG = {
+ 'metadata_url': DS_BASE_URL + '/conf?format=json',
+ 'userdata_url': DS_BASE_URL + '/user_data/cloud-init',
+ 'vendordata_url': DS_BASE_URL + '/vendor_data/cloud-init'
+}
+
+DEF_MD_RETRIES = 5
+DEF_MD_TIMEOUT = 10
+
+
+def on_scaleway():
+ """
+ There are three ways to detect if you are on Scaleway:
+
+ * check DMI data: not yet implemented by Scaleway, but the check is made to
+ be future-proof.
+ * the initrd created the file /var/run/scaleway.
+ * "scaleway" is in the kernel cmdline.
+ """
+ vendor_name = util.read_dmi_data('system-manufacturer')
+ if vendor_name == 'Scaleway':
+ return True
+
+ if os.path.exists('/var/run/scaleway'):
+ return True
+
+ cmdline = util.get_cmdline()
+ if 'scaleway' in cmdline:
+ return True
+
+ return False
+
+
+class SourceAddressAdapter(requests.adapters.HTTPAdapter):
+ """
+ Adapter for requests to choose the local address to bind to.
+ """
+ def __init__(self, source_address, **kwargs):
+ self.source_address = source_address
+ super(SourceAddressAdapter, self).__init__(**kwargs)
+
+ def init_poolmanager(self, connections, maxsize, block=False):
+ socket_options = HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ ]
+ self.poolmanager = PoolManager(num_pools=connections,
+ maxsize=maxsize,
+ block=block,
+ source_address=self.source_address,
+ socket_options=socket_options)
+
+
+def query_data_api_once(api_address, timeout, requests_session):
+ """
+ Retrieve user data or vendor data.
+
+ Scaleway user/vendor data API returns HTTP/404 if user/vendor data is not
+ set.
+
+ This function calls `url_helper.readurl` but instead of considering
+ HTTP/404 as an error that requires a retry, it considers it as empty
+ user/vendor data.
+
+ Also, be aware the user/vendor data API requires the source port to be
+ below 1024 to ensure the client is root (since non-root users can't bind
+ ports below 1024). If requests raises ConnectionError (EADDRINUSE), the
+ caller should retry this function on another port.
+ """
+ try:
+ resp = url_helper.readurl(
+ api_address,
+ data=None,
+ timeout=timeout,
+ # It's the caller's responsibility to call this function again in case
+ # of exception. Don't let url_helper.readurl() retry by itself.
+ retries=0,
+ session=requests_session,
+ # If the error is an HTTP/404 or a ConnectionError, fall
+ # through to the except block below.
+ exception_cb=lambda _, exc: exc.code == 404 or (
+ isinstance(exc.cause, requests.exceptions.ConnectionError)
+ )
+ )
+ return util.decode_binary(resp.contents)
+ except url_helper.UrlError as exc:
+ # Empty user data.
+ if exc.code == 404:
+ return None
+ raise
+
+
+def query_data_api(api_type, api_address, retries, timeout):
+ """Get user or vendor data.
+
+ Handle the retry logic in case the source port is already in use.
+
+ Scaleway metadata service requires the source port of the client to
+ be a privileged port (<1024). This is done to ensure that only a
+ privileged user on the system can access the metadata service.
+ """
+ # Query user/vendor data. Try to make a request on the first privileged
+ # port available.
+ for port in range(1, max(retries, 2)):
+ try:
+ LOG.debug(
+ 'Trying to get %s data (bind on port %d)...',
+ api_type, port
+ )
+ requests_session = requests.Session()
+ requests_session.mount(
+ 'http://',
+ SourceAddressAdapter(source_address=('0.0.0.0', port))
+ )
+ data = query_data_api_once(
+ api_address,
+ timeout=timeout,
+ requests_session=requests_session
+ )
+ LOG.debug('%s-data downloaded', api_type)
+ return data
+
+ except url_helper.UrlError as exc:
+ # Local port already in use or HTTP/429.
+ LOG.warning('Error while trying to get %s data: %s', api_type, exc)
+ time.sleep(5)
+ last_exc = exc
+ continue
+
+ # Max number of retries reached.
+ raise last_exc
+
+
+class DataSourceScaleway(sources.DataSource):
+
+ def __init__(self, sys_cfg, distro, paths):
+ super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
+
+ self.ds_cfg = util.mergemanydict([
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
+ BUILTIN_DS_CONFIG
+ ])
+
+ self.metadata_address = self.ds_cfg['metadata_url']
+ self.userdata_address = self.ds_cfg['userdata_url']
+ self.vendordata_address = self.ds_cfg['vendordata_url']
+
+ self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
+ self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
+
+ def get_data(self):
+ if not on_scaleway():
+ return False
+
+ resp = url_helper.readurl(self.metadata_address,
+ timeout=self.timeout,
+ retries=self.retries)
+ self.metadata = json.loads(util.decode_binary(resp.contents))
+
+ self.userdata_raw = query_data_api(
+ 'user-data', self.userdata_address,
+ self.retries, self.timeout
+ )
+ self.vendordata_raw = query_data_api(
+ 'vendor-data', self.vendordata_address,
+ self.retries, self.timeout
+ )
+ return True
+
+ @property
+ def launch_index(self):
+ return None
+
+ def get_instance_id(self):
+ return self.metadata['id']
+
+ def get_public_ssh_keys(self):
+ return [key['key'] for key in self.metadata['ssh_public_keys']]
+
+ def get_hostname(self, fqdn=False, resolve_ip=False):
+ return self.metadata['hostname']
+
+ @property
+ def availability_zone(self):
+ return None
+
+ @property
+ def region(self):
+ return None
+
+
+datasources = [
+ (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
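
As context for the privileged-port requirement documented in query_data_api_once above, here is a rough usage sketch of SourceAddressAdapter outside the datasource. It assumes the class above is importable and that the process is privileged enough to bind a port below 1024:

    import requests

    from cloudinit.sources.DataSourceScaleway import SourceAddressAdapter

    session = requests.Session()
    # Bind the local end of the connection to a port below 1024 so the
    # metadata service can verify the caller is root.
    session.mount('http://',
                  SourceAddressAdapter(source_address=('0.0.0.0', 80)))
    resp = session.get('http://169.254.42.42/user_data/cloud-init',
                       timeout=10)
    print(resp.status_code)  # 404 simply means "no user data set"
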
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index c3ce36d6..952caf35 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -251,10 +251,23 @@ class DataSource(object):
def first_instance_boot(self):
return
+ def setup(self, is_new_instance):
+ """setup(is_new_instance)
+
+ This is called before user-data and vendor-data have been processed.
+
+ Unless the datasource has set mode to 'local', networking
+ (per 'fallback' or per 'network_config') will have been written
+ and brought up by the OS at this point.
+ """
+ return
+
def activate(self, cfg, is_new_instance):
"""activate(cfg, is_new_instance)
- This is called before the init_modules will be called.
+ This is called before the init_modules are run, but after
+ the user-data and vendor-data have been fully processed.
+
The cfg is fully up to date config, it contains a merged view of
system config, datasource config, user config, vendor config.
It should be used rather than the sys_cfg passed to __init__.
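
A minimal sketch of a datasource using the new setup() hook alongside the existing activate() hook; the subclass below is hypothetical, shown only to illustrate where each hook runs in the boot sequence:

    from cloudinit import sources

    class DataSourceExample(sources.DataSource):

        def setup(self, is_new_instance):
            # Runs before user-data/vendor-data are processed; unless the
            # datasource is in 'local' mode, networking is already up.
            if is_new_instance:
                self.metadata = {'instance-id': 'iid-example'}

        def activate(self, cfg, is_new_instance):
            # Runs after user-data and vendor-data are fully processed;
            # cfg is the merged system/datasource/user/vendor config.
            pass
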
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index ad557827..a1c4a517 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -362,6 +362,11 @@ class Init(object):
self._store_userdata()
self._store_vendordata()
+ def setup_datasource(self):
+ if self.datasource is None:
+ raise RuntimeError("Datasource is None, cannot setup.")
+ self.datasource.setup(is_new_instance=self.is_new_instance())
+
def activate_datasource(self):
if self.datasource is None:
raise RuntimeError("Datasource is None, cannot activate.")
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index d2b92e6a..7cf76aae 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -172,7 +172,8 @@ def _get_ssl_args(url, ssl_details):
def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
headers=None, headers_cb=None, ssl_details=None,
- check_status=True, allow_redirects=True, exception_cb=None):
+ check_status=True, allow_redirects=True, exception_cb=None,
+ session=None):
url = _cleanurl(url)
req_args = {
'url': url,
@@ -231,7 +232,12 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
LOG.debug("[%s/%s] open '%s' with %s configuration", i,
manual_tries, url, filtered_req_args)
- r = requests.request(**req_args)
+ if session is None:
+ session = requests.Session()
+
+ with session as sess:
+ r = sess.request(**req_args)
+
if check_status:
r.raise_for_status()
LOG.debug("Read from %s (%s, %sb) after %s attempts", url,
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 135e4608..ce2c6034 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -330,7 +330,11 @@ class SeLinuxGuard(object):
LOG.debug("Restoring selinux mode for %s (recursive=%s)",
path, self.recursive)
- self.selinux.restorecon(path, recursive=self.recursive)
+ try:
+ self.selinux.restorecon(path, recursive=self.recursive)
+ except OSError as e:
+ LOG.warning('restorecon failed on %s (recursive=%s): %s',
+ path, self.recursive, e)
class MountFailedError(Exception):
@@ -569,7 +573,7 @@ def is_ipv4(instr):
def is_FreeBSD():
- return system_info()['platform'].startswith('FreeBSD')
+ return system_info()['variant'] == "freebsd"
def get_cfg_option_bool(yobj, key, default=False):
@@ -592,13 +596,32 @@ def get_cfg_option_int(yobj, key, default=0):
def system_info():
- return {
+ info = {
'platform': platform.platform(),
+ 'system': platform.system(),
'release': platform.release(),
'python': platform.python_version(),
'uname': platform.uname(),
- 'dist': platform.linux_distribution(), # pylint: disable=W1505
+ 'dist': platform.dist(), # pylint: disable=W1505
}
+ system = info['system'].lower()
+ var = 'unknown'
+ if system == "linux":
+ linux_dist = info['dist'][0].lower()
+ if linux_dist in ('centos', 'fedora', 'debian'):
+ var = linux_dist
+ elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
+ var = 'ubuntu'
+ elif linux_dist == 'redhat':
+ var = 'rhel'
+ else:
+ var = 'linux'
+ elif system in ('windows', 'darwin', "freebsd"):
+ var = system
+
+ info['variant'] = var
+
+ return info
def get_cfg_option_list(yobj, key, default=None):
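
An illustration of the new variant field; the printed values depend on the host, so those shown in the comments are examples only:

    from cloudinit import util

    info = util.system_info()
    # 'variant' is derived from platform.system() and platform.dist(),
    # e.g. 'ubuntu', 'rhel', 'freebsd', or 'linux' as a fallback.
    print(info['variant'])        # e.g. 'ubuntu'
    print(util.is_FreeBSD())      # now just checks variant == 'freebsd'
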
@@ -1105,14 +1128,14 @@ def is_resolvable(name):
we have to append '.'.
The top level 'invalid' domain is invalid per RFC. And example.com
- should also not exist. The random entry will be resolved inside
- the search list.
+ should also not exist. The '__cloud_init_expected_not_found__' entry will
+ be resolved inside the search list.
"""
global _DNS_REDIRECT_IP
if _DNS_REDIRECT_IP is None:
badips = set()
badnames = ("does-not-exist.example.com.", "example.invalid.",
- rand_str())
+ "__cloud_init_expected_not_found__")
badresults = {}
for iname in badnames:
try:
@@ -1720,8 +1743,12 @@ def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False):
else:
content = decode_binary(content)
write_type = 'characters'
+ try:
+ mode_r = "%o" % mode
+ except TypeError:
+ mode_r = "%r" % mode
LOG.debug("Writing to %s - %s: [%s] %s %s",
- filename, omode, mode, len(content), write_type)
+ filename, omode, mode_r, len(content), write_type)
with SeLinuxGuard(path=filename):
with open(filename, omode) as fh:
fh.write(content)
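
The formatting fallback added above renders integer modes as octal and anything else via repr; a standalone sketch of the same logic:

    for mode in (0o644, 'u=rw,go=r', None):
        try:
            mode_r = '%o' % mode       # 0o644 -> '644'
        except TypeError:              # non-integer modes fall back to repr
            mode_r = '%r' % mode
        print(mode_r)
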
@@ -2370,6 +2397,10 @@ def read_dmi_data(key):
"""
Wrapper for reading DMI data.
+ If running in a container, return None. DMI data is assumed
+ not to be useful in a container, as it describes the host
+ rather than the container.
+
This will do the following (returning the first that produces a
result):
1) Use a mapping to translate `key` from dmidecode naming to
@@ -2380,6 +2411,9 @@ def read_dmi_data(key):
If all of the above fail to find a value, None will be returned.
"""
+ if is_container():
+ return None
+
syspath_value = _read_dmi_syspath(key)
if syspath_value is not None:
return syspath_value
@@ -2495,7 +2529,7 @@ def load_shell_content(content, add_empty=False, empty_val=None):
if PY26 and isinstance(blob, six.text_type):
# Older versions don't support unicode input
blob = blob.encode("utf8")
- return shlex.split(blob)
+ return shlex.split(blob, comments=True)
data = {}
for line in _shlex_split(content):
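
The comments=True change makes shlex drop shell-style comments when splitting sourced shell content; for example:

    import shlex

    line = "KEY=value  # trailing comment"
    print(shlex.split(line))                 # ['KEY=value', '#', 'trailing', 'comment']
    print(shlex.split(line, comments=True))  # ['KEY=value']
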
diff --git a/config/cloud.cfg-freebsd b/config/cloud.cfg-freebsd
deleted file mode 100644
index d666c397..00000000
--- a/config/cloud.cfg-freebsd
+++ /dev/null
@@ -1,88 +0,0 @@
-# The top level settings are used as module
-# and system configuration.
-
-syslog_fix_perms: root:wheel
-
-# This should not be required, but leave it in place until the real cause of
-# not beeing able to find -any- datasources is resolved.
-datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2']
-
-# A set of users which may be applied and/or used by various modules
-# when a 'default' entry is found it will reference the 'default_user'
-# from the distro configuration specified below
-users:
- - default
-
-# If this is set, 'root' will not be able to ssh in and they
-# will get a message to login instead as the above $user (ubuntu)
-disable_root: false
-
-# This will cause the set+update hostname module to not operate (if true)
-preserve_hostname: false
-
-# Example datasource config
-# datasource:
-# Ec2:
-# metadata_urls: [ 'blah.com' ]
-# timeout: 5 # (defaults to 50 seconds)
-# max_wait: 10 # (defaults to 120 seconds)
-
-# The modules that run in the 'init' stage
-cloud_init_modules:
-# - migrator
- - seed_random
- - bootcmd
-# - write-files
- - growpart
- - resizefs
- - set_hostname
- - update_hostname
-# - update_etc_hosts
-# - ca-certs
-# - rsyslog
- - users-groups
- - ssh
-
-# The modules that run in the 'config' stage
-cloud_config_modules:
-# - disk_setup
-# - mounts
- - ssh-import-id
- - locale
- - set-passwords
- - package-update-upgrade-install
-# - landscape
- - timezone
-# - puppet
-# - chef
-# - salt-minion
-# - mcollective
- - disable-ec2-metadata
- - runcmd
-# - byobu
-
-# The modules that run in the 'final' stage
-cloud_final_modules:
- - rightscale_userdata
- - scripts-vendor
- - scripts-per-once
- - scripts-per-boot
- - scripts-per-instance
- - scripts-user
- - ssh-authkey-fingerprints
- - keys-to-console
- - phone-home
- - final-message
- - power-state-change
-
-# System and/or distro specific settings
-# (not accessible to handlers/transforms)
-system_info:
- distro: freebsd
- default_user:
- name: freebsd
- lock_passwd: True
- gecos: FreeBSD
- groups: [wheel]
- sudo: ["ALL=(ALL) NOPASSWD:ALL"]
- shell: /bin/tcsh
diff --git a/config/cloud.cfg b/config/cloud.cfg.tmpl
index 1b93e7f9..f4b9069b 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg.tmpl
@@ -1,22 +1,43 @@
+## template:jinja
# The top level settings are used as module
# and system configuration.
+{% if variant in ["freebsd"] %}
+syslog_fix_perms: root:wheel
+{% endif %}
# A set of users which may be applied and/or used by various modules
# when a 'default' entry is found it will reference the 'default_user'
# from the distro configuration specified below
users:
- default
-# If this is set, 'root' will not be able to ssh in and they
-# will get a message to login instead as the above $user (ubuntu)
+# If this is set, 'root' will not be able to ssh in and they
+# will get a message to login instead as the default $user
+{% if variant in ["freebsd"] %}
+disable_root: false
+{% else %}
disable_root: true
+{% endif %}
+{% if variant in ["centos", "fedora", "rhel"] %}
+mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
+resize_rootfs_tmp: /dev
+ssh_deletekeys: 0
+ssh_genkeytypes: ~
+ssh_pwauth: 0
+
+{% endif %}
# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: false
+{% if variant in ["freebsd"] %}
+# This should not be required, but leave it in place until the real cause of
+# not being able to find -any- datasources is resolved.
+datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2']
+{% endif %}
# Example datasource config
-# datasource:
-# Ec2:
+# datasource:
+# Ec2:
# metadata_urls: [ 'blah.com' ]
# timeout: 5 # (defaults to 50 seconds)
# max_wait: 10 # (defaults to 120 seconds)
@@ -24,51 +45,75 @@ preserve_hostname: false
# The modules that run in the 'init' stage
cloud_init_modules:
- migrator
+{% if variant in ["ubuntu", "unknown", "debian"] %}
- ubuntu-init-switch
+{% endif %}
- seed_random
- bootcmd
- write-files
- growpart
- resizefs
+{% if variant not in ["freebsd"] %}
- disk_setup
- mounts
+{% endif %}
- set_hostname
- update_hostname
+{% if variant not in ["freebsd"] %}
- update_etc_hosts
- ca-certs
- rsyslog
+{% endif %}
- users-groups
- ssh
# The modules that run in the 'config' stage
cloud_config_modules:
+{% if variant in ["ubuntu", "unknown", "debian"] %}
# Emit the cloud config ready event
# this can be used by upstart jobs for 'start on cloud-config'.
- emit_upstart
- snap_config
+{% endif %}
- ssh-import-id
- locale
- set-passwords
+{% if variant in ["rhel", "fedora"] %}
+ - spacewalk
+ - yum-add-repo
+{% endif %}
+{% if variant in ["ubuntu", "unknown", "debian"] %}
- grub-dpkg
- apt-pipelining
- apt-configure
+{% endif %}
+{% if variant not in ["freebsd"] %}
- ntp
+{% endif %}
- timezone
- disable-ec2-metadata
- runcmd
+{% if variant in ["ubuntu", "unknown", "debian"] %}
- byobu
+{% endif %}
# The modules that run in the 'final' stage
cloud_final_modules:
+{% if variant in ["ubuntu", "unknown", "debian"] %}
- snappy
+{% endif %}
- package-update-upgrade-install
+{% if variant in ["ubuntu", "unknown", "debian"] %}
- fan
- landscape
- lxd
+{% endif %}
+{% if variant not in ["freebsd"] %}
- puppet
- chef
- salt-minion
- mcollective
+{% endif %}
- rightscale_userdata
- scripts-vendor
- scripts-per-once
@@ -85,7 +130,13 @@ cloud_final_modules:
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
+{% if variant in ["centos", "debian", "fedora", "rhel", "ubuntu", "freebsd"] %}
+ distro: {{ variant }}
+{% else %}
+ # Unknown/fallback distro.
distro: ubuntu
+{% endif %}
+{% if variant in ["ubuntu", "unknown", "debian"] %}
# Default user name + that default users groups (if added/used)
default_user:
name: ubuntu
@@ -115,3 +166,27 @@ system_info:
primary: http://ports.ubuntu.com/ubuntu-ports
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
+{% elif variant in ["centos", "rhel", "fedora"] %}
+ # Default user name + that default users groups (if added/used)
+ default_user:
+ name: {{ variant }}
+ lock_passwd: True
+ gecos: {{ variant }} Cloud User
+ groups: [wheel, adm, systemd-journal]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/bash
+ # Other config here will be given to the distro class and/or path classes
+ paths:
+ cloud_dir: /var/lib/cloud/
+ templates_dir: /etc/cloud/templates/
+ ssh_svcname: sshd
+{% elif variant in ["freebsd"] %}
+ # Default user name + that default users groups (if added/used)
+ default_user:
+ name: freebsd
+ lock_passwd: True
+ gecos: FreeBSD
+ groups: [wheel]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/tcsh
+{% endif %}
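
A hedged sketch of how the variant-based templating above can be exercised. Plain Jinja is used here for illustration; cloud-init's own build renders the file through cloudinit.templater after stripping the '## template:jinja' header:

    import jinja2

    template = jinja2.Template(
        '{% if variant in ["freebsd"] %}disable_root: false'
        '{% else %}disable_root: true{% endif %}')
    print(template.render(variant='freebsd'))   # disable_root: false
    print(template.render(variant='ubuntu'))    # disable_root: true
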
diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt
index 3cb62006..9d235817 100644
--- a/doc/examples/cloud-config-chef.txt
+++ b/doc/examples/cloud-config-chef.txt
@@ -1,6 +1,6 @@
#cloud-config
#
-# This is an example file to automatically install chef-client and run a
+# This is an example file to automatically install chef-client and run a
# list of recipes when the instance boots for the first time.
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
@@ -8,7 +8,7 @@
# This example assumes the instance is 16.04 (xenial)
-# The default is to install from packages.
+# The default is to install from packages.
# Key from https://packages.chef.io/chef.asc
apt:
@@ -60,7 +60,7 @@ chef:
force_install: false
# Chef settings
- server_url: "https://chef.yourorg.com:4000"
+ server_url: "https://chef.yourorg.com"
# Node Name
# Defaults to the instance-id if not present
@@ -78,8 +78,8 @@ chef:
-----BEGIN RSA PRIVATE KEY-----
YOUR-ORGS-VALIDATION-KEY-HERE
-----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json
+
+ # A run list for a first boot json, an example (not required)
run_list:
- "recipe[apache2]"
- "role[db]"
@@ -92,7 +92,7 @@ chef:
keepalive: "off"
# if install_type is 'omnibus', change the url to download
- omnibus_url: "https://www.opscode.com/chef/install.sh"
+ omnibus_url: "https://www.chef.io/chef/install.sh"
# Capture all subprocess output into a logfile
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
index 38ad0528..dd91477d 100644
--- a/doc/examples/cloud-config-disk-setup.txt
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -98,11 +98,11 @@ disk_setup:
#
# table_type=<TYPE>: Currently the following are supported:
# 'mbr': default and setups a MS-DOS partition table
+# 'gpt': setups a GPT partition table
#
-# Note: At this time only 'mbr' partition tables are allowed.
-# It is anticipated in the future that we'll have GPT as
-# option in the future, or even "RAID" to create a mdadm
-# RAID.
+# Note: At this time only 'mbr' and 'gpt' partition tables
+# are allowed. It is anticipated in the future that
+# we'll also have "RAID" to create a mdadm RAID.
#
# layout={...}: The device layout. This is a list of values, with the
# percentage of disk that partition will take.
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 66b3b654..0ea3b6bf 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -10,6 +10,7 @@ sys.path.insert(0, os.path.abspath('./'))
sys.path.insert(0, os.path.abspath('.'))
from cloudinit import version
+from cloudinit.config.schema import get_schema_doc
# Supress warnings for docs that aren't used yet
# unused_docs = [
@@ -75,3 +76,12 @@ html_theme_options = {
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'static/logo.png'
+
+def generate_docstring_from_schema(app, what, name, obj, options, lines):
+ """Override module docs from schema when present."""
+ if what == 'module' and hasattr(obj, "schema"):
+ del lines[:]
+ lines.extend(get_schema_doc(obj.schema).split('\n'))
+
+def setup(app):
+ app.connect('autodoc-process-docstring', generate_docstring_from_schema)
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 9acecc53..a60f5eb7 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -20,7 +20,7 @@ through the typical usage of subclasses.
The current interface that a datasource object must provide is the following:
.. sourcecode:: python
-
+
# returns a mime multipart message that contains
# all the various fully-expanded components that
# were found from processing the raw userdata string
@@ -28,47 +28,47 @@ The current interface that a datasource object must provide is the following:
# this instance id will be returned (or messages with
# no instance id)
def get_userdata(self, apply_filter=False)
-
+
# returns the raw userdata string (or none)
def get_userdata_raw(self)
-
+
# returns a integer (or none) which can be used to identify
# this instance in a group of instances which are typically
- # created from a single command, thus allowing programatic
+ # created from a single command, thus allowing programmatic
# filtering on this launch index (or other selective actions)
@property
def launch_index(self)
-
- # the data sources' config_obj is a cloud-config formated
+
+ # the data sources' config_obj is a cloud-config formatted
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
def get_config_obj(self)
-
+
#returns a list of public ssh keys
def get_public_ssh_keys(self)
-
+
# translates a device 'short' name into the actual physical device
# fully qualified name (or none if said physical device is not attached
# or does not exist)
def device_name_to_device(self, name)
-
+
# gets the locale string this instance should be applying
# which typically used to adjust the instances locale settings files
def get_locale(self)
-
+
@property
def availability_zone(self)
-
+
# gets the instance id that was assigned to this instance by the
# cloud provider or when said instance id does not exist in the backing
# metadata this will return 'iid-datasource'
def get_instance_id(self)
-
+
# gets the fully qualified domain name that this host should be using
# when configuring network or hostname releated settings, typically
# assigned either by the cloud provider or the user creating the vm
def get_hostname(self, fqdn=False)
-
+
def get_package_mirror_info(self)
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index 4a3735b5..559011ef 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -8,7 +8,7 @@ This datasource finds metadata and user-data from the Azure cloud platform.
Azure Platform
--------------
The azure cloud-platform provides initial data to an instance via an attached
-CD formated in UDF. That CD contains a 'ovf-env.xml' file that provides some
+CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some
information. Additional information is obtained via interaction with the
"endpoint".
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index 0159e853..08578e86 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -11,6 +11,38 @@ You can provide meta-data and user-data to a local vm boot via files on a
`vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be
``cidata``.
+Alternatively, you can provide meta-data via the kernel command line or SMBIOS
+"serial number" option. The data must be passed in the form of a string:
+
+::
+
+ ds=nocloud[;key=val;key=val]
+
+or
+
+::
+
+ ds=nocloud-net[;key=val;key=val]
+
+The permitted keys are:
+
+- ``h`` or ``local-hostname``
+- ``i`` or ``instance-id``
+- ``s`` or ``seedfrom``
+
+With ``ds=nocloud``, the ``seedfrom`` value must start with ``/`` or
+``file://``. With ``ds=nocloud-net``, the ``seedfrom`` value must start
+with ``http://``, ``https://`` or ``ftp://``.
+
+For example, you can pass this option to QEMU:
+
+::
+
+ -smbios type=1,serial=ds=nocloud-net;s=http://10.10.0.1:8000/
+
+to cause NoCloud to fetch the full meta-data from http://10.10.0.1:8000/meta-data
+after the network initialization is complete.
+
These user-data and meta-data files are expected to be in the following format.
::
diff --git a/doc/rtd/topics/dir_layout.rst b/doc/rtd/topics/dir_layout.rst
index 3f5aa205..7a6265eb 100644
--- a/doc/rtd/topics/dir_layout.rst
+++ b/doc/rtd/topics/dir_layout.rst
@@ -41,9 +41,9 @@ Cloudinits's directory structure is somewhat different from a regular applicatio
``data/``
- Contains information releated to instance ids, datasources and hostnames of the previous
+ Contains information related to instance ids, datasources and hostnames of the previous
and current instance if they are different. These can be examined as needed to
- determine any information releated to a previous boot (if applicable).
+ determine any information related to a previous boot (if applicable).
``handlers/``
@@ -59,9 +59,9 @@ Cloudinits's directory structure is somewhat different from a regular applicatio
``instances/``
- All instances that were created using this image end up with instance identifer
+ All instances that were created using this image end up with instance identifier
subdirectories (and corresponding data for each instance). The currently active
- instance will be symlinked the the ``instance`` symlink file defined previously.
+ instance will be symlinked to the ``instance`` symlink file defined previously.
``scripts/``
@@ -74,9 +74,9 @@ Cloudinits's directory structure is somewhat different from a regular applicatio
``sem/``
- Cloud-init has a concept of a module sempahore, which basically consists
+ Cloud-init has a concept of a module semaphore, which basically consists
of the module name and its frequency. These files are used to ensure a module
- is only ran `per-once`, `per-instance`, `per-always`. This folder contains
- sempaphore `files` which are only supposed to run `per-once` (not tied to the instance id).
+ is only run `per-once`, `per-instance`, `per-always`. This folder contains
+ semaphore `files` which are only supposed to run `per-once` (not tied to the instance id).
.. vi: textwidth=78
diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst
index 2f927a47..c75ca59c 100644
--- a/doc/rtd/topics/merging.rst
+++ b/doc/rtd/topics/merging.rst
@@ -7,7 +7,7 @@ Overview
This was implemented because it has been a common feature request that there be
a way to specify how cloud-config yaml "dictionaries" provided as user-data are
-merged together when there are multiple yamls to merge together (say when
+merged together when there are multiple yaml files to merge together (say when
performing an #include).
Since previously the merging algorithm was very simple and would only overwrite
@@ -128,7 +128,7 @@ for your own usage.
for, both of which can define the way merging is done (the first header to
exist wins). These new headers (in lookup order) are 'Merge-Type' and
'X-Merge-Type'. The value should be a string which will satisfy the new
- merging format defintion (see below for this format).
+ merging format definition (see below for this format).
2. The second way is actually specifying the merge-type in the body of the
cloud-config dictionary. There are 2 ways to specify this, either as a
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index 36326b59..ce3a1bde 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -246,8 +246,8 @@ Valid keys are:
- jumbo0
params:
bridge_ageing: 250
- bridge_bridgeprio: 22
- bridge_fd: 1
+ bridge_bridgeprio: 22
+ bridge_fd: 1
bridge_hello: 1
bridge_maxage: 10
bridge_maxwait: 0
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 109c86f5..96c1cf59 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -31,7 +31,7 @@ A ``network:`` entry in /etc/cloud/cloud.cfg.d/* configuration files.
``ip=`` or ``network-config=<YAML config string>``
-User-data cannot change an instance's network configuration. In the absense
+User-data cannot change an instance's network configuration. In the absence
of network configuration in any of the above sources , `Cloud-init`_ will
write out a network configuration that will issue a DHCP request on a "first"
network interface.
@@ -220,7 +220,7 @@ CLI Interface :
--output-kind {eni,netplan,sysconfig}, -ok {eni,netplan,sysconfig}
-Example output convertion V2 to sysconfig:
+Example output converting V2 to sysconfig:
.. code-block:: bash
diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
index 0663811e..d668e3f4 100644
--- a/doc/rtd/topics/tests.rst
+++ b/doc/rtd/topics/tests.rst
@@ -1,14 +1,186 @@
-****************
-Test Development
-****************
-
+*******************
+Integration Testing
+*******************
Overview
========
-The purpose of this page is to describe how to write integration tests for
-cloud-init. As a test writer you need to develop a test configuration and
-a verification file:
+This page describes the execution, development, and architecture of the
+cloud-init integration tests:
+
+* Execution explains the options available and running of tests
+* Development shows how to write test cases
+* Architecture explains the internal processes
+
+Execution
+=========
+
+Overview
+--------
+
+In order to avoid the need for dependencies and ease the setup and
+configuration, users can run the integration tests via tox:
+
+.. code-block:: bash
+
+ $ git clone https://git.launchpad.net/cloud-init
+ $ cd cloud-init
+ $ tox -e citest -- -h
+
+Everything after the double dash will be passed to the integration tests.
+Executing tests has several options:
+
+* ``run`` an alias to run both ``collect`` and ``verify``. The ``tree_run``
+ command does the same thing, except it uses a deb built from the current
+ working tree.
+
+* ``collect`` deploys on the specified platform and distro, patches with the
+ requested deb or rpm, and finally collects output of the arbitrary
+ commands. Similarly, ``tree_collect`` will collect output using a deb
+ built from the current working tree.
+
+* ``verify`` given a directory of test data, run the Python unit tests on
+ it to generate results.
+
+* ``bddeb`` will build a deb of the current working tree.
+
+Run
+---
+
+The first example will provide a complete end-to-end run of data
+collection and verification. There are additional examples below
+explaining how to run one or the other independently.
+
+.. code-block:: bash
+
+ $ git clone https://git.launchpad.net/cloud-init
+ $ cd cloud-init
+ $ tox -e citest -- run --verbose \
+ --os-name stretch --os-name xenial \
+ --deb cloud-init_0.7.8~my_patch_all.deb \
+ --preserve-data --data-dir ~/collection
+
+The above command will do the following:
+
+* ``run`` both collect output and run tests on the output
+
+* ``--verbose`` verbose output
+
+* ``--os-name stretch`` on the Debian Stretch release
+
+* ``--os-name xenial`` on the Ubuntu Xenial release
+
+* ``--deb cloud-init_0.7.8~patch_all.deb`` use this deb as the version of
+ cloud-init to run with
+
+* ``--preserve-data`` always preserve collected data, do not remove data
+ after successful test run
+
+* ``--data-dir ~/collection`` write collected data into `~/collection`,
+ rather than using a temporary directory
+
+For a more detailed explanation of each option see below.
+
+.. note::
+ By default, data collected by the run command will be written into a
+ temporary directory and deleted after a successful run. If you would
+ like to preserve this data, please use the option ``--preserve-data``.
+
+Collect
+-------
+
+If developing tests it may be necessary to see if cloud-config works as
+expected and the correct files are pulled down. In this case only a
+collect can be run:
+
+.. code-block:: bash
+
+ $ tox -e citest -- collect -n xenial --data-dir /tmp/collection
+
+The above command will run the collection tests on xenial and place
+all results into `/tmp/collection`.
+
+Verify
+------
+
+When developing tests it is much easier to simply rerun the verify scripts
+without the more lengthy collect process. This can be done by running:
+
+.. code-block:: bash
+
+ $ tox -e citest -- verify --data-dir /tmp/collection
+
+The above command will run the verify scripts on the data discovered in
+`/tmp/collection`.
+
+TreeRun and TreeCollect
+-----------------------
+
+If working on a cloud-init feature or resolving a bug, it may be useful to
+run the current copy of cloud-init in the integration testing environment.
+The integration testing suite can automatically build a deb based on the
+current working tree of cloud-init and run the test suite using this deb.
+
+The ``tree_run`` and ``tree_collect`` commands take the same arguments as
+the ``run`` and ``collect`` commands. These commands will build a deb and
+write it into a temporary file, then start the test suite and pass that deb
+in. To build a deb only, and not run the test suite, the ``bddeb`` command
+can be used.
+
+Note that code in the cloud-init working tree that has not been committed
+when the cloud-init deb is built will still be included. To build a
+cloud-init deb from, or run the ``tree_run`` command against, a copy of
+cloud-init located in a different directory, use the option
+``--cloud-init /path/to/cloud-init``.
+
+.. code-block:: bash
+
+ $ tox -e citest -- tree_run --verbose \
+ --os-name xenial --os-name stretch \
+ --test modules/final_message --test modules/write_files \
+ --result /tmp/result.yaml
+
+Bddeb
+-----
+
+The ``bddeb`` command can be used to generate a deb file. This is used by
+the tree_run and tree_collect commands to build a deb of the current
+working tree. It can also be used by a user to generate a deb for use in other
+situations and avoid needing to have all the build and test dependencies
+installed locally.
+
+* ``--bddeb-args``: arguments to pass through to bddeb
+* ``--build-os``: distribution to use as build system (default is xenial)
+* ``--build-platform``: platform to use for build system (default is lxd)
+* ``--cloud-init``: path to base of cloud-init tree (default is '.')
+* ``--deb``: path to write output deb to (default is '.')
+
+Setup Image
+-----------
+
+By default an image that is used will remain unmodified, but certain
+scenarios may require image modification. For example, many images may use
+a much older cloud-init. As a result, tests looking at newer functionality
+may fail because they require a newer version of cloud-init. The
+following options can be used for further customization:
+
+* ``--deb``: install the specified deb into the image
+* ``--rpm``: install the specified rpm into the image
+* ``--repo``: enable a repository and upgrade cloud-init afterwards
+* ``--ppa``: enable a ppa and upgrade cloud-init afterwards
+* ``--upgrade``: upgrade cloud-init from repos
+* ``--upgrade-full``: run a full system upgrade
+* ``--script``: execute a script in the image. This can perform any setup
+ required that is not covered by the other options
+
+Test Case Development
+=====================
+
+Overview
+--------
+
+As a test writer you need to develop a test configuration and a
+verification file:
* The test configuration specifies a specific cloud-config to be used by
cloud-init and a list of arbitrary commands to capture the output of
@@ -21,20 +193,28 @@ The names must match, however the extensions will of course be different,
yaml vs py.
Configuration
-=============
+-------------
The test configuration is a YAML file such as *ntp_server.yaml* below:
.. code-block:: yaml
#
- # NTP config using specific servers (ntp_server.yaml)
+ # Empty NTP config to setup using defaults
#
+ # NOTE: this should not require apt feature, use 'which' rather than 'dpkg -l'
+ # NOTE: this should not require no_ntpdate feature, use 'which' to check for
+ # installation rather than 'dpkg -l', as 'grep ntp' matches 'ntpdate'
+ # NOTE: the verifier should check for any ntp server not 'ubuntu.pool.ntp.org'
cloud_config: |
#cloud-config
ntp:
servers:
- pool.ntp.org
+ required_features:
+ - apt
+ - no_ntpdate
+ - ubuntu_ntp
collect_scripts:
ntp_installed_servers: |
#!/bin/bash
@@ -46,21 +226,30 @@ The test configuration is a YAML file such as *ntp_server.yaml* below:
#!/bin/bash
cat /etc/ntp.conf | grep '^server'
-
-There are two keys, 1 required and 1 optional, in the YAML file:
+There are several keys, 1 required and some optional, in the YAML file:
1. The required key is ``cloud_config``. This should be a string of valid
- YAML that is exactly what would normally be placed in a cloud-config file,
- including the cloud-config header. This essentially sets up the scenario
- under test.
+ YAML that is exactly what would normally be placed in a cloud-config
+ file, including the cloud-config header. This essentially sets up the
+ scenario under test.
-2. The optional key is ``collect_scripts``. This key has one or more
+2. One optional key is ``collect_scripts``. This key has one or more
sub-keys containing strings of arbitrary commands to execute (e.g.
```cat /var/log/cloud-config-output.log```). In the example above the
output of dpkg is captured, grep for ntp, and the number of lines
reported. The name of the sub-key is important. The sub-key is used by
the verification script to recall the output of the commands ran.
+3. The optional ``enabled`` key enables or disables the test case. By
+ default the test case will be enabled.
+
+4. The optional ``required_features`` key may be used to specify a list
+ of feature flags that an image must have to be able to run the test
+ case. For example, if a test case relies on an image supporting apt,
+ then the config for the test case should include ``required_features:
+ [ apt ]``.
+
+
Default Collect Scripts
-----------------------
@@ -75,51 +264,68 @@ no need to specify these items:
* ```dpkg-query -W -f='${Version}' cloud-init```
Verification
-============
+------------
The verification script is a Python file with unit tests like the one,
`ntp_server.py`, below:
.. code-block:: python
- """cloud-init Integration Test Verify Script (ntp_server.yaml)"""
+ # This file is part of cloud-init. See LICENSE file for license information.
+
+ """cloud-init Integration Test Verify Script"""
from tests.cloud_tests.testcases import base
- class TestNtpServers(base.CloudTestCase):
+ class TestNtp(base.CloudTestCase):
"""Test ntp module"""
def test_ntp_installed(self):
"""Test ntp installed"""
- out = self.get_data_file('ntp_installed_servers')
+ out = self.get_data_file('ntp_installed_empty')
self.assertEqual(1, int(out))
def test_ntp_dist_entries(self):
"""Test dist config file has one entry"""
- out = self.get_data_file('ntp_conf_dist_servers')
+ out = self.get_data_file('ntp_conf_dist_empty')
self.assertEqual(1, int(out))
def test_ntp_entires(self):
"""Test config entries"""
- out = self.get_data_file('ntp_conf_servers')
- self.assertIn('server pool.ntp.org iburst', out)
+ out = self.get_data_file('ntp_conf_empty')
+ self.assertIn('pool 0.ubuntu.pool.ntp.org iburst', out)
+ self.assertIn('pool 1.ubuntu.pool.ntp.org iburst', out)
+ self.assertIn('pool 2.ubuntu.pool.ntp.org iburst', out)
+ self.assertIn('pool 3.ubuntu.pool.ntp.org iburst', out)
+
+ # vi: ts=4 expandtab
Here is a breakdown of the unit test file:
* The import statement allows access to the output files.
-* The class can be named anything, but must import the ``base.CloudTestCase``
+* The class can be named anything, but must import the
+ ``base.CloudTestCase``, either directly or via another test class.
* There can be 1 to N number of functions with any name, however only
- tests starting with ``test_*`` will be executed.
+ functions starting with ``test_*`` will be executed.
+
+* There can be 1 to N number of classes in a test module, however only
+ classes inheriting from ``base.CloudTestCase`` will be loaded.
* Output from the commands can be accessed via
``self.get_data_file('key')`` where key is the sub-key of
``collect_scripts`` above.
+* The cloud config that the test ran with can be accessed via
+ ``self.cloud_config``, or any entry from the cloud config can be accessed
+ via ``self.get_config_entry('key')``.
+
+* See the base ``CloudTestCase`` for additional helper functions.
+
Layout
-======
+------
Integration tests are located under the `tests/cloud_tests` directory.
Test configurations are placed under `configs` and the test verification
@@ -144,126 +350,65 @@ The sub-folders of bugs, examples, main, and modules help organize the
tests. View the README.md in each to understand in more detail each
directory.
+Test Creation Helper
+--------------------
+
+The integration testing suite has a built-in helper to aid in test
+development. Help can be invoked via ``tox -e citest -- create --help``. It
+can create a template test case config file with user data passed in from
+the command line, as well as a template test case verifier module.
+
+The following would create a test case named ``example`` under the
+``modules`` category with the given description, and cloud config data read
+in from ``/tmp/user_data``.
+
+.. code-block:: bash
+
+ $ tox -e citest -- create modules/example \
+ -d "a simple example test case" -c "$(< /tmp/user_data)"
+
Development Checklist
-=====================
+---------------------
* Configuration File
- * Named 'your_test_here.yaml'
+ * Named 'your_test.yaml'
* Contains at least a valid cloud-config
* Optionally, commands to capture additional output
* Valid YAML
* Placed in the appropriate sub-folder in the configs directory
+ * Any image features required for the test are specified
* Verification File
- * Named 'your_test_here.py'
+ * Named 'your_test.py'
* Valid unit tests validating output collected
* Passes pylint & pep8 checks
- * Placed in the appropriate sub-folder in the testcsaes directory
+ * Placed in the appropriate sub-folder in the test cases directory
* Tested by running the test:
.. code-block:: bash
- $ python3 -m tests.cloud_tests run -v -n <release of choice> \
- --deb <build of cloud-init> \
- -t tests/cloud_tests/configs/<dir>/your_test_here.yaml
-
-
-Execution
-=========
-
-Executing tests has three options:
-
-* ``run`` an alias to run both ``collect`` and ``verify``
-
-* ``collect`` deploys on the specified platform and os, patches with the
- requested deb or rpm, and finally collects output of the arbitrary
- commands.
-
-* ``verify`` given a directory of test data, run the Python unit tests on
- it to generate results.
-
-Run
----
-The first example will provide a complete end-to-end run of data
-collection and verification. There are additional examples below
-explaining how to run one or the other independently.
-
-.. code-block:: bash
-
- $ git clone https://git.launchpad.net/cloud-init
- $ cd cloud-init
- $ python3 -m tests.cloud_tests run -v -n trusty -n xenial \
- --deb cloud-init_0.7.8~my_patch_all.deb
-
-The above command will do the following:
-
-* ``-v`` verbose output
-
-* ``run`` both collect output and run tests the output
-
-* ``-n trusty`` on the Ubuntu Trusty release
-
-* ``-n xenial`` on the Ubuntu Xenial release
-
-* ``--deb cloud-init_0.7.8~patch_all.deb`` use this deb as the version of
- cloud-init to run with
-
-For a more detailed explanation of each option see below.
-
-Collect
--------
-
-If developing tests it may be necessary to see if cloud-config works as
-expected and the correct files are pulled down. In this case only a
-collect can be ran by running:
-
-.. code-block:: bash
-
- $ python3 -m tests.cloud_tests collect -n xenial -d /tmp/collection \
- --deb cloud-init_0.7.8~my_patch_all.deb
-
-The above command will run the collection tests on xenial with the
-provided deb and place all results into `/tmp/collection`.
-
-Verify
-------
-
-When developing tests it is much easier to simply rerun the verify scripts
-without the more lengthy collect process. This can be done by running:
-
-.. code-block:: bash
-
- $ python3 -m tests.cloud_tests verify -d /tmp/collection
-
-The above command will run the verify scripts on the data discovered in
-`/tmp/collection`.
-
-Run via tox
------------
-In order to avoid the need for dependencies and ease the setup and
-configuration users can run the integration tests via tox:
-
-.. code-block:: bash
-
- $ tox -e citest -- run [integration test arguments]
- $ tox -e citest -- run -v -n zesty --deb=cloud-init_all.deb
- $ tox -e citest -- run -t module/user_groups.yaml
-
-Users need to invoke the citest enviornment and then pass any additional
-arguments.
-
+ $ tox -e citest -- run --verbose \
+ --os-name <release target> \
+ --test modules/your_test.yaml \
+ [--deb <build of cloud-init>]
Architecture
============
-The following outlines the process flow during a complete end-to-end LXD-backed test.
+The following section outlines the high-level architecture of the
+integration process.
+
+Overview
+--------
+The process flow during a complete end-to-end LXD-backed test is as follows:
1. Configuration
- * The back end and specific OS releases are verified as supported
- * The test or tests that need to be run are determined either by directory or by individual yaml
+ * The back end and specific distro releases are verified as supported
+ * The test or tests that need to be run are determined either by
+ directory or by individual yaml
2. Image Creation
- * Acquire the daily LXD image
+ * Acquire the requested LXD image
* Install the specified cloud-init package
* Clean the image so that it does not appear to have been booted
* A snapshot of the image is created and reused by all tests
@@ -285,5 +430,247 @@ The following outlines the process flow during a complete end-to-end LXD-backed
5. Results
* If any failures were detected the test suite returns a failure
+ * Results can be dumped in yaml format to a specified file using the
+ ``-r <result_file_name>.yaml`` option
+
+Configuring the Test Suite
+--------------------------
+
+Most of the behavior of the test suite is configurable through several yaml
+files. These control the behavior of the test suite's platforms, images, and
+tests. The main config files for platforms, images and test cases are
+``platforms.yaml``, ``releases.yaml`` and ``testcases.yaml``.
+Config handling
+^^^^^^^^^^^^^^^
+
+All configurable parts of the test suite use a defaults + overrides system
+for managing config entries. All base config items are dictionaries.
+
+Merging is done on a key-by-key basis, with all keys in the default and
+override represented in the final result. If a key exists both in
+the defaults and the overrides, then the behavior depends on the type of data
+the key refers to. If it is atomic data or a list, then the overrides will
+replace the default. If the data is a dictionary then the value will be the
+result of merging that dictionary from the default config and that
+dictionary from the overrides.
+
+Merging is done using the function
+``tests.cloud_tests.config.merge_config``, which can be examined for more
+detail on config merging behavior.
+
+The following demonstrates merge behavior:
+
+.. code-block:: yaml
+
+ defaults:
+ list_item:
+ - list_entry_1
+ - list_entry_2
+ int_item_1: 123
+ int_item_2: 234
+ dict_item:
+ subkey_1: 1
+ subkey_2: 2
+ subkey_dict:
+ subsubkey_1: a
+ subsubkey_2: b
+
+ overrides:
+ list_item:
+ - overridden_list_entry
+ int_item_1: 0
+ dict_item:
+ subkey_2: false
+ subkey_dict:
+ subsubkey_2: 'new value'
+
+ result:
+ list_item:
+ - overridden_list_entry
+ int_item_1: 0
+ int_item_2: 234
+ dict_item:
+ subkey_1: 1
+ subkey_2: false
+ subkey_dict:
+ subsubkey_1: a
+ subsubkey_2: 'new value'
+
+
+Image Config
+------------
+
+Image configuration is handled in ``releases.yaml``. The image configuration
+controls how platforms locate and acquire images, how the platforms should
+interact with the images, how platforms should detect when an image has
+fully booted, any options that are required to set the image up, and
+features that the image supports.
+
+Since settings for locating an image and interacting with it differ from
+platform to platform, there are 4 levels of settings available for images on
+top of the default image settings. The structure of the image config file
+is:
+
+.. code-block:: yaml
+
+ default_release_config:
+ default:
+ ...
+ <platform>:
+ ...
+ <platform>:
+ ...
+
+ releases:
+ <release name>:
+ <default>:
+ ...
+ <platform>:
+ ...
+ <platform>:
+ ...
+
+
+The base config is created from the overall defaults and the overrides for
+the platform. The overrides are created from the default config for the
+image and the platform specific overrides for the image.
+
+System Boot
+^^^^^^^^^^^
+
+The test suite must be able to test if a system has fully booted and if
+cloud-init has finished running, so that running collect scripts does not
+race against the target image booting. This is done using the
+``system_ready_script`` and ``cloud_init_ready_script`` image config keys.
+
+Each of these keys accepts a small bash test statement as a string that must
+return 0 or 1. Since this test statement will be added into a larger bash
+statement it must be a single statement using the ``[`` test syntax.
+
+The default image config provides a system ready script that works for any
+systemd based image. If the image is not systemd based, then a different
+test statement must be provided. The default config also provides a test
+for whether or not cloud-init has finished which checks for the file
+``/run/cloud-init/result.json``. This should be sufficient for most systems
+as writing this file is one of the last things cloud-init does.
+
+The setting ``boot_timeout`` controls how long, in seconds, the platform
+should wait for an image to boot. If the system ready script has not
+indicated that the system is fully booted within this time an error will be
+raised.
+
+Feature Flags
+^^^^^^^^^^^^^
+
+Not all test cases can work on all images due to features the test case
+requires not being present on that image. If a test case requires features
+in an image that are not likely to be present across all distros and
+platforms that the test suite supports, then the test can be skipped
+everywhere it is not supported.
+
+Feature flags are names for features supported on some images, but not
+all, that may be required by test cases. Configuration for feature flags
+is provided in ``releases.yaml`` under the ``features`` top level key. The
+features config includes a list of all currently defined feature flags,
+their meanings, and a list of feature groups.
+
+Feature groups are groups of features that many images have in common. For
+example, the ``Ubuntu_specific`` feature group includes features that
+should be present across most Ubuntu releases, but may or may not be for
+other distros. Feature groups are specified for an image as a list under
+the key ``feature_groups``.
+
+An image's feature flags are derived from the feature groups that the
+image has and any feature overrides provided. Feature overrides can be
+specified under the ``features`` key which accepts a dictionary of
+``{<feature_name>: true/false}`` mappings. If a feature is omitted from an
+image's feature flags or set to false in the overrides then the test suite
+will skip any tests that require that feature when using that image.
+
+Feature flags may be overridden at run time using the ``--feature-override``
+command line argument. It accepts a feature flag and value to set in the
+format ``<feature name>=true/false``. Multiple ``--feature-override``
+flags can be used, and will all be applied to all feature flags for images
+used during a test.
+
+Setup Overrides
+^^^^^^^^^^^^^^^
+
+If an image requires some of the options for image setup to be used, then it
+may specify overrides for the command line arguments passed into setup
+image. These may be specified as a dictionary under the ``setup_overrides``
+key. When an image is set up, the arguments that control how it is set up
+will be the arguments from the command line, with any entries in
+``setup_overrides`` used to override these arguments.
+
+For example, images that do not come with cloud-init already installed
+should have ``setup_overrides: {upgrade: true}`` specified so that in the
+event that no additional setup options are given, cloud-init will be
+installed from the image's repos before running tests. Note that if other
+options such as ``--deb`` are passed in on the command line, these will
+still work as expected, since apt's policy for cloud-init would prefer the
+locally installed deb over an older version from the repos.
+
+Platform Specific Options
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are many platform specific options in image configuration that allow
+platforms to locate images and that control additional setup that the
+platform may have to do to make the image usable. For information on how
+these work, please consult the documentation for that platform in the
+integration testing suite and the ``releases.yaml`` file for examples.
+
+Error Handling
+--------------
+
+The test suite makes an attempt to run as many tests as possible even in the
+event of some failing so that automated runs collect as much data as
+possible. In the event that something goes wrong while setting up for or
+running a test, the test suite will attempt to continue running any tests
+which have not been affected by the error.
+
+For example, if the test suite was told to run tests on one platform for two
+releases and an error occurred setting up the first image, all tests for
+that image would be skipped, and the test suite would continue to set up
+the second image and run tests on it. Or, if the system does not start
+properly for one test case out of many to run on that image, that test case
+will be skipped and the next one will be run.
+
+Note that if any errors occur, the test suite will record the failure and
+where it occurred in the result data and write it out to the specified
+result file.
+
+Results
+-------
+The test suite generates result data that includes how long each stage of
+the test suite took and which parts were and were not successful. This data
+is dumped to the log after the collect and verify stages, and may also be
+written out in yaml format to a file. If part of the setup failed, the
+traceback for the failure and the error message will be included in the
+result file. If a test verifier finds a problem with the collected data
+from a test run, the class, test function and test will be recorded in the
+result data.
+
+Exit Codes
+^^^^^^^^^^
+
+The test suite counts how many errors occur throughout a run. The exit code
+after a run is the number of errors that occurred. If the exit code is
+non-zero then something is wrong either with the test suite, the
+configuration for an image, a test case, or cloud-init itself.
+
+Note that the exit code does not always directly correspond to the number
+of failed test cases, since in some cases, a single error during image setup
+can mean that several test cases are not run. If ``run`` is used, the exit
+code will be the sum of the number of errors in the collect and verify
+stages.
+
+Data Dir
+^^^^^^^^
+
+When using run, the collected data is written into a temporary directory. In
+the event that all tests pass, this directory is deleted, but if a test
+fails or an error occurs, this data will be left in place, and a message
+will be written to the log giving the location of the data.
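
The defaults-plus-overrides behavior described under "Config handling" above can be sketched in a few lines of Python. This is an illustrative re-implementation under the semantics stated there, not the suite's own ``tests.cloud_tests.config.merge_config``:

    def merge_config(default, override):
        """Recursively merge two config dicts, with overrides winning."""
        result = dict(default)
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(result.get(key), dict):
                result[key] = merge_config(result[key], value)
            else:
                # Atomic values and lists replace the default outright.
                result[key] = value
        return result

    defaults = {'list_item': ['a', 'b'], 'dict_item': {'subkey_1': 1}}
    overrides = {'list_item': ['c'], 'dict_item': {'subkey_2': 2}}
    print(merge_config(defaults, overrides))
    # {'list_item': ['c'], 'dict_item': {'subkey_1': 1, 'subkey_2': 2}}
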
diff --git a/doc/rtd/topics/vendordata.rst b/doc/rtd/topics/vendordata.rst
index 2a94318e..cdb552d0 100644
--- a/doc/rtd/topics/vendordata.rst
+++ b/doc/rtd/topics/vendordata.rst
@@ -22,7 +22,7 @@ caveats:
Users providing cloud-config data can use the '#cloud-config-jsonp' method to
more finely control their modifications to the vendor supplied cloud-config.
-For example, if both vendor and user have provided 'runcnmd' then the default
+For example, if both vendor and user have provided 'runcmd' then the default
merge handler will cause the user's runcmd to override the one provided by the
vendor. To append to 'runcmd', the user could better provide multipart input
with a cloud-config-jsonp part like:
@@ -31,7 +31,7 @@ with a cloud-config-jsonp part like:
#cloud-config-jsonp
[{ "op": "add", "path": "/runcmd", "value": ["my", "command", "here"]}]
-
+
Further, we strongly advise vendors to not 'be evil'. By evil, we
mean any action that could compromise a system. Since users trust
you, please take care to make sure that any vendordata is safe,
diff --git a/packages/bddeb b/packages/bddeb
index f415209f..609a94fb 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -24,19 +24,6 @@ if "avoid-pep8-E402-import-not-top-of-file":
from cloudinit import templater
from cloudinit import util
-# Package names that will showup in requires which have unique package names.
-# Format is '<pypi-name>': {'<python_major_version>': <pkg_name_or_none>, ...}.
-NONSTD_NAMED_PACKAGES = {
- 'argparse': {'2': 'python-argparse', '3': None},
- 'contextlib2': {'2': 'python-contextlib2', '3': None},
- 'cheetah': {'2': 'python-cheetah', '3': None},
- 'pyserial': {'2': 'python-serial', '3': 'python3-serial'},
- 'pyyaml': {'2': 'python-yaml', '3': 'python3-yaml'},
- 'six': {'2': 'python-six', '3': 'python3-six'},
- 'pep8': {'2': 'pep8', '3': 'python3-pep8'},
- 'pyflakes': {'2': 'pyflakes', '3': 'pyflakes'},
-}
-
DEBUILD_ARGS = ["-S", "-d"]
@@ -59,7 +46,6 @@ def write_debian_folder(root, templ_data, is_python2, cloud_util_deps):
else:
pyver = "3"
python = "python3"
- pkgfmt = "{}-{}"
deb_dir = util.abs_join(root, 'debian')
@@ -74,30 +60,25 @@ def write_debian_folder(root, templ_data, is_python2, cloud_util_deps):
params=templ_data)
# Write out the control file template
- reqs = run_helper('read-dependencies').splitlines()
+ reqs_output = run_helper(
+ 'read-dependencies',
+ args=['--distro', 'debian', '--python-version', pyver])
+ reqs = reqs_output.splitlines()
test_reqs = run_helper(
- 'read-dependencies', ['test-requirements.txt']).splitlines()
-
- pypi_pkgs = [p.lower().strip() for p in reqs]
- pypi_test_pkgs = [p.lower().strip() for p in test_reqs]
+ 'read-dependencies',
+ ['--requirements-file', 'test-requirements.txt',
+ '--system-pkg-names', '--python-version', pyver]).splitlines()
- # Map to known packages
requires = ['cloud-utils | cloud-guest-utils'] if cloud_util_deps else []
- test_requires = []
- lists = ((pypi_pkgs, requires), (pypi_test_pkgs, test_requires))
- for pypilist, target in lists:
- for p in pypilist:
- if p in NONSTD_NAMED_PACKAGES:
- if NONSTD_NAMED_PACKAGES[p][pyver]:
- target.append(NONSTD_NAMED_PACKAGES[p][pyver])
- else: # Then standard package prefix
- target.append(pkgfmt.format(python, p))
-
+ # We consolidate all deps as Build-Depends as our package build runs all
+ # tests so we need all runtime dependencies anyway.
+    # NOTE: the python package is moved to the front because debuild -S
+    # would otherwise fail with 'Please add appropriate interpreter'
+    # errors (as in debian bug 861132)
+ requires.extend([python] + reqs + test_reqs)
templater.render_to_file(util.abs_join(find_root(),
'packages', 'debian', 'control.in'),
util.abs_join(deb_dir, 'control'),
- params={'requires': ','.join(requires),
- 'test_requires': ','.join(test_requires),
+ params={'build_depends': ','.join(requires),
'python': python})
templater.render_to_file(util.abs_join(find_root(),
diff --git a/packages/brpm b/packages/brpm
index 89696ab8..3439cf35 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -27,17 +27,6 @@ if "avoid-pep8-E402-import-not-top-of-file":
from cloudinit import templater
from cloudinit import util
-# Map python requirements to package names. If a match isn't found
-# here, we assume 'python-<pypi_name>'.
-PACKAGE_MAP = {
- 'redhat': {
- 'pyserial': 'pyserial',
- 'pyyaml': 'PyYAML',
- },
- 'suse': {
- 'pyyaml': 'python-yaml',
- }
-}
# Subdirectories of the ~/rpmbuild dir
RPM_BUILD_SUBDIRS = ['BUILD', 'RPMS', 'SOURCES', 'SPECS', 'SRPMS']
@@ -53,23 +42,18 @@ def run_helper(helper, args=None, strip=True):
return stdout
-def read_dependencies():
- '''Returns the Python depedencies from requirements.txt. This explicitly
- removes 'argparse' from the list of requirements for python >= 2.7,
- because with 2.7 argparse became part of the standard library.'''
- stdout = run_helper('read-dependencies')
- return [p.lower().strip() for p in stdout.splitlines()
- if p != 'argparse' or (p == 'argparse' and
- sys.version_info[0:2] < (2, 7))]
+def read_dependencies(requirements_file='requirements.txt'):
+ """Returns the Python package depedencies from requirements.txt files.
-
-def translate_dependencies(deps, distro):
- '''Maps python requirements into package names. We assume
- python-<pypi_name> for packages not listed explicitly in
- PACKAGE_MAP.'''
- return [PACKAGE_MAP[distro][req]
- if req in PACKAGE_MAP[distro] else 'python-%s' % req
- for req in deps]
+ @returns a tuple of (requirements, test_requirements)
+ """
+ pkg_deps = run_helper(
+ 'read-dependencies', args=['--distro', 'redhat']).splitlines()
+ test_deps = run_helper(
+ 'read-dependencies', args=[
+ '--requirements-file', 'test-requirements.txt',
+ '--system-pkg-names']).splitlines()
+ return (pkg_deps, test_deps)
def read_version():
@@ -99,10 +83,9 @@ def generate_spec_contents(args, version_data, tmpl_fn, top_dir, arc_fn):
rpm_upstream_version = version_data['version']
subs['rpm_upstream_version'] = rpm_upstream_version
- # Map to known packages
- python_deps = read_dependencies()
- package_deps = translate_dependencies(python_deps, args.distro)
- subs['requires'] = package_deps
+ deps, test_deps = read_dependencies()
+ subs['buildrequires'] = deps + test_deps
+ subs['requires'] = deps
if args.boot == 'sysvinit':
subs['sysvinit'] = True
diff --git a/packages/debian/control.in b/packages/debian/control.in
index 6c39d531..265b261f 100644
--- a/packages/debian/control.in
+++ b/packages/debian/control.in
@@ -3,20 +3,13 @@ Source: cloud-init
Section: admin
Priority: optional
Maintainer: Scott Moser <smoser@ubuntu.com>
-Build-Depends: debhelper (>= 9),
- dh-python,
- dh-systemd,
- ${python},
- ${test_requires},
- ${requires}
+Build-Depends: ${build_depends}
XS-Python-Version: all
Standards-Version: 3.9.6
Package: cloud-init
Architecture: all
-Depends: procps,
- ${python},
- ${misc:Depends},
+Depends: ${misc:Depends},
${${python}:Depends}
Recommends: eatmydata, sudo, software-properties-common, gdisk
XB-Python-Version: ${python:Versions}
diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json
new file mode 100644
index 00000000..822d29d9
--- /dev/null
+++ b/packages/pkg-deps.json
@@ -0,0 +1,88 @@
+{
+ "debian" : {
+ "build-requires" : [
+ "debhelper",
+ "dh-python",
+ "dh-systemd"
+ ],
+ "renames" : {
+ "pyyaml" : {
+ "2" : "python-yaml",
+ "3" : "python3-yaml"
+ },
+ "contextlib2" : {
+ "2" : "python-contextlib2"
+ },
+ "pyserial" : {
+ "2" : "python-serial",
+ "3" : "python3-serial"
+ }
+ },
+ "requires" : [
+ "procps"
+ ]
+ },
+ "redhat" : {
+ "build-requires" : [
+ "python-devel",
+ "python-setuptools"
+ ],
+ "renames" : {
+ "jinja2" : {
+ "3" : "python34-jinja2"
+ },
+ "jsonschema" : {
+ "3" : "python34-jsonschema"
+ },
+ "prettytable" : {
+ "3" : "python34-prettytable"
+ },
+ "pyflakes" : {
+ "2" : "pyflakes",
+ "3" : "python34-pyflakes"
+ },
+ "pyyaml" : {
+ "2" : "PyYAML",
+ "3" : "python34-PyYAML"
+ },
+ "pyserial" : {
+ "2" : "pyserial"
+ },
+ "requests" : {
+ "3" : "python34-requests"
+ },
+ "six" : {
+ "3" : "python34-six"
+ }
+ },
+ "requires" : [
+ "e2fsprogs",
+ "iproute",
+ "net-tools",
+ "procps",
+ "rsyslog",
+ "shadow-utils",
+ "sudo"
+ ]
+ },
+ "suse" : {
+ "renames" : {
+ "pyyaml" : {
+ "2" : "python-yaml"
+ }
+ },
+ "build-requires" : [
+ "fdupes",
+ "filesystem",
+ "python-devel",
+ "python-setuptools"
+ ],
+ "requires" : [
+ "iproute2",
+ "e2fsprogs",
+ "net-tools",
+ "procps",
+ "sudo"
+ ]
+ }
+}
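A minimal sketch of the lookup a helper such as tools/read-dependencies
might perform against this map (illustrative only; the fallback naming
convention below is assumed from the renamed packages above, and rename
entries that omit a python version, e.g. contextlib2, are not handled):

    import json

    def rename_pkg(deps_config, distro, pypi_name, pyver='3'):
        """Map a pypi requirement to a distro package name, falling
        back to the conventional python[3]-<name> prefix when no
        explicit rename is listed for the distro."""
        renames = deps_config[distro].get('renames', {})
        rename = renames.get(pypi_name, {}).get(pyver)
        if rename:
            return rename
        prefix = 'python3' if pyver == '3' else 'python'
        return '{}-{}'.format(prefix, pypi_name)

    with open('packages/pkg-deps.json') as stream:
        cfg = json.load(stream)
    print(rename_pkg(cfg, 'redhat', 'pyyaml', '3'))    # python34-PyYAML
    print(rename_pkg(cfg, 'debian', 'requests', '3'))  # python3-requests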
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index fd3cf938..d995b85f 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -1,62 +1,84 @@
-## template: cheetah
+## template: jinja
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
+%define use_systemd (0%{?fedora} && 0%{?fedora} >= 18) || (0%{?rhel} && 0%{?rhel} >= 7)
+
+%if %{use_systemd}
+%define init_system systemd
+%else
+%define init_system sysvinit
+%endif
+
# See: http://www.zarb.org/~jasonc/macros.php
# Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets
# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
Name: cloud-init
-Version: ${rpm_upstream_version}
-Release: 1${subrelease}%{?dist}
+Version: {{rpm_upstream_version}}
+Release: 1{{subrelease}}%{?dist}
Summary: Cloud instance init scripts
Group: System Environment/Base
License: Dual-licesed GPLv3 or Apache 2.0
URL: http://launchpad.net/cloud-init
-Source0: ${archive_name}
+Source0: {{archive_name}}
BuildArch: noarch
BuildRoot: %{_tmppath}
-BuildRequires: python-devel
-BuildRequires: python-setuptools
-BuildRequires: python-cheetah
+%if "%{?el6}" == "1"
+BuildRequires: python-argparse
+%endif
+%if %{use_systemd}
+Requires: systemd
+BuildRequires: systemd
+Requires: systemd-units
+BuildRequires: systemd-units
+%else
+Requires: initscripts >= 8.36
+Requires(postun): initscripts
+Requires(post): chkconfig
+Requires(preun): chkconfig
+%endif
+
+# These are runtime dependencies, but declared as BuildRequires so that
+# - tests can be run here.
+# - parts of cloud-init (such as setup.py) use these dependencies.
+{% for r in requires %}
+BuildRequires: {{r}}
+{% endfor %}
# System util packages needed
-Requires: shadow-utils
-Requires: rsyslog
-Requires: iproute
-Requires: e2fsprogs
-Requires: net-tools
-Requires: procps
-Requires: shadow-utils
-Requires: sudo >= 1.7.2p2-3
-
-# Install pypi 'dynamic' requirements
-#for $r in $requires
-Requires: ${r}
-#end for
+%ifarch %{?ix86} x86_64 ia64
+Requires: dmidecode
+%endif
+
+# python2.6 needs argparse
+%if "%{?el6}" == "1"
+Requires: python-argparse
+%endif
-# Custom patches
-#set $size = 0
-#for $p in $patches
-Patch${size}: $p
-#set $size += 1
-#end for
-#if $sysvinit
+# Install 'dynamic' runtime reqs from *requirements.txt and pkg-deps.json
+{% for r in requires %}
+Requires: {{r}}
+{% endfor %}
+
+# Custom patches
+{% for p in patches %}
+Patch{{loop.index0}}: {{p}}
+{% endfor %}
+
+%if "%{init_system}" == "systemd"
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+%else
Requires(post): chkconfig
Requires(postun): initscripts
Requires(preun): chkconfig
Requires(preun): initscripts
-#end if
-
-#if $systemd
-BuildRequires: systemd-units
-Requires(post): systemd-units
-Requires(postun): systemd-units
-Requires(preun): systemd-units
-#end if
+%endif
%description
Cloud-init is a set of init scripts for cloud instances. Cloud instances
@@ -64,14 +86,12 @@ need special scripts to run during initialization to retrieve and install
ssh keys and to let the user run various scripts.
%prep
-%setup -q -n ${source_name}
+%setup -q -n {{source_name}}
# Custom patches activation
-#set $size = 0
-#for $p in $patches
-%patch${size} -p1
-#set $size += 1
-#end for
+{% for p in patches %}
+%patch{{loop.index0}} -p1
+{% endfor %}
%build
%{__python} setup.py build
@@ -79,53 +99,60 @@ ssh keys and to let the user run various scripts.
%install
%{__python} setup.py install -O1 \
- --skip-build --root \$RPM_BUILD_ROOT \
- --init-system=${init_sys}
+ --skip-build --root $RPM_BUILD_ROOT \
+ --init-system=%{init_system}
# Note that /etc/rsyslog.d didn't exist by default until F15.
# el6 request: https://bugzilla.redhat.com/show_bug.cgi?id=740420
-mkdir -p \$RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d
+mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d
cp -p tools/21-cloudinit.conf \
- \$RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf
+ $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf
# Remove the tests
-rm -rf \$RPM_BUILD_ROOT%{python_sitelib}/tests
+rm -rf $RPM_BUILD_ROOT%{python_sitelib}/tests
# Required dirs...
-mkdir -p \$RPM_BUILD_ROOT/%{_sharedstatedir}/cloud
-mkdir -p \$RPM_BUILD_ROOT/%{_libexecdir}/%{name}
+mkdir -p $RPM_BUILD_ROOT/%{_sharedstatedir}/cloud
+mkdir -p $RPM_BUILD_ROOT/%{_libexecdir}/%{name}
+
+# LP: #1691489: Remove systemd-fsck dropin (currently not expected to work)
+%if "%{init_system}" == "systemd"
+rm $RPM_BUILD_ROOT/usr/lib/systemd/system/systemd-fsck@.service.d/cloud-init.conf
+%endif
-#if $systemd
-mkdir -p \$RPM_BUILD_ROOT/%{_unitdir}
-cp -p systemd/* \$RPM_BUILD_ROOT/%{_unitdir}
-#end if
%clean
-rm -rf \$RPM_BUILD_ROOT
+rm -rf $RPM_BUILD_ROOT
%post
-#if $systemd
-if [ \$1 -eq 1 ]
+%if "%{init_system}" == "systemd"
+if [ $1 -eq 1 ]
then
/bin/systemctl enable cloud-config.service >/dev/null 2>&1 || :
/bin/systemctl enable cloud-final.service >/dev/null 2>&1 || :
/bin/systemctl enable cloud-init.service >/dev/null 2>&1 || :
/bin/systemctl enable cloud-init-local.service >/dev/null 2>&1 || :
fi
-#end if
-
-#if $sysvinit
+%else
/sbin/chkconfig --add %{_initrddir}/cloud-init-local
/sbin/chkconfig --add %{_initrddir}/cloud-init
/sbin/chkconfig --add %{_initrddir}/cloud-config
/sbin/chkconfig --add %{_initrddir}/cloud-final
-#end if
+%endif
%preun
-#if $sysvinit
-if [ \$1 -eq 0 ]
+%if "%{init_system}" == "systemd"
+if [ $1 -eq 0 ]
+then
+ /bin/systemctl --no-reload disable cloud-config.service >/dev/null 2>&1 || :
+ /bin/systemctl --no-reload disable cloud-final.service >/dev/null 2>&1 || :
+ /bin/systemctl --no-reload disable cloud-init.service >/dev/null 2>&1 || :
+ /bin/systemctl --no-reload disable cloud-init-local.service >/dev/null 2>&1 || :
+fi
+%else
+if [ $1 -eq 0 ]
then
/sbin/service cloud-init stop >/dev/null 2>&1 || :
/sbin/chkconfig --del cloud-init || :
@@ -136,40 +163,27 @@ then
/sbin/service cloud-final stop >/dev/null 2>&1 || :
/sbin/chkconfig --del cloud-final || :
fi
-#end if
-
-#if $systemd
-if [ \$1 -eq 0 ]
-then
- /bin/systemctl --no-reload disable cloud-config.service >/dev/null 2>&1 || :
- /bin/systemctl --no-reload disable cloud-final.service >/dev/null 2>&1 || :
- /bin/systemctl --no-reload disable cloud-init.service >/dev/null 2>&1 || :
- /bin/systemctl --no-reload disable cloud-init-local.service >/dev/null 2>&1 || :
-fi
-#end if
+%endif
%postun
-#if $systemd
+%if "%{init_system}" == "systemd"
/bin/systemctl daemon-reload >/dev/null 2>&1 || :
-#end if
+%endif
%files
/lib/udev/rules.d/66-azure-ephemeral.rules
-#if $sysvinit
+%if "%{init_system}" == "systemd"
+/usr/lib/systemd/system-generators/cloud-init-generator
+%{_unitdir}/cloud-*
+%else
%attr(0755, root, root) %{_initddir}/cloud-config
%attr(0755, root, root) %{_initddir}/cloud-final
%attr(0755, root, root) %{_initddir}/cloud-init-local
%attr(0755, root, root) %{_initddir}/cloud-init
-#end if
-
-#if $systemd
-/usr/lib/systemd/system-generators/cloud-init-generator
-%{_unitdir}/cloud-*
-%{_unitdir}/cloud-*
-#end if
+%endif
%{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager
%{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient
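The cheetah '#for'/'#end for' loops above become jinja '{% for %}' blocks,
with 'loop.index0' replacing the manual '$size' counter. A minimal
standalone sketch of the patch-numbering loop (assuming the jinja2
package is installed; at build time the rendering actually happens via
cloudinit.templater):

    from jinja2 import Template

    snippet = (
        '{% for p in patches %}\n'
        'Patch{{loop.index0}}: {{p}}\n'
        '{% endfor %}')
    print(Template(snippet, trim_blocks=True).render(
        patches=['0001-fix.patch', '0002-other.patch']))
    # Patch0: 0001-fix.patch
    # Patch1: 0002-other.patch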
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
index 6ce0be8c..86e18b1b 100644
--- a/packages/suse/cloud-init.spec.in
+++ b/packages/suse/cloud-init.spec.in
@@ -1,19 +1,19 @@
-## template: cheetah
+## template: jinja
# See: http://www.zarb.org/~jasonc/macros.php
# Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets
# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
Name: cloud-init
-Version: ${version}
-Release: 1${subrelease}%{?dist}
+Version: {{version}}
+Release: 1{{subrelease}}%{?dist}
Summary: Cloud instance init scripts
Group: System/Management
License: Dual licensed GPLv3 or Apache 2.0
URL: http://launchpad.net/cloud-init
-Source0: ${archive_name}
+Source0: {{archive_name}}
BuildRoot: %{_tmppath}/%{name}-%{version}-build
%if 0%{?suse_version} && 0%{?suse_version} <= 1110
@@ -22,11 +22,9 @@ BuildRoot: %{_tmppath}/%{name}-%{version}-build
BuildArch: noarch
%endif
-BuildRequires: fdupes
-BuildRequires: filesystem
-BuildRequires: python-devel
-BuildRequires: python-setuptools
-BuildRequires: python-cheetah
+{% for r in buildrequires %}
+BuildRequires: {{r}}
+{% endfor %}
%if 0%{?suse_version} && 0%{?suse_version} <= 1210
%define initsys sysvinit
@@ -34,24 +32,15 @@ BuildRequires: python-cheetah
%define initsys systemd
%endif
-# System util packages needed
-Requires: iproute2
-Requires: e2fsprogs
-Requires: net-tools
-Requires: procps
-Requires: sudo
-
# Install pypi 'dynamic' requirements
-#for $r in $requires
-Requires: ${r}
-#end for
+{% for r in requires %}
+Requires: {{r}}
+{% endfor %}
# Custom patches
-#set $size = 0
-#for $p in $patches
-Patch${size}: $p
-#set $size += 1
-#end for
+{% for p in patches %}
+Patch{{loop.index0}}: {{p}}
+{% endfor %}
%description
Cloud-init is a set of init scripts for cloud instances. Cloud instances
@@ -59,14 +48,13 @@ need special scripts to run during initialization to retrieve and install
ssh keys and to let the user run various scripts.
%prep
-%setup -q -n ${source_name}
+%setup -q -n {{source_name}}
# Custom patches activation
-#set $size = 0
-#for $p in $patches
-%patch${size} -p1
-#set $size += 1
-#end for
+{% for p in patches %}
+%patch{{loop.index0}} -p1
+{% endfor %}
%build
%{__python} setup.py build
@@ -95,7 +83,7 @@ rm -r %{buildroot}/%{python_sitelib}/tests
mkdir -p %{buildroot}/%{_sbindir}
pushd %{buildroot}/%{_initddir}
for file in * ; do
- ln -s %{_initddir}/\${file} %{buildroot}/%{_sbindir}/rc\${file}
+ ln -s %{_initddir}/${file} %{buildroot}/%{_sbindir}/rc${file}
done
popd
%endif
@@ -104,7 +92,7 @@ rm -r %{buildroot}/%{python_sitelib}/tests
mkdir -p %{buildroot}/%{_defaultdocdir}
mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir}
for doc in TODO LICENSE ChangeLog requirements.txt; do
- cp \${doc} %{buildroot}/%{_defaultdocdir}/cloud-init
+ cp ${doc} %{buildroot}/%{_defaultdocdir}/cloud-init
done
# Remove duplicate files
diff --git a/requirements.txt b/requirements.txt
index 0c4951f5..61d1e90b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -27,14 +27,14 @@ configobj>=5.0.2
# All new style configurations are in the yaml format
pyyaml
-# The new main entrypoint uses argparse instead of optparse
-argparse
-
# Requests handles ssl correctly!
requests
# For patching pieces of cloud-config together
jsonpatch
+# For validating cloud-config sections per schema definitions
+jsonschema
+
# For Python 2/3 compatibility
six
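The new jsonschema requirement backs the cloud-config schema validation
added in cloudinit/config/schema.py (see the file list at the top of this
diff). A minimal standalone sketch of the kind of check it enables (the
schema below is made up for illustration):

    from jsonschema import ValidationError, validate

    schema = {'type': 'object',
              'properties': {'ntp': {'type': 'object'}}}
    try:
        validate({'ntp': ['not', 'an', 'object']}, schema)
    except ValidationError as e:
        print('invalid cloud-config: %s' % e.message)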
diff --git a/setup.py b/setup.py
index 4616599b..5c65c7fe 100755
--- a/setup.py
+++ b/setup.py
@@ -10,8 +10,11 @@
from glob import glob
+import atexit
import os
+import shutil
import sys
+import tempfile
import setuptools
from setuptools.command.install import install
@@ -53,47 +56,15 @@ def pkg_config_read(library, var):
cmd = ['pkg-config', '--variable=%s' % var, library]
try:
(path, err) = tiny_p(cmd)
+ path = path.strip()
except Exception:
- return fallbacks[library][var]
- return str(path).strip()
+ path = fallbacks[library][var]
+ if path.startswith("/"):
+ path = path[1:]
-
-INITSYS_FILES = {
- 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
- 'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)],
- 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],
- 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)],
- 'systemd': [f for f in (glob('systemd/*.service') +
- glob('systemd/*.target')) if is_f(f)],
- 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)],
- 'upstart': [f for f in glob('upstart/*') if is_f(f)],
-}
-INITSYS_ROOTS = {
- 'sysvinit': '/etc/rc.d/init.d',
- 'sysvinit_freebsd': '/usr/local/etc/rc.d',
- 'sysvinit_deb': '/etc/init.d',
- 'sysvinit_openrc': '/etc/init.d',
- 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'),
- 'systemd.generators': pkg_config_read('systemd',
- 'systemdsystemgeneratordir'),
- 'upstart': '/etc/init/',
-}
-INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()])
-
-# Install everything in the right location and take care of Linux (default) and
-# FreeBSD systems.
-USR = "/usr"
-ETC = "/etc"
-USR_LIB_EXEC = "/usr/lib"
-LIB = "/lib"
-if os.uname()[0] == 'FreeBSD':
- USR = "/usr/local"
- USR_LIB_EXEC = "/usr/local/lib"
-elif os.path.isfile('/etc/redhat-release'):
- USR_LIB_EXEC = "/usr/libexec"
+ return path
-# Avoid having datafiles installed in a virtualenv...
def in_virtualenv():
try:
if sys.real_prefix == sys.prefix:
@@ -116,6 +87,77 @@ def read_requires():
return str(deps).splitlines()
+def render_tmpl(template):
+ """render template into a tmpdir under same dir as setup.py
+
+    This is rendered to a temporary directory under the top level
+    directory with the name 'cloud.cfg'. The reasons for not rendering
+    directly to config/cloud.cfg are: a.) we do not want to write over
+    any contents a user may already have there, and b.) debuild will
+    complain that files differ outside of the debian directory."""
+
+ # older versions of tox use bdist (xenial), and then install from there.
+ # newer versions just use install.
+    if not (sys.argv[1] == 'install' or sys.argv[1].startswith('bdist')):
+ return template
+
+ tmpl_ext = ".tmpl"
+ # we may get passed a non-template file, just pass it back
+ if not template.endswith(tmpl_ext):
+ return template
+
+ topdir = os.path.dirname(sys.argv[0])
+ tmpd = tempfile.mkdtemp(dir=topdir)
+ atexit.register(shutil.rmtree, tmpd)
+ bname = os.path.basename(template).rstrip(tmpl_ext)
+ fpath = os.path.join(tmpd, bname)
+ tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
+ # return path relative to setup.py
+ return os.path.join(os.path.basename(tmpd), bname)
+
+
+INITSYS_FILES = {
+ 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
+ 'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)],
+ 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],
+ 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)],
+ 'systemd': [render_tmpl(f)
+ for f in (glob('systemd/*.tmpl') +
+ glob('systemd/*.service') +
+ glob('systemd/*.target')) if is_f(f)],
+ 'systemd.fsck-dropin': ['systemd/systemd-fsck@.service.d/cloud-init.conf'],
+ 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)],
+ 'upstart': [f for f in glob('upstart/*') if is_f(f)],
+}
+INITSYS_ROOTS = {
+ 'sysvinit': 'etc/rc.d/init.d',
+ 'sysvinit_freebsd': 'usr/local/etc/rc.d',
+ 'sysvinit_deb': 'etc/init.d',
+ 'sysvinit_openrc': 'etc/init.d',
+ 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'),
+ 'systemd.fsck-dropin': (
+ os.path.sep.join([pkg_config_read('systemd', 'systemdsystemunitdir'),
+ 'systemd-fsck@.service.d'])),
+ 'systemd.generators': pkg_config_read('systemd',
+ 'systemdsystemgeneratordir'),
+ 'upstart': 'etc/init/',
+}
+INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()])
+
+
+# Install everything in the right location and take care of Linux (default) and
+# FreeBSD systems.
+USR = "usr"
+ETC = "etc"
+USR_LIB_EXEC = "usr/lib"
+LIB = "lib"
+if os.uname()[0] == 'FreeBSD':
+ USR = "usr/local"
+ USR_LIB_EXEC = "usr/local/lib"
+elif os.path.isfile('/etc/redhat-release'):
+ USR_LIB_EXEC = "usr/libexec"
+
+
# TODO: Is there a better way to do this??
class InitsysInstallData(install):
init_system = None
@@ -155,40 +197,41 @@ class InitsysInstallData(install):
self.distribution.reinitialize_command('install_data', True)
-if in_virtualenv():
- data_files = []
- cmdclass = {}
-else:
- data_files = [
- (ETC + '/cloud', glob('config/*.cfg')),
- (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
- (ETC + '/cloud/templates', glob('templates/*')),
- (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify',
- 'tools/uncloud-init',
- 'tools/write-ssh-key-fingerprints']),
- (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]),
- (USR + '/share/doc/cloud-init/examples',
- [f for f in glob('doc/examples/*') if is_f(f)]),
- (USR + '/share/doc/cloud-init/examples/seed',
- [f for f in glob('doc/examples/seed/*') if is_f(f)]),
- ]
- if os.uname()[0] != 'FreeBSD':
- data_files.extend([
- (ETC + '/NetworkManager/dispatcher.d/',
- ['tools/hook-network-manager']),
- (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']),
- (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')])
- ])
- # Use a subclass for install that handles
- # adding on the right init system configuration files
- cmdclass = {
- 'install': InitsysInstallData,
- }
-
+if not in_virtualenv():
+ USR = "/" + USR
+ ETC = "/" + ETC
+ USR_LIB_EXEC = "/" + USR_LIB_EXEC
+ LIB = "/" + LIB
+ for k in INITSYS_ROOTS.keys():
+ INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k]
+
+data_files = [
+ (ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]),
+ (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
+ (ETC + '/cloud/templates', glob('templates/*')),
+ (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify',
+ 'tools/uncloud-init',
+ 'tools/write-ssh-key-fingerprints']),
+ (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]),
+ (USR + '/share/doc/cloud-init/examples',
+ [f for f in glob('doc/examples/*') if is_f(f)]),
+ (USR + '/share/doc/cloud-init/examples/seed',
+ [f for f in glob('doc/examples/seed/*') if is_f(f)]),
+]
+if os.uname()[0] != 'FreeBSD':
+ data_files.extend([
+ (ETC + '/NetworkManager/dispatcher.d/',
+ ['tools/hook-network-manager']),
+ (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']),
+ (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')])
+ ])
+# Use a subclass for install that handles
+# adding on the right init system configuration files
+cmdclass = {
+ 'install': InitsysInstallData,
+}
requirements = read_requires()
-if sys.version_info < (3,):
- requirements.append('cheetah')
setuptools.setup(
name='cloud-init',
@@ -197,7 +240,7 @@ setuptools.setup(
author='Scott Moser',
author_email='scott.moser@canonical.com',
url='http://launchpad.net/cloud-init/',
- packages=setuptools.find_packages(exclude=['tests']),
+ packages=setuptools.find_packages(exclude=['tests.*', '*.tests', 'tests']),
scripts=['tools/cloud-init-per'],
license='Dual-licensed under GPLv3 or Apache 2.0',
data_files=data_files,
diff --git a/snapcraft.yaml b/snapcraft.yaml
index 24e8e74d..8f07592e 100644
--- a/snapcraft.yaml
+++ b/snapcraft.yaml
@@ -2,7 +2,7 @@ name: cloud-init
version: master
summary: Init scripts for cloud instances
description: |
- Cloud instances need special scripts to run during initialisation to
+ Cloud instances need special scripts to run during initialization to
retrieve and install ssh keys and to let the user run various scripts.
grade: stable
@@ -12,7 +12,6 @@ apps:
cloud-init:
# LP: #1669306
command: usr/bin/python3 $SNAP/bin/cloud-init
- plugs: [network]
parts:
cloud-init:
diff --git a/systemd/cloud-config.service b/systemd/cloud-config.service.tmpl
index 3309e08a..bdee3ce0 100644
--- a/systemd/cloud-config.service
+++ b/systemd/cloud-config.service.tmpl
@@ -1,3 +1,4 @@
+## template:jinja
[Unit]
Description=Apply the settings specified in cloud-config
After=network-online.target cloud-config.target
diff --git a/systemd/cloud-final.service b/systemd/cloud-final.service.tmpl
index b8f69b78..fc01b891 100644
--- a/systemd/cloud-final.service
+++ b/systemd/cloud-final.service.tmpl
@@ -1,7 +1,12 @@
+## template:jinja
[Unit]
Description=Execute cloud user/final scripts
-After=network-online.target cloud-config.service rc-local.service multi-user.target
+After=network-online.target cloud-config.service rc-local.service
+{% if variant in ["ubuntu", "unknown", "debian"] %}
+After=multi-user.target
+{% endif %}
Wants=network-online.target cloud-config.service
+Before=apt-daily.service
[Service]
Type=oneshot
diff --git a/systemd/cloud-init-local.service b/systemd/cloud-init-local.service.tmpl
index 7ee43eda..ff9c644d 100644
--- a/systemd/cloud-init-local.service
+++ b/systemd/cloud-init-local.service.tmpl
@@ -1,13 +1,18 @@
+## template:jinja
[Unit]
Description=Initial cloud-init job (pre-networking)
+{% if variant in ["ubuntu", "unknown", "debian"] %}
DefaultDependencies=no
+{% endif %}
Wants=network-pre.target
After=systemd-remount-fs.service
Before=NetworkManager.service
Before=network-pre.target
Before=shutdown.target
+{% if variant in ["ubuntu", "unknown", "debian"] %}
Before=sysinit.target
Conflicts=shutdown.target
+{% endif %}
RequiresMountsFor=/var/lib/cloud
[Service]
diff --git a/systemd/cloud-init.service b/systemd/cloud-init.service.tmpl
index 39acc20a..2c71889d 100644
--- a/systemd/cloud-init.service
+++ b/systemd/cloud-init.service.tmpl
@@ -1,3 +1,4 @@
+## template:jinja
[Unit]
Description=Initial cloud-init job (metadata service crawler)
DefaultDependencies=no
@@ -6,13 +7,20 @@ Wants=sshd-keygen.service
Wants=sshd.service
After=cloud-init-local.service
After=systemd-networkd-wait-online.service
+{% if variant in ["ubuntu", "unknown", "debian"] %}
After=networking.service
+{% endif %}
+{% if variant in ["centos", "fedora", "redhat"] %}
+After=network.service
+{% endif %}
Before=network-online.target
Before=sshd-keygen.service
Before=sshd.service
+{% if variant in ["ubuntu", "unknown", "debian"] %}
Before=sysinit.target
-Before=systemd-user-sessions.service
Conflicts=shutdown.target
+{% endif %}
+Before=systemd-user-sessions.service
[Service]
Type=oneshot
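The 'variant' conditionals above let one unit template serve several
distros. A minimal sketch of how such a template specializes (assuming
jinja2; in practice the rendering happens via cloudinit.templater when
packages are built):

    from jinja2 import Template

    unit = (
        '{% if variant in ["ubuntu", "unknown", "debian"] %}'
        'After=networking.service\n'
        '{% endif %}'
        '{% if variant in ["centos", "fedora", "redhat"] %}'
        'After=network.service\n'
        '{% endif %}')
    # only the centos/fedora/redhat ordering dependency is emitted
    print(Template(unit).render(variant='centos'))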
diff --git a/systemd/cloud-init.target b/systemd/cloud-init.target
index d5684582..083c3b6f 100644
--- a/systemd/cloud-init.target
+++ b/systemd/cloud-init.target
@@ -1,6 +1,6 @@
# cloud-init target is enabled by cloud-init-generator
# To disable it you can either:
-# a.) boot with kernel cmdline of 'cloudinit=disabled'
+# a.) boot with kernel cmdline of 'cloud-init=disabled'
# b.) touch a file /etc/cloud/cloud-init.disabled
[Unit]
Description=Cloud-init target
diff --git a/systemd/systemd-fsck@.service.d/cloud-init.conf b/systemd/systemd-fsck@.service.d/cloud-init.conf
new file mode 100644
index 00000000..0bfa465b
--- /dev/null
+++ b/systemd/systemd-fsck@.service.d/cloud-init.conf
@@ -0,0 +1,2 @@
+[Unit]
+After=cloud-init.service
diff --git a/sysvinit/freebsd/cloudinitlocal b/sysvinit/freebsd/cloudinitlocal
index 11a5eb1c..7a034b3b 100755
--- a/sysvinit/freebsd/cloudinitlocal
+++ b/sysvinit/freebsd/cloudinitlocal
@@ -2,7 +2,7 @@
# PROVIDE: cloudinitlocal
# REQUIRE: ldconfig mountcritlocal
-# BEFORE: NETWORKING FILESYSTEMS cloudinit cloudconfig cloudfinal
+# BEFORE: NETWORKING cloudinit cloudconfig cloudfinal
. /etc/rc.subr
diff --git a/templates/hosts.debian.tmpl b/templates/hosts.debian.tmpl
index a1d97212..7e29907a 100644
--- a/templates/hosts.debian.tmpl
+++ b/templates/hosts.debian.tmpl
@@ -1,6 +1,6 @@
## template:jinja
{#
-This file (/etc/cloud/templates/hosts.tmpl) is only utilized
+This file (/etc/cloud/templates/hosts.debian.tmpl) is only utilized
if enabled in cloud-config. Specifically, in order to enable it
you need to add the following to config:
manage_etc_hosts: True
@@ -8,7 +8,7 @@ you need to add the following to config:
# Your system has configured 'manage_etc_hosts' as True.
# As a result, if you wish for changes to this file to persist
# then you will need to either
-# a.) make changes to the master file in /etc/cloud/templates/hosts.tmpl
+# a.) make changes to the master file in /etc/cloud/templates/hosts.debian.tmpl
# b.) change or remove the value of 'manage_etc_hosts' in
# /etc/cloud/cloud.cfg or cloud-config from user-data
#
diff --git a/templates/hosts.suse.tmpl b/templates/hosts.suse.tmpl
index b6082692..399ec9b4 100644
--- a/templates/hosts.suse.tmpl
+++ b/templates/hosts.suse.tmpl
@@ -14,9 +14,12 @@ you need to add the following to config:
#
# The following lines are desirable for IPv4 capable hosts
127.0.0.1 localhost
+127.0.0.1 {{fqdn}} {{hostname}}
+
# The following lines are desirable for IPv6 capable hosts
::1 localhost ipv6-localhost ipv6-loopback
+::1 {{fqdn}} {{hostname}}
fe00::0 ipv6-localnet
ff00::0 ipv6-mcastprefix
diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py
index 099c357f..07148c12 100644
--- a/tests/cloud_tests/__init__.py
+++ b/tests/cloud_tests/__init__.py
@@ -1,17 +1,18 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Main init."""
+
import logging
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TESTCASES_DIR = os.path.join(BASE_DIR, 'testcases')
TEST_CONF_DIR = os.path.join(BASE_DIR, 'configs')
+TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2])
def _initialize_logging():
- """
- configure logging for cloud_tests
- """
+ """Configure logging for cloud_tests."""
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
diff --git a/tests/cloud_tests/__main__.py b/tests/cloud_tests/__main__.py
index ed654ad3..260ddb3f 100644
--- a/tests/cloud_tests/__main__.py
+++ b/tests/cloud_tests/__main__.py
@@ -1,19 +1,17 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Main entry point."""
+
import argparse
import logging
-import shutil
import sys
-import tempfile
-from tests.cloud_tests import (args, collect, manage, verify)
+from tests.cloud_tests import args, bddeb, collect, manage, run_funcs, verify
from tests.cloud_tests import LOG
def configure_log(args):
- """
- configure logging
- """
+ """Configure logging."""
level = logging.INFO
if args.verbose:
level = logging.DEBUG
@@ -22,41 +20,15 @@ def configure_log(args):
LOG.setLevel(level)
-def run(args):
- """
- run full test suite
- """
- failed = 0
- args.data_dir = tempfile.mkdtemp(prefix='cloud_test_data_')
- LOG.debug('using tmpdir %s', args.data_dir)
- try:
- failed += collect.collect(args)
- failed += verify.verify(args)
- except Exception:
- failed += 1
- raise
- finally:
- # TODO: make this configurable via environ or cmdline
- if failed:
- LOG.warning('some tests failed, leaving data in %s', args.data_dir)
- else:
- shutil.rmtree(args.data_dir)
- return failed
-
-
def main():
- """
- entry point for cloud test suite
- """
+ """Entry point for cloud test suite."""
# configure parser
parser = argparse.ArgumentParser(prog='cloud_tests')
subparsers = parser.add_subparsers(dest="subcmd")
subparsers.required = True
def add_subparser(name, description, arg_sets):
- """
- add arguments to subparser
- """
+ """Add arguments to subparser."""
subparser = subparsers.add_parser(name, help=description)
for (_args, _kwargs) in (a for arg_set in arg_sets for a in arg_set):
subparser.add_argument(*_args, **_kwargs)
@@ -80,9 +52,12 @@ def main():
# run handler
LOG.debug('running with args: %s\n', parsed)
return {
+ 'bddeb': bddeb.bddeb,
'collect': collect.collect,
'create': manage.create,
- 'run': run,
+ 'run': run_funcs.run,
+ 'tree_collect': run_funcs.tree_collect,
+ 'tree_run': run_funcs.tree_run,
'verify': verify.verify,
}[parsed.subcmd](parsed)
diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py
index 371b0444..369d60db 100644
--- a/tests/cloud_tests/args.py
+++ b/tests/cloud_tests/args.py
@@ -1,23 +1,43 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Argparse argument setup and sanitization."""
+
import os
from tests.cloud_tests import config, util
-from tests.cloud_tests import LOG
+from tests.cloud_tests import LOG, TREE_BASE
ARG_SETS = {
+ 'BDDEB': (
+ (('--bddeb-args',),
+ {'help': 'args to pass through to bddeb',
+ 'action': 'store', 'default': None, 'required': False}),
+ (('--build-os',),
+ {'help': 'OS to use as build system (default is xenial)',
+ 'action': 'store', 'choices': config.ENABLED_DISTROS,
+ 'default': 'xenial', 'required': False}),
+ (('--build-platform',),
+ {'help': 'platform to use for build system (default is lxd)',
+ 'action': 'store', 'choices': config.ENABLED_PLATFORMS,
+ 'default': 'lxd', 'required': False}),
+ (('--cloud-init',),
+ {'help': 'path to base of cloud-init tree', 'metavar': 'DIR',
+ 'action': 'store', 'required': False, 'default': TREE_BASE}),),
'COLLECT': (
(('-p', '--platform'),
{'help': 'platform(s) to run tests on', 'metavar': 'PLATFORM',
- 'action': 'append', 'choices': config.list_enabled_platforms(),
+ 'action': 'append', 'choices': config.ENABLED_PLATFORMS,
'default': []}),
(('-n', '--os-name'),
{'help': 'the name(s) of the OS(s) to test', 'metavar': 'NAME',
- 'action': 'append', 'choices': config.list_enabled_distros(),
+ 'action': 'append', 'choices': config.ENABLED_DISTROS,
'default': []}),
(('-t', '--test-config'),
{'help': 'test config file(s) to use', 'metavar': 'FILE',
- 'action': 'append', 'default': []}),),
+ 'action': 'append', 'default': []}),
+ (('--feature-override',),
+ {'help': 'feature flags override(s), <flagname>=<true/false>',
+ 'action': 'append', 'default': [], 'required': False}),),
'CREATE': (
(('-c', '--config'),
{'help': 'cloud-config yaml for testcase', 'metavar': 'DATA',
@@ -41,7 +61,15 @@ ARG_SETS = {
'OUTPUT': (
(('-d', '--data-dir'),
{'help': 'directory to store test data in',
- 'action': 'store', 'metavar': 'DIR', 'required': True}),),
+ 'action': 'store', 'metavar': 'DIR', 'required': False}),
+ (('--preserve-data',),
+ {'help': 'do not remove collected data after successful run',
+ 'action': 'store_true', 'default': False, 'required': False}),),
+ 'OUTPUT_DEB': (
+ (('--deb',),
+ {'help': 'path to write output deb to', 'metavar': 'FILE',
+ 'action': 'store', 'required': False,
+ 'default': 'cloud-init_all.deb'}),),
'RESULT': (
(('-r', '--result'),
{'help': 'file to write results to',
@@ -61,31 +89,54 @@ ARG_SETS = {
{'help': 'ppa to enable (implies -u)', 'metavar': 'NAME',
'action': 'store'}),
(('-u', '--upgrade'),
- {'help': 'upgrade before starting tests', 'action': 'store_true',
- 'default': False}),),
+ {'help': 'upgrade or install cloud-init from repo',
+ 'action': 'store_true', 'default': False}),
+ (('--upgrade-full',),
+ {'help': 'do full system upgrade from repo (implies -u)',
+ 'action': 'store_true', 'default': False}),),
+
}
SUBCMDS = {
+ 'bddeb': ('build cloud-init deb from tree',
+ ('BDDEB', 'OUTPUT_DEB', 'INTERFACE')),
'collect': ('collect test data',
('COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT', 'SETUP')),
'create': ('create new test case', ('CREATE', 'INTERFACE')),
- 'run': ('run test suite', ('COLLECT', 'INTERFACE', 'RESULT', 'SETUP')),
+ 'run': ('run test suite',
+ ('COLLECT', 'INTERFACE', 'RESULT', 'OUTPUT', 'SETUP')),
+ 'tree_collect': ('collect using current working tree',
+ ('BDDEB', 'COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT')),
+ 'tree_run': ('run using current working tree',
+ ('BDDEB', 'COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT')),
'verify': ('verify test data', ('INTERFACE', 'OUTPUT', 'RESULT')),
}
def _empty_normalizer(args):
+ """Do not normalize arguments."""
+ return args
+
+
+def normalize_bddeb_args(args):
+ """Normalize BDDEB arguments.
+
+ @param args: parsed args
+ @return_value: updated args, or None if errors encountered
"""
- do not normalize arguments
- """
+ # make sure cloud-init dir is accessible
+ if not (args.cloud_init and os.path.isdir(args.cloud_init)):
+ LOG.error('invalid cloud-init tree path')
+ return None
+
return args
def normalize_create_args(args):
- """
- normalize CREATE arguments
- args: parsed args
- return_value: updated args, or None if errors occurred
+ """Normalize CREATE arguments.
+
+ @param args: parsed args
+ @return_value: updated args, or None if errors occurred
"""
# ensure valid name for new test
if len(args.name.split('/')) != 2:
@@ -114,22 +165,22 @@ def normalize_create_args(args):
def normalize_collect_args(args):
- """
- normalize COLLECT arguments
- args: parsed args
- return_value: updated args, or None if errors occurred
+ """Normalize COLLECT arguments.
+
+ @param args: parsed args
+ @return_value: updated args, or None if errors occurred
"""
# platform should default to all supported
if len(args.platform) == 0:
- args.platform = config.list_enabled_platforms()
+ args.platform = config.ENABLED_PLATFORMS
args.platform = util.sorted_unique(args.platform)
# os name should default to all enabled
# if os name is provided ensure that all provided are supported
if len(args.os_name) == 0:
- args.os_name = config.list_enabled_distros()
+ args.os_name = config.ENABLED_DISTROS
else:
- supported = config.list_enabled_distros()
+ supported = config.ENABLED_DISTROS
invalid = [os_name for os_name in args.os_name
if os_name not in supported]
if len(invalid) != 0:
@@ -158,18 +209,33 @@ def normalize_collect_args(args):
args.test_config = valid
args.test_config = util.sorted_unique(args.test_config)
+ # parse feature flag overrides and ensure all are valid
+ if args.feature_override:
+ overrides = args.feature_override
+ args.feature_override = util.parse_conf_list(
+ overrides, boolean=True, valid=config.list_feature_flags())
+ if not args.feature_override:
+ LOG.error('invalid feature flag override(s): %s', overrides)
+ return None
+ else:
+ args.feature_override = {}
+
return args
def normalize_output_args(args):
+ """Normalize OUTPUT arguments.
+
+ @param args: parsed args
+ @return_value: updated args, or None if errors occurred
"""
- normalize OUTPUT arguments
- args: parsed args
- return_value: updated args, or None if errors occurred
- """
+ if args.data_dir:
+ args.data_dir = os.path.abspath(args.data_dir)
+ if not os.path.exists(args.data_dir):
+ os.mkdir(args.data_dir)
+
if not args.data_dir:
- LOG.error('--data-dir must be specified')
- return None
+ args.data_dir = None
# ensure clean output dir if collect
# ensure data exists if verify
@@ -177,19 +243,31 @@ def normalize_output_args(args):
if not util.is_clean_writable_dir(args.data_dir):
LOG.error('data_dir must be empty/new and must be writable')
return None
- elif args.subcmd == 'verify':
- if not os.path.exists(args.data_dir):
- LOG.error('data_dir %s does not exist', args.data_dir)
- return None
return args
-def normalize_setup_args(args):
+def normalize_output_deb_args(args):
+ """Normalize OUTPUT_DEB arguments.
+
+ @param args: parsed args
+    @return_value: updated args, or None if errors occurred
"""
- normalize SETUP arguments
- args: parsed args
- return_value: updated_args, or None if errors occurred
+ # make sure to use abspath for deb
+ args.deb = os.path.abspath(args.deb)
+
+ if not args.deb.endswith('.deb'):
+ LOG.error('output filename does not end in ".deb"')
+ return None
+
+ return args
+
+
+def normalize_setup_args(args):
+ """Normalize SETUP arguments.
+
+ @param args: parsed args
+ @return_value: updated_args, or None if errors occurred
"""
# ensure deb or rpm valid if specified
for pkg in (args.deb, args.rpm):
@@ -210,10 +288,12 @@ def normalize_setup_args(args):
NORMALIZERS = {
+ 'BDDEB': normalize_bddeb_args,
'COLLECT': normalize_collect_args,
'CREATE': normalize_create_args,
'INTERFACE': _empty_normalizer,
'OUTPUT': normalize_output_args,
+ 'OUTPUT_DEB': normalize_output_deb_args,
'RESULT': _empty_normalizer,
'SETUP': normalize_setup_args,
}
diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py
new file mode 100644
index 00000000..53dbf74e
--- /dev/null
+++ b/tests/cloud_tests/bddeb.py
@@ -0,0 +1,118 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Used to build a deb."""
+
+from functools import partial
+import os
+import tempfile
+
+from cloudinit import util as c_util
+from tests.cloud_tests import (config, LOG)
+from tests.cloud_tests import (platforms, images, snapshots, instances)
+from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
+
+build_deps = ['devscripts', 'equivs', 'git', 'tar']
+
+
+def _out(cmd_res):
+ """Get clean output from cmd result."""
+ return cmd_res[0].strip()
+
+
+def build_deb(args, instance):
+ """Build deb on system and copy out to location at args.deb.
+
+ @param args: cmdline arguments
+ @return_value: tuple of results and fail count
+ """
+ # update remote system package list and install build deps
+ LOG.debug('installing build deps')
+ pkgs = ' '.join(build_deps)
+ cmd = 'apt-get update && apt-get install --yes {}'.format(pkgs)
+ instance.execute(['/bin/sh', '-c', cmd])
+ # TODO Remove this call once we have a ci-deps Makefile target
+ instance.execute(['mk-build-deps', '--install', '-t',
+ 'apt-get --no-install-recommends --yes', 'cloud-init'])
+
+ # local tmpfile that must be deleted
+ local_tarball = tempfile.NamedTemporaryFile().name
+
+ # paths to use in remote system
+ output_link = '/root/cloud-init_all.deb'
+ remote_tarball = _out(instance.execute(['mktemp']))
+ extract_dir = _out(instance.execute(['mktemp', '--directory']))
+ bddeb_path = os.path.join(extract_dir, 'packages', 'bddeb')
+ git_env = {'GIT_DIR': os.path.join(extract_dir, '.git'),
+ 'GIT_WORK_TREE': extract_dir}
+
+ LOG.debug('creating tarball of cloud-init at: %s', local_tarball)
+ c_util.subp(['tar', 'cf', local_tarball, '--owner', 'root',
+ '--group', 'root', '-C', args.cloud_init, '.'])
+ LOG.debug('copying to remote system at: %s', remote_tarball)
+ instance.push_file(local_tarball, remote_tarball)
+
+ LOG.debug('extracting tarball in remote system at: %s', extract_dir)
+ instance.execute(['tar', 'xf', remote_tarball, '-C', extract_dir])
+ instance.execute(['git', 'commit', '-a', '-m', 'tmp', '--allow-empty'],
+ env=git_env)
+
+ LOG.debug('building deb in remote system at: %s', output_link)
+ bddeb_args = args.bddeb_args.split() if args.bddeb_args else []
+ instance.execute([bddeb_path, '-d'] + bddeb_args, env=git_env)
+
+ # copy the deb back to the host system
+ LOG.debug('copying built deb to host at: %s', args.deb)
+ instance.pull_file(output_link, args.deb)
+
+
+def setup_build(args):
+ """Set build system up then run build.
+
+ @param args: cmdline arguments
+ @return_value: tuple of results and fail count
+ """
+ res = ({}, 1)
+
+ # set up platform
+ LOG.info('setting up platform: %s', args.build_platform)
+ platform_config = config.load_platform_config(args.build_platform)
+ platform_call = partial(platforms.get_platform, args.build_platform,
+ platform_config)
+ with PlatformComponent(platform_call) as platform:
+
+ # set up image
+ LOG.info('acquiring image for os: %s', args.build_os)
+ img_conf = config.load_os_config(platform.platform_name, args.build_os)
+ image_call = partial(images.get_image, platform, img_conf)
+ with PlatformComponent(image_call) as image:
+
+ # set up snapshot
+ snapshot_call = partial(snapshots.get_snapshot, image)
+ with PlatformComponent(snapshot_call) as snapshot:
+
+ # create instance with cloud-config to set it up
+ LOG.info('creating instance to build deb in')
+ empty_cloud_config = "#cloud-config\n{}"
+ instance_call = partial(
+ instances.get_instance, snapshot, empty_cloud_config,
+ use_desc='build cloud-init deb')
+ with PlatformComponent(instance_call) as instance:
+
+ # build the deb
+ res = run_single('build deb on system',
+ partial(build_deb, args, instance))
+
+ return res
+
+
+def bddeb(args):
+ """Entry point for build deb.
+
+ @param args: cmdline arguments
+ @return_value: fail count
+ """
+ LOG.info('preparing to build cloud-init deb')
+ (res, failed) = run_stage('build deb', [partial(setup_build, args)])
+ return failed
+
+# vi: ts=4 expandtab
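The nested 'with PlatformComponent(...)' blocks above guarantee teardown
of the platform, image, snapshot and instance even when a stage fails. A
minimal sketch of the pattern (assumed shape; the real class lives in
tests/cloud_tests/stage.py):

    from functools import partial

    class PlatformComponent(object):
        """Context manager building a component lazily, destroying it."""

        def __init__(self, get_func):
            self.get_func = get_func
            self.component = None

        def __enter__(self):
            self.component = self.get_func()
            return self.component

        def __exit__(self, etype, value, trace):
            if self.component is not None:
                self.component.destroy()

    class FakePlatform(object):
        def destroy(self):
            print('destroyed')

    # construction is deferred until the 'with' block is entered
    with PlatformComponent(partial(FakePlatform)) as platform:
        print('using platform')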
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
index 02fc0e52..b44e8bdd 100644
--- a/tests/cloud_tests/collect.py
+++ b/tests/cloud_tests/collect.py
@@ -1,34 +1,39 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from tests.cloud_tests import (config, LOG, setup_image, util)
-from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
-from tests.cloud_tests import (platforms, images, snapshots, instances)
+"""Used to collect data from platforms during tests."""
from functools import partial
import os
+from cloudinit import util as c_util
+from tests.cloud_tests import (config, LOG, setup_image, util)
+from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
+from tests.cloud_tests import (platforms, images, snapshots, instances)
+
def collect_script(instance, base_dir, script, script_name):
- """
- collect script data
- instance: instance to run script on
- base_dir: base directory for output data
- script: script contents
- script_name: name of script to run
- return_value: None, may raise errors
+ """Collect script data.
+
+ @param instance: instance to run script on
+ @param base_dir: base directory for output data
+ @param script: script contents
+ @param script_name: name of script to run
+ @return_value: None, may raise errors
"""
LOG.debug('running collect script: %s', script_name)
- util.write_file(os.path.join(base_dir, script_name),
- instance.run_script(script))
+ (out, err, exit) = instance.run_script(
+ script, rcs=range(0, 256),
+ description='collect: {}'.format(script_name))
+ c_util.write_file(os.path.join(base_dir, script_name), out)
def collect_test_data(args, snapshot, os_name, test_name):
- """
- collect data for test case
- args: cmdline arguments
- snapshot: instantiated snapshot
- test_name: name or path of test to run
- return_value: tuple of results and fail count
+ """Collect data for test case.
+
+ @param args: cmdline arguments
+ @param snapshot: instantiated snapshot
+ @param test_name: name or path of test to run
+ @return_value: tuple of results and fail count
"""
res = ({}, 1)
@@ -39,15 +44,27 @@ def collect_test_data(args, snapshot, os_name, test_name):
test_scripts = test_config['collect_scripts']
test_output_dir = os.sep.join(
(args.data_dir, snapshot.platform_name, os_name, test_name))
- boot_timeout = (test_config.get('boot_timeout')
- if isinstance(test_config.get('boot_timeout'), int) else
- snapshot.config.get('timeout'))
# if test is not enabled, skip and return 0 failures
if not test_config.get('enabled', False):
LOG.warning('test config %s is not enabled, skipping', test_name)
return ({}, 0)
+ # if testcase requires a feature flag that the image does not support,
+ # skip the testcase with a warning
+ req_features = test_config.get('required_features', [])
+ if any(feature not in snapshot.features for feature in req_features):
+ LOG.warn('test config %s requires features not supported by image, '
+ 'skipping.\nrequired features: %s\nsupported features: %s',
+ test_name, req_features, snapshot.features)
+ return ({}, 0)
+
+ # if there are user data overrides required for this test case, apply them
+ overrides = snapshot.config.get('user_data_overrides', {})
+ if overrides:
+ LOG.debug('updating user data for collect with: %s', overrides)
+ user_data = util.update_user_data(user_data, overrides)
+
# create test instance
component = PlatformComponent(
partial(instances.get_instance, snapshot, user_data,
@@ -56,7 +73,7 @@ def collect_test_data(args, snapshot, os_name, test_name):
LOG.info('collecting test data for test: %s', test_name)
with component as instance:
start_call = partial(run_single, 'boot instance', partial(
- instance.start, wait=True, wait_time=boot_timeout))
+ instance.start, wait=True, wait_for_cloud_init=True))
collect_calls = [partial(run_single, 'script {}'.format(script_name),
partial(collect_script, instance,
test_output_dir, script, script_name))
@@ -69,11 +86,11 @@ def collect_test_data(args, snapshot, os_name, test_name):
def collect_snapshot(args, image, os_name):
- """
- collect data for snapshot of image
- args: cmdline arguments
- image: instantiated image with set up complete
- return_value tuple of results and fail count
+ """Collect data for snapshot of image.
+
+ @param args: cmdline arguments
+ @param image: instantiated image with set up complete
+ @return_value tuple of results and fail count
"""
res = ({}, 1)
@@ -91,19 +108,18 @@ def collect_snapshot(args, image, os_name):
def collect_image(args, platform, os_name):
- """
- collect data for image
- args: cmdline arguments
- platform: instantiated platform
- os_name: name of distro to collect for
- return_value: tuple of results and fail count
+ """Collect data for image.
+
+ @param args: cmdline arguments
+ @param platform: instantiated platform
+ @param os_name: name of distro to collect for
+ @return_value: tuple of results and fail count
"""
res = ({}, 1)
- os_config = config.load_os_config(os_name)
- if not os_config.get('enabled'):
- raise ValueError('OS {} not enabled'.format(os_name))
-
+ os_config = config.load_os_config(
+ platform.platform_name, os_name, require_enabled=True,
+ feature_overrides=args.feature_override)
component = PlatformComponent(
partial(images.get_image, platform, os_config))
@@ -118,18 +134,16 @@ def collect_image(args, platform, os_name):
def collect_platform(args, platform_name):
- """
- collect data for platform
- args: cmdline arguments
- platform_name: platform to collect for
- return_value: tuple of results and fail count
+ """Collect data for platform.
+
+ @param args: cmdline arguments
+ @param platform_name: platform to collect for
+ @return_value: tuple of results and fail count
"""
res = ({}, 1)
- platform_config = config.load_platform_config(platform_name)
- if not platform_config.get('enabled'):
- raise ValueError('Platform {} not enabled'.format(platform_name))
-
+ platform_config = config.load_platform_config(
+ platform_name, require_enabled=True)
component = PlatformComponent(
partial(platforms.get_platform, platform_name, platform_config))
@@ -143,10 +157,10 @@ def collect_platform(args, platform_name):
def collect(args):
- """
- entry point for collection
- args: cmdline arguments
- return_value: fail count
+ """Entry point for collection.
+
+ @param args: cmdline arguments
+ @return_value: fail count
"""
(res, failed) = run_stage(
'collect data', [partial(collect_platform, args, platform_name)
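The feature gate added to collect_test_data() above skips a testcase when
any of its required_features is missing from the image's feature set. A
minimal sketch of that check (the feature names below are examples taken
from the testcase configs later in this diff):

    snapshot_features = {'apt', 'lsb_release'}
    test_config = {'required_features': ['apt', 'snap']}

    req = test_config.get('required_features', [])
    missing = [f for f in req if f not in snapshot_features]
    if missing:
        print('skipping: image lacks features %s' % missing)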
diff --git a/tests/cloud_tests/config.py b/tests/cloud_tests/config.py
index f3a13c9a..4d5dc801 100644
--- a/tests/cloud_tests/config.py
+++ b/tests/cloud_tests/config.py
@@ -1,5 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Used to setup test configuration."""
+
import glob
import os
@@ -14,46 +16,44 @@ RELEASES_CONF = os.path.join(BASE_DIR, 'releases.yaml')
TESTCASE_CONF = os.path.join(BASE_DIR, 'testcases.yaml')
+def get(base, key):
+ """Get config entry 'key' from base, ensuring is dictionary."""
+ return base[key] if key in base and base[key] is not None else {}
+
+
+def enabled(config):
+ """Test if config item is enabled."""
+ return isinstance(config, dict) and config.get('enabled', False)
+
+
def path_to_name(path):
- """
- convert abs or rel path to test config to path under configs/
- if already a test name, do nothing
- """
+ """Convert abs or rel path to test config to path under 'sconfigs/'."""
dir_path, file_name = os.path.split(os.path.normpath(path))
name = os.path.splitext(file_name)[0]
return os.sep.join((os.path.basename(dir_path), name))
def name_to_path(name):
- """
- convert test config path under configs/ to full config path,
- if already a full path, do nothing
- """
+ """Convert test config path under configs/ to full config path."""
name = os.path.normpath(name)
if not name.endswith(CONF_EXT):
name = name + CONF_EXT
return name if os.path.isabs(name) else os.path.join(TEST_CONF_DIR, name)
-def name_sanatize(name):
- """
- sanatize test name to be used as a module name
- """
+def name_sanitize(name):
+ """Sanitize test name to be used as a module name."""
return name.replace('-', '_')
def name_to_module(name):
- """
- convert test name to a loadable module name under testcases/
- """
- name = name_sanatize(path_to_name(name))
+ """Convert test name to a loadable module name under 'testcases/'."""
+ name = name_sanitize(path_to_name(name))
return name.replace(os.path.sep, '.')
def merge_config(base, override):
- """
- merge config and base
- """
+ """Merge config and base."""
res = base.copy()
res.update(override)
res.update({k: merge_config(base.get(k, {}), v)
@@ -61,53 +61,102 @@ def merge_config(base, override):
return res
-def load_platform_config(platform):
+def merge_feature_groups(feature_conf, feature_groups, overrides):
+ """Combine feature groups and overrides to construct a supported list.
+
+ @param feature_conf: feature config from releases.yaml
+ @param feature_groups: feature groups the release is a member of
+ @param overrides: overrides specified by the release's config
+ @return_value: dict of {feature: true/false} settings
"""
- load configuration for platform
+ res = dict().fromkeys(feature_conf['all'])
+ for group in feature_groups:
+ res.update(feature_conf['groups'][group])
+ res.update(overrides)
+ return res
+
+
+def load_platform_config(platform_name, require_enabled=False):
+ """Load configuration for platform.
+
+ @param platform_name: name of platform to retrieve config for
+ @param require_enabled: if true, raise error if 'enabled' not True
+ @return_value: config dict
"""
main_conf = c_util.read_conf(PLATFORM_CONF)
- return merge_config(main_conf.get('default_platform_config'),
- main_conf.get('platforms')[platform])
+ conf = merge_config(main_conf['default_platform_config'],
+ main_conf['platforms'][platform_name])
+ if require_enabled and not enabled(conf):
+ raise ValueError('Platform is not enabled')
+ return conf
-def load_os_config(os_name):
- """
- load configuration for os
+def load_os_config(platform_name, os_name, require_enabled=False,
+ feature_overrides={}):
+ """Load configuration for os.
+
+ @param platform_name: platform name to load os config for
+ @param os_name: name of os to retrieve config for
+ @param require_enabled: if true, raise error if 'enabled' not True
+ @param feature_overrides: feature flag overrides to merge with features
+ @return_value: config dict
"""
main_conf = c_util.read_conf(RELEASES_CONF)
- return merge_config(main_conf.get('default_release_config'),
- main_conf.get('releases')[os_name])
+ default = main_conf['default_release_config']
+ image = main_conf['releases'][os_name]
+ conf = merge_config(merge_config(get(default, 'default'),
+ get(default, platform_name)),
+ merge_config(get(image, 'default'),
+ get(image, platform_name)))
+
+ feature_conf = main_conf['features']
+ feature_groups = conf.get('feature_groups', [])
+ overrides = merge_config(get(conf, 'features'), feature_overrides)
+ conf['features'] = merge_feature_groups(
+ feature_conf, feature_groups, overrides)
+
+ if require_enabled and not enabled(conf):
+ raise ValueError('OS is not enabled')
+ return conf
def load_test_config(path):
- """
- load a test config file by either abs path or rel path
- """
+ """Load a test config file by either abs path or rel path."""
return merge_config(c_util.read_conf(TESTCASE_CONF)['base_test_data'],
c_util.read_conf(name_to_path(path)))
+def list_feature_flags():
+ """List all supported feature flags."""
+ feature_conf = get(c_util.read_conf(RELEASES_CONF), 'features')
+ return feature_conf.get('all', [])
+
+
def list_enabled_platforms():
- """
- list all platforms enabled for testing
- """
- platforms = c_util.read_conf(PLATFORM_CONF).get('platforms')
- return [k for k, v in platforms.items() if v.get('enabled')]
+ """List all platforms enabled for testing."""
+ platforms = get(c_util.read_conf(PLATFORM_CONF), 'platforms')
+ return [k for k, v in platforms.items() if enabled(v)]
-def list_enabled_distros():
- """
- list all distros enabled for testing
- """
- releases = c_util.read_conf(RELEASES_CONF).get('releases')
- return [k for k, v in releases.items() if v.get('enabled')]
+def list_enabled_distros(platforms):
+ """List all distros enabled for testing on specified platforms."""
+ def platform_has_enabled(config):
+ """List if platform is enabled."""
+ return any(enabled(merge_config(get(config, 'default'),
+ get(config, platform)))
+ for platform in platforms)
+
+ releases = get(c_util.read_conf(RELEASES_CONF), 'releases')
+ return [k for k, v in releases.items() if platform_has_enabled(v)]
def list_test_configs():
- """
- list all available test config files by abspath
- """
+ """List all available test config files by abspath."""
return [os.path.abspath(f) for f in
glob.glob(os.sep.join((TEST_CONF_DIR, '*', '*.yaml')))]
+
+ENABLED_PLATFORMS = sorted(list_enabled_platforms())
+ENABLED_DISTROS = sorted(list_enabled_distros(ENABLED_PLATFORMS))
+
# vi: ts=4 expandtab
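For illustration, a minimal sketch of the merge semantics above, assuming the
two functions are imported from this module; all input values are invented:

    base = {'default': {'enabled': False, 'features': {'apt': True}}}
    override = {'default': {'enabled': True}}
    merged = merge_config(base, override)
    # merged == {'default': {'enabled': True, 'features': {'apt': True}}}

    feature_conf = {'all': ['apt', 'lxd'], 'groups': {'base': {'apt': True}}}
    res = merge_feature_groups(feature_conf, ['base'], {'lxd': False})
    # res == {'apt': True, 'lxd': False}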
diff --git a/tests/cloud_tests/configs/bugs/lp1628337.yaml b/tests/cloud_tests/configs/bugs/lp1628337.yaml
index 1d6bf483..e39b3cd8 100644
--- a/tests/cloud_tests/configs/bugs/lp1628337.yaml
+++ b/tests/cloud_tests/configs/bugs/lp1628337.yaml
@@ -1,6 +1,9 @@
#
# LP Bug 1628337: cloud-init tries to install NTP before even configuring the archives
#
+required_features:
+ - apt
+ - lsb_release
cloud_config: |
#cloud-config
ntp:
diff --git a/tests/cloud_tests/configs/examples/add_apt_repositories.yaml b/tests/cloud_tests/configs/examples/add_apt_repositories.yaml
index b8964357..4b8575f7 100644
--- a/tests/cloud_tests/configs/examples/add_apt_repositories.yaml
+++ b/tests/cloud_tests/configs/examples/add_apt_repositories.yaml
@@ -4,6 +4,8 @@
# 2016-11-17: Disabled as covered by module based tests
#
enabled: False
+required_features:
+ - apt
cloud_config: |
#cloud-config
apt:
diff --git a/tests/cloud_tests/configs/modules/apt_configure_conf.yaml b/tests/cloud_tests/configs/modules/apt_configure_conf.yaml
index 163ae3fc..de453000 100644
--- a/tests/cloud_tests/configs/modules/apt_configure_conf.yaml
+++ b/tests/cloud_tests/configs/modules/apt_configure_conf.yaml
@@ -1,6 +1,8 @@
#
# Provide a configuration for APT
#
+required_features:
+ - apt
cloud_config: |
#cloud-config
apt:
diff --git a/tests/cloud_tests/configs/modules/apt_configure_disable_suites.yaml b/tests/cloud_tests/configs/modules/apt_configure_disable_suites.yaml
index 73e4a538..98800673 100644
--- a/tests/cloud_tests/configs/modules/apt_configure_disable_suites.yaml
+++ b/tests/cloud_tests/configs/modules/apt_configure_disable_suites.yaml
@@ -1,6 +1,9 @@
#
# Disables everything in sources.list
#
+required_features:
+ - apt
+ - lsb_release
cloud_config: |
#cloud-config
apt:
diff --git a/tests/cloud_tests/configs/modules/apt_configure_primary.yaml b/tests/cloud_tests/configs/modules/apt_configure_primary.yaml
index 2ec30ca1..41bcf2fd 100644
--- a/tests/cloud_tests/configs/modules/apt_configure_primary.yaml
+++ b/tests/cloud_tests/configs/modules/apt_configure_primary.yaml
@@ -1,6 +1,9 @@
#
# Set up a custom primary sources.list
#
+required_features:
+ - apt
+ - apt_src_cont
cloud_config: |
#cloud-config
apt:
@@ -16,4 +19,8 @@ collect_scripts:
#!/bin/bash
grep -v '^#' /etc/apt/sources.list | sed '/^\s*$/d' | grep -c gtlib.gatech.edu
+ sources.list: |
+ #!/bin/bash
+ cat /etc/apt/sources.list
+
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/configs/modules/apt_configure_proxy.yaml b/tests/cloud_tests/configs/modules/apt_configure_proxy.yaml
index e7371305..be6c6f81 100644
--- a/tests/cloud_tests/configs/modules/apt_configure_proxy.yaml
+++ b/tests/cloud_tests/configs/modules/apt_configure_proxy.yaml
@@ -1,6 +1,8 @@
#
# Set apt proxy
#
+required_features:
+ - apt
cloud_config: |
#cloud-config
apt:
diff --git a/tests/cloud_tests/configs/modules/apt_configure_security.yaml b/tests/cloud_tests/configs/modules/apt_configure_security.yaml
index f6a2c828..83dd51df 100644
--- a/tests/cloud_tests/configs/modules/apt_configure_security.yaml
+++ b/tests/cloud_tests/configs/modules/apt_configure_security.yaml
@@ -1,6 +1,9 @@
#
# Add security to sources.list
#
+required_features:
+ - apt
+ - ubuntu_repos
cloud_config: |
#cloud-config
apt:
diff --git a/tests/cloud_tests/configs/modules/apt_configure_sources_key.yaml b/tests/cloud_tests/configs/modules/apt_configure_sources_key.yaml
index e7568a6a..bde9398a 100644
--- a/tests/cloud_tests/configs/modules/apt_configure_sources_key.yaml
+++ b/tests/cloud_tests/configs/modules/apt_configure_sources_key.yaml
@@ -1,6 +1,9 @@
#
# Add a sources.list entry with a given key (Debian Jessie)
#
+required_features:
+ - apt
+ - lsb_release
cloud_config: |
#cloud-config
apt:
diff --git a/tests/cloud_tests/configs/modules/apt_configure_sources_keyserver.yaml b/tests/cloud_tests/configs/modules/apt_configure_sources_keyserver.yaml
index 1a4a238f..25088135 100644
--- a/tests/cloud_tests/configs/modules/apt_configure_sources_keyserver.yaml
+++ b/tests/cloud_tests/configs/modules/apt_configure_sources_keyserver.yaml
@@ -1,12 +1,15 @@
#
# Add a sources.list entry with a key from a keyserver
#
+required_features:
+ - apt
+ - lsb_release
cloud_config: |
#cloud-config
apt:
sources:
source1:
- keyid: 0165013E
+ keyid: 1FF0D8535EF7E719E5C81B9C083D06FBE4D304DF
keyserver: keyserver.ubuntu.com
source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
collect_scripts:
diff --git a/tests/cloud_tests/configs/modules/apt_configure_sources_list.yaml b/tests/cloud_tests/configs/modules/apt_configure_sources_list.yaml
index 057fc72c..143cb080 100644
--- a/tests/cloud_tests/configs/modules/apt_configure_sources_list.yaml
+++ b/tests/cloud_tests/configs/modules/apt_configure_sources_list.yaml
@@ -1,6 +1,9 @@
#
# Generate a sources.list
#
+required_features:
+ - apt
+ - lsb_release
cloud_config: |
#cloud-config
apt:
diff --git a/tests/cloud_tests/configs/modules/apt_configure_sources_ppa.yaml b/tests/cloud_tests/configs/modules/apt_configure_sources_ppa.yaml
index dee9dc70..9efdae52 100644
--- a/tests/cloud_tests/configs/modules/apt_configure_sources_ppa.yaml
+++ b/tests/cloud_tests/configs/modules/apt_configure_sources_ppa.yaml
@@ -1,6 +1,12 @@
#
# Add a PPA to source.list
#
+# NOTE: on older ubuntu releases the sources file added is named
+# 'curtin-dev-test-archive-trusty', without 'ubuntu' in the middle
+required_features:
+ - apt
+ - ppa
+ - ppa_file_name
cloud_config: |
#cloud-config
apt:
@@ -16,5 +22,8 @@ collect_scripts:
apt-key: |
#!/bin/bash
apt-key finger
+ sources_full: |
+ #!/bin/bash
+ cat /etc/apt/sources.list
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/configs/modules/apt_pipelining_disable.yaml b/tests/cloud_tests/configs/modules/apt_pipelining_disable.yaml
index 5fa0cee9..bd9b5d08 100644
--- a/tests/cloud_tests/configs/modules/apt_pipelining_disable.yaml
+++ b/tests/cloud_tests/configs/modules/apt_pipelining_disable.yaml
@@ -1,6 +1,8 @@
#
# Disable apt pipelining value
#
+required_features:
+ - apt
cloud_config: |
#cloud-config
apt:
diff --git a/tests/cloud_tests/configs/modules/apt_pipelining_os.yaml b/tests/cloud_tests/configs/modules/apt_pipelining_os.yaml
index 87d183e7..cbed3ba3 100644
--- a/tests/cloud_tests/configs/modules/apt_pipelining_os.yaml
+++ b/tests/cloud_tests/configs/modules/apt_pipelining_os.yaml
@@ -1,6 +1,8 @@
#
# Set apt pipelining value to OS
#
+required_features:
+ - apt
cloud_config: |
#cloud-config
apt:
diff --git a/tests/cloud_tests/configs/modules/byobu.yaml b/tests/cloud_tests/configs/modules/byobu.yaml
index fd648c77..a9aa1f3f 100644
--- a/tests/cloud_tests/configs/modules/byobu.yaml
+++ b/tests/cloud_tests/configs/modules/byobu.yaml
@@ -1,6 +1,8 @@
#
# Install and enable byobu system wide and default user
#
+required_features:
+ - byobu
cloud_config: |
#cloud-config
byobu_by_default: enable
diff --git a/tests/cloud_tests/configs/modules/keys_to_console.yaml b/tests/cloud_tests/configs/modules/keys_to_console.yaml
index a90e42c1..5d86e739 100644
--- a/tests/cloud_tests/configs/modules/keys_to_console.yaml
+++ b/tests/cloud_tests/configs/modules/keys_to_console.yaml
@@ -1,6 +1,8 @@
#
# Hide printing of ssh key and fingerprints for specific keys
#
+required_features:
+ - syslog
cloud_config: |
#cloud-config
ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
diff --git a/tests/cloud_tests/configs/modules/landscape.yaml b/tests/cloud_tests/configs/modules/landscape.yaml
index e6f4955a..ed2c37c4 100644
--- a/tests/cloud_tests/configs/modules/landscape.yaml
+++ b/tests/cloud_tests/configs/modules/landscape.yaml
@@ -4,6 +4,8 @@
# 2016-11-17: Disabled due to this not working
#
enabled: false
+required_features:
+ - landscape
cloud_config: |
#cloud-config
landscape:
diff --git a/tests/cloud_tests/configs/modules/locale.yaml b/tests/cloud_tests/configs/modules/locale.yaml
index af5ad636..e01518a1 100644
--- a/tests/cloud_tests/configs/modules/locale.yaml
+++ b/tests/cloud_tests/configs/modules/locale.yaml
@@ -1,6 +1,9 @@
#
# Set locale to non-default option and verify
#
+required_features:
+ - engb_locale
+ - locale_gen
cloud_config: |
#cloud-config
locale: en_GB.UTF-8
diff --git a/tests/cloud_tests/configs/modules/lxd_bridge.yaml b/tests/cloud_tests/configs/modules/lxd_bridge.yaml
index 568bb700..e6b7e76a 100644
--- a/tests/cloud_tests/configs/modules/lxd_bridge.yaml
+++ b/tests/cloud_tests/configs/modules/lxd_bridge.yaml
@@ -1,6 +1,8 @@
#
# LXD configured with directory backend and IPv4 bridge
#
+required_features:
+ - lxd
cloud_config: |
#cloud-config
lxd:
diff --git a/tests/cloud_tests/configs/modules/lxd_dir.yaml b/tests/cloud_tests/configs/modules/lxd_dir.yaml
index 99b92195..f93a3fa7 100644
--- a/tests/cloud_tests/configs/modules/lxd_dir.yaml
+++ b/tests/cloud_tests/configs/modules/lxd_dir.yaml
@@ -1,6 +1,8 @@
#
# LXD configured with directory backend
#
+required_features:
+ - lxd
cloud_config: |
#cloud-config
lxd:
diff --git a/tests/cloud_tests/configs/modules/ntp.yaml b/tests/cloud_tests/configs/modules/ntp.yaml
index d0941578..fbef431b 100644
--- a/tests/cloud_tests/configs/modules/ntp.yaml
+++ b/tests/cloud_tests/configs/modules/ntp.yaml
@@ -7,14 +7,15 @@ cloud_config: |
pools: {}
servers: {}
collect_scripts:
- ntp_installed_empty: |
+ ntp_installed: |
#!/bin/bash
- dpkg -l | grep ntp | wc -l
+ ntpd --version > /dev/null 2>&1
+ echo $?
ntp_conf_dist_empty: |
#!/bin/bash
ls /etc/ntp.conf.dist | wc -l
- ntp_conf_empty: |
+ ntp_conf_pool_list: |
#!/bin/bash
- grep '^pool' /etc/ntp.conf
+ grep 'pool.ntp.org' /etc/ntp.conf | grep -v ^#
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/configs/modules/ntp_pools.yaml b/tests/cloud_tests/configs/modules/ntp_pools.yaml
index e040cc32..3a93faa2 100644
--- a/tests/cloud_tests/configs/modules/ntp_pools.yaml
+++ b/tests/cloud_tests/configs/modules/ntp_pools.yaml
@@ -1,6 +1,11 @@
#
# NTP config using specific pools
#
+# NOTE: lsb_release is listed here because, with a recent cloud-init deb in
+# which (LP: 1628337) is resolved, cloud-init will attempt to configure
+# archives. this fails without lsb_release, as UNAVAILABLE is used for $RELEASE
+required_features:
+ - lsb_release
cloud_config: |
#cloud-config
ntp:
@@ -11,7 +16,8 @@ cloud_config: |
collect_scripts:
ntp_installed_pools: |
#!/bin/bash
- dpkg -l | grep ntp | wc -l
+ ntpd --version > /dev/null 2>&1
+ echo $?
ntp_conf_dist_pools: |
#!/bin/bash
ls /etc/ntp.conf.dist | wc -l
diff --git a/tests/cloud_tests/configs/modules/ntp_servers.yaml b/tests/cloud_tests/configs/modules/ntp_servers.yaml
index e0564a03..d59d45a8 100644
--- a/tests/cloud_tests/configs/modules/ntp_servers.yaml
+++ b/tests/cloud_tests/configs/modules/ntp_servers.yaml
@@ -1,6 +1,8 @@
#
# NTP config using specific servers
#
+required_features:
+ - lsb_release
cloud_config: |
#cloud-config
ntp:
@@ -10,7 +12,8 @@ cloud_config: |
collect_scripts:
ntp_installed_servers: |
#!/bin/sh
- dpkg -l | grep -c ntp
+ ntpd --version > /dev/null 2>&1
+ echo $?
ntp_conf_dist_servers: |
#!/bin/sh
cat /etc/ntp.conf.dist | wc -l
diff --git a/tests/cloud_tests/configs/modules/package_update_upgrade_install.yaml b/tests/cloud_tests/configs/modules/package_update_upgrade_install.yaml
index d027d540..71d24b83 100644
--- a/tests/cloud_tests/configs/modules/package_update_upgrade_install.yaml
+++ b/tests/cloud_tests/configs/modules/package_update_upgrade_install.yaml
@@ -1,6 +1,17 @@
#
# Update/upgrade via apt and then install a pair of packages
#
+# NOTE: this should not require the apt feature; use 'which' rather than 'dpkg -l'
+# NOTE: the testcase for this looks for the command in history.log as
+# /usr/bin/apt-get..., which is not how it always appears. it should
+# instead look for just apt-get...
+# NOTE: this testcase should not require 'apt_up_out', and should look for a
+# call to 'apt-get upgrade' or 'apt-get dist-upgrade' in cloud-init.log
+# rather than 'Calculating upgrade...' in output
+required_features:
+ - apt
+ - apt_hist_fmt
+ - apt_up_out
cloud_config: |
#cloud-config
packages:
diff --git a/tests/cloud_tests/configs/modules/set_hostname.yaml b/tests/cloud_tests/configs/modules/set_hostname.yaml
index 5aae1506..c96344cf 100644
--- a/tests/cloud_tests/configs/modules/set_hostname.yaml
+++ b/tests/cloud_tests/configs/modules/set_hostname.yaml
@@ -1,6 +1,8 @@
#
# Set the hostname and update /etc/hosts
#
+required_features:
+ - hostname
cloud_config: |
#cloud-config
hostname: myhostname
diff --git a/tests/cloud_tests/configs/modules/set_hostname_fqdn.yaml b/tests/cloud_tests/configs/modules/set_hostname_fqdn.yaml
index 0014c197..daf75931 100644
--- a/tests/cloud_tests/configs/modules/set_hostname_fqdn.yaml
+++ b/tests/cloud_tests/configs/modules/set_hostname_fqdn.yaml
@@ -1,6 +1,8 @@
#
# Set the hostname and update /etc/hosts
#
+required_features:
+ - hostname
cloud_config: |
#cloud-config
manage_etc_hosts: true
diff --git a/tests/cloud_tests/configs/modules/set_password.yaml b/tests/cloud_tests/configs/modules/set_password.yaml
index 8fa46d9f..04d7c58a 100644
--- a/tests/cloud_tests/configs/modules/set_password.yaml
+++ b/tests/cloud_tests/configs/modules/set_password.yaml
@@ -1,6 +1,8 @@
#
# Set password of default user
#
+required_features:
+ - ubuntu_user
cloud_config: |
#cloud-config
password: password
diff --git a/tests/cloud_tests/configs/modules/set_password_expire.yaml b/tests/cloud_tests/configs/modules/set_password_expire.yaml
index 926731f0..789604b0 100644
--- a/tests/cloud_tests/configs/modules/set_password_expire.yaml
+++ b/tests/cloud_tests/configs/modules/set_password_expire.yaml
@@ -1,6 +1,8 @@
#
# Expire password for all users
#
+required_features:
+ - sshd
cloud_config: |
#cloud-config
chpasswd: { expire: True }
diff --git a/tests/cloud_tests/configs/modules/snappy.yaml b/tests/cloud_tests/configs/modules/snappy.yaml
index 0e7dc852..43f93295 100644
--- a/tests/cloud_tests/configs/modules/snappy.yaml
+++ b/tests/cloud_tests/configs/modules/snappy.yaml
@@ -1,6 +1,8 @@
#
# Install snappy
#
+required_features:
+ - snap
cloud_config: |
#cloud-config
snappy:
diff --git a/tests/cloud_tests/configs/modules/ssh_auth_key_fingerprints_disable.yaml b/tests/cloud_tests/configs/modules/ssh_auth_key_fingerprints_disable.yaml
index 33943bdd..746653ec 100644
--- a/tests/cloud_tests/configs/modules/ssh_auth_key_fingerprints_disable.yaml
+++ b/tests/cloud_tests/configs/modules/ssh_auth_key_fingerprints_disable.yaml
@@ -1,6 +1,8 @@
#
# Disable fingerprint printing
#
+required_features:
+ - syslog
cloud_config: |
#cloud-config
ssh_genkeytypes: []
diff --git a/tests/cloud_tests/configs/modules/ssh_auth_key_fingerprints_enable.yaml b/tests/cloud_tests/configs/modules/ssh_auth_key_fingerprints_enable.yaml
index 4c970778..9f5dc34a 100644
--- a/tests/cloud_tests/configs/modules/ssh_auth_key_fingerprints_enable.yaml
+++ b/tests/cloud_tests/configs/modules/ssh_auth_key_fingerprints_enable.yaml
@@ -1,6 +1,11 @@
#
# Print auth keys with different hash than md5
#
+# NOTE: the testcase checks for '256 SHA256:.*(ECDSA)' in the output; on
+# trusty this fails, as the output line reads '256:.*(ECDSA)'
+required_features:
+ - syslog
+ - ssh_key_fmt
cloud_config: |
#cloud-config
ssh_genkeytypes:
diff --git a/tests/cloud_tests/configs/modules/ssh_import_id.yaml b/tests/cloud_tests/configs/modules/ssh_import_id.yaml
index 6e5a1635..b62d3f69 100644
--- a/tests/cloud_tests/configs/modules/ssh_import_id.yaml
+++ b/tests/cloud_tests/configs/modules/ssh_import_id.yaml
@@ -1,6 +1,9 @@
#
# Import a user's ssh key via gh or lp
#
+required_features:
+ - ubuntu_user
+ - sudo
cloud_config: |
#cloud-config
ssh_import_id:
diff --git a/tests/cloud_tests/configs/modules/ssh_keys_generate.yaml b/tests/cloud_tests/configs/modules/ssh_keys_generate.yaml
index 637d7835..659fd939 100644
--- a/tests/cloud_tests/configs/modules/ssh_keys_generate.yaml
+++ b/tests/cloud_tests/configs/modules/ssh_keys_generate.yaml
@@ -1,6 +1,8 @@
#
# SSH keys generated using cloud-init
#
+required_features:
+ - ubuntu_user
cloud_config: |
#cloud-config
ssh_genkeytypes:
diff --git a/tests/cloud_tests/configs/modules/ssh_keys_provided.yaml b/tests/cloud_tests/configs/modules/ssh_keys_provided.yaml
index 25df6452..5ceb3623 100644
--- a/tests/cloud_tests/configs/modules/ssh_keys_provided.yaml
+++ b/tests/cloud_tests/configs/modules/ssh_keys_provided.yaml
@@ -2,6 +2,9 @@
# SSH keys provided via cloud config
#
enabled: False
+required_features:
+ - ubuntu_user
+ - sudo
cloud_config: |
#cloud-config
disable_root: false
diff --git a/tests/cloud_tests/configs/modules/timezone.yaml b/tests/cloud_tests/configs/modules/timezone.yaml
index 8c96ed47..5112aa9f 100644
--- a/tests/cloud_tests/configs/modules/timezone.yaml
+++ b/tests/cloud_tests/configs/modules/timezone.yaml
@@ -1,6 +1,8 @@
#
# Set system timezone
#
+required_features:
+ - daylight_time
cloud_config: |
#cloud-config
timezone: US/Aleutian
diff --git a/tests/cloud_tests/configs/modules/user_groups.yaml b/tests/cloud_tests/configs/modules/user_groups.yaml
index 92655958..71cc9da3 100644
--- a/tests/cloud_tests/configs/modules/user_groups.yaml
+++ b/tests/cloud_tests/configs/modules/user_groups.yaml
@@ -1,6 +1,8 @@
#
# Create groups and users with various options
#
+required_features:
+ - ubuntu_user
cloud_config: |
#cloud-config
# Add groups to the system
diff --git a/tests/cloud_tests/configs/modules/write_files.yaml b/tests/cloud_tests/configs/modules/write_files.yaml
index 4bb2991a..ce936b7b 100644
--- a/tests/cloud_tests/configs/modules/write_files.yaml
+++ b/tests/cloud_tests/configs/modules/write_files.yaml
@@ -1,6 +1,10 @@
#
# Write various file types
#
+# NOTE: on trusty 'file' has an output formatting error for binary files and
+# has 2 spaces in 'LSB executable', which causes a failure here
+required_features:
+ - no_file_fmt_e
cloud_config: |
#cloud-config
write_files:
diff --git a/tests/cloud_tests/images/__init__.py b/tests/cloud_tests/images/__init__.py
index b27d6931..106c59f3 100644
--- a/tests/cloud_tests/images/__init__.py
+++ b/tests/cloud_tests/images/__init__.py
@@ -1,11 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Main init."""
+
def get_image(platform, config):
- """
- get image from platform object using os_name, looking up img_conf in main
- config file
- """
+ """Get image from platform object using os_name."""
return platform.get_image(config)
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/images/base.py b/tests/cloud_tests/images/base.py
index 394b11ff..0a1e0563 100644
--- a/tests/cloud_tests/images/base.py
+++ b/tests/cloud_tests/images/base.py
@@ -1,65 +1,69 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Base class for images."""
+
class Image(object):
- """
- Base class for images
- """
+ """Base class for images."""
+
platform_name = None
- def __init__(self, name, config, platform):
- """
- setup
+ def __init__(self, platform, config):
+ """Set up image.
+
+ @param platform: platform object
+ @param config: image configuration
"""
- self.name = name
- self.config = config
self.platform = platform
+ self.config = config
def __str__(self):
- """
- a brief description of the image
- """
+ """A brief description of the image."""
return '-'.join((self.properties['os'], self.properties['release']))
@property
def properties(self):
- """
- {} containing: 'arch', 'os', 'version', 'release'
- """
+ """{} containing: 'arch', 'os', 'version', 'release'."""
raise NotImplementedError
- # FIXME: instead of having execute and push_file and other instance methods
- # here which pass through to a hidden instance, it might be better
- # to expose an instance that the image can be modified through
- def execute(self, command, stdin=None, stdout=None, stderr=None, env={}):
+ @property
+ def features(self):
+ """Feature flags supported by this image.
+
+ @return_value: list of feature names
"""
- execute command in image, modifying image
+ return [k for k, v in self.config.get('features', {}).items() if v]
+
+ @property
+ def setup_overrides(self):
+ """Setup options that need to be overridden for the image.
+
+ @return_value: dictionary to update args with
"""
+ # NOTE: more sophisticated options may be required at some point
+ return self.config.get('setup_overrides', {})
+
+ def execute(self, *args, **kwargs):
+ """Execute command in image, modifying image."""
raise NotImplementedError
def push_file(self, local_path, remote_path):
- """
- copy file at 'local_path' to instance at 'remote_path', modifying image
- """
+ """Copy file at 'local_path' to instance at 'remote_path'."""
raise NotImplementedError
- def run_script(self, script):
- """
- run script in image, modifying image
- return_value: script output
+ def run_script(self, *args, **kwargs):
+ """Run script in image, modifying image.
+
+ @return_value: script output
"""
raise NotImplementedError
def snapshot(self):
- """
- create snapshot of image, block until done
- """
+ """Create snapshot of image, block until done."""
raise NotImplementedError
def destroy(self):
- """
- clean up data associated with image
- """
+ """Clean up data associated with image."""
pass
# vi: ts=4 expandtab
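A hedged sketch of the 'features' property semantics above; the config dict
here is invented, shaped like the output of config.load_os_config():

    config = {'features': {'apt': True, 'snap': False, 'lxd': True}}
    features = [k for k, v in config.get('features', {}).items() if v]
    # features contains 'apt' and 'lxd'; flags set to false are filtered out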
diff --git a/tests/cloud_tests/images/lxd.py b/tests/cloud_tests/images/lxd.py
index 7a416141..fd4e93c2 100644
--- a/tests/cloud_tests/images/lxd.py
+++ b/tests/cloud_tests/images/lxd.py
@@ -1,43 +1,67 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""LXD Image Base Class."""
+
+import os
+import shutil
+import tempfile
+
+from cloudinit import util as c_util
from tests.cloud_tests.images import base
from tests.cloud_tests.snapshots import lxd as lxd_snapshot
+from tests.cloud_tests import util
class LXDImage(base.Image):
- """
- LXD backed image
- """
+ """LXD backed image."""
+
platform_name = "lxd"
- def __init__(self, name, config, platform, pylxd_image):
- """
- setup
+ def __init__(self, platform, config, pylxd_image):
+ """Set up image.
+
+ @param platform: platform object
+ @param config: image configuration
"""
- self.platform = platform
- self._pylxd_image = pylxd_image
+ self.modified = False
self._instance = None
- super(LXDImage, self).__init__(name, config, platform)
+ self._pylxd_image = None
+ self.pylxd_image = pylxd_image
+ super(LXDImage, self).__init__(platform, config)
@property
def pylxd_image(self):
- self._pylxd_image.sync()
+ """Property function."""
+ if self._pylxd_image:
+ self._pylxd_image.sync()
return self._pylxd_image
+ @pylxd_image.setter
+ def pylxd_image(self, pylxd_image):
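+ """Replace the wrapped pylxd image, cleaning up the old one.
+
+ Destroys any instance built on the previous image, and deletes the
+ previous image unless it is an unmodified cached base image.
+ """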
+ if self._instance:
+ self._instance.destroy()
+ self._instance = None
+ if (self._pylxd_image and
+ (self._pylxd_image is not pylxd_image) and
+ (not self.config.get('cache_base_image') or self.modified)):
+ self._pylxd_image.delete(wait=True)
+ self.modified = False
+ self._pylxd_image = pylxd_image
+
@property
def instance(self):
+ """Property function."""
if not self._instance:
self._instance = self.platform.launch_container(
- image=self.pylxd_image.fingerprint,
- image_desc=str(self), use_desc='image-modification')
- self._instance.start(wait=True, wait_time=self.config.get('timeout'))
+ self.properties, self.config, self.features,
+ use_desc='image-modification', image_desc=str(self),
+ image=self.pylxd_image.fingerprint)
+ self._instance.start()
return self._instance
@property
def properties(self):
- """
- {} containing: 'arch', 'os', 'version', 'release'
- """
+ """{} containing: 'arch', 'os', 'version', 'release'."""
properties = self.pylxd_image.properties
return {
'arch': properties.get('architecture'),
@@ -46,47 +70,121 @@ class LXDImage(base.Image):
'release': properties.get('release'),
}
- def execute(self, *args, **kwargs):
+ def export_image(self, output_dir):
+ """Export image from lxd image store to (split) tarball on disk.
+
+ @param output_dir: dir to store tarballs in
+ @return_value: tuple of path to metadata tarball and rootfs tarball
"""
- execute command in image, modifying image
+ # pylxd's image export feature doesn't do split exports, so use cmdline
+ c_util.subp(['lxc', 'image', 'export', self.pylxd_image.fingerprint,
+ output_dir], capture=True)
+ tarballs = [p for p in os.listdir(output_dir) if p.endswith('tar.xz')]
+ metadata = os.path.join(
+ output_dir, next(p for p in tarballs if p.startswith('meta-')))
+ rootfs = os.path.join(
+ output_dir, next(p for p in tarballs if not p.startswith('meta-')))
+ return (metadata, rootfs)
+
+ def import_image(self, metadata, rootfs):
+ """Import image to lxd image store from (split) tarball on disk.
+
+ Note, this will replace and delete the current pylxd_image
+
+ @param metadata: metadata tarball
+ @param rootfs: rootfs tarball
+ @return_value: imported image fingerprint
+ """
+ alias = util.gen_instance_name(
+ image_desc=str(self), use_desc='update-metadata')
+ c_util.subp(['lxc', 'image', 'import', metadata, rootfs,
+ '--alias', alias], capture=True)
+ self.pylxd_image = self.platform.query_image_by_alias(alias)
+ return self.pylxd_image.fingerprint
+
+ def update_templates(self, template_config, template_data):
+ """Update the image's template configuration.
+
+ Note, this will replace and delete the current pylxd_image
+
+ @param template_config: config overrides for template metadata
+ @param template_data: template data to place into templates/
"""
+ # set up tmp files
+ export_dir = tempfile.mkdtemp(prefix='cloud_test_util_')
+ extract_dir = tempfile.mkdtemp(prefix='cloud_test_util_')
+ new_metadata = os.path.join(export_dir, 'new-meta.tar.xz')
+ metadata_yaml = os.path.join(extract_dir, 'metadata.yaml')
+ template_dir = os.path.join(extract_dir, 'templates')
+
+ try:
+ # extract old data
+ (metadata, rootfs) = self.export_image(export_dir)
+ shutil.unpack_archive(metadata, extract_dir)
+
+ # update metadata
+ metadata = c_util.read_conf(metadata_yaml)
+ templates = metadata.get('templates', {})
+ templates.update(template_config)
+ metadata['templates'] = templates
+ util.yaml_dump(metadata, metadata_yaml)
+
+ # write out template files
+ for name, content in template_data.items():
+ path = os.path.join(template_dir, name)
+ c_util.write_file(path, content)
+
+ # store new data, mark new image as modified
+ util.flat_tar(new_metadata, extract_dir)
+ self.import_image(new_metadata, rootfs)
+ self.modified = True
+
+ finally:
+ # remove tmpfiles
+ shutil.rmtree(export_dir)
+ shutil.rmtree(extract_dir)
+
+ def execute(self, *args, **kwargs):
+ """Execute command in image, modifying image."""
return self.instance.execute(*args, **kwargs)
def push_file(self, local_path, remote_path):
- """
- copy file at 'local_path' to instance at 'remote_path', modifying image
- """
+ """Copy file at 'local_path' to instance at 'remote_path'."""
return self.instance.push_file(local_path, remote_path)
- def run_script(self, script):
- """
- run script in image, modifying image
- return_value: script output
+ def run_script(self, *args, **kwargs):
+ """Run script in image, modifying image.
+
+ @return_value: script output
"""
- return self.instance.run_script(script)
+ return self.instance.run_script(*args, **kwargs)
def snapshot(self):
- """
- create snapshot of image, block until done
- """
- # clone current instance, start and freeze clone
+ """Create snapshot of image, block until done."""
+ # get empty user data to pass in to instance
+ # if overrides for user data provided, use them
+ empty_userdata = util.update_user_data(
+ {}, self.config.get('user_data_overrides', {}))
+ conf = {'user.user-data': empty_userdata}
+ # clone current instance
instance = self.platform.launch_container(
+ self.properties, self.config, self.features,
container=self.instance.name, image_desc=str(self),
- use_desc='snapshot')
- instance.start(wait=True, wait_time=self.config.get('timeout'))
+ use_desc='snapshot', container_config=conf)
+ # wait for cloud-init before boot_clean_script is run to ensure
+ # /var/lib/cloud is removed cleanly
+ instance.start(wait=True, wait_for_cloud_init=True)
if self.config.get('boot_clean_script'):
instance.run_script(self.config.get('boot_clean_script'))
+ # freeze current instance and return snapshot
instance.freeze()
return lxd_snapshot.LXDSnapshot(
- self.properties, self.config, self.platform, instance)
+ self.platform, self.properties, self.config,
+ self.features, instance)
def destroy(self):
- """
- clean up data associated with image
- """
- if self._instance:
- self._instance.destroy()
- self.pylxd_image.delete(wait=True)
+ """Clean up data associated with image."""
+ self.pylxd_image = None
super(LXDImage, self).destroy()
# vi: ts=4 expandtab
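For context, a rough sketch of the metadata edit at the heart of
update_templates(); it assumes the metadata tarball has already been exported
and unpacked into the working directory, and the override value is invented:

    from cloudinit import util as c_util
    from tests.cloud_tests import util

    template_config = {'/var/lib/cloud/seed/nocloud-net/meta-data':
                       {'when': ['create', 'copy'],
                        'template': 'cloud-init-meta.tpl'}}
    metadata = c_util.read_conf('metadata.yaml')  # from the meta tarball
    templates = metadata.get('templates', {})
    templates.update(template_config)
    metadata['templates'] = templates
    util.yaml_dump(metadata, 'metadata.yaml')  # then repack and import_image()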
diff --git a/tests/cloud_tests/instances/__init__.py b/tests/cloud_tests/instances/__init__.py
index 85bea99f..fc2e9cbc 100644
--- a/tests/cloud_tests/instances/__init__.py
+++ b/tests/cloud_tests/instances/__init__.py
@@ -1,10 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Main init."""
+
def get_instance(snapshot, *args, **kwargs):
- """
- get instance from snapshot
- """
+ """Get instance from snapshot."""
return snapshot.launch(*args, **kwargs)
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/instances/base.py b/tests/cloud_tests/instances/base.py
index 9559d286..959e9cce 100644
--- a/tests/cloud_tests/instances/base.py
+++ b/tests/cloud_tests/instances/base.py
@@ -1,120 +1,148 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import os
-import uuid
+"""Base instance."""
class Instance(object):
- """
- Base instance object
- """
+ """Base instance object."""
+
platform_name = None
- def __init__(self, name):
- """
- setup
+ def __init__(self, platform, name, properties, config, features):
+ """Set up instance.
+
+ @param platform: platform object
+ @param name: hostname of instance
+ @param properties: image properties
+ @param config: image config
+ @param features: supported feature flags
"""
+ self.platform = platform
self.name = name
+ self.properties = properties
+ self.config = config
+ self.features = features
- def execute(self, command, stdin=None, stdout=None, stderr=None, env={}):
- """
- command: the command to execute as root inside the image
- stdin, stderr, stdout: file handles
- env: environment variables
+ def execute(self, command, stdout=None, stderr=None, env={},
+ rcs=None, description=None):
+ """Execute command in instance, recording output, error and exit code.
- Execute assumes functional networking and execution as root with the
+ Assumes functional networking and execution as root with the
target filesystem being available at /.
- return_value: tuple containing stdout data, stderr data, exit code
+ @param command: the command to execute as root inside the image
+ @param stdout, stderr: file handles to write output and error to
+ @param env: environment variables
+ @param rcs: allowed return codes from command
+ @param description: purpose of command
+ @return_value: tuple containing stdout data, stderr data, exit code
"""
raise NotImplementedError
- def read_data(self, remote_path, encode=False):
- """
- read_data from instance filesystem
- remote_path: path in instance
- decode: return as string
- return_value: data as str or bytes
+ def read_data(self, remote_path, decode=False):
+ """Read data from instance filesystem.
+
+ @param remote_path: path in instance
+ @param decode: return as string
+ @return_value: data as str or bytes
"""
raise NotImplementedError
def write_data(self, remote_path, data):
- """
- write data to instance filesystem
- remote_path: path in instance
- data: data to write, either str or bytes
+ """Write data to instance filesystem.
+
+ @param remote_path: path in instance
+ @param data: data to write, either str or bytes
"""
raise NotImplementedError
def pull_file(self, remote_path, local_path):
- """
- copy file at 'remote_path', from instance to 'local_path'
+ """Copy file at 'remote_path', from instance to 'local_path'.
+
+ @param remote_path: path on remote instance
+ @param local_path: path on local instance
"""
with open(local_path, 'wb') as fp:
- fp.write(self.read_data(remote_path), encode=True)
+ fp.write(self.read_data(remote_path))
def push_file(self, local_path, remote_path):
- """
- copy file at 'local_path' to instance at 'remote_path'
+ """Copy file at 'local_path' to instance at 'remote_path'.
+
+ @param local_path: path on local instance
+ @param remote_path: path on remote instance
"""
with open(local_path, 'rb') as fp:
self.write_data(remote_path, fp.read())
- def run_script(self, script):
+ def run_script(self, script, rcs=None, description=None):
+ """Run script in target and return stdout.
+
+ @param script: script contents
+ @param rcs: allowed return codes from script
+ @param description: purpose of script
+ @return_value: stdout from script
"""
- run script in target and return stdout
+ script_path = self.tmpfile()
+ try:
+ self.write_data(script_path, script)
+ return self.execute(
+ ['/bin/bash', script_path], rcs=rcs, description=description)
+ finally:
+ self.execute(['rm', script_path], rcs=rcs)
+
+ def tmpfile(self):
+ """Get a tmp file in the target.
+
+ @return_value: path to new file in target
"""
- script_path = os.path.join('/tmp', str(uuid.uuid1()))
- self.write_data(script_path, script)
- (out, err, exit_code) = self.execute(['/bin/bash', script_path])
- return out
+ return self.execute(['mktemp'])[0].strip()
def console_log(self):
- """
- return_value: bytes of this instance’s console
+ """Instance console.
+
+ @return_value: bytes of this instance’s console
"""
raise NotImplementedError
def reboot(self, wait=True):
- """
- reboot instance
- """
+ """Reboot instance."""
raise NotImplementedError
def shutdown(self, wait=True):
- """
- shutdown instance
- """
+ """Shutdown instance."""
raise NotImplementedError
- def start(self, wait=True):
- """
- start instance
- """
+ def start(self, wait=True, wait_for_cloud_init=False):
+ """Start instance."""
raise NotImplementedError
def destroy(self):
- """
- clean up instance
- """
+ """Clean up instance."""
pass
- def _wait_for_cloud_init(self, wait_time):
- """
- wait until system has fully booted and cloud-init has finished
+ def _wait_for_system(self, wait_for_cloud_init):
+ """Wait until system has fully booted and cloud-init has finished.
+
+ @param wait_for_cloud_init: if true, also wait for cloud-init to finish
+ @return_value: None, may raise OSError if boot_timeout exceeded
"""
- if not wait_time:
- return
-
- found_msg = 'found'
- cmd = ('for ((i=0;i<{wait};i++)); do [ -f "{file}" ] && '
- '{{ echo "{msg}";break; }} || sleep 1; done').format(
- file='/run/cloud-init/result.json',
- wait=wait_time, msg=found_msg)
-
- (out, err, exit) = self.execute(['/bin/bash', '-c', cmd])
- if out.strip() != found_msg:
- raise OSError('timeout: after {}s, cloud-init has not started'
- .format(wait_time))
+ def clean_test(test):
+ """Clean formatting for system ready test testcase."""
+ return ' '.join(l for l in test.strip().splitlines()
+ if not l.lstrip().startswith('#'))
+
+ time = self.config['boot_timeout']
+ tests = [self.config['system_ready_script']]
+ if wait_for_cloud_init:
+ tests.append(self.config['cloud_init_ready_script'])
+
+ formatted_tests = ' && '.join(clean_test(t) for t in tests)
+ test_cmd = ('for ((i=0;i<{time};i++)); do {test} && exit 0; sleep 1; '
+ 'done; exit 1;').format(time=time, test=formatted_tests)
+ cmd = ['/bin/bash', '-c', test_cmd]
+
+ if self.execute(cmd, rcs=(0, 1))[-1] != 0:
+ raise OSError('timeout: after {}s system not started'.format(time))
+
# vi: ts=4 expandtab
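For illustration, the polling command _wait_for_system() assembles, with an
invented boot_timeout of 120 and only the cloud-init readiness test:

    time = 120
    tests = ["[ -f '/run/cloud-init/result.json' ]"]
    test_cmd = ('for ((i=0;i<{time};i++)); do {test} && exit 0; sleep 1; '
                'done; exit 1;').format(time=time, test=' && '.join(tests))
    cmd = ['/bin/bash', '-c', test_cmd]
    # the shell loop polls once per second, exiting 0 when ready, 1 on timeout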
diff --git a/tests/cloud_tests/instances/lxd.py b/tests/cloud_tests/instances/lxd.py
index f0aa1214..b9c2cc6b 100644
--- a/tests/cloud_tests/instances/lxd.py
+++ b/tests/cloud_tests/instances/lxd.py
@@ -1,115 +1,135 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Base LXD instance."""
+
from tests.cloud_tests.instances import base
+from tests.cloud_tests import util
class LXDInstance(base.Instance):
- """
- LXD container backed instance
- """
+ """LXD container backed instance."""
+
platform_name = "lxd"
- def __init__(self, name, platform, pylxd_container):
- """
- setup
+ def __init__(self, platform, name, properties, config, features,
+ pylxd_container):
+ """Set up instance.
+
+ @param platform: platform object
+ @param name: hostname of instance
+ @param properties: image properties
+ @param config: image config
+ @param features: supported feature flags
"""
- self.platform = platform
self._pylxd_container = pylxd_container
- super(LXDInstance, self).__init__(name)
+ super(LXDInstance, self).__init__(
+ platform, name, properties, config, features)
@property
def pylxd_container(self):
+ """Property function."""
self._pylxd_container.sync()
return self._pylxd_container
- def execute(self, command, stdin=None, stdout=None, stderr=None, env={}):
- """
- command: the command to execute as root inside the image
- stdin, stderr, stdout: file handles
- env: environment variables
+ def execute(self, command, stdout=None, stderr=None, env={},
+ rcs=None, description=None):
+ """Execute command in instance, recording output, error and exit code.
- Execute assumes functional networking and execution as root with the
+ Assumes functional networking and execution as root with the
target filesystem being available at /.
- return_value: tuple containing stdout data, stderr data, exit code
+ @param command: the command to execute as root inside the image
+ @param stdout: file handler to write output
+ @param stderr: file handler to write error
+ @param env: environment variables
+ @param rcs: allowed return codes from command
+ @param description: purpose of command
+ @return_value: tuple containing stdout data, stderr data, exit code
"""
- # TODO: the pylxd api handler for container.execute needs to be
- # extended to properly pass in stdin
- # TODO: the pylxd api handler for container.execute needs to be
- # extended to get the return code, for now just use 0
+ # ensure instance is running and execute the command
self.start()
- if stdin:
- raise NotImplementedError
res = self.pylxd_container.execute(command, environment=env)
- for (f, data) in (i for i in zip((stdout, stderr), res) if i[0]):
- f.write(data)
- return res + (0,)
+
+ # get out, exit and err from pylxd return
+ if hasattr(res, 'exit_code'):
+ # pylxd 2.2 returns ContainerExecuteResult, named tuple of
+ # (exit_code, out, err)
+ (exit, out, err) = res
+ else:
+ # pylxd 2.1.3 and earlier only return out and err, no exit
+ # LOG.warning('using pylxd version < 2.2')
+ (out, err) = res
+ exit = 0
+
+ # write data to file descriptors if needed
+ if stdout:
+ stdout.write(out)
+ if stderr:
+ stderr.write(err)
+
+ # if the command exited with a code not allowed in rcs, then fail
+ if exit not in (rcs if rcs else (0,)):
+ error_desc = ('Failed command to: {}'.format(description)
+ if description else None)
+ raise util.InTargetExecuteError(
+ out, err, exit, command, self.name, error_desc)
+
+ return (out, err, exit)
def read_data(self, remote_path, decode=False):
- """
- read data from instance filesystem
- remote_path: path in instance
- decode: return as string
- return_value: data as str or bytes
+ """Read data from instance filesystem.
+
+ @param remote_path: path in instance
+ @param decode: return as string
+ @return_value: data as str or bytes
"""
data = self.pylxd_container.files.get(remote_path)
return data.decode() if decode and isinstance(data, bytes) else data
def write_data(self, remote_path, data):
- """
- write data to instance filesystem
- remote_path: path in instance
- data: data to write, either str or bytes
+ """Write data to instance filesystem.
+
+ @param remote_path: path in instance
+ @param data: data to write, either str or bytes
"""
self.pylxd_container.files.put(remote_path, data)
def console_log(self):
- """
- return_value: bytes of this instance’s console
+ """Console log.
+
+ @return_value: bytes of this instance’s console
"""
raise NotImplementedError
def reboot(self, wait=True):
- """
- reboot instance
- """
+ """Reboot instance."""
self.shutdown(wait=wait)
self.start(wait=wait)
def shutdown(self, wait=True):
- """
- shutdown instance
- """
+ """Shutdown instance."""
if self.pylxd_container.status != 'Stopped':
self.pylxd_container.stop(wait=wait)
- def start(self, wait=True, wait_time=None):
- """
- start instance
- """
+ def start(self, wait=True, wait_for_cloud_init=False):
+ """Start instance."""
if self.pylxd_container.status != 'Running':
self.pylxd_container.start(wait=wait)
- if wait and isinstance(wait_time, int):
- self._wait_for_cloud_init(wait_time)
+ if wait:
+ self._wait_for_system(wait_for_cloud_init)
def freeze(self):
- """
- freeze instance
- """
+ """Freeze instance."""
if self.pylxd_container.status != 'Frozen':
self.pylxd_container.freeze(wait=True)
def unfreeze(self):
- """
- unfreeze instance
- """
+ """Unfreeze instance."""
if self.pylxd_container.status == 'Frozen':
self.pylxd_container.unfreeze(wait=True)
def destroy(self):
- """
- clean up instance
- """
+ """Clean up instance."""
self.unfreeze()
self.shutdown()
self.pylxd_container.delete(wait=True)
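A hypothetical call against the execute() contract above; 'instance' and the
command are invented, and exit codes outside 'rcs' raise
util.InTargetExecuteError rather than being returned:

    out, err, rc = instance.execute(
        ['grep', '-c', 'ubuntu', '/etc/passwd'],
        rcs=(0, 1), description='count ubuntu users')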
diff --git a/tests/cloud_tests/manage.py b/tests/cloud_tests/manage.py
index 5342612b..5f0cfd23 100644
--- a/tests/cloud_tests/manage.py
+++ b/tests/cloud_tests/manage.py
@@ -1,11 +1,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Create test cases automatically given a user_data script."""
+
+import os
+import textwrap
+
+from cloudinit import util as c_util
from tests.cloud_tests.config import VERIFY_EXT
from tests.cloud_tests import (config, util)
from tests.cloud_tests import TESTCASES_DIR
-import os
-import textwrap
_verifier_fmt = textwrap.dedent(
"""
@@ -35,29 +39,24 @@ _config_fmt = textwrap.dedent(
def write_testcase_config(args, fmt_args, testcase_file):
- """
- write the testcase config file
- """
+ """Write the testcase config file."""
testcase_config = {'enabled': args.enable, 'collect_scripts': {}}
if args.config:
testcase_config['cloud_config'] = args.config
fmt_args['config'] = util.yaml_format(testcase_config)
- util.write_file(testcase_file, _config_fmt.format(**fmt_args), omode='w')
+ c_util.write_file(testcase_file, _config_fmt.format(**fmt_args), omode='w')
def write_verifier(args, fmt_args, verifier_file):
- """
- write the verifier script
- """
+ """Write the verifier script."""
fmt_args['test_class'] = 'Test{}'.format(
- config.name_sanatize(fmt_args['test_name']).title())
- util.write_file(verifier_file, _verifier_fmt.format(**fmt_args), omode='w')
+ config.name_sanitize(fmt_args['test_name']).title())
+ c_util.write_file(verifier_file,
+ _verifier_fmt.format(**fmt_args), omode='w')
def create(args):
- """
- create a new testcase
- """
+ """Create a new testcase."""
(test_category, test_name) = args.name.split('/')
fmt_args = {'test_name': test_name, 'test_category': test_category,
'test_description': str(args.description)}
@@ -65,7 +64,7 @@ def create(args):
testcase_file = config.name_to_path(args.name)
verifier_file = os.path.join(
TESTCASES_DIR, test_category,
- config.name_sanatize(test_name) + VERIFY_EXT)
+ config.name_sanitize(test_name) + VERIFY_EXT)
write_testcase_config(args, fmt_args, testcase_file)
write_verifier(args, fmt_args, verifier_file)
diff --git a/tests/cloud_tests/platforms.yaml b/tests/cloud_tests/platforms.yaml
index 5972b32b..b91834ab 100644
--- a/tests/cloud_tests/platforms.yaml
+++ b/tests/cloud_tests/platforms.yaml
@@ -10,7 +10,55 @@ default_platform_config:
platforms:
lxd:
enabled: true
- get_image_timeout: 600
+ # overrides for image templates
+ template_overrides:
+ /var/lib/cloud/seed/nocloud-net/meta-data:
+ when:
+ - create
+ - copy
+ template: cloud-init-meta.tpl
+ /var/lib/cloud/seed/nocloud-net/network-config:
+ when:
+ - create
+ - copy
+ template: cloud-init-network.tpl
+ /var/lib/cloud/seed/nocloud-net/user-data:
+ when:
+ - create
+ - copy
+ template: cloud-init-user.tpl
+ properties:
+ default: |
+ #cloud-config
+ {}
+ /var/lib/cloud/seed/nocloud-net/vendor-data:
+ when:
+ - create
+ - copy
+ template: cloud-init-vendor.tpl
+ properties:
+ default: |
+ #cloud-config
+ {}
+ # overrides image template files
+ template_files:
+ cloud-init-meta.tpl: |
+ #cloud-config
+ instance-id: {{ container.name }}
+ local-hostname: {{ container.name }}
+ {{ config_get("user.meta-data", "") }}
+ cloud-init-network.tpl: |
+ {% if config_get("user.network-config", "") == "" %}version: 1
+ config:
+ - type: physical
+ name: eth0
+ subnets:
+ - type: {% if config_get("user.network_mode", "") == "link-local" %}manual{% else %}dhcp{% endif %}
+ control: auto{% else %}{{ config_get("user.network-config", "") }}{% endif %}
+ cloud-init-user.tpl: |
+ {{ config_get("user.user-data", properties.default) }}
+ cloud-init-vendor.tpl: |
+ {{ config_get("user.vendor-data", properties.default) }}
ec2: {}
azure: {}
diff --git a/tests/cloud_tests/platforms/__init__.py b/tests/cloud_tests/platforms/__init__.py
index f9f56035..443f6d44 100644
--- a/tests/cloud_tests/platforms/__init__.py
+++ b/tests/cloud_tests/platforms/__init__.py
@@ -1,5 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Main init."""
+
from tests.cloud_tests.platforms import lxd
PLATFORMS = {
@@ -8,9 +10,7 @@ PLATFORMS = {
def get_platform(platform_name, config):
- """
- Get the platform object for 'platform_name' and init
- """
+ """Get the platform object for 'platform_name' and init."""
platform_cls = PLATFORMS.get(platform_name)
if not platform_cls:
raise ValueError('invalid platform name: {}'.format(platform_name))
diff --git a/tests/cloud_tests/platforms/base.py b/tests/cloud_tests/platforms/base.py
index 615e2e06..28975368 100644
--- a/tests/cloud_tests/platforms/base.py
+++ b/tests/cloud_tests/platforms/base.py
@@ -1,53 +1,27 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Base platform class."""
+
class Platform(object):
- """
- Base class for platforms
- """
+ """Base class for platforms."""
+
platform_name = None
def __init__(self, config):
- """
- Set up platform
- """
+ """Set up platform."""
self.config = config
def get_image(self, img_conf):
- """
- Get image using 'img_conf', where img_conf is a dict containing all
- image configuration parameters
-
- in this dict there must be a 'platform_ident' key containing
- configuration for identifying each image on a per platform basis
-
- see implementations for get_image() for details about the contents
- of the platform's config entry
+ """Get image using specified image configuration.
- note: see 'releases' main_config.yaml for example entries
-
- img_conf: configuration for image
- return_value: cloud_tests.images instance
+ @param img_conf: configuration for image
+ @return_value: cloud_tests.images instance
"""
raise NotImplementedError
def destroy(self):
- """
- Clean up platform data
- """
+ """Clean up platform data."""
pass
- def _extract_img_platform_config(self, img_conf):
- """
- extract platform configuration for current platform from img_conf
- """
- platform_ident = img_conf.get('platform_ident')
- if not platform_ident:
- raise ValueError('invalid img_conf, missing \'platform_ident\'')
- ident = platform_ident.get(self.platform_name)
- if not ident:
- raise ValueError('img_conf: {} missing config for platform {}'
- .format(img_conf, self.platform_name))
- return ident
-
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd.py b/tests/cloud_tests/platforms/lxd.py
index 847cc549..ead0955b 100644
--- a/tests/cloud_tests/platforms/lxd.py
+++ b/tests/cloud_tests/platforms/lxd.py
@@ -1,5 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Base LXD platform."""
+
from pylxd import (Client, exceptions)
from tests.cloud_tests.images import lxd as lxd_image
@@ -11,48 +13,49 @@ DEFAULT_SSTREAMS_SERVER = "https://images.linuxcontainers.org:8443"
class LXDPlatform(base.Platform):
- """
- Lxd test platform
- """
+ """LXD test platform."""
+
platform_name = 'lxd'
def __init__(self, config):
- """
- Set up platform
- """
+ """Set up platform."""
super(LXDPlatform, self).__init__(config)
# TODO: allow configuration of remote lxd host via env variables
# set up lxd connection
self.client = Client()
def get_image(self, img_conf):
+ """Get image using specified image configuration.
+
+ @param img_conf: configuration for image
+ @return_value: cloud_tests.images instance
"""
- Get image
- img_conf: dict containing config for image. platform_ident must have:
- alias: alias to use for simplestreams server
- sstreams_server: simplestreams server to use, or None for default
- return_value: cloud_tests.images instance
- """
- lxd_conf = self._extract_img_platform_config(img_conf)
- image = self.client.images.create_from_simplestreams(
- lxd_conf.get('sstreams_server', DEFAULT_SSTREAMS_SERVER),
- lxd_conf['alias'])
- return lxd_image.LXDImage(
- image.properties['description'], img_conf, self, image)
-
- def launch_container(self, image=None, container=None, ephemeral=False,
- config=None, block=True,
- image_desc=None, use_desc=None):
- """
- launch a container
- image: image fingerprint to launch from
- container: container to copy
- ephemeral: delete image after first shutdown
- config: config options for instance as dict
- block: wait until container created
- image_desc: description of image being launched
- use_desc: description of container's use
- return_value: cloud_tests.instances instance
+ pylxd_image = self.client.images.create_from_simplestreams(
+ img_conf.get('sstreams_server', DEFAULT_SSTREAMS_SERVER),
+ img_conf['alias'])
+ image = lxd_image.LXDImage(self, img_conf, pylxd_image)
+ if img_conf.get('override_templates', False):
+ image.update_templates(self.config.get('template_overrides', {}),
+ self.config.get('template_files', {}))
+ return image
+
+ def launch_container(self, properties, config, features,
+ image=None, container=None, ephemeral=False,
+ container_config=None, block=True, image_desc=None,
+ use_desc=None):
+ """Launch a container.
+
+ @param properties: image properties
+ @param config: image configuration
+ @param features: image features
+ @param image: image fingerprint to launch from
+ @param container: container to copy
+ @param ephemeral: delete image after first shutdown
+ @param container_config: config options for instance as dict
+ @param block: wait until container created
+ @param image_desc: description of image being launched
+ @param use_desc: description of container's use
+ @return_value: cloud_tests.instances instance
"""
if not (image or container):
raise ValueError("either image or container must be specified")
@@ -61,16 +64,18 @@ class LXDPlatform(base.Platform):
use_desc=use_desc,
used_list=self.list_containers()),
'ephemeral': bool(ephemeral),
- 'config': config if isinstance(config, dict) else {},
+ 'config': (container_config
+ if isinstance(container_config, dict) else {}),
'source': ({'type': 'image', 'fingerprint': image} if image else
{'type': 'copy', 'source': container})
}, wait=block)
- return lxd_instance.LXDInstance(container.name, self, container)
+ return lxd_instance.LXDInstance(self, container.name, properties,
+ config, features, container)
def container_exists(self, container_name):
- """
- check if container with name 'container_name' exists
- return_value: True if exists else False
+ """Check if container with name 'container_name' exists.
+
+ @return_value: True if exists else False
"""
res = True
try:
@@ -82,16 +87,22 @@ class LXDPlatform(base.Platform):
return res
def list_containers(self):
- """
- list names of all containers
- return_value: list of names
+ """List names of all containers.
+
+ @return_value: list of names
"""
return [container.name for container in self.client.containers.all()]
- def destroy(self):
- """
- Clean up platform data
+ def query_image_by_alias(self, alias):
+ """Get image by alias in local image store.
+
+ @param alias: alias of image
+ @return_value: pylxd image (not cloud_tests.images instance)
"""
+ return self.client.images.get_by_alias(alias)
+
+ def destroy(self):
+ """Clean up platform data."""
super(LXDPlatform, self).destroy()
# vi: ts=4 expandtab
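A hedged end-to-end sketch of the platform API above; the alias and
descriptions are invented, and this mirrors what LXDImage.instance does
internally rather than replacing it:

    platform = LXDPlatform(platform_config)  # from load_platform_config()
    image = platform.get_image({'alias': 'ubuntu/xenial/default'})
    instance = platform.launch_container(
        image.properties, image.config, image.features,
        image=image.pylxd_image.fingerprint, use_desc='demo')
    instance.start(wait=True, wait_for_cloud_init=True)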
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
index 183f78c1..c8dd1427 100644
--- a/tests/cloud_tests/releases.yaml
+++ b/tests/cloud_tests/releases.yaml
@@ -1,86 +1,240 @@
# ============================= Release Config ================================
default_release_config:
- # all are disabled by default
- enabled: false
- # timeout for booting image and running cloud init
- timeout: 120
- # platform_ident values for the image, with data to identify the image
- # on that platform. see platforms.base for more information
- platform_ident: {}
- # a script to run after a boot that is used to modify an image, before
- # making a snapshot of the image. may be useful for removing data left
- # behind from cloud-init booting, such as logs, to ensure that data from
- # snapshot.launch() will not include a cloud-init.log from a boot used to
- # create the snapshot, if cloud-init has not run
- boot_clean_script: |
- #!/bin/bash
- rm -rf /var/log/cloud-init.log /var/log/cloud-init-output.log \
- /var/lib/cloud/ /run/cloud-init/ /var/log/syslog
+ # global default configuration options
+ default:
+ # all are disabled by default
+ enabled: false
+ # timeout for booting image and running cloud init
+ boot_timeout: 120
+ # a script to run after a boot that is used to modify an image, before
+ # making a snapshot of the image. may be useful for removing data left
+ # behind from cloud-init booting, such as logs, to ensure that data
+ # from snapshot.launch() will not include a cloud-init.log from a boot
+ # used to create the snapshot, if cloud-init has not run
+ boot_clean_script: |
+ #!/bin/bash
+ rm -rf /var/log/cloud-init.log /var/log/cloud-init-output.log \
+ /var/lib/cloud/ /run/cloud-init/ /var/log/syslog
+ # test script to determine if system is booted fully
+ system_ready_script: |
+ # permit running or degraded state as both indicate complete boot
+ [ $(systemctl is-system-running) = 'running' -o
+ $(systemctl is-system-running) = 'degraded' ]
+ # test script to determine if cloud-init has finished
+ cloud_init_ready_script: |
+ [ -f '/run/cloud-init/result.json' ]
+ # feature groups this release belongs to, and additional per-release
+ # feature settings; the flags themselves are documented under 'features'
+ feature_groups: []
+ features: {}
+
+ # lxd specific default configuration options
+ lxd:
+ # default sstreams server to use for lxd image retrieval
+ sstreams_server: https://us.images.linuxcontainers.org:8443
+ # keep base image, avoids downloading again next run
+ cache_base_image: true
+ # lxd images from linuxcontainers.org do not have the nocloud seed
+ # templates in place, so the image metadata must be modified
+ override_templates: true
+ # arg overrides to set image up
+ setup_overrides:
+ # lxd images from linuxcontainers.org do not come with
+ # cloud-init, so must pull cloud-init in from repo using
+ # setup_image.upgrade
+ upgrade: true
+
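
Illustrative only: one plausible way the per-release entries below could be layered over these defaults, assuming a simple recursive merge in which the more specific value wins:

    def merge(base, override):
        out = dict(base)
        for key, val in override.items():
            if isinstance(val, dict) and isinstance(out.get(key), dict):
                out[key] = merge(out[key], val)
            else:
                out[key] = val
        return out

    defaults = {'enabled': False, 'boot_timeout': 120}
    release = {'enabled': True}
    print(merge(defaults, release))  # {'enabled': True, 'boot_timeout': 120}
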
+features:
+ # all currently supported feature flags
+ all:
+ - apt # image supports apt package manager
+ - byobu # byobu is available in repositories
+ - landscape # landscape-client available in repos
+ - lxd # lxd is available in the image
+ - ppa # image supports ppas
+ - rpm # image supports rpms
+ - snap # supports snapd
+ # NOTE: the following feature flags are to work around bugs in the
+ # images, and can be removed when no longer needed
+ - hostname # setting system hostname works
+ # NOTE: the following feature flags are to work around issues in the
+ # testcases, and can be removed when no longer needed
+ - apt_src_cont # default contents and format of sources.list matches
+ # ubuntu sources.list
+ - apt_hist_fmt # apt command history entries use full paths to apt
+ # executable rather than relative paths
+ - daylight_time # timezones are daylight not standard time
+ - apt_up_out # 'Calculating upgrade..' present in log output from
+ # apt-get dist-upgrade output
+ - engb_locale # locale en_GB.UTF-8 is available
+ - locale_gen # the /etc/locale.gen file exists
+ - no_ntpdate # 'ntpdate' is not installed by default
+ - no_file_fmt_e # the 'file' utility does not have a formatting error
+ - ppa_file_name # the name of the source file added to sources.list.d has
+ # the expected format for newer ubuntu releases
+ - sshd # requires ssh server to be installed by default
+ - ssh_key_fmt # ssh auth keys printed to console have expected format
+ - syslog # test case requires syslog to be written by default
+ - ubuntu_ntp # expect ubuntu.pool.ntp.org to be used as ntp server
+ - ubuntu_repos # test case requires ubuntu repositories to be used
+ - ubuntu_user # test case needs user with the name 'ubuntu' to exist
+ # NOTE: the following feature flags are to work around issues that may
+ # be considered bugs in cloud-init
+ - lsb_release # image has lsb_release installed, maybe should install
+ # if missing by default
+ - sudo # image has sudo installed, should not be required
+ # feature flag groups
+ groups:
+ base:
+ hostname: true
+ no_file_fmt_e: true
+ ubuntu_specific:
+ apt_src_cont: true
+ apt_hist_fmt: true
+ byobu: true
+ daylight_time: true
+ engb_locale: true
+ landscape: true
+ locale_gen: true
+ lsb_release: true
+ lxd: true
+ ppa: true
+ ppa_file_name: true
+ snap: true
+ sshd: true
+ ssh_key_fmt: true
+ sudo: true
+ syslog: true
+ ubuntu_ntp: true
+ ubuntu_repos: true
+ ubuntu_user: true
+ debian_base:
+ apt: true
+ apt_up_out: true
+ no_ntpdate: true
+ rhel_base:
+ rpm: true
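
Illustrative only: resolving a release's feature_groups plus its per-release 'features' overrides into one flat flag map, assuming later overrides win (group contents abbreviated):

    groups = {  # abbreviated from the 'groups' section above
        'base': {'hostname': True, 'no_file_fmt_e': True},
        'debian_base': {'apt': True, 'apt_up_out': True, 'no_ntpdate': True},
    }

    def resolve(feature_groups, overrides):
        flags = {}
        for name in feature_groups:
            flags.update(groups[name])
        flags.update(overrides)
        return flags

    # trusty-style entry: groups enabled, then individual flags switched off
    print(resolve(['base', 'debian_base'], {'apt_up_out': False}))
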
releases:
- trusty:
- enabled: true
- platform_ident:
- lxd:
- # if sstreams_server is omitted, default is used, defined in
- # tests.cloud_tests.platforms.lxd.DEFAULT_SSTREAMS_SERVER as:
- # sstreams_server: https://us.images.linuxcontainers.org:8443
- #alias: ubuntu/trusty/default
- alias: t
- sstreams_server: https://cloud-images.ubuntu.com/daily
- xenial:
- enabled: true
- platform_ident:
- lxd:
- #alias: ubuntu/xenial/default
- alias: x
- sstreams_server: https://cloud-images.ubuntu.com/daily
- yakkety:
- enabled: true
- platform_ident:
- lxd:
- #alias: ubuntu/yakkety/default
- alias: y
- sstreams_server: https://cloud-images.ubuntu.com/daily
- zesty:
- enabled: true
- platform_ident:
- lxd:
- #alias: ubuntu/zesty/default
- alias: z
- sstreams_server: https://cloud-images.ubuntu.com/daily
+ # UBUNTU =================================================================
artful:
- enabled: true
- platform_ident:
- lxd:
- #alias: ubuntu/artful/default
- alias: a
- sstreams_server: https://cloud-images.ubuntu.com/daily
- jessie:
- platform_ident:
- lxd:
- alias: debian/jessie/default
- sid:
- platform_ident:
- lxd:
- alias: debian/sid/default
+ # EOL: Jul 2018
+ default:
+ enabled: true
+ feature_groups:
+ - base
+ - debian_base
+ - ubuntu_specific
+ lxd:
+ sstreams_server: https://cloud-images.ubuntu.com/daily
+ alias: artful
+ setup_overrides: null
+ override_templates: false
+ zesty:
+ # EOL: Jan 2018
+ default:
+ enabled: true
+ feature_groups:
+ - base
+ - debian_base
+ - ubuntu_specific
+ lxd:
+ sstreams_server: https://cloud-images.ubuntu.com/daily
+ alias: zesty
+ setup_overrides: null
+ override_templates: false
+ xenial:
+ # EOL: Apr 2021
+ default:
+ enabled: true
+ feature_groups:
+ - base
+ - debian_base
+ - ubuntu_specific
+ lxd:
+ sstreams_server: https://cloud-images.ubuntu.com/daily
+ alias: xenial
+ setup_overrides: null
+ override_templates: false
+ trusty:
+ # EOL: Apr 2019
+ default:
+ enabled: true
+ feature_groups:
+ - base
+ - debian_base
+ - ubuntu_specific
+ features:
+ apt_up_out: false
+ locale_gen: false
+ lxd: false
+ ppa_file_name: false
+ snap: false
+ ssh_key_fmt: false
+ no_ntpdate: false
+ no_file_fmt_e: false
+ system_ready_script: |
+ #!/bin/bash
+ # upstart based, so use old style runlevels
+ [ $(runlevel | awk '{print $2}') = '2' ]
+ lxd:
+ sstreams_server: https://cloud-images.ubuntu.com/daily
+ alias: trusty
+ setup_overrides: null
+ override_templates: false
+ # DEBIAN =================================================================
stretch:
- platform_ident:
- lxd:
- alias: debian/stretch/default
- wheezy:
- platform_ident:
- lxd:
- alias: debian/wheezy/default
+ # EOL: Not yet released
+ default:
+ enabled: true
+ feature_groups:
+ - base
+ - debian_base
+ lxd:
+ alias: debian/stretch/default
+ jessie:
+ # EOL: Jun 2020
+ # NOTE: the cloud-init version shipped with jessie is out of date
+ # tests work if an up to date deb is used
+ default:
+ enabled: true
+ feature_groups:
+ - base
+ - debian_base
+ lxd:
+ alias: debian/jessie/default
+ # CENTOS =================================================================
centos70:
- timeout: 180
- platform_ident:
- lxd:
- alias: centos/7/default
+ # EOL: Jun 2024 (2020 - end of full updates)
+ default:
+ enabled: true
+ feature_groups:
+ - base
+ - rhel_base
+ user_data_overrides:
+ preserve_hostname: true
+ lxd:
+ features:
+ # NOTE: (LP: #1575779)
+ hostname: false
+ alias: centos/7/default
centos66:
- timeout: 180
- platform_ident:
- lxd:
- alias: centos/6/default
+ # EOL: Nov 2020
+ default:
+ enabled: true
+ feature_groups:
+ - base
+ - rhel_base
+ # still supported, but only bugfixes after May 2017
+ system_ready_script: |
+ #!/bin/bash
+ [ $(runlevel | awk '{print $2}') = '3' ]
+ user_data_overrides:
+ preserve_hostname: true
+ lxd:
+ features:
+ # NOTE: (LP: #1575779)
+ hostname: false
+ alias: centos/6/default
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/run_funcs.py b/tests/cloud_tests/run_funcs.py
new file mode 100644
index 00000000..8ae91120
--- /dev/null
+++ b/tests/cloud_tests/run_funcs.py
@@ -0,0 +1,75 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Run functions."""
+
+import os
+
+from tests.cloud_tests import bddeb, collect, util, verify
+
+
+def tree_collect(args):
+ """Collect data using deb build from current tree.
+
+ @param args: cmdline args
+ @return_value: fail count
+ """
+ failed = 0
+ tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
+
+ with tmpdir as data_dir:
+ args.data_dir = data_dir
+ args.deb = os.path.join(tmpdir.tmpdir, 'cloud-init_all.deb')
+ try:
+ failed += bddeb.bddeb(args)
+ failed += collect.collect(args)
+ except Exception:
+ failed += 1
+ raise
+
+ return failed
+
+
+def tree_run(args):
+ """Run test suite using deb build from current tree.
+
+ @param args: cmdline args
+ @return_value: fail count
+ """
+ failed = 0
+ tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
+
+ with tmpdir as data_dir:
+ args.data_dir = data_dir
+ args.deb = os.path.join(tmpdir.tmpdir, 'cloud-init_all.deb')
+ try:
+ failed += bddeb.bddeb(args)
+ failed += collect.collect(args)
+ failed += verify.verify(args)
+ except Exception:
+ failed += 1
+ raise
+
+ return failed
+
+
+def run(args):
+ """Run test suite.
+
+ @param args: cmdline args
+ @return_value: fail count
+ """
+ failed = 0
+ tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
+
+ with tmpdir as data_dir:
+ args.data_dir = data_dir
+ try:
+ failed += collect.collect(args)
+ failed += verify.verify(args)
+ except Exception:
+ failed += 1
+ raise
+
+ return failed
+
+# vi: ts=4 expandtab
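
util.TempDir is not shown in this diff; a hypothetical stand-in with the surface the run functions above rely on (context manager yielding a directory path, a .tmpdir attribute, optional preservation of data after the run):

    import shutil
    import tempfile

    class TempDir(object):
        """Hypothetical stand-in; the real helper lives in cloud_tests.util."""

        def __init__(self, tmpdir=None, preserve=False):
            self.tmpdir = tmpdir if tmpdir else tempfile.mkdtemp()
            self.preserve = preserve

        def __enter__(self):
            return self.tmpdir

        def __exit__(self, etype, value, trace):
            if not self.preserve:
                shutil.rmtree(self.tmpdir, ignore_errors=True)

    with TempDir(preserve=True) as data_dir:
        print('collecting into', data_dir)
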
diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py
index 5d6c6387..8053a093 100644
--- a/tests/cloud_tests/setup_image.py
+++ b/tests/cloud_tests/setup_image.py
@@ -1,18 +1,42 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from tests.cloud_tests import LOG
-from tests.cloud_tests import stage, util
+"""Setup image for testing."""
from functools import partial
import os
+from tests.cloud_tests import LOG
+from tests.cloud_tests import stage, util
-def install_deb(args, image):
+
+def installed_package_version(image, package, ensure_installed=True):
+ """Get installed version of package.
+
+ @param image: cloud_tests.images instance to operate on
+ @param package: name of package
+ @param ensure_installed: raise error if not installed
+ @return_value: version string of the installed package
"""
- install deb into image
- args: cmdline arguments, must contain --deb
- image: cloud_tests.images instance to operate on
- return_value: None, may raise errors
+ os_family = util.get_os_family(image.properties['os'])
+ if os_family == 'debian':
+ cmd = ['dpkg-query', '-W', "--showformat='${Version}'", package]
+ elif os_family == 'redhat':
+ cmd = ['rpm', '-q', '--queryformat', "'%{VERSION}'", package]
+ else:
+ raise NotImplementedError
+
+ msg = 'query version for package: {}'.format(package)
+ (out, err, exit) = image.execute(
+ cmd, description=msg, rcs=(0,) if ensure_installed else range(0, 256))
+ return out.strip()
+
+
+def install_deb(args, image):
+ """Install deb into image.
+
+ @param args: cmdline arguments, must contain --deb
+ @param image: cloud_tests.images instance to operate on
+ @return_value: None, may raise errors
"""
# ensure system is compatible with package format
os_family = util.get_os_family(image.properties['os'])
@@ -21,20 +45,18 @@ def install_deb(args, image):
'family: {}'.format(args.deb, os_family))
# install deb
- LOG.debug('installing deb: %s into target', args.deb)
+ msg = 'install deb: "{}" into target'.format(args.deb)
+ LOG.debug(msg)
remote_path = os.path.join('/tmp', os.path.basename(args.deb))
image.push_file(args.deb, remote_path)
- (out, err, exit) = image.execute(['dpkg', '-i', remote_path])
- if exit != 0:
- raise OSError('failed install deb: {}\n\tstdout: {}\n\tstderr: {}'
- .format(args.deb, out, err))
+ cmd = 'dpkg -i {} || apt-get install --yes -f'.format(remote_path)
+ image.execute(['/bin/sh', '-c', cmd], description=msg)
# check installed deb version matches package
fmt = ['-W', "--showformat='${Version}'"]
(out, err, exit) = image.execute(['dpkg-deb'] + fmt + [remote_path])
expected_version = out.strip()
- (out, err, exit) = image.execute(['dpkg-query'] + fmt + ['cloud-init'])
- found_version = out.strip()
+ found_version = installed_package_version(image, 'cloud-init')
if expected_version != found_version:
raise OSError('install deb version "{}" does not match expected "{}"'
.format(found_version, expected_version))
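
The 'dpkg -i ... || apt-get install --yes -f' fallback above is the usual idiom for satisfying a local deb's dependencies; a standalone sketch of the same pattern (path hypothetical):

    import subprocess

    deb = '/tmp/cloud-init_all.deb'  # hypothetical path
    # dpkg exits nonzero when dependencies are missing; 'apt-get -f' then
    # pulls them in and finishes configuring the package.
    cmd = 'dpkg -i {} || apt-get install --yes -f'.format(deb)
    subprocess.check_call(['/bin/sh', '-c', cmd])
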
@@ -44,32 +66,28 @@ def install_deb(args, image):
def install_rpm(args, image):
+ """Install rpm into image.
+
+ @param args: cmdline arguments, must contain --rpm
+ @param image: cloud_tests.images instance to operate on
+ @return_value: None, may raise errors
"""
- install rpm into image
- args: cmdline arguments, must contain --rpm
- image: cloud_tests.images instance to operate on
- return_value: None, may raise errors
- """
- # ensure system is compatible with package format
os_family = util.get_os_family(image.properties['os'])
- if os_family not in ['redhat', 'sles']:
+ if os_family != 'redhat':
raise NotImplementedError('install rpm: {} not supported on os '
'family: {}'.format(args.rpm, os_family))
# install rpm
- LOG.debug('installing rpm: %s into target', args.rpm)
+ msg = 'install rpm: "{}" into target'.format(args.rpm)
+ LOG.debug(msg)
remote_path = os.path.join('/tmp', os.path.basename(args.rpm))
image.push_file(args.rpm, remote_path)
- (out, err, exit) = image.execute(['rpm', '-U', remote_path])
- if exit != 0:
- raise OSError('failed to install rpm: {}\n\tstdout: {}\n\tstderr: {}'
- .format(args.rpm, out, err))
+ image.execute(['rpm', '-U', remote_path], description=msg)
fmt = ['--queryformat', '"%{VERSION}"']
(out, err, exit) = image.execute(['rpm', '-q'] + fmt + [remote_path])
expected_version = out.strip()
- (out, err, exit) = image.execute(['rpm', '-q'] + fmt + ['cloud-init'])
- found_version = out.strip()
+ found_version = installed_package_version(image, 'cloud-init')
if expected_version != found_version:
raise OSError('install rpm version "{}" does not match expected "{}"'
.format(found_version, expected_version))
@@ -79,14 +97,32 @@ def install_rpm(args, image):
def upgrade(args, image):
+ """Upgrade or install cloud-init from repo.
+
+ @param args: cmdline arguments
+ @param image: cloud_tests.images instance to operate on
+ @return_value: None, may raise errors
"""
- run the system's upgrade command
- args: cmdline arguments
- image: cloud_tests.images instance to operate on
- return_value: None, may raise errors
+ os_family = util.get_os_family(image.properties['os'])
+ if os_family == 'debian':
+ cmd = 'apt-get update && apt-get install cloud-init --yes'
+ elif os_family == 'redhat':
+ cmd = 'sleep 10 && yum install cloud-init --assumeyes'
+ else:
+ raise NotImplementedError
+
+ msg = 'upgrading cloud-init'
+ LOG.debug(msg)
+ image.execute(['/bin/sh', '-c', cmd], description=msg)
+
+
+def upgrade_full(args, image):
+ """Run the system's full upgrade command.
+
+ @param args: cmdline arguments
+ @param image: cloud_tests.images instance to operate on
+ @return_value: None, may raise errors
"""
- # determine appropriate upgrade command for os_family
- # TODO: maybe use cloudinit.distros for this?
os_family = util.get_os_family(image.properties['os'])
if os_family == 'debian':
cmd = 'apt-get update && apt-get upgrade --yes'
@@ -96,53 +132,48 @@ def upgrade(args, image):
raise NotImplementedError('upgrade command not configured for distro '
'from family: {}'.format(os_family))
- # upgrade system
- LOG.debug('upgrading system')
- (out, err, exit) = image.execute(['/bin/sh', '-c', cmd])
- if exit != 0:
- raise OSError('failed to upgrade system\n\tstdout: {}\n\tstderr:{}'
- .format(out, err))
+ msg = 'full system upgrade'
+ LOG.debug(msg)
+ image.execute(['/bin/sh', '-c', cmd], description=msg)
def run_script(args, image):
+ """Run a script in the target image.
+
+ @param args: cmdline arguments, must contain --script
+ @param image: cloud_tests.images instance to operate on
+ @return_value: None, may raise errors
"""
- run a script in the target image
- args: cmdline arguments, must contain --script
- image: cloud_tests.images instance to operate on
- return_value: None, may raise errors
- """
- # TODO: get exit status back from script and add error handling here
- LOG.debug('running setup image script in target image')
- image.run_script(args.script)
+ msg = 'run setup image script in target image'
+ LOG.debug(msg)
+ image.run_script(args.script, description=msg)
def enable_ppa(args, image):
- """
- enable a ppa in the target image
- args: cmdline arguments, must contain --ppa
- image: cloud_tests.image instance to operate on
- return_value: None, may raise errors
+ """Enable a ppa in the target image.
+
+ @param args: cmdline arguments, must contain --ppa
+ @param image: cloud_tests.image instance to operate on
+ @return_value: None, may raise errors
"""
# ppa only supported on ubuntu (maybe debian?)
- if image.properties['os'] != 'ubuntu':
+ if image.properties['os'].lower() != 'ubuntu':
raise NotImplementedError('enabling a ppa is only available on ubuntu')
# add ppa with add-apt-repository and update
ppa = 'ppa:{}'.format(args.ppa)
- LOG.debug('enabling %s', ppa)
+ msg = 'enable ppa: "{}" in target'.format(ppa)
+ LOG.debug(msg)
cmd = 'add-apt-repository --yes {} && apt-get update'.format(ppa)
- (out, err, exit) = image.execute(['/bin/sh', '-c', cmd])
- if exit != 0:
- raise OSError('enable ppa for {} failed\n\tstdout: {}\n\tstderr: {}'
- .format(ppa, out, err))
+ image.execute(['/bin/sh', '-c', cmd], description=msg)
def enable_repo(args, image):
- """
- enable a repository in the target image
- args: cmdline arguments, must contain --repo
- image: cloud_tests.image instance to operate on
- return_value: None, may raise errors
+ """Enable a repository in the target image.
+
+ @param args: cmdline arguments, must contain --repo
+ @param image: cloud_tests.image instance to operate on
+ @return_value: None, may raise errors
"""
# find enable repo command for the distro
os_family = util.get_os_family(image.properties['os'])
@@ -155,20 +186,23 @@ def enable_repo(args, image):
raise NotImplementedError('enable repo command not configured for '
'distro from family: {}'.format(os_family))
- LOG.debug('enabling repo: "%s"', args.repo)
- (out, err, exit) = image.execute(['/bin/sh', '-c', cmd])
- if exit != 0:
- raise OSError('enable repo {} failed\n\tstdout: {}\n\tstderr: {}'
- .format(args.repo, out, err))
+ msg = 'enable repo: "{}" in target'.format(args.repo)
+ LOG.debug(msg)
+ image.execute(['/bin/sh', '-c', cmd], description=msg)
def setup_image(args, image):
+ """Set up image as specified in args.
+
+ @param args: cmdline arguments
+ @param image: cloud_tests.image instance to operate on
+ @return_value: tuple of results and fail count
"""
- set up image as specified in args
- args: cmdline arguments
- image: cloud_tests.image instance to operate on
- return_value: tuple of results and fail count
- """
+ # update the args if necessary for this image
+ overrides = image.setup_overrides
+ LOG.debug('updating args for setup with: %s', overrides)
+ args = util.update_args(args, overrides, preserve_old=True)
+
# mapping of setup cmdline arg name to setup function
# represented as a tuple rather than a dict or odict as lookup by name not
# needed, and order is important as --script and --upgrade go at the end
@@ -179,17 +213,19 @@ def setup_image(args, image):
('repo', enable_repo, 'setup func for --repo, enable repo'),
('ppa', enable_ppa, 'setup func for --ppa, enable ppa'),
('script', run_script, 'setup func for --script, run script'),
- ('upgrade', upgrade, 'setup func for --upgrade, upgrade pkgs'),
+ ('upgrade', upgrade, 'setup func for --upgrade, upgrade cloud-init'),
+ ('upgrade-full', upgrade_full, 'setup func for --upgrade-full'),
)
# determine which setup functions needed
calls = [partial(stage.run_single, desc, partial(func, args, image))
for name, func, desc in handlers if getattr(args, name, None)]
- image_name = 'image: distro={}, release={}'.format(
- image.properties['os'], image.properties['release'])
- LOG.info('setting up %s', image_name)
- return stage.run_stage('set up for {}'.format(image_name), calls,
- continue_after_error=False)
+ LOG.info('setting up %s', image)
+ res = stage.run_stage(
+ 'set up for {}'.format(image), calls, continue_after_error=False)
+ LOG.debug('after setup complete, installed cloud-init version is: %s',
+ installed_package_version(image, 'cloud-init'))
+ return res
# vi: ts=4 expandtab
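
Illustrative sketch of the handlers dispatch in setup_image above: each (arg, func) entry runs only when the matching cmdline argument is set, in tuple order, via functools.partial (names and values hypothetical):

    from functools import partial

    class Args(object):  # hypothetical parsed-args stand-in
        deb = 'cloud-init_all.deb'
        ppa = None
        upgrade = True

    def install_deb(args):
        print('install', args.deb)

    def enable_ppa(args):
        print('enable ppa', args.ppa)

    def do_upgrade(args):
        print('upgrade cloud-init from repo')

    handlers = (('deb', install_deb), ('ppa', enable_ppa),
                ('upgrade', do_upgrade))
    args = Args()
    calls = [partial(func, args)
             for name, func in handlers if getattr(args, name, None)]
    for call in calls:
        call()  # runs install_deb and do_upgrade; ppa is unset, so skipped
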
diff --git a/tests/cloud_tests/snapshots/__init__.py b/tests/cloud_tests/snapshots/__init__.py
index 2ab654de..93a54f5e 100644
--- a/tests/cloud_tests/snapshots/__init__.py
+++ b/tests/cloud_tests/snapshots/__init__.py
@@ -1,10 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Main init."""
+
def get_snapshot(image):
- """
- get snapshot from image
- """
+ """Get snapshot from image."""
return image.snapshot()
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/snapshots/base.py b/tests/cloud_tests/snapshots/base.py
index d715f037..94328982 100644
--- a/tests/cloud_tests/snapshots/base.py
+++ b/tests/cloud_tests/snapshots/base.py
@@ -1,44 +1,45 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Base snapshot."""
+
class Snapshot(object):
- """
- Base class for snapshots
- """
+ """Base class for snapshots."""
+
platform_name = None
- def __init__(self, properties, config):
- """
- Set up snapshot
+ def __init__(self, platform, properties, config, features):
+ """Set up snapshot.
+
+ @param platform: platform object
+ @param properties: image properties
+ @param config: image config
+ @param features: supported feature flags
"""
+ self.platform = platform
self.properties = properties
self.config = config
+ self.features = features
def __str__(self):
- """
- a brief description of the snapshot
- """
+ """A brief description of the snapshot."""
return '-'.join((self.properties['os'], self.properties['release']))
def launch(self, user_data, meta_data=None, block=True, start=True,
use_desc=None):
- """
- launch instance
-
- user_data: user-data for the instance
- instance_id: instance-id for the instance
- block: wait until instance is created
- start: start instance and wait until fully started
- use_desc: description of snapshot instance use
+ """Launch instance.
- return_value: an Instance
+ @param user_data: user-data for the instance
+ @param instance_id: instance-id for the instance
+ @param block: wait until instance is created
+ @param start: start instance and wait until fully started
+ @param use_desc: description of snapshot instance use
+ @return_value: an Instance
"""
raise NotImplementedError
def destroy(self):
- """
- Clean up snapshot data
- """
+ """Clean up snapshot data."""
pass
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/snapshots/lxd.py b/tests/cloud_tests/snapshots/lxd.py
index eabbce3f..39c55c5e 100644
--- a/tests/cloud_tests/snapshots/lxd.py
+++ b/tests/cloud_tests/snapshots/lxd.py
@@ -1,49 +1,52 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Base LXD snapshot."""
+
from tests.cloud_tests.snapshots import base
class LXDSnapshot(base.Snapshot):
- """
- LXD image copy backed snapshot
- """
+ """LXD image copy backed snapshot."""
+
platform_name = "lxd"
- def __init__(self, properties, config, platform, pylxd_frozen_instance):
- """
- Set up snapshot
+ def __init__(self, platform, properties, config, features,
+ pylxd_frozen_instance):
+ """Set up snapshot.
+
+ @param platform: platform object
+ @param properties: image properties
+ @param config: image config
+ @param features: supported feature flags
+ @param pylxd_frozen_instance: pylxd instance of the frozen container
"""
- self.platform = platform
self.pylxd_frozen_instance = pylxd_frozen_instance
- super(LXDSnapshot, self).__init__(properties, config)
+ super(LXDSnapshot, self).__init__(
+ platform, properties, config, features)
def launch(self, user_data, meta_data=None, block=True, start=True,
use_desc=None):
- """
- launch instance
-
- user_data: user-data for the instance
- instance_id: instance-id for the instance
- block: wait until instance is created
- start: start instance and wait until fully started
- use_desc: description of snapshot instance use
-
- return_value: an Instance
+ """Launch instance.
+
+ @param user_data: user-data for the instance
+ @param instance_id: instance-id for the instance
+ @param block: wait until instance is created
+ @param start: start instance and wait until fully started
+ @param use_desc: description of snapshot instance use
+ @return_value: an Instance
"""
inst_config = {'user.user-data': user_data}
if meta_data:
inst_config['user.meta-data'] = meta_data
instance = self.platform.launch_container(
- container=self.pylxd_frozen_instance.name, config=inst_config,
- block=block, image_desc=str(self), use_desc=use_desc)
+ self.properties, self.config, self.features, block=block,
+ image_desc=str(self), container=self.pylxd_frozen_instance.name,
+ use_desc=use_desc, container_config=inst_config)
if start:
- instance.start(wait=True, wait_time=self.config.get('timeout'))
+ instance.start()
return instance
def destroy(self):
- """
- Clean up snapshot data
- """
+ """Clean up snapshot data."""
self.pylxd_frozen_instance.destroy()
super(LXDSnapshot, self).destroy()
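
Illustrative only: the LXD container config that launch() above derives from the user-data and optional meta-data, delivered through LXD's user.user-data/user.meta-data keys:

    user_data = '#cloud-config\nruncmd:\n - echo hello\n'
    meta_data = None  # optional; most tests pass only user-data

    inst_config = {'user.user-data': user_data}
    if meta_data:
        inst_config['user.meta-data'] = meta_data
    print(inst_config)
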
diff --git a/tests/cloud_tests/stage.py b/tests/cloud_tests/stage.py
index 584cdaee..74a7d46d 100644
--- a/tests/cloud_tests/stage.py
+++ b/tests/cloud_tests/stage.py
@@ -1,5 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Stage a run."""
+
import sys
import time
import traceback
@@ -8,38 +10,29 @@ from tests.cloud_tests import LOG
class PlatformComponent(object):
- """
- context manager to safely handle platform components, ensuring that
- .destroy() is called
- """
+ """Context manager to safely handle platform components."""
def __init__(self, get_func):
- """
- store get_<platform component> function as partial taking no args
- """
+ """Store get_<platform component> function as partial with no args."""
self.get_func = get_func
def __enter__(self):
- """
- create instance of platform component
- """
+ """Create instance of platform component."""
self.instance = self.get_func()
return self.instance
def __exit__(self, etype, value, trace):
- """
- destroy instance
- """
+ """Destroy instance."""
if self.instance is not None:
self.instance.destroy()
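
A self-contained illustration of the context-manager pattern above, with a hypothetical FakeImage component; destroy() runs even if the body raises:

    from functools import partial

    class PlatformComponent(object):  # repeated here to run stand-alone
        def __init__(self, get_func):
            self.get_func = get_func

        def __enter__(self):
            self.instance = self.get_func()
            return self.instance

        def __exit__(self, etype, value, trace):
            if self.instance is not None:
                self.instance.destroy()

    class FakeImage(object):  # hypothetical component
        def destroy(self):
            print('destroyed')

    with PlatformComponent(partial(FakeImage)) as image:
        print('using', image)
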
def run_single(name, call):
- """
- run a single function, keeping track of results and failures and time
- name: name of part
- call: call to make
- return_value: a tuple of result and fail count
+ """Run a single function, keeping track of results and time.
+
+ @param name: name of part
+ @param call: call to make
+ @return_value: a tuple of result and fail count
"""
res = {
'name': name,
@@ -67,17 +60,18 @@ def run_single(name, call):
def run_stage(parent_name, calls, continue_after_error=True):
- """
- run a stage of collection, keeping track of results and failures
- parent_name: name of stage calls are under
- calls: list of function call taking no params. must return a tuple
- of results and failures. may raise exceptions
- continue_after_error: whether or not to proceed to the next call after
- catching an exception or recording a failure
- return_value: a tuple of results and failures, with result containing
- results from the function call under 'stages', and a list
- of errors (if any on this level), and elapsed time
- running stage, and the name
+ """Run a stage of collection, keeping track of results and failures.
+
+ @param parent_name: name of stage calls are under
+ @param calls: list of function calls taking no params. must return a tuple
+ of results and failures. may raise exceptions
+ @param continue_after_error: whether or not to proceed to the next call
+ after catching an exception or recording a
+ failure
+ @return_value: a tuple of results and failures, with result containing
+ results from the function call under 'stages', and a list
+ of errors (if any on this level), and elapsed time
+ running stage, and the name
"""
res = {
'name': parent_name,
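
Schematic only (the function body is truncated in this hunk): per the docstring, each call returns a (result, fail count) tuple and the stage sums failures while collecting results, roughly:

    def run_stage(parent_name, calls, continue_after_error=True):
        results, failures = [], 0
        for call in calls:
            try:
                res, fails = call()
            except Exception as err:
                res, fails = {'errors': [str(err)]}, 1
            results.append(res)
            failures += fails
            if fails and not continue_after_error:
                break
        return {'name': parent_name, 'stages': results}, failures

    print(run_stage('demo', [lambda: ({'name': 'ok'}, 0)]))
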
diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml
index c22b08ef..7183e017 100644
--- a/tests/cloud_tests/testcases.yaml
+++ b/tests/cloud_tests/testcases.yaml
@@ -2,6 +2,7 @@
base_test_data:
script_timeout: 20
enabled: True
+ required_features: []
cloud_config: |
#cloud-config
collect_scripts:
diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py
index a1d86d45..47217ce6 100644
--- a/tests/cloud_tests/testcases/__init__.py
+++ b/tests/cloud_tests/testcases/__init__.py
@@ -1,5 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Main init."""
+
import importlib
import inspect
import unittest
@@ -9,12 +11,12 @@ from tests.cloud_tests.testcases.base import CloudTestCase as base_test
def discover_tests(test_name):
- """
- discover tests in test file for 'testname'
- return_value: list of test classes
+ """Discover tests in test file for 'testname'.
+
+ @return_value: list of test classes
"""
testmod_name = 'tests.cloud_tests.testcases.{}'.format(
- config.name_sanatize(test_name))
+ config.name_sanitize(test_name))
try:
testmod = importlib.import_module(testmod_name)
except NameError:
@@ -26,9 +28,9 @@ def discover_tests(test_name):
def get_suite(test_name, data, conf):
- """
- get test suite with all tests for 'testname'
- return_value: a test suite
+ """Get test suite with all tests for 'testname'.
+
+ @return_value: a test suite
"""
suite = unittest.TestSuite()
for test_class in discover_tests(test_name):
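
Illustrative only: a suite built by get_suite() above can be run with the stdlib runner; the empty suite here stands in for get_suite('example_test', data, conf):

    import unittest

    suite = unittest.TestSuite()  # stand-in for a get_suite() result
    runner = unittest.TextTestRunner(verbosity=2)
    result = runner.run(suite)
    print('failures:', len(result.failures), 'errors:', len(result.errors))
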
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index 64d5507a..bb545ab9 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -1,61 +1,55 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import util as c_util
+"""Base test case module."""
import crypt
import json
import unittest
+from cloudinit import util as c_util
+
class CloudTestCase(unittest.TestCase):
- """
- base test class for verifiers
- """
+ """Base test class for verifiers."""
+
data = None
conf = None
_cloud_config = None
def shortDescription(self):
+ """Prevent nose from using docstrings."""
return None
@property
def cloud_config(self):
- """
- get the cloud-config used by the test
- """
+ """Get the cloud-config used by the test."""
if not self._cloud_config:
self._cloud_config = c_util.load_yaml(self.conf)
return self._cloud_config
def get_config_entry(self, name):
- """
- get a config entry from cloud-config ensuring that it is present
- """
+ """Get a config entry from cloud-config ensuring that it is present."""
if name not in self.cloud_config:
raise AssertionError('Key "{}" not in cloud config'.format(name))
return self.cloud_config[name]
def get_data_file(self, name):
- """
- get data file failing test if it is not present
- """
+ """Get data file failing test if it is not present."""
if name not in self.data:
raise AssertionError('File "{}" missing from collect data'
.format(name))
return self.data[name]
def get_instance_id(self):
- """
- get recorded instance id
- """
+ """Get recorded instance id."""
return self.get_data_file('instance-id').strip()
def get_status_data(self, data, version=None):
- """
- parse result.json and status.json like data files
- data: data to load
- version: cloud-init output version, defaults to 'v1'
- return_value: dict of data or None if missing
+ """Parse result.json and status.json like data files.
+
+ @param data: data to load
+ @param version: cloud-init output version, defaults to 'v1'
+ @return_value: dict of data or None if missing
"""
if not version:
version = 'v1'
@@ -63,16 +57,12 @@ class CloudTestCase(unittest.TestCase):
return data.get(version)
def get_datasource(self):
- """
- get datasource name
- """
+ """Get datasource name."""
data = self.get_status_data(self.get_data_file('result.json'))
return data.get('datasource')
def test_no_stages_errors(self):
- """
- ensure that there were no errors in any stage
- """
+ """Ensure that there were no errors in any stage."""
status = self.get_status_data(self.get_data_file('status.json'))
for stage in ('init', 'init-local', 'modules-config', 'modules-final'):
self.assertIn(stage, status)
@@ -84,7 +74,10 @@ class CloudTestCase(unittest.TestCase):
class PasswordListTest(CloudTestCase):
+ """Base password test case class."""
+
def test_shadow_passwords(self):
+ """Test shadow passwords."""
shadow = self.get_data_file('shadow')
users = {}
dupes = []
@@ -121,7 +114,7 @@ class PasswordListTest(CloudTestCase):
self.assertNotEqual(users['harry'], users['dick'])
def test_shadow_expected_users(self):
- """Test every tom, dick, and harry user in shadow"""
+ """Test every tom, dick, and harry user in shadow."""
out = self.get_data_file('shadow')
self.assertIn('tom:', out)
self.assertIn('dick:', out)
@@ -130,7 +123,7 @@ class PasswordListTest(CloudTestCase):
self.assertIn('mikey:', out)
def test_sshd_config(self):
- """Test sshd config allows passwords"""
+ """Test sshd config allows passwords."""
out = self.get_data_file('sshd_config')
self.assertIn('PasswordAuthentication yes', out)
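
A hypothetical verifier in the style of the classes above; the collected 'data' mapping is stubbed directly so the sketch runs stand-alone:

    import unittest

    class TestExample(unittest.TestCase):  # hypothetical verifier
        data = {'hostname': 'myhost\n'}  # stubbed collected output

        def test_hostname(self):
            """Test collected hostname matches expectation."""
            self.assertIn('myhost', self.data['hostname'])

    if __name__ == '__main__':
        unittest.main()
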
diff --git a/tests/cloud_tests/testcases/bugs/__init__.py b/tests/cloud_tests/testcases/bugs/__init__.py
index 5251d7c1..c6452f9c 100644
--- a/tests/cloud_tests/testcases/bugs/__init__.py
+++ b/tests/cloud_tests/testcases/bugs/__init__.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Test verifiers for cloud-init bugs
+"""Test verifiers for cloud-init bugs.
+
See configs/bugs/README.md for more information
"""
diff --git a/tests/cloud_tests/testcases/bugs/lp1511485.py b/tests/cloud_tests/testcases/bugs/lp1511485.py
index ac5ccb42..670d3aff 100644
--- a/tests/cloud_tests/testcases/bugs/lp1511485.py
+++ b/tests/cloud_tests/testcases/bugs/lp1511485.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestLP1511485(base.CloudTestCase):
- """Test LP# 1511485"""
+ """Test LP# 1511485."""
def test_final_message(self):
- """Test final message exists"""
+ """Test final message exists."""
out = self.get_data_file('cloud-init-output.log')
self.assertIn('Final message from cloud-config', out)
diff --git a/tests/cloud_tests/testcases/bugs/lp1628337.py b/tests/cloud_tests/testcases/bugs/lp1628337.py
index af0ffc75..a2c90481 100644
--- a/tests/cloud_tests/testcases/bugs/lp1628337.py
+++ b/tests/cloud_tests/testcases/bugs/lp1628337.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestLP1628337(base.CloudTestCase):
- """Test LP# 1511485"""
+ """Test LP# 1511485."""
def test_fetch_indices(self):
- """Verify no apt errors"""
+ """Verify no apt errors."""
out = self.get_data_file('cloud-init-output.log')
self.assertNotIn('W: Failed to fetch', out)
self.assertNotIn('W: Some index files failed to download. '
@@ -16,7 +16,7 @@ class TestLP1628337(base.CloudTestCase):
out)
def test_ntp(self):
- """Verify can find ntp and install it"""
+ """Verify can find ntp and install it."""
out = self.get_data_file('cloud-init-output.log')
self.assertNotIn('E: Unable to locate package ntp', out)
diff --git a/tests/cloud_tests/testcases/examples/__init__.py b/tests/cloud_tests/testcases/examples/__init__.py
index b3af7f8a..39af88c2 100644
--- a/tests/cloud_tests/testcases/examples/__init__.py
+++ b/tests/cloud_tests/testcases/examples/__init__.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Test verifiers for cloud-init examples
+"""Test verifiers for cloud-init examples.
+
See configs/examples/README.md for more information
"""
diff --git a/tests/cloud_tests/testcases/examples/add_apt_repositories.py b/tests/cloud_tests/testcases/examples/add_apt_repositories.py
index 15b8f01c..71eede97 100644
--- a/tests/cloud_tests/testcases/examples/add_apt_repositories.py
+++ b/tests/cloud_tests/testcases/examples/add_apt_repositories.py
@@ -1,19 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptconfigurePrimary(base.CloudTestCase):
- """Example cloud-config test"""
+ """Example cloud-config test."""
def test_ubuntu_sources(self):
- """Test no default Ubuntu entries exist"""
+ """Test no default Ubuntu entries exist."""
out = self.get_data_file('ubuntu.sources.list')
self.assertEqual(0, int(out))
def test_gatech_sources(self):
- """Test GaTech entires exist"""
+ """Test GaTech entires exist."""
out = self.get_data_file('gatech.sources.list')
self.assertEqual(20, int(out))
diff --git a/tests/cloud_tests/testcases/examples/alter_completion_message.py b/tests/cloud_tests/testcases/examples/alter_completion_message.py
index b06ad01b..b7b5d5e0 100644
--- a/tests/cloud_tests/testcases/examples/alter_completion_message.py
+++ b/tests/cloud_tests/testcases/examples/alter_completion_message.py
@@ -1,34 +1,27 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestFinalMessage(base.CloudTestCase):
- """
- test cloud init module `cc_final_message`
- """
+ """Test cloud init module `cc_final_message`."""
+
subs_char = '$'
def get_final_message_config(self):
- """
- get config for final message
- """
+ """Get config for final message."""
self.assertIn('final_message', self.cloud_config)
return self.cloud_config['final_message']
def get_final_message(self):
- """
- get final message from log
- """
+ """Get final message from log."""
out = self.get_data_file('cloud-init-output.log')
lines = len(self.get_final_message_config().splitlines())
return '\n'.join(out.splitlines()[-1 * lines:])
def test_final_message_string(self):
- """
- ensure final handles regular strings
- """
+ """Ensure final handles regular strings."""
for actual, config in zip(
self.get_final_message().splitlines(),
self.get_final_message_config().splitlines()):
@@ -36,9 +29,7 @@ class TestFinalMessage(base.CloudTestCase):
self.assertEqual(actual, config)
def test_final_message_subs(self):
- """
- test variable substitution in final message
- """
+ """Test variable substitution in final message."""
# TODO: add verification of other substitutions
patterns = {'$datasource': self.get_datasource()}
for key, expected in patterns.items():
diff --git a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py b/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py
index 8a4a0db0..38540eb8 100644
--- a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py
+++ b/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py
@@ -1,24 +1,24 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestTrustedCA(base.CloudTestCase):
- """Example cloud-config test"""
+ """Example cloud-config test."""
def test_cert_count_ca(self):
- """Test correct count of CAs in .crt"""
+ """Test correct count of CAs in .crt."""
out = self.get_data_file('cert_count_ca')
self.assertIn('7 /etc/ssl/certs/ca-certificates.crt', out)
def test_cert_count_cloudinit(self):
- """Test correct count of CAs in .pem"""
+ """Test correct count of CAs in .pem."""
out = self.get_data_file('cert_count_cloudinit')
self.assertIn('7 /etc/ssl/certs/cloud-init-ca-certs.pem', out)
def test_cloudinit_certs(self):
- """Test text of cert"""
+ """Test text of cert."""
out = self.get_data_file('cloudinit_certs')
self.assertIn('-----BEGIN CERTIFICATE-----', out)
self.assertIn('YOUR-ORGS-TRUSTED-CA-CERT-HERE', out)
diff --git a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py b/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py
index 4f651703..691a316b 100644
--- a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py
+++ b/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py
@@ -1,29 +1,29 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestSSHKeys(base.CloudTestCase):
- """Example cloud-config test"""
+ """Example cloud-config test."""
def test_cert_count(self):
- """Test cert count"""
+ """Test cert count."""
out = self.get_data_file('cert_count')
self.assertEqual(20, int(out))
def test_dsa_public(self):
- """Test DSA key has ending"""
+ """Test DSA key has ending."""
out = self.get_data_file('dsa_public')
self.assertIn('ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost', out)
def test_rsa_public(self):
- """Test RSA key has specific ending"""
+ """Test RSA key has specific ending."""
out = self.get_data_file('rsa_public')
self.assertIn('PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost', out)
def test_auth_keys(self):
- """Test authorized keys has specific ending"""
+ """Test authorized keys has specific ending."""
out = self.get_data_file('auth_keys')
self.assertIn('QPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host', out)
self.assertIn('Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies', out)
diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.py b/tests/cloud_tests/testcases/examples/including_user_groups.py
index e5732322..67af527b 100644
--- a/tests/cloud_tests/testcases/examples/including_user_groups.py
+++ b/tests/cloud_tests/testcases/examples/including_user_groups.py
@@ -1,42 +1,42 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestUserGroups(base.CloudTestCase):
- """Example cloud-config test"""
+ """Example cloud-config test."""
def test_group_ubuntu(self):
- """Test ubuntu group exists"""
+ """Test ubuntu group exists."""
out = self.get_data_file('group_ubuntu')
self.assertRegex(out, r'ubuntu:x:[0-9]{4}:')
def test_group_cloud_users(self):
- """Test cloud users group exists"""
+ """Test cloud users group exists."""
out = self.get_data_file('group_cloud_users')
self.assertRegex(out, r'cloud-users:x:[0-9]{4}:barfoo')
def test_user_ubuntu(self):
- """Test ubuntu user exists"""
+ """Test ubuntu user exists."""
out = self.get_data_file('user_ubuntu')
self.assertRegex(
out, r'ubuntu:x:[0-9]{4}:[0-9]{4}:Ubuntu:/home/ubuntu:/bin/bash')
def test_user_foobar(self):
- """Test foobar user exists"""
+ """Test foobar user exists."""
out = self.get_data_file('user_foobar')
self.assertRegex(
out, r'foobar:x:[0-9]{4}:[0-9]{4}:Foo B. Bar:/home/foobar:')
def test_user_barfoo(self):
- """Test barfoo user exists"""
+ """Test barfoo user exists."""
out = self.get_data_file('user_barfoo')
self.assertRegex(
out, r'barfoo:x:[0-9]{4}:[0-9]{4}:Bar B. Foo:/home/barfoo:')
def test_user_cloudy(self):
- """Test cloudy user exists"""
+ """Test cloudy user exists."""
out = self.get_data_file('user_cloudy')
self.assertRegex(out, r'cloudy:x:[0-9]{3,4}:')
diff --git a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py b/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py
index 660d1aa3..df133844 100644
--- a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py
+++ b/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py
@@ -1,19 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestInstall(base.CloudTestCase):
- """Example cloud-config test"""
+ """Example cloud-config test."""
def test_htop(self):
- """Verify htop installed"""
+ """Verify htop installed."""
out = self.get_data_file('htop')
self.assertEqual(1, int(out))
def test_tree(self):
- """Verify tree installed"""
+ """Verify tree installed."""
out = self.get_data_file('treeutils')
self.assertEqual(1, int(out))
diff --git a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py
index b36486f0..4ec26b8f 100644
--- a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py
+++ b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestChefExample(base.CloudTestCase):
- """Test chef module"""
+ """Test chef module."""
def test_chef_basic(self):
- """Test chef installed"""
+ """Test chef installed."""
out = self.get_data_file('chef_installed')
self.assertIn('install ok', out)
diff --git a/tests/cloud_tests/testcases/examples/run_apt_upgrade.py b/tests/cloud_tests/testcases/examples/run_apt_upgrade.py
index 4c04d315..744e49cb 100644
--- a/tests/cloud_tests/testcases/examples/run_apt_upgrade.py
+++ b/tests/cloud_tests/testcases/examples/run_apt_upgrade.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestUpgrade(base.CloudTestCase):
- """Example cloud-config test"""
+ """Example cloud-config test."""
def test_upgrade(self):
- """Test upgrade exists in apt history"""
+ """Test upgrade exists in apt history."""
out = self.get_data_file('cloud-init.log')
self.assertIn(
'[CLOUDINIT] util.py[DEBUG]: apt-upgrade '
diff --git a/tests/cloud_tests/testcases/examples/run_commands.py b/tests/cloud_tests/testcases/examples/run_commands.py
index 0be21d0f..01d5d4fc 100644
--- a/tests/cloud_tests/testcases/examples/run_commands.py
+++ b/tests/cloud_tests/testcases/examples/run_commands.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestRunCmd(base.CloudTestCase):
- """Example cloud-config test"""
+ """Example cloud-config test."""
def test_run_cmd(self):
- """Test run command worked"""
+ """Test run command worked."""
out = self.get_data_file('run_cmd')
self.assertIn('cloud-init run cmd test', out)
diff --git a/tests/cloud_tests/testcases/examples/run_commands_first_boot.py b/tests/cloud_tests/testcases/examples/run_commands_first_boot.py
index baa23130..3f3d8f84 100644
--- a/tests/cloud_tests/testcases/examples/run_commands_first_boot.py
+++ b/tests/cloud_tests/testcases/examples/run_commands_first_boot.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestBootCmd(base.CloudTestCase):
- """Example cloud-config test"""
+ """Example cloud-config test."""
def test_bootcmd_host(self):
- """Test boot command worked"""
+ """Test boot command worked."""
out = self.get_data_file('hosts')
self.assertIn('192.168.1.130 us.archive.ubuntu.com', out)
diff --git a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py b/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py
index 97dfeec3..7bd520f6 100644
--- a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py
+++ b/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py
@@ -1,29 +1,29 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestWriteFiles(base.CloudTestCase):
- """Example cloud-config test"""
+ """Example cloud-config test."""
def test_b64(self):
- """Test b64 encoded file reads as ascii"""
+ """Test b64 encoded file reads as ascii."""
out = self.get_data_file('file_b64')
self.assertIn('ASCII text', out)
def test_binary(self):
- """Test binary file reads as executable"""
+ """Test binary file reads as executable."""
out = self.get_data_file('file_binary')
self.assertIn('ELF 64-bit LSB executable, x86-64, version 1', out)
def test_gzip(self):
- """Test gzip file shows up as a shell script"""
+ """Test gzip file shows up as a shell script."""
out = self.get_data_file('file_gzip')
self.assertIn('POSIX shell script, ASCII text executable', out)
def test_text(self):
- """Test text shows up as ASCII text"""
+ """Test text shows up as ASCII text."""
out = self.get_data_file('file_text')
self.assertIn('ASCII text', out)
diff --git a/tests/cloud_tests/testcases/main/__init__.py b/tests/cloud_tests/testcases/main/__init__.py
index 5888990d..0a592637 100644
--- a/tests/cloud_tests/testcases/main/__init__.py
+++ b/tests/cloud_tests/testcases/main/__init__.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Test verifiers for cloud-init main features
+"""Test verifiers for cloud-init main features.
+
See configs/main/README.md for more information
"""
diff --git a/tests/cloud_tests/testcases/main/command_output_simple.py b/tests/cloud_tests/testcases/main/command_output_simple.py
index c0461a08..fe4c7670 100644
--- a/tests/cloud_tests/testcases/main/command_output_simple.py
+++ b/tests/cloud_tests/testcases/main/command_output_simple.py
@@ -1,17 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestCommandOutputSimple(base.CloudTestCase):
- """
- test functionality of simple output redirection
- """
+ """Test functionality of simple output redirection."""
def test_output_file(self):
- """
- ensure that the output file is not empty and has all stages
- """
+ """Ensure that the output file is not empty and has all stages."""
data = self.get_data_file('cloud-init-test-output')
self.assertNotEqual(len(data), 0, "specified log empty")
self.assertEqual(self.get_config_entry('final_message'),
diff --git a/tests/cloud_tests/testcases/modules/__init__.py b/tests/cloud_tests/testcases/modules/__init__.py
index 9560fb26..6ab8114d 100644
--- a/tests/cloud_tests/testcases/modules/__init__.py
+++ b/tests/cloud_tests/testcases/modules/__init__.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Test verifiers for cloud-init cc modules
+"""Test verifiers for cloud-init cc modules.
+
See configs/modules/README.md for more information
"""
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_conf.py b/tests/cloud_tests/testcases/modules/apt_configure_conf.py
index 5d96d95c..3bf93447 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_conf.py
+++ b/tests/cloud_tests/testcases/modules/apt_configure_conf.py
@@ -1,19 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptconfigureConf(base.CloudTestCase):
- """Test apt-configure module"""
+ """Test apt-configure module."""
def test_apt_conf_assumeyes(self):
- """Test config assumes true"""
+ """Test config assumes true."""
out = self.get_data_file('94cloud-init-config')
self.assertIn('Assume-Yes "true";', out)
def test_apt_conf_fixbroken(self):
- """Test config fixes broken"""
+ """Test config fixes broken."""
out = self.get_data_file('94cloud-init-config')
self.assertIn('Fix-Broken "true";', out)
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py b/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
index 0e2dfdeb..eabe4607 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
+++ b/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptconfigureDisableSuites(base.CloudTestCase):
- """Test apt-configure module"""
+ """Test apt-configure module."""
def test_empty_sourcelist(self):
- """Test source list is empty"""
+ """Test source list is empty."""
out = self.get_data_file('sources.list')
self.assertEqual('', out)
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_primary.py b/tests/cloud_tests/testcases/modules/apt_configure_primary.py
index 2918785d..c1c4bbc0 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_primary.py
+++ b/tests/cloud_tests/testcases/modules/apt_configure_primary.py
@@ -1,19 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptconfigurePrimary(base.CloudTestCase):
- """Test apt-configure module"""
+ """Test apt-configure module."""
def test_ubuntu_sources(self):
- """Test no default Ubuntu entries exist"""
+ """Test no default Ubuntu entries exist."""
out = self.get_data_file('ubuntu.sources.list')
self.assertEqual(0, int(out))
def test_gatech_sources(self):
- """Test GaTech entires exist"""
+ """Test GaTech entires exist."""
out = self.get_data_file('gatech.sources.list')
self.assertEqual(20, int(out))
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_proxy.py b/tests/cloud_tests/testcases/modules/apt_configure_proxy.py
index 93ae64c6..0c61b6cc 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_proxy.py
+++ b/tests/cloud_tests/testcases/modules/apt_configure_proxy.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptconfigureProxy(base.CloudTestCase):
- """Test apt-configure module"""
+ """Test apt-configure module."""
def test_proxy_config(self):
- """Test proxy options added to apt config"""
+ """Test proxy options added to apt config."""
out = self.get_data_file('90cloud-init-aptproxy')
self.assertIn(
'Acquire::http::Proxy "http://squid.internal:3128";', out)
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_security.py b/tests/cloud_tests/testcases/modules/apt_configure_security.py
index 19c79c64..7d7e2585 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_security.py
+++ b/tests/cloud_tests/testcases/modules/apt_configure_security.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptconfigureSecurity(base.CloudTestCase):
- """Test apt-configure module"""
+ """Test apt-configure module."""
def test_security_mirror(self):
- """Test security lines added and uncommented in source.list"""
+ """Test security lines added and uncommented in source.list."""
out = self.get_data_file('sources.list')
self.assertEqual(6, int(out))
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
index d2ee2611..d9061f3c 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
+++ b/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
@@ -1,21 +1,21 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptconfigureSourcesKey(base.CloudTestCase):
- """Test apt-configure module"""
+ """Test apt-configure module."""
def test_apt_key_list(self):
- """Test key list updated"""
+ """Test key list updated."""
out = self.get_data_file('apt_key_list')
self.assertIn(
'1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF', out)
self.assertIn('Launchpad PPA for cloud init development team', out)
def test_source_list(self):
- """Test source.list updated"""
+ """Test source.list updated."""
out = self.get_data_file('sources.list')
self.assertIn(
'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
index 3931a92c..ddc86174 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
+++ b/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
@@ -1,21 +1,21 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptconfigureSourcesKeyserver(base.CloudTestCase):
- """Test apt-configure module"""
+ """Test apt-configure module."""
def test_apt_key_list(self):
- """Test specific key added"""
+ """Test specific key added."""
out = self.get_data_file('apt_key_list')
self.assertIn(
- '1BC3 0F71 5A3B 8612 47A8 1A5E 55FE 7C8C 0165 013E', out)
- self.assertIn('Launchpad PPA for curtin developers', out)
+ '1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF', out)
+ self.assertIn('Launchpad PPA for cloud init development team', out)
def test_source_list(self):
- """Test source.list updated"""
+ """Test source.list updated."""
out = self.get_data_file('sources.list')
self.assertIn(
'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
index a0bb5e6b..129d2264 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
+++ b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptconfigureSourcesList(base.CloudTestCase):
- """Test apt-configure module"""
+ """Test apt-configure module."""
def test_sources_list(self):
- """Test sources.list includes sources"""
+ """Test sources.list includes sources."""
out = self.get_data_file('sources.list')
self.assertRegex(out, r'deb http:\/\/archive.ubuntu.com\/ubuntu '
'[a-z].* main restricted')
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
index dcdb3767..d299e9ad 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
+++ b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
@@ -1,20 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptconfigureSourcesPPA(base.CloudTestCase):
- """Test apt-configure module"""
+ """Test apt-configure module."""
def test_ppa(self):
- """test specific ppa added"""
+ """Test specific ppa added."""
out = self.get_data_file('sources.list')
self.assertIn(
'http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu', out)
def test_ppa_key(self):
- """test ppa key added"""
+ """Test ppa key added."""
out = self.get_data_file('apt-key')
self.assertIn(
'1BC3 0F71 5A3B 8612 47A8 1A5E 55FE 7C8C 0165 013E', out)
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
index 446c597d..c98eedef 100644
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
+++ b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptPipeliningDisable(base.CloudTestCase):
- """Test apt-pipelining module"""
+ """Test apt-pipelining module."""
def test_disable_pipelining(self):
- """Test pipelining disabled"""
+ """Test pipelining disabled."""
out = self.get_data_file('90cloud-init-pipelining')
self.assertIn('Acquire::http::Pipeline-Depth "0";', out)
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
index ad2a8884..740dc7c0 100644
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
+++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestAptPipeliningOS(base.CloudTestCase):
- """Test apt-pipelining module"""
+ """Test apt-pipelining module."""
def test_os_pipelining(self):
- """Test pipelining set to os"""
+ """Test pipelining set to os."""
out = self.get_data_file('90cloud-init-pipelining')
self.assertIn('Acquire::http::Pipeline-Depth "0";', out)
diff --git a/tests/cloud_tests/testcases/modules/bootcmd.py b/tests/cloud_tests/testcases/modules/bootcmd.py
index 47a51e0a..f5b86b03 100644
--- a/tests/cloud_tests/testcases/modules/bootcmd.py
+++ b/tests/cloud_tests/testcases/modules/bootcmd.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestBootCmd(base.CloudTestCase):
- """Test bootcmd module"""
+ """Test bootcmd module."""
def test_bootcmd_host(self):
- """Test boot cmd worked"""
+ """Test boot cmd worked."""
out = self.get_data_file('hosts')
self.assertIn('192.168.1.130 us.archive.ubuntu.com', out)
diff --git a/tests/cloud_tests/testcases/modules/byobu.py b/tests/cloud_tests/testcases/modules/byobu.py
index 204b37b9..005ca014 100644
--- a/tests/cloud_tests/testcases/modules/byobu.py
+++ b/tests/cloud_tests/testcases/modules/byobu.py
@@ -1,24 +1,24 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestByobu(base.CloudTestCase):
- """Test Byobu module"""
+ """Test Byobu module."""
def test_byobu_installed(self):
- """Test byobu installed"""
+ """Test byobu installed."""
out = self.get_data_file('byobu_installed')
self.assertIn('/usr/bin/byobu', out)
def test_byobu_profile_enabled(self):
- """Test byobu profile.d file exists"""
+ """Test byobu profile.d file exists."""
out = self.get_data_file('byobu_profile_enabled')
self.assertIn('/etc/profile.d/Z97-byobu.sh', out)
def test_byobu_launch_exists(self):
- """Test byobu-launch exists"""
+ """Test byobu-launch exists."""
out = self.get_data_file('byobu_launch_exists')
self.assertIn('/usr/bin/byobu-launch', out)
diff --git a/tests/cloud_tests/testcases/modules/ca_certs.py b/tests/cloud_tests/testcases/modules/ca_certs.py
index 7448e480..e75f0413 100644
--- a/tests/cloud_tests/testcases/modules/ca_certs.py
+++ b/tests/cloud_tests/testcases/modules/ca_certs.py
@@ -1,19 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestCaCerts(base.CloudTestCase):
- """Test ca certs module"""
+ """Test ca certs module."""
def test_cert_count(self):
- """Test the count is proper"""
+ """Test the count is proper."""
out = self.get_data_file('cert_count')
self.assertEqual(5, int(out))
def test_cert_installed(self):
- """Test line from our cert exists"""
+ """Test line from our cert exists."""
out = self.get_data_file('cert')
self.assertIn('a36c744454555024e7f82edc420fd2c8', out)
diff --git a/tests/cloud_tests/testcases/modules/debug_disable.py b/tests/cloud_tests/testcases/modules/debug_disable.py
index 9899fdfe..e40e4b89 100644
--- a/tests/cloud_tests/testcases/modules/debug_disable.py
+++ b/tests/cloud_tests/testcases/modules/debug_disable.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestDebugDisable(base.CloudTestCase):
- """Disable debug messages"""
+ """Disable debug messages."""
def test_debug_disable(self):
- """Test verbose output missing from logs"""
+ """Test verbose output missing from logs."""
out = self.get_data_file('cloud-init.log')
self.assertNotIn(
out, r'Skipping module named [a-z].* verbose printing disabled')
diff --git a/tests/cloud_tests/testcases/modules/debug_enable.py b/tests/cloud_tests/testcases/modules/debug_enable.py
index 21c89524..28d26062 100644
--- a/tests/cloud_tests/testcases/modules/debug_enable.py
+++ b/tests/cloud_tests/testcases/modules/debug_enable.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestDebugEnable(base.CloudTestCase):
- """Test debug messages"""
+ """Test debug messages."""
def test_debug_enable(self):
- """Test debug messages in cloud-init log"""
+ """Test debug messages in cloud-init log."""
out = self.get_data_file('cloud-init.log')
self.assertIn('[DEBUG]', out)
diff --git a/tests/cloud_tests/testcases/modules/final_message.py b/tests/cloud_tests/testcases/modules/final_message.py
index b06ad01b..b7b5d5e0 100644
--- a/tests/cloud_tests/testcases/modules/final_message.py
+++ b/tests/cloud_tests/testcases/modules/final_message.py
@@ -1,34 +1,27 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestFinalMessage(base.CloudTestCase):
- """
- test cloud init module `cc_final_message`
- """
+ """Test cloud init module `cc_final_message`."""
+
subs_char = '$'
def get_final_message_config(self):
- """
- get config for final message
- """
+ """Get config for final message."""
self.assertIn('final_message', self.cloud_config)
return self.cloud_config['final_message']
def get_final_message(self):
- """
- get final message from log
- """
+ """Get final message from log."""
out = self.get_data_file('cloud-init-output.log')
lines = len(self.get_final_message_config().splitlines())
return '\n'.join(out.splitlines()[-1 * lines:])
def test_final_message_string(self):
- """
- ensure final handles regular strings
- """
+ """Ensure final handles regular strings."""
for actual, config in zip(
self.get_final_message().splitlines(),
self.get_final_message_config().splitlines()):
@@ -36,9 +29,7 @@ class TestFinalMessage(base.CloudTestCase):
self.assertEqual(actual, config)
def test_final_message_subs(self):
- """
- test variable substitution in final message
- """
+ """Test variable substitution in final message."""
# TODO: add verification of other substitutions
patterns = {'$datasource': self.get_datasource()}
for key, expected in patterns.items():
diff --git a/tests/cloud_tests/testcases/modules/keys_to_console.py b/tests/cloud_tests/testcases/modules/keys_to_console.py
index b36c96cf..88b6812e 100644
--- a/tests/cloud_tests/testcases/modules/keys_to_console.py
+++ b/tests/cloud_tests/testcases/modules/keys_to_console.py
@@ -1,20 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestKeysToConsole(base.CloudTestCase):
- """Test proper keys are included and excluded to console"""
+ """Test proper keys are included and excluded to console."""
def test_excluded_keys(self):
- """Test excluded keys missing"""
+ """Test excluded keys missing."""
out = self.get_data_file('syslog')
self.assertNotIn('DSA', out)
self.assertNotIn('ECDSA', out)
def test_expected_keys(self):
- """Test expected keys exist"""
+ """Test expected keys exist."""
out = self.get_data_file('syslog')
self.assertIn('ED25519', out)
self.assertIn('RSA', out)
diff --git a/tests/cloud_tests/testcases/modules/locale.py b/tests/cloud_tests/testcases/modules/locale.py
index bf4e1b07..cb9e1dce 100644
--- a/tests/cloud_tests/testcases/modules/locale.py
+++ b/tests/cloud_tests/testcases/modules/locale.py
@@ -1,19 +1,22 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
+from cloudinit import util
+
class TestLocale(base.CloudTestCase):
- """Test locale is set properly"""
+ """Test locale is set properly."""
def test_locale(self):
- """Test locale is set properly"""
- out = self.get_data_file('locale_default')
- self.assertIn('LANG="en_GB.UTF-8"', out)
+ """Test locale is set properly."""
+ data = util.load_shell_content(self.get_data_file('locale_default'))
+ self.assertIn("LANG", data)
+ self.assertEqual('en_GB.UTF-8', data['LANG'])
def test_locale_a(self):
- """Test locale -a has both options"""
+ """Test locale -a has both options."""
out = self.get_data_file('locale_a')
self.assertIn('en_GB.utf8', out)
self.assertIn('en_US.utf8', out)
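The locale check above now parses the collected file as shell content instead of matching one quoted string, so it tolerates both LANG=en_GB.UTF-8 and LANG="en_GB.UTF-8". A minimal sketch of that parsing follows; it is illustrative only, and the real helper exercised by the test is cloudinit.util.load_shell_content:

    import shlex

    def load_shell_content(content):
        """Parse KEY=value shell lines into a dict, stripping quotes."""
        data = {}
        for token in shlex.split(content, comments=True):
            key, _, value = token.partition('=')
            data[key] = value
        return data

    print(load_shell_content('LANG="en_GB.UTF-8"\nLC_ALL=C\n'))
    # {'LANG': 'en_GB.UTF-8', 'LC_ALL': 'C'}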
diff --git a/tests/cloud_tests/testcases/modules/lxd_bridge.py b/tests/cloud_tests/testcases/modules/lxd_bridge.py
index 4087e2f2..c0262ba3 100644
--- a/tests/cloud_tests/testcases/modules/lxd_bridge.py
+++ b/tests/cloud_tests/testcases/modules/lxd_bridge.py
@@ -1,24 +1,24 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestLxdBridge(base.CloudTestCase):
- """Test LXD module"""
+ """Test LXD module."""
def test_lxd(self):
- """Test lxd installed"""
+ """Test lxd installed."""
out = self.get_data_file('lxd')
self.assertIn('/usr/bin/lxd', out)
def test_lxc(self):
- """Test lxc installed"""
+ """Test lxc installed."""
out = self.get_data_file('lxc')
self.assertIn('/usr/bin/lxc', out)
def test_bridge(self):
- """Test bridge config"""
+ """Test bridge config."""
out = self.get_data_file('lxc-bridge')
self.assertIn('lxdbr0', out)
self.assertIn('10.100.100.1/24', out)
diff --git a/tests/cloud_tests/testcases/modules/lxd_dir.py b/tests/cloud_tests/testcases/modules/lxd_dir.py
index 51a9a1f1..1495674e 100644
--- a/tests/cloud_tests/testcases/modules/lxd_dir.py
+++ b/tests/cloud_tests/testcases/modules/lxd_dir.py
@@ -1,19 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestLxdDir(base.CloudTestCase):
- """Test LXD module"""
+ """Test LXD module."""
def test_lxd(self):
- """Test lxd installed"""
+ """Test lxd installed."""
out = self.get_data_file('lxd')
self.assertIn('/usr/bin/lxd', out)
def test_lxc(self):
- """Test lxc installed"""
+ """Test lxc installed."""
out = self.get_data_file('lxc')
self.assertIn('/usr/bin/lxc', out)
diff --git a/tests/cloud_tests/testcases/modules/ntp.py b/tests/cloud_tests/testcases/modules/ntp.py
index 82d32880..b50e52fe 100644
--- a/tests/cloud_tests/testcases/modules/ntp.py
+++ b/tests/cloud_tests/testcases/modules/ntp.py
@@ -1,6 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
@@ -9,8 +9,8 @@ class TestNtp(base.CloudTestCase):
def test_ntp_installed(self):
"""Test ntp installed"""
- out = self.get_data_file('ntp_installed_empty')
- self.assertEqual(1, int(out))
+ out = self.get_data_file('ntp_installed')
+ self.assertEqual(0, int(out))
def test_ntp_dist_entries(self):
"""Test dist config file is empty"""
@@ -19,10 +19,7 @@ class TestNtp(base.CloudTestCase):
def test_ntp_entires(self):
"""Test config entries"""
- out = self.get_data_file('ntp_conf_empty')
- self.assertIn('pool 0.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 1.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 2.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 3.ubuntu.pool.ntp.org iburst', out)
+ out = self.get_data_file('ntp_conf_pool_list')
+ self.assertIn('pool.ntp.org iburst', out)
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.py b/tests/cloud_tests/testcases/modules/ntp_pools.py
index ff6d8fa4..152fd3f1 100644
--- a/tests/cloud_tests/testcases/modules/ntp_pools.py
+++ b/tests/cloud_tests/testcases/modules/ntp_pools.py
@@ -1,16 +1,16 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestNtpPools(base.CloudTestCase):
- """Test ntp module"""
+ """Test ntp module."""
def test_ntp_installed(self):
"""Test ntp installed"""
out = self.get_data_file('ntp_installed_pools')
- self.assertEqual(1, int(out))
+ self.assertEqual(0, int(out))
def test_ntp_dist_entries(self):
"""Test dist config file is empty"""
diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.py b/tests/cloud_tests/testcases/modules/ntp_servers.py
index 4010cf80..8d2a68b3 100644
--- a/tests/cloud_tests/testcases/modules/ntp_servers.py
+++ b/tests/cloud_tests/testcases/modules/ntp_servers.py
@@ -10,7 +10,7 @@ class TestNtpServers(base.CloudTestCase):
def test_ntp_installed(self):
"""Test ntp installed"""
out = self.get_data_file('ntp_installed_servers')
- self.assertEqual(1, int(out))
+ self.assertEqual(0, int(out))
def test_ntp_dist_entries(self):
"""Test dist config file is empty"""
diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
index 00353ead..a92dec22 100644
--- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
+++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
@@ -1,24 +1,24 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestPackageInstallUpdateUpgrade(base.CloudTestCase):
- """Test package install update upgrade module"""
+ """Test package install update upgrade module."""
def test_installed_htop(self):
- """Test htop got installed"""
+ """Test htop got installed."""
out = self.get_data_file('dpkg_htop')
self.assertEqual(1, int(out))
def test_installed_tree(self):
- """Test tree got installed"""
+ """Test tree got installed."""
out = self.get_data_file('dpkg_tree')
self.assertEqual(1, int(out))
def test_apt_history(self):
- """Test apt history for update command"""
+ """Test apt history for update command."""
out = self.get_data_file('apt_history_cmdline')
self.assertIn(
'Commandline: /usr/bin/apt-get --option=Dpkg::Options'
@@ -26,7 +26,7 @@ class TestPackageInstallUpdateUpgrade(base.CloudTestCase):
'--assume-yes --quiet install htop tree', out)
def test_cloud_init_output(self):
- """Test cloud-init-output for install & upgrade stuff"""
+ """Test cloud-init-output for install & upgrade stuff."""
out = self.get_data_file('cloud-init-output.log')
self.assertIn('Setting up tree (', out)
self.assertIn('Setting up htop (', out)
diff --git a/tests/cloud_tests/testcases/modules/runcmd.py b/tests/cloud_tests/testcases/modules/runcmd.py
index 780cd186..9fce3062 100644
--- a/tests/cloud_tests/testcases/modules/runcmd.py
+++ b/tests/cloud_tests/testcases/modules/runcmd.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestRunCmd(base.CloudTestCase):
- """Test runcmd module"""
+ """Test runcmd module."""
def test_run_cmd(self):
- """Test run command worked"""
+ """Test run command worked."""
out = self.get_data_file('run_cmd')
self.assertIn('cloud-init run cmd test', out)
diff --git a/tests/cloud_tests/testcases/modules/salt_minion.py b/tests/cloud_tests/testcases/modules/salt_minion.py
index 3ef30f7e..c697db2d 100644
--- a/tests/cloud_tests/testcases/modules/salt_minion.py
+++ b/tests/cloud_tests/testcases/modules/salt_minion.py
@@ -1,26 +1,26 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class Test(base.CloudTestCase):
- """Test salt minion module"""
+ """Test salt minion module."""
def test_minon_master(self):
- """Test master value in config"""
+ """Test master value in config."""
out = self.get_data_file('minion')
self.assertIn('master: salt.mydomain.com', out)
def test_minion_pem(self):
- """Test private key"""
+ """Test private key."""
out = self.get_data_file('minion.pem')
self.assertIn('------BEGIN PRIVATE KEY------', out)
self.assertIn('<key data>', out)
self.assertIn('------END PRIVATE KEY-------', out)
def test_minion_pub(self):
- """Test public key"""
+ """Test public key."""
out = self.get_data_file('minion.pub')
self.assertIn('------BEGIN PUBLIC KEY-------', out)
self.assertIn('<key data>', out)
diff --git a/tests/cloud_tests/testcases/modules/seed_random_data.py b/tests/cloud_tests/testcases/modules/seed_random_data.py
index b2121569..db433d26 100644
--- a/tests/cloud_tests/testcases/modules/seed_random_data.py
+++ b/tests/cloud_tests/testcases/modules/seed_random_data.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestSeedRandom(base.CloudTestCase):
- """Test seed random module"""
+ """Test seed random module."""
def test_random_seed_data(self):
- """Test random data passed in exists"""
+ """Test random data passed in exists."""
out = self.get_data_file('seed_data')
self.assertIn('MYUb34023nD:LFDK10913jk;dfnk:Df', out)
diff --git a/tests/cloud_tests/testcases/modules/set_hostname.py b/tests/cloud_tests/testcases/modules/set_hostname.py
index 9501b069..6e96a75c 100644
--- a/tests/cloud_tests/testcases/modules/set_hostname.py
+++ b/tests/cloud_tests/testcases/modules/set_hostname.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestHostname(base.CloudTestCase):
- """Test hostname module"""
+ """Test hostname module."""
def test_hostname(self):
- """Test hostname command shows correct output"""
+ """Test hostname command shows correct output."""
out = self.get_data_file('hostname')
self.assertIn('myhostname', out)
diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
index d89c299d..398f3d40 100644
--- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
+++ b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
@@ -1,24 +1,24 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestHostnameFqdn(base.CloudTestCase):
- """Test Hostname module"""
+ """Test Hostname module."""
def test_hostname(self):
- """Test hostname output"""
+ """Test hostname output."""
out = self.get_data_file('hostname')
self.assertIn('myhostname', out)
def test_hostname_fqdn(self):
- """Test hostname fqdn output"""
+ """Test hostname fqdn output."""
out = self.get_data_file('fqdn')
self.assertIn('host.myorg.com', out)
def test_hosts(self):
- """Test /etc/hosts file"""
+ """Test /etc/hosts file."""
out = self.get_data_file('hosts')
self.assertIn('127.0.1.1 host.myorg.com myhostname', out)
self.assertIn('127.0.0.1 localhost', out)
diff --git a/tests/cloud_tests/testcases/modules/set_password.py b/tests/cloud_tests/testcases/modules/set_password.py
index 1411a296..a29b2261 100644
--- a/tests/cloud_tests/testcases/modules/set_password.py
+++ b/tests/cloud_tests/testcases/modules/set_password.py
@@ -1,21 +1,21 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestPassword(base.CloudTestCase):
- """Test password module"""
+ """Test password module."""
# TODO add test to make sure password is actually "password"
def test_shadow(self):
- """Test ubuntu user in shadow"""
+ """Test ubuntu user in shadow."""
out = self.get_data_file('shadow')
self.assertIn('ubuntu:', out)
def test_sshd_config(self):
- """Test sshd config allows passwords"""
+ """Test sshd config allows passwords."""
out = self.get_data_file('sshd_config')
self.assertIn('PasswordAuthentication yes', out)
diff --git a/tests/cloud_tests/testcases/modules/set_password_expire.py b/tests/cloud_tests/testcases/modules/set_password_expire.py
index 1ac9c23f..a1c3aa08 100644
--- a/tests/cloud_tests/testcases/modules/set_password_expire.py
+++ b/tests/cloud_tests/testcases/modules/set_password_expire.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestPasswordExpire(base.CloudTestCase):
- """Test password module"""
+ """Test password module."""
def test_shadow(self):
- """Test user frozen in shadow"""
+ """Test user frozen in shadow."""
out = self.get_data_file('shadow')
self.assertIn('harry:!:', out)
self.assertIn('dick:!:', out)
@@ -16,7 +16,7 @@ class TestPasswordExpire(base.CloudTestCase):
self.assertIn('harry:!:', out)
def test_sshd_config(self):
- """Test sshd config allows passwords"""
+ """Test sshd config allows passwords."""
out = self.get_data_file('sshd_config')
self.assertIn('PasswordAuthentication no', out)
diff --git a/tests/cloud_tests/testcases/modules/set_password_list.py b/tests/cloud_tests/testcases/modules/set_password_list.py
index 6819d259..375cd27d 100644
--- a/tests/cloud_tests/testcases/modules/set_password_list.py
+++ b/tests/cloud_tests/testcases/modules/set_password_list.py
@@ -1,11 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestPasswordList(base.PasswordListTest, base.CloudTestCase):
- """Test password setting via list in chpasswd/list"""
+ """Test password setting via list in chpasswd/list."""
+
__test__ = True
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list_string.py b/tests/cloud_tests/testcases/modules/set_password_list_string.py
index 2c34fada..8c2634c5 100644
--- a/tests/cloud_tests/testcases/modules/set_password_list_string.py
+++ b/tests/cloud_tests/testcases/modules/set_password_list_string.py
@@ -1,11 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestPasswordListString(base.PasswordListTest, base.CloudTestCase):
- """Test password setting via string in chpasswd/list"""
+ """Test password setting via string in chpasswd/list."""
+
__test__ = True
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
index a0f8896b..82223217 100644
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
+++ b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
@@ -1,24 +1,24 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestSshKeyFingerprintsDisable(base.CloudTestCase):
- """Test ssh key fingerprints module"""
+ """Test ssh key fingerprints module."""
def test_cloud_init_log(self):
- """Verify disabled"""
+ """Verify disabled."""
out = self.get_data_file('cloud-init.log')
self.assertIn('Skipping module named ssh-authkey-fingerprints, '
'logging of ssh fingerprints disabled', out)
def test_syslog(self):
- """Verify output of syslog"""
+ """Verify output of syslog."""
out = self.get_data_file('syslog')
- self.assertNotRegexpMatches(out, r'256 SHA256:.*(ECDSA)')
- self.assertNotRegexpMatches(out, r'256 SHA256:.*(ED25519)')
- self.assertNotRegexpMatches(out, r'1024 SHA256:.*(DSA)')
- self.assertNotRegexpMatches(out, r'2048 SHA256:.*(RSA)')
+ self.assertNotRegex(out, r'256 SHA256:.*(ECDSA)')
+ self.assertNotRegex(out, r'256 SHA256:.*(ED25519)')
+ self.assertNotRegex(out, r'1024 SHA256:.*(DSA)')
+ self.assertNotRegex(out, r'2048 SHA256:.*(RSA)')
# vi: ts=4 expandtab
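The renames above move off assertRegexpMatches/assertNotRegexpMatches, which are deprecated aliases removed in newer Python 3 releases; assertRegex/assertNotRegex are the supported names. A standalone, runnable illustration (not part of this tree):

    import unittest

    class RegexNames(unittest.TestCase):
        def test_modern_assertion_names(self):
            out = '256 SHA256:abcdef (ED25519)'
            # same patterns as the syslog checks above
            self.assertRegex(out, r'256 SHA256:.*(ED25519)')
            self.assertNotRegex(out, r'1024 SHA256:.*(DSA)')

    if __name__ == '__main__':
        unittest.main()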
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py
index 3c44b0cc..3510e75a 100644
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py
+++ b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py
@@ -1,18 +1,18 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestSshKeyFingerprintsEnable(base.CloudTestCase):
- """Test ssh key fingerprints module"""
+ """Test ssh key fingerprints module."""
def test_syslog(self):
- """Verify output of syslog"""
+ """Verify output of syslog."""
out = self.get_data_file('syslog')
- self.assertRegexpMatches(out, r'256 SHA256:.*(ECDSA)')
- self.assertRegexpMatches(out, r'256 SHA256:.*(ED25519)')
- self.assertNotRegexpMatches(out, r'1024 SHA256:.*(DSA)')
- self.assertNotRegexpMatches(out, r'2048 SHA256:.*(RSA)')
+ self.assertRegex(out, r'256 SHA256:.*(ECDSA)')
+ self.assertRegex(out, r'256 SHA256:.*(ED25519)')
+ self.assertNotRegex(out, r'1024 SHA256:.*(DSA)')
+ self.assertNotRegex(out, r'2048 SHA256:.*(RSA)')
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_import_id.py b/tests/cloud_tests/testcases/modules/ssh_import_id.py
index 214e710d..ef156f47 100644
--- a/tests/cloud_tests/testcases/modules/ssh_import_id.py
+++ b/tests/cloud_tests/testcases/modules/ssh_import_id.py
@@ -1,26 +1,17 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestSshImportId(base.CloudTestCase):
- """Test ssh import id module"""
+ """Test ssh import id module."""
def test_authorized_keys(self):
- """Test that ssh keys were imported"""
+ """Test that ssh keys were imported."""
out = self.get_data_file('auth_keys_ubuntu')
- # Rather than checking the key fingerprints, you could just check
- # the ending comment for where it got imported from in case these
- # change in the future :\
- self.assertIn('8sXGTYYw3iQSkOvDUUlIsqdaO+w== powersj@github/'
- '18564351 # ssh-import-id gh:powersj', out)
- self.assertIn('Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies-'
- 'canonical # ssh-import-id lp:smoser', out)
- self.assertIn('7cUDQSXbabilgnzTjHo9mjd/kZ7cLOHP smoser@bart-'
- 'canonical # ssh-import-id lp:smoser', out)
- self.assertIn('aX0VHGXvHAQlPl4n7+FzAE1UmWFYEGrsSoNvLv3 smose'
- 'r@kaypeah # ssh-import-id lp:smoser', out)
+ self.assertIn('# ssh-import-id gh:powersj', out)
+ self.assertIn('# ssh-import-id lp:smoser', out)
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_generate.py b/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
index 161ace5f..fd6d9ba5 100644
--- a/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
+++ b/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
@@ -1,56 +1,56 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestSshKeysGenerate(base.CloudTestCase):
- """Test ssh keys module"""
+ """Test ssh keys module."""
# TODO: Check cloud-init-output for the correct keys being generated
def test_ubuntu_authorized_keys(self):
- """Test passed in key is not in list for ubuntu"""
+ """Test passed in key is not in list for ubuntu."""
out = self.get_data_file('auth_keys_ubuntu')
self.assertEqual('', out)
def test_dsa_public(self):
- """Test dsa public key not generated"""
+ """Test dsa public key not generated."""
out = self.get_data_file('dsa_public')
self.assertEqual('', out)
def test_dsa_private(self):
- """Test dsa private key not generated"""
+ """Test dsa private key not generated."""
out = self.get_data_file('dsa_private')
self.assertEqual('', out)
def test_rsa_public(self):
- """Test rsa public key not generated"""
+ """Test rsa public key not generated."""
out = self.get_data_file('rsa_public')
self.assertEqual('', out)
def test_rsa_private(self):
- """Test rsa public key not generated"""
+ """Test rsa public key not generated."""
out = self.get_data_file('rsa_private')
self.assertEqual('', out)
def test_ecdsa_public(self):
- """Test ecdsa public key generated"""
+ """Test ecdsa public key generated."""
out = self.get_data_file('ecdsa_public')
self.assertIsNotNone(out)
def test_ecdsa_private(self):
- """Test ecdsa public key generated"""
+ """Test ecdsa public key generated."""
out = self.get_data_file('ecdsa_private')
self.assertIsNotNone(out)
def test_ed25519_public(self):
- """Test ed25519 public key generated"""
+ """Test ed25519 public key generated."""
out = self.get_data_file('ed25519_public')
self.assertIsNotNone(out)
def test_ed25519_private(self):
- """Test ed25519 public key generated"""
+ """Test ed25519 public key generated."""
out = self.get_data_file('ed25519_private')
self.assertIsNotNone(out)
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_provided.py b/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
index 8f18cb94..544649da 100644
--- a/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
+++ b/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
@@ -1,67 +1,67 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestSshKeysProvided(base.CloudTestCase):
- """Test ssh keys module"""
+ """Test ssh keys module."""
def test_ubuntu_authorized_keys(self):
- """Test passed in key is not in list for ubuntu"""
+ """Test passed in key is not in list for ubuntu."""
out = self.get_data_file('auth_keys_ubuntu')
self.assertEqual('', out)
def test_root_authorized_keys(self):
- """Test passed in key is in authorized list for root"""
+ """Test passed in key is in authorized list for root."""
out = self.get_data_file('auth_keys_root')
self.assertIn('lzrkPqONphoZx0LDV86w7RUz1ksDzAdcm0tvmNRFMN1a0frDs50'
'6oA3aWK0oDk4Nmvk8sXGTYYw3iQSkOvDUUlIsqdaO+w==', out)
def test_dsa_public(self):
- """Test dsa public key passed in"""
+ """Test dsa public key passed in."""
out = self.get_data_file('dsa_public')
self.assertIn('AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4RZS8c'
'NM4ZpeuE5UB/Nnr6OSU/nmbO8LuM', out)
def test_dsa_private(self):
- """Test dsa private key passed in"""
+ """Test dsa private key passed in."""
out = self.get_data_file('dsa_private')
self.assertIn('MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr'
'hOVAfzZ6+jklP', out)
def test_rsa_public(self):
- """Test rsa public key passed in"""
+ """Test rsa public key passed in."""
out = self.get_data_file('rsa_public')
self.assertIn('AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT'
'LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4', out)
def test_rsa_private(self):
- """Test rsa public key passed in"""
+ """Test rsa public key passed in."""
out = self.get_data_file('rsa_private')
self.assertIn('4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un'
'RQvLZpMRdywBm', out)
def test_ecdsa_public(self):
- """Test ecdsa public key passed in"""
+ """Test ecdsa public key passed in."""
out = self.get_data_file('ecdsa_public')
self.assertIn('AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB'
'BBFsS5Tvky/IC/dXhE/afxxU', out)
def test_ecdsa_private(self):
- """Test ecdsa public key passed in"""
+ """Test ecdsa public key passed in."""
out = self.get_data_file('ecdsa_private')
self.assertIn('AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY'
'5mpZqxgX4vcgb', out)
def test_ed25519_public(self):
- """Test ed25519 public key passed in"""
+ """Test ed25519 public key passed in."""
out = self.get_data_file('ed25519_public')
self.assertIn('AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6'
'G15dqjQ2XkNVOEnb5', out)
def test_ed25519_private(self):
- """Test ed25519 public key passed in"""
+ """Test ed25519 public key passed in."""
out = self.get_data_file('ed25519_private')
self.assertIn('XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT'
'OhteXao0Nl5DVThJ2+Q', out)
diff --git a/tests/cloud_tests/testcases/modules/timezone.py b/tests/cloud_tests/testcases/modules/timezone.py
index bf91d490..654fa53d 100644
--- a/tests/cloud_tests/testcases/modules/timezone.py
+++ b/tests/cloud_tests/testcases/modules/timezone.py
@@ -1,14 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestTimezone(base.CloudTestCase):
- """Test timezone module"""
+ """Test timezone module."""
def test_timezone(self):
- """Test date prints correct timezone"""
+ """Test date prints correct timezone."""
out = self.get_data_file('timezone')
self.assertEqual('HDT', out.rstrip())
diff --git a/tests/cloud_tests/testcases/modules/user_groups.py b/tests/cloud_tests/testcases/modules/user_groups.py
index e5732322..67af527b 100644
--- a/tests/cloud_tests/testcases/modules/user_groups.py
+++ b/tests/cloud_tests/testcases/modules/user_groups.py
@@ -1,42 +1,42 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestUserGroups(base.CloudTestCase):
- """Example cloud-config test"""
+ """Example cloud-config test."""
def test_group_ubuntu(self):
- """Test ubuntu group exists"""
+ """Test ubuntu group exists."""
out = self.get_data_file('group_ubuntu')
self.assertRegex(out, r'ubuntu:x:[0-9]{4}:')
def test_group_cloud_users(self):
- """Test cloud users group exists"""
+ """Test cloud users group exists."""
out = self.get_data_file('group_cloud_users')
self.assertRegex(out, r'cloud-users:x:[0-9]{4}:barfoo')
def test_user_ubuntu(self):
- """Test ubuntu user exists"""
+ """Test ubuntu user exists."""
out = self.get_data_file('user_ubuntu')
self.assertRegex(
out, r'ubuntu:x:[0-9]{4}:[0-9]{4}:Ubuntu:/home/ubuntu:/bin/bash')
def test_user_foobar(self):
- """Test foobar user exists"""
+ """Test foobar user exists."""
out = self.get_data_file('user_foobar')
self.assertRegex(
out, r'foobar:x:[0-9]{4}:[0-9]{4}:Foo B. Bar:/home/foobar:')
def test_user_barfoo(self):
- """Test barfoo user exists"""
+ """Test barfoo user exists."""
out = self.get_data_file('user_barfoo')
self.assertRegex(
out, r'barfoo:x:[0-9]{4}:[0-9]{4}:Bar B. Foo:/home/barfoo:')
def test_user_cloudy(self):
- """Test cloudy user exists"""
+ """Test cloudy user exists."""
out = self.get_data_file('user_cloudy')
self.assertRegex(out, r'cloudy:x:[0-9]{3,4}:')
diff --git a/tests/cloud_tests/testcases/modules/write_files.py b/tests/cloud_tests/testcases/modules/write_files.py
index 97dfeec3..7bd520f6 100644
--- a/tests/cloud_tests/testcases/modules/write_files.py
+++ b/tests/cloud_tests/testcases/modules/write_files.py
@@ -1,29 +1,29 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""cloud-init Integration Test Verify Script"""
+"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestWriteFiles(base.CloudTestCase):
- """Example cloud-config test"""
+ """Example cloud-config test."""
def test_b64(self):
- """Test b64 encoded file reads as ascii"""
+ """Test b64 encoded file reads as ascii."""
out = self.get_data_file('file_b64')
self.assertIn('ASCII text', out)
def test_binary(self):
- """Test binary file reads as executable"""
+ """Test binary file reads as executable."""
out = self.get_data_file('file_binary')
self.assertIn('ELF 64-bit LSB executable, x86-64, version 1', out)
def test_gzip(self):
- """Test gzip file shows up as a shell script"""
+ """Test gzip file shows up as a shell script."""
out = self.get_data_file('file_gzip')
self.assertIn('POSIX shell script, ASCII text executable', out)
def test_text(self):
- """Test text shows up as ASCII text"""
+ """Test text shows up as ASCII text."""
out = self.get_data_file('file_text')
self.assertIn('ASCII text', out)
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
index 64a86672..2bbe21c7 100644
--- a/tests/cloud_tests/util.py
+++ b/tests/cloud_tests/util.py
@@ -1,28 +1,43 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Utilities for re-use across integration tests."""
+
+import copy
import glob
import os
import random
+import shutil
import string
import tempfile
import yaml
-from cloudinit.distros import OSFAMILIES
from cloudinit import util as c_util
from tests.cloud_tests import LOG
+OS_FAMILY_MAPPING = {
+ 'debian': ['debian', 'ubuntu'],
+ 'redhat': ['centos', 'rhel', 'fedora'],
+ 'gentoo': ['gentoo'],
+ 'freebsd': ['freebsd'],
+ 'suse': ['sles'],
+ 'arch': ['arch'],
+}
+
def list_test_data(data_dir):
- """
- find all tests with test data available in data_dir
- data_dir should contain <platforms>/<os_name>/<testnames>/<data>
- return_value: {<platform>: {<os_name>: [<testname>]}}
+ """Find all tests with test data available in data_dir.
+
+ @param data_dir: should contain <platforms>/<os_name>/<testnames>/<data>
+ @return_value: {<platform>: {<os_name>: [<testname>]}}
"""
if not os.path.isdir(data_dir):
raise ValueError("bad data dir")
res = {}
for platform in os.listdir(data_dir):
+ if not os.path.isdir(os.path.join(data_dir, platform)):
+ continue
+
res[platform] = {}
for os_name in os.listdir(os.path.join(data_dir, platform)):
res[platform][os_name] = [
@@ -36,39 +51,33 @@ def list_test_data(data_dir):
def gen_instance_name(prefix='cloud-test', image_desc=None, use_desc=None,
max_len=63, delim='-', max_tries=16, used_list=None,
valid=string.ascii_lowercase + string.digits):
- """
- generate an unique name for a test instance
- prefix: name prefix, defaults to cloud-test, default should be left
- image_desc: short string with image desc, will be truncated to 16 chars
- use_desc: short string with usage desc, will be truncated to 30 chars
- max_len: maximum name length, defaults to 64 chars
- delim: delimiter to use between tokens
- max_tries: maximum tries to find a unique name before giving up
- used_list: already used names, or none to not check
- valid: string of valid characters for name
- return_value: valid, unused name, may raise StopIteration
+ """Generate an unique name for a test instance.
+
+ @param prefix: name prefix, defaults to cloud-test, default should be left
+ @param image_desc: short string (len <= 16) with image desc
+ @param use_desc: short string (len <= 30) with usage desc
+ @param max_len: maximum name length, defaults to 64 chars
+ @param delim: delimiter to use between tokens
+ @param max_tries: maximum tries to find a unique name before giving up
+ @param used_list: already used names, or none to not check
+ @param valid: string of valid characters for name
+ @return_value: valid, unused name, may raise StopIteration
"""
unknown = 'unknown'
def join(*args):
- """
- join args with delim
- """
+ """Join args with delim."""
return delim.join(args)
def fill(*args):
- """
- join name elems and fill rest with random data
- """
+ """Join name elems and fill rest with random data."""
name = join(*args)
num = max_len - len(name) - len(delim)
return join(name, ''.join(random.choice(valid) for _ in range(num)))
def clean(elem, max_len):
- """
- filter bad characters out of elem and trim to length
- """
- elem = elem[:max_len] if elem else unknown
+ """Filter bad characters out of elem and trim to length."""
+ elem = elem.lower()[:max_len] if elem else unknown
return ''.join(c if c in valid else delim for c in elem)
return next(name for name in
@@ -78,30 +87,39 @@ def gen_instance_name(prefix='cloud-test', image_desc=None, use_desc=None,
def sorted_unique(iterable, key=None, reverse=False):
- """
- return_value: a sorted list of unique items in iterable
+ """Create unique sorted list.
+
+ @param iterable: the data structure to sort
+ @param key: if you have a specific key
+ @param reverse: to reverse or not
+ @return_value: a sorted list of unique items in iterable
"""
return sorted(set(iterable), key=key, reverse=reverse)
def get_os_family(os_name):
+ """Get os family type for os_name.
+
+ @param os_name: name of os
+ @return_value: family name for os_name
"""
- get os family type for os_name
- """
- return next((k for k, v in OSFAMILIES.items() if os_name in v), None)
+ return next((k for k, v in OS_FAMILY_MAPPING.items()
+ if os_name.lower() in v), None)
def current_verbosity():
- """
- get verbosity currently in effect from log level
- return_value: verbosity, 0-2, 2 = verbose, 0 = quiet
+ """Get verbosity currently in effect from log level.
+
+ @return_value: verbosity, 0-2, 2=verbose, 0=quiet
"""
return max(min(3 - int(LOG.level / 10), 2), 0)
def is_writable_dir(path):
- """
- make sure dir is writable
+ """Make sure dir is writable.
+
+ @param path: path to determine if writable
+ @return_value: boolean with result
"""
try:
c_util.ensure_dir(path)
@@ -112,9 +130,10 @@ def is_writable_dir(path):
def is_clean_writable_dir(path):
- """
- make sure dir is empty and writable, creating it if it does not exist
- return_value: True/False if successful
+ """Make sure dir is empty and writable, creating it if it does not exist.
+
+ @param path: path to check
+ @return_value: True/False if successful
"""
path = os.path.abspath(path)
if not (is_writable_dir(path) and len(os.listdir(path)) == 0):
@@ -123,29 +142,31 @@ def is_clean_writable_dir(path):
def configure_yaml():
+ """Clean yaml."""
yaml.add_representer(str, (lambda dumper, data: dumper.represent_scalar(
'tag:yaml.org,2002:str', data, style='|' if '\n' in data else '')))
-def yaml_format(data):
- """
- format data as yaml
+def yaml_format(data, content_type=None):
+ """Format data as yaml.
+
+ @param data: data to dump
+    @param content_type: if specified, prepended as a '#<content_type>' line
+ @return_value: yaml string
"""
configure_yaml()
- return yaml.dump(data, indent=2, default_flow_style=False)
+ content_type = (
+ '#{}\n'.format(content_type.strip('#\n')) if content_type else '')
+ return content_type + yaml.dump(data, indent=2, default_flow_style=False)
def yaml_dump(data, path):
- """
- dump data to path in yaml format
- """
- write_file(os.path.abspath(path), yaml_format(data), omode='w')
+ """Dump data to path in yaml format."""
+ c_util.write_file(os.path.abspath(path), yaml_format(data), omode='w')
def merge_results(data, path):
- """
- handle merging results from collect phase and verify phase
- """
+ """Handle merging results from collect phase and verify phase."""
current = {}
if os.path.exists(path):
with open(path, 'r') as fp:
@@ -154,10 +175,118 @@ def merge_results(data, path):
yaml_dump(current, path)
-def write_file(*args, **kwargs):
+def rel_files(basedir):
+ """List of files under directory by relative path, not including dirs.
+
+ @param basedir: directory to search
+    @return_value: list of relative paths
+ """
+ basedir = os.path.normpath(basedir)
+ return [path[len(basedir) + 1:] for path in
+ glob.glob(os.path.join(basedir, '**'), recursive=True)
+ if not os.path.isdir(path)]
+
+
+def flat_tar(output, basedir, owner='root', group='root'):
+ """Create a flat tar archive (no leading ./) from basedir.
+
+ @param output: output tar file to write
+ @param basedir: base directory for archive
+ @param owner: owner of archive files
+ @param group: group archive files belong to
+ @return_value: none
+ """
+ c_util.subp(['tar', 'cf', output, '--owner', owner, '--group', group,
+ '-C', basedir] + rel_files(basedir), capture=True)
+
+
+def parse_conf_list(entries, valid=None, boolean=False):
+ """Parse config in a list of strings in key=value format.
+
+ @param entries: list of key=value strings
+ @param valid: list of valid keys in result, return None if invalid input
+ @param boolean: if true, then interpret all values as booleans
+ @return_value: dict of configuration or None if invalid
"""
- write a file using cloudinit.util.write_file
+ res = {key: value.lower() == 'true' if boolean else value
+ for key, value in (i.split('=') for i in entries)}
+ return res if not valid or all(k in valid for k in res.keys()) else None
+
+
+def update_args(args, updates, preserve_old=True):
+ """Update cmdline arguments from a dictionary.
+
+ @param args: cmdline arguments
+ @param updates: dictionary of {arg_name: new_value} mappings
+ @param preserve_old: if true, create a deep copy of args before updating
+ @return_value: updated cmdline arguments
+ """
+ args = copy.deepcopy(args) if preserve_old else args
+ if updates:
+ vars(args).update(updates)
+ return args
+
+
+def update_user_data(user_data, updates, dump_to_yaml=True):
+ """Update user_data from dictionary.
+
+ @param user_data: user data as yaml string or dict
+ @param updates: dictionary to merge with user data
+ @param dump_to_yaml: return as yaml dumped string if true
+ @return_value: updated user data, as yaml string if dump_to_yaml is true
"""
- c_util.write_file(*args, **kwargs)
+ user_data = (c_util.load_yaml(user_data)
+ if isinstance(user_data, str) else copy.deepcopy(user_data))
+ user_data.update(updates)
+ return (yaml_format(user_data, content_type='cloud-config')
+ if dump_to_yaml else user_data)
+
+
+class InTargetExecuteError(c_util.ProcessExecutionError):
+ """Error type for in target commands that fail."""
+
+ default_desc = 'Unexpected error while running command in target instance'
+
+ def __init__(self, stdout, stderr, exit_code, cmd, instance,
+ description=None):
+ """Init error and parent error class."""
+ if isinstance(cmd, (tuple, list)):
+ cmd = ' '.join(cmd)
+ super(InTargetExecuteError, self).__init__(
+ stdout=stdout, stderr=stderr, exit_code=exit_code, cmd=cmd,
+ reason="Instance: {}".format(instance),
+ description=description if description else self.default_desc)
+
+
+class TempDir(object):
+ """Configurable temporary directory like tempfile.TemporaryDirectory."""
+
+ def __init__(self, tmpdir=None, preserve=False, prefix='cloud_test_data_'):
+ """Initialize.
+
+ @param tmpdir: directory to use as tempdir
+ @param preserve: if true, always preserve data on exit
+ @param prefix: prefix to use for tempfile name
+ """
+ self.tmpdir = tmpdir
+ self.preserve = preserve
+ self.prefix = prefix
+
+ def __enter__(self):
+ """Create tempdir.
+
+ @return_value: tempdir path
+ """
+ if not self.tmpdir:
+ self.tmpdir = tempfile.mkdtemp(prefix=self.prefix)
+ LOG.debug('using tmpdir: %s', self.tmpdir)
+ return self.tmpdir
+
+ def __exit__(self, etype, value, trace):
+ """Destroy tempdir if no errors occurred."""
+ if etype or self.preserve:
+ LOG.info('leaving data in %s', self.tmpdir)
+ else:
+ shutil.rmtree(self.tmpdir)
# vi: ts=4 expandtab
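The TempDir helper added above mirrors tempfile.TemporaryDirectory, but keeps the directory when the block raises or when preserve=True, which is useful for debugging failed collect runs. A hypothetical usage, assuming the tests.cloud_tests.util path shown in this diff:

    import os
    from tests.cloud_tests.util import TempDir

    with TempDir(preserve=False) as tmpdir:
        # artifacts written here are removed on clean exit, kept on error
        with open(os.path.join(tmpdir, 'collect.log'), 'w') as stream:
            stream.write('example artifact\n')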
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
index 2a63550e..fc1efcfc 100644
--- a/tests/cloud_tests/verify.py
+++ b/tests/cloud_tests/verify.py
@@ -1,18 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from tests.cloud_tests import (config, LOG, util, testcases)
+"""Verify test results."""
import os
import unittest
+from tests.cloud_tests import (config, LOG, util, testcases)
+
def verify_data(base_dir, tests):
- """
- verify test data is correct,
- base_dir: base directory for data
- test_config: dict of all test config, from util.load_test_config()
- tests: list of test names
- return_value: {<test_name>: {passed: True/False, failures: []}}
+ """Verify test data is correct.
+
+ @param base_dir: base directory for data
+ @param tests: list of test names
+ @return_value: {<test_name>: {passed: True/False, failures: []}}
"""
runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
res = {}
@@ -53,9 +54,10 @@ def verify_data(base_dir, tests):
def verify(args):
- """
- verify test data
- return_value: 0 for success, or number of failed tests
+ """Verify test data.
+
+ @param args: directory of test data
+ @return_value: 0 for success, or number of failed tests
"""
failed = 0
res = {}
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 9ff15993..08c5c469 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -19,10 +19,6 @@ try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
-try:
- from cStringIO import StringIO
-except ImportError:
- from io import StringIO
from cloudinit import helpers as ch
from cloudinit import util
@@ -86,7 +82,26 @@ def retarget_many_wrapper(new_base, am, old_func):
class TestCase(unittest2.TestCase):
- pass
+ def reset_global_state(self):
+ """Reset any global state to its original settings.
+
+ cloudinit caches some values in cloudinit.util. Unit tests that
+ involved those cached paths were then subject to failure if the order
+ of invocation changed (LP: #1703697).
+
+ This function resets any of these global state variables to their
+ initial state.
+
+ In the future this should really be done with some registry that
+ can then be cleaned in a more obvious way.
+ """
+ util.PROC_CMDLINE = None
+ util._DNS_REDIRECT_IP = None
+ util._LSB_RELEASE = {}
+
+ def setUp(self):
+        super(TestCase, self).setUp()
+ self.reset_global_state()
class CiTestCase(TestCase):
@@ -101,11 +116,13 @@ class CiTestCase(TestCase):
super(CiTestCase, self).setUp()
if self.with_logs:
# Create a log handler so unit tests can search expected logs.
- logger = logging.getLogger()
- self.logs = StringIO()
+ self.logger = logging.getLogger()
+ self.logs = six.StringIO()
+ formatter = logging.Formatter('%(levelname)s: %(message)s')
handler = logging.StreamHandler(self.logs)
- self.old_handlers = logger.handlers
- logger.handlers = [handler]
+ handler.setFormatter(formatter)
+ self.old_handlers = self.logger.handlers
+ self.logger.handlers = [handler]
def tearDown(self):
if self.with_logs:
@@ -359,4 +376,16 @@ except AttributeError:
return wrapper
return decorator
+
+# older versions of mock do not have the useful 'assert_not_called'
+if not hasattr(mock.Mock, 'assert_not_called'):
+ def __mock_assert_not_called(mmock):
+ if mmock.call_count != 0:
+ msg = ("[citest] Expected '%s' to not have been called. "
+ "Called %s times." %
+ (mmock._mock_name or 'mock', mmock.call_count))
+ raise AssertionError(msg)
+ mock.Mock.assert_not_called = __mock_assert_not_called
+
+
# vi: ts=4 expandtab
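With the handler change above, CiTestCase subclasses that set with_logs = True capture root-logger output in self.logs, rendered by the '%(levelname)s: %(message)s' formatter. A sketch of a test asserting on that, assuming the helpers module in this tree:

    import logging
    from tests.unittests.helpers import CiTestCase

    class ExampleLogTest(CiTestCase):
        with_logs = True

        def test_warning_is_captured(self):
            logging.getLogger().warning('boom')
            # formatter above renders 'LEVEL: message'
            self.assertIn('WARNING: boom', self.logs.getvalue())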
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
index c16d1a6e..990bff2c 100644
--- a/tests/unittests/test_datasource/test_aliyun.py
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -2,6 +2,7 @@
import functools
import httpretty
+import mock
import os
from .. import helpers as test_helpers
@@ -111,15 +112,29 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
self.assertEqual(self.default_metadata['hostname'],
self.ds.get_hostname())
+ @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
@httpretty.activate
- def test_with_mock_server(self):
+ def test_with_mock_server(self, m_is_aliyun):
+ m_is_aliyun.return_value = True
self.regist_default_server()
- self.ds.get_data()
+ ret = self.ds.get_data()
+ self.assertEqual(True, ret)
+ self.assertEqual(1, m_is_aliyun.call_count)
self._test_get_data()
self._test_get_sshkey()
self._test_get_iid()
self._test_host_name()
+ @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
+ @httpretty.activate
+ def test_returns_false_when_not_on_aliyun(self, m_is_aliyun):
+ """If is_aliyun returns false, then get_data should return False."""
+ m_is_aliyun.return_value = False
+ self.regist_default_server()
+ ret = self.ds.get_data()
+ self.assertEqual(1, m_is_aliyun.call_count)
+ self.assertEqual(False, ret)
+
def test_parse_public_keys(self):
public_keys = {}
self.assertEqual(ay.parse_public_keys(public_keys), [])
@@ -149,4 +164,36 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
self.assertEqual(ay.parse_public_keys(public_keys),
public_keys['key-pair-0']['openssh-key'])
+
+class TestIsAliYun(test_helpers.CiTestCase):
+ ALIYUN_PRODUCT = 'Alibaba Cloud ECS'
+ read_dmi_data_expected = [mock.call('system-product-name')]
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.util.read_dmi_data")
+ def test_true_on_aliyun_product(self, m_read_dmi_data):
+ """Should return true if the dmi product data has expected value."""
+ m_read_dmi_data.return_value = self.ALIYUN_PRODUCT
+ ret = ay._is_aliyun()
+ self.assertEqual(self.read_dmi_data_expected,
+ m_read_dmi_data.call_args_list)
+ self.assertEqual(True, ret)
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.util.read_dmi_data")
+ def test_false_on_empty_string(self, m_read_dmi_data):
+ """Should return false on empty value returned."""
+ m_read_dmi_data.return_value = ""
+ ret = ay._is_aliyun()
+ self.assertEqual(self.read_dmi_data_expected,
+ m_read_dmi_data.call_args_list)
+ self.assertEqual(False, ret)
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.util.read_dmi_data")
+ def test_false_on_unknown_string(self, m_read_dmi_data):
+ """Should return false on an unrelated string."""
+ m_read_dmi_data.return_value = "cubs win"
+ ret = ay._is_aliyun()
+ self.assertEqual(self.read_dmi_data_expected,
+ m_read_dmi_data.call_args_list)
+ self.assertEqual(False, ret)
+
# vi: ts=4 expandtab
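
The TestIsAliYun cases above pin down the detection contract: _is_aliyun()
returns True only when the DMI system-product-name matches exactly. A hedged
sketch of that behavior (the real implementation lives in
cloudinit/sources/DataSourceAliYun.py and may differ in detail):

    from cloudinit import util

    ALIYUN_PRODUCT = 'Alibaba Cloud ECS'

    def _is_aliyun():
        # Empty or unrelated product names report False.
        return util.read_dmi_data('system-product-name') == ALIYUN_PRODUCT
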
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 852ec703..20e70fb7 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -76,7 +76,9 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
return content
-class TestAzureDataSource(TestCase):
+class TestAzureDataSource(CiTestCase):
+
+ with_logs = True
def setUp(self):
super(TestAzureDataSource, self).setUp()
@@ -160,6 +162,12 @@ scbus-1 on xpt0 bus 0
self.instance_id = 'test-instance-id'
+ def _dmi_mocks(key):
+ if key == 'system-uuid':
+ return self.instance_id
+ elif key == 'chassis-asset-tag':
+ return '7783-7084-3265-9085-8269-3286-77'
+
self.apply_patches([
(dsaz, 'list_possible_azure_ds_devs', dsdevs),
(dsaz, 'invoke_agent', _invoke_agent),
@@ -170,16 +178,22 @@ scbus-1 on xpt0 bus 0
(dsaz, 'set_hostname', mock.MagicMock()),
(dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric),
(dsaz.util, 'read_dmi_data', mock.MagicMock(
- return_value=self.instance_id)),
+ side_effect=_dmi_mocks)),
])
- dsrc = dsaz.DataSourceAzureNet(
+ dsrc = dsaz.DataSourceAzure(
data.get('sys_cfg', {}), distro=None, paths=self.paths)
if agent_command is not None:
dsrc.ds_cfg['agent_command'] = agent_command
return dsrc
+ def _get_and_setup(self, dsrc):
+ ret = dsrc.get_data()
+ if ret:
+ dsrc.setup(True)
+ return ret
+
def xml_equals(self, oxml, nxml):
"""Compare two sets of XML to make sure they are equal"""
@@ -241,6 +255,24 @@ fdescfs /dev/fd fdescfs rw 0 0
res = get_path_dev_freebsd('/etc', mnt_list)
self.assertIsNotNone(res)
+ @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data')
+ def test_non_azure_dmi_chassis_asset_tag(self, m_read_dmi_data):
+ """Report non-azure when DMI's chassis asset tag doesn't match.
+
+ Return False when the asset tag doesn't match Azure's static
+ AZURE_CHASSIS_ASSET_TAG.
+ """
+ # Return a non-matching asset tag value
+ nonazure_tag = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'
+ m_read_dmi_data.return_value = nonazure_tag
+ dsrc = dsaz.DataSourceAzure(
+ {}, distro=None, paths=self.paths)
+ self.assertFalse(dsrc.get_data())
+ self.assertEqual(
+ "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
+ nonazure_tag),
+ self.logs.getvalue())
+
def test_basic_seed_dir(self):
odata = {'HostName': "myhost", 'UserName': "myuser"}
data = {'ovfcontent': construct_valid_ovf_env(data=odata),
@@ -273,7 +305,7 @@ fdescfs /dev/fd fdescfs rw 0 0
data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
- ret = dsrc.get_data()
+ ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
self.assertEqual(data['agent_invoked'], cfg['agent_command'])
@@ -286,7 +318,7 @@ fdescfs /dev/fd fdescfs rw 0 0
data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
- ret = dsrc.get_data()
+ ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
self.assertEqual(data['agent_invoked'], cfg['agent_command'])
@@ -296,7 +328,7 @@ fdescfs /dev/fd fdescfs rw 0 0
'sys_cfg': sys_cfg}
dsrc = self._get_ds(data)
- ret = dsrc.get_data()
+ ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
self.assertEqual(data['agent_invoked'], '_COMMAND')
@@ -368,7 +400,7 @@ fdescfs /dev/fd fdescfs rw 0 0
pubkeys=pubkeys)}
dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = dsrc.get_data()
+ ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
for mypk in mypklist:
self.assertIn(mypk, dsrc.cfg['_pubkeys'])
@@ -383,7 +415,7 @@ fdescfs /dev/fd fdescfs rw 0 0
pubkeys=pubkeys)}
dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = dsrc.get_data()
+ ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
for mypk in mypklist:
@@ -399,7 +431,7 @@ fdescfs /dev/fd fdescfs rw 0 0
pubkeys=pubkeys)}
dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = dsrc.get_data()
+ ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
for mypk in mypklist:
@@ -493,18 +525,20 @@ fdescfs /dev/fd fdescfs rw 0 0
dsrc.get_data()
def test_exception_fetching_fabric_data_doesnt_propagate(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ds.ds_cfg['agent_command'] = '__builtin__'
+ """Errors communicating with fabric should warn, but return True."""
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc.ds_cfg['agent_command'] = '__builtin__'
self.get_metadata_from_fabric.side_effect = Exception
- self.assertFalse(ds.get_data())
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
def test_fabric_data_included_in_metadata(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ds.ds_cfg['agent_command'] = '__builtin__'
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc.ds_cfg['agent_command'] = '__builtin__'
self.get_metadata_from_fabric.return_value = {'test': 'value'}
- ret = ds.get_data()
+ ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
- self.assertEqual('value', ds.metadata['test'])
+ self.assertEqual('value', dsrc.metadata['test'])
def test_instance_id_from_dmidecode_used(self):
ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
@@ -517,6 +551,95 @@ fdescfs /dev/fd fdescfs rw 0 0
ds.get_data()
self.assertEqual(self.instance_id, ds.metadata['instance-id'])
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.is_FreeBSD")
+ @mock.patch("cloudinit.sources.DataSourceAzure._check_freebsd_cdrom")
+ def test_list_possible_azure_ds_devs(self, m_check_fbsd_cdrom,
+ m_is_FreeBSD):
+ """On FreeBSD, possible devs should show /dev/cd0."""
+ m_is_FreeBSD.return_value = True
+ m_check_fbsd_cdrom.return_value = True
+ self.assertEqual(dsaz.list_possible_azure_ds_devs(), ['/dev/cd0'])
+ self.assertEqual(
+ [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list)
+
+ @mock.patch('cloudinit.net.get_interface_mac')
+ @mock.patch('cloudinit.net.get_devicelist')
+ @mock.patch('cloudinit.net.device_driver')
+ @mock.patch('cloudinit.net.generate_fallback_config')
+ def test_network_config(self, mock_fallback, mock_dd,
+ mock_devlist, mock_get_mac):
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': {}}
+
+ fallback_config = {
+ 'version': 1,
+ 'config': [{
+ 'type': 'physical', 'name': 'eth0',
+ 'mac_address': '00:11:22:33:44:55',
+ 'params': {'driver': 'hv_netsvc'},
+ 'subnets': [{'type': 'dhcp'}],
+ }]
+ }
+ mock_fallback.return_value = fallback_config
+
+ mock_devlist.return_value = ['eth0']
+ mock_dd.return_value = ['hv_netsvc']
+ mock_get_mac.return_value = '00:11:22:33:44:55'
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ netconfig = dsrc.network_config
+ self.assertEqual(netconfig, fallback_config)
+ mock_fallback.assert_called_with(blacklist_drivers=['mlx4_core'],
+ config_driver=True)
+
+ @mock.patch('cloudinit.net.get_interface_mac')
+ @mock.patch('cloudinit.net.get_devicelist')
+ @mock.patch('cloudinit.net.device_driver')
+ @mock.patch('cloudinit.net.generate_fallback_config')
+ def test_network_config_blacklist(self, mock_fallback, mock_dd,
+ mock_devlist, mock_get_mac):
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': {}}
+
+ fallback_config = {
+ 'version': 1,
+ 'config': [{
+ 'type': 'physical', 'name': 'eth0',
+ 'mac_address': '00:11:22:33:44:55',
+ 'params': {'driver': 'hv_netsvc'},
+ 'subnets': [{'type': 'dhcp'}],
+ }]
+ }
+ blacklist_config = {
+ 'type': 'physical',
+ 'name': 'eth1',
+ 'mac_address': '00:11:22:33:44:55',
+ 'params': {'driver': 'mlx4_core'}
+ }
+ mock_fallback.return_value = fallback_config
+
+ mock_devlist.return_value = ['eth0', 'eth1']
+ mock_dd.side_effect = [
+ 'hv_netsvc', # list composition, skipped
+ 'mlx4_core', # list composition, match
+ 'mlx4_core', # config get driver name
+ ]
+ mock_get_mac.return_value = '00:11:22:33:44:55'
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ netconfig = dsrc.network_config
+ expected_config = fallback_config
+ expected_config['config'].append(blacklist_config)
+ self.assertEqual(netconfig, expected_config)
+
class TestAzureBounce(TestCase):
@@ -531,9 +654,17 @@ class TestAzureBounce(TestCase):
self.patches.enter_context(
mock.patch.object(dsaz, 'get_metadata_from_fabric',
mock.MagicMock(return_value={})))
+
+ def _dmi_mocks(key):
+ if key == 'system-uuid':
+ return 'test-instance-id'
+ elif key == 'chassis-asset-tag':
+ return '7783-7084-3265-9085-8269-3286-77'
+ raise RuntimeError('should not get here')
+
self.patches.enter_context(
mock.patch.object(dsaz.util, 'read_dmi_data',
- mock.MagicMock(return_value='test-instance-id')))
+ mock.MagicMock(side_effect=_dmi_mocks)))
def setUp(self):
super(TestAzureBounce, self).setUp()
@@ -558,12 +689,18 @@ class TestAzureBounce(TestCase):
if ovfcontent is not None:
populate_dir(os.path.join(self.paths.seed_dir, "azure"),
{'ovf-env.xml': ovfcontent})
- dsrc = dsaz.DataSourceAzureNet(
+ dsrc = dsaz.DataSourceAzure(
{}, distro=None, paths=self.paths)
if agent_command is not None:
dsrc.ds_cfg['agent_command'] = agent_command
return dsrc
+ def _get_and_setup(self, dsrc):
+ ret = dsrc.get_data()
+ if ret:
+ dsrc.setup(True)
+ return ret
+
def get_ovf_env_with_dscfg(self, hostname, cfg):
odata = {
'HostName': hostname,
@@ -607,17 +744,20 @@ class TestAzureBounce(TestCase):
host_name = 'unchanged-host-name'
self.get_hostname.return_value = host_name
cfg = {'hostname_bounce': {'policy': 'force'}}
- self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg),
- agent_command=['not', '__builtin__']).get_data()
+ dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg),
+ agent_command=['not', '__builtin__'])
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
self.assertEqual(1, perform_hostname_bounce.call_count)
def test_different_hostnames_sets_hostname(self):
expected_hostname = 'azure-expected-host-name'
self.get_hostname.return_value = 'default-host-name'
- self._get_ds(
+ dsrc = self._get_ds(
self.get_ovf_env_with_dscfg(expected_hostname, {}),
- agent_command=['not', '__builtin__'],
- ).get_data()
+ agent_command=['not', '__builtin__'])
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
self.assertEqual(expected_hostname,
self.set_hostname.call_args_list[0][0][0])
@@ -626,19 +766,21 @@ class TestAzureBounce(TestCase):
self, perform_hostname_bounce):
expected_hostname = 'azure-expected-host-name'
self.get_hostname.return_value = 'default-host-name'
- self._get_ds(
+ dsrc = self._get_ds(
self.get_ovf_env_with_dscfg(expected_hostname, {}),
- agent_command=['not', '__builtin__'],
- ).get_data()
+ agent_command=['not', '__builtin__'])
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
self.assertEqual(1, perform_hostname_bounce.call_count)
def test_different_hostnames_sets_hostname_back(self):
initial_host_name = 'default-host-name'
self.get_hostname.return_value = initial_host_name
- self._get_ds(
+ dsrc = self._get_ds(
self.get_ovf_env_with_dscfg('some-host-name', {}),
- agent_command=['not', '__builtin__'],
- ).get_data()
+ agent_command=['not', '__builtin__'])
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
self.assertEqual(initial_host_name,
self.set_hostname.call_args_list[-1][0][0])
@@ -648,10 +790,11 @@ class TestAzureBounce(TestCase):
perform_hostname_bounce.side_effect = Exception
initial_host_name = 'default-host-name'
self.get_hostname.return_value = initial_host_name
- self._get_ds(
+ dsrc = self._get_ds(
self.get_ovf_env_with_dscfg('some-host-name', {}),
- agent_command=['not', '__builtin__'],
- ).get_data()
+ agent_command=['not', '__builtin__'])
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
self.assertEqual(initial_host_name,
self.set_hostname.call_args_list[-1][0][0])
@@ -662,7 +805,9 @@ class TestAzureBounce(TestCase):
self.get_hostname.return_value = old_hostname
cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}}
data = self.get_ovf_env_with_dscfg(hostname, cfg)
- self._get_ds(data, agent_command=['not', '__builtin__']).get_data()
+ dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
self.assertEqual(1, self.subp.call_count)
bounce_env = self.subp.call_args[1]['env']
self.assertEqual(interface, bounce_env['interface'])
@@ -674,7 +819,9 @@ class TestAzureBounce(TestCase):
dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd
cfg = {'hostname_bounce': {'policy': 'force'}}
data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- self._get_ds(data, agent_command=['not', '__builtin__']).get_data()
+ dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
self.assertEqual(1, self.subp.call_count)
bounce_args = self.subp.call_args[1]['args']
self.assertEqual(cmd, bounce_args)
@@ -696,6 +843,33 @@ class TestAzureBounce(TestCase):
self.assertEqual(0, self.set_hostname.call_count)
+class TestLoadAzureDsDir(CiTestCase):
+ """Tests for load_azure_ds_dir."""
+
+ def setUp(self):
+ self.source_dir = self.tmp_dir()
+ super(TestLoadAzureDsDir, self).setUp()
+
+    def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self):
+        """load_azure_ds_dir raises an error when ovf-env.xml is absent."""
+ with self.assertRaises(dsaz.NonAzureDataSource) as context_manager:
+ dsaz.load_azure_ds_dir(self.source_dir)
+ self.assertEqual(
+ 'No ovf-env file found',
+ str(context_manager.exception))
+
+ def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self):
+ """load_azure_ds_dir calls read_azure_ovf to parse the xml."""
+ ovf_path = os.path.join(self.source_dir, 'ovf-env.xml')
+ with open(ovf_path, 'wb') as stream:
+ stream.write(b'invalid xml')
+ with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager:
+ dsaz.load_azure_ds_dir(self.source_dir)
+ self.assertEqual(
+ 'Invalid ovf-env.xml: syntax error: line 1, column 0',
+ str(context_manager.exception))
+
+
class TestReadAzureOvf(TestCase):
def test_invalid_xml_raises_non_azure_ds(self):
invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
@@ -903,4 +1077,12 @@ class TestCanDevBeReformatted(CiTestCase):
self.assertEqual(False, value)
self.assertIn("3 or more", msg.lower())
+
+class TestAzureNetExists(CiTestCase):
+ def test_azure_net_must_exist_for_legacy_objpkl(self):
+ """DataSourceAzureNet must exist for old obj.pkl files
+ that reference it."""
+ self.assertTrue(hasattr(dsaz, "DataSourceAzureNet"))
+
+
# vi: ts=4 expandtab
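
test_non_azure_dmi_chassis_asset_tag above fixes the observable contract of
the new platform check: a mismatched chassis asset tag logs a debug message
and get_data() returns False. A hedged sketch consistent with that test (the
function name is illustrative; the actual logic is in
cloudinit/sources/DataSourceAzure.py):

    AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'

    def _chassis_asset_tag_matches():
        asset_tag = util.read_dmi_data('chassis-asset-tag')
        if asset_tag != AZURE_CHASSIS_ASSET_TAG:
            # LOG here stands for the module's logger.
            LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
            return False
        return True
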
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
index c08717f3..413e87ac 100644
--- a/tests/unittests/test_datasource/test_common.py
+++ b/tests/unittests/test_datasource/test_common.py
@@ -19,6 +19,7 @@ from cloudinit.sources import (
DataSourceOpenNebula as OpenNebula,
DataSourceOpenStack as OpenStack,
DataSourceOVF as OVF,
+ DataSourceScaleway as Scaleway,
DataSourceSmartOS as SmartOS,
)
from cloudinit.sources import DataSourceNone as DSNone
@@ -26,6 +27,7 @@ from cloudinit.sources import DataSourceNone as DSNone
from .. import helpers as test_helpers
DEFAULT_LOCAL = [
+ Azure.DataSourceAzure,
CloudSigma.DataSourceCloudSigma,
ConfigDrive.DataSourceConfigDrive,
DigitalOcean.DataSourceDigitalOcean,
@@ -36,8 +38,8 @@ DEFAULT_LOCAL = [
]
DEFAULT_NETWORK = [
+ AliYun.DataSourceAliYun,
AltCloud.DataSourceAltCloud,
- Azure.DataSourceAzureNet,
Bigstep.DataSourceBigstep,
CloudStack.DataSourceCloudStack,
DSNone.DataSourceNone,
@@ -47,6 +49,7 @@ DEFAULT_NETWORK = [
NoCloud.DataSourceNoCloudNet,
OpenStack.DataSourceOpenStack,
OVF.DataSourceOVFNet,
+ Scaleway.DataSourceScaleway,
]
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
new file mode 100644
index 00000000..12230ae2
--- /dev/null
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -0,0 +1,202 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import httpretty
+import mock
+
+from .. import helpers as test_helpers
+from cloudinit import helpers
+from cloudinit.sources import DataSourceEc2 as ec2
+
+
+# collected from api version 2009-04-04/ with
+# python3 -c 'import json
+# from cloudinit.ec2_utils import get_instance_metadata as gm
+# print(json.dumps(gm("2009-04-04"), indent=1, sort_keys=True))'
+DEFAULT_METADATA = {
+ "ami-id": "ami-80861296",
+ "ami-launch-index": "0",
+ "ami-manifest-path": "(unknown)",
+ "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"},
+ "hostname": "ip-10-0-0-149",
+ "instance-action": "none",
+ "instance-id": "i-0052913950685138c",
+ "instance-type": "t2.micro",
+ "local-hostname": "ip-10-0-0-149",
+ "local-ipv4": "10.0.0.149",
+ "placement": {"availability-zone": "us-east-1b"},
+ "profile": "default-hvm",
+ "public-hostname": "",
+ "public-ipv4": "107.23.188.247",
+ "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]},
+ "reservation-id": "r-00a2c173fb5782a08",
+ "security-groups": "wide-open"
+}
+
+
+def _register_ssh_keys(rfunc, base_url, keys_data):
+    """Handle ssh key inconsistencies.
+
+ public-keys in the ec2 metadata is inconsistently formatted compared
+ to other entries.
+ Given keys_data of {name1: pubkey1, name2: pubkey2}
+
+ This registers the following urls:
+ base_url 0={name1}\n1={name2} # (for each name)
+ base_url/ 0={name1}\n1={name2} # (for each name)
+ base_url/0 openssh-key
+ base_url/0/ openssh-key
+ base_url/0/openssh-key {pubkey1}
+ base_url/0/openssh-key/ {pubkey1}
+ ...
+ """
+
+ base_url = base_url.rstrip("/")
+ odd_index = '\n'.join(
+ ["{0}={1}".format(n, name)
+ for n, name in enumerate(sorted(keys_data))])
+
+ rfunc(base_url, odd_index)
+ rfunc(base_url + "/", odd_index)
+
+ for n, name in enumerate(sorted(keys_data)):
+ val = keys_data[name]
+ if isinstance(val, list):
+ val = '\n'.join(val)
+ burl = base_url + "/%s" % n
+ rfunc(burl, "openssh-key")
+ rfunc(burl + "/", "openssh-key")
+ rfunc(burl + "/%s/openssh-key" % name, val)
+ rfunc(burl + "/%s/openssh-key/" % name, val)
+
+
+def register_mock_metaserver(base_url, data):
+    """Register with httpretty an ec2 metadata-like service serving 'data'.
+
+ If given a dictionary, it will populate urls under base_url for
+ that dictionary. For example, input of
+ {"instance-id": "i-abc", "mac": "00:16:3e:00:00:00"}
+ populates
+ base_url with 'instance-id\nmac'
+ base_url/ with 'instance-id\nmac'
+ base_url/instance-id with i-abc
+ base_url/mac with 00:16:3e:00:00:00
+ In the index, references to lists or dictionaries have a trailing /.
+ """
+ def register_helper(register, base_url, body):
+ base_url = base_url.rstrip("/")
+ if isinstance(body, str):
+ register(base_url, body)
+ elif isinstance(body, list):
+ register(base_url, '\n'.join(body) + '\n')
+ register(base_url + '/', '\n'.join(body) + '\n')
+ elif isinstance(body, dict):
+ vals = []
+ for k, v in body.items():
+ if k == 'public-keys':
+ _register_ssh_keys(
+ register, base_url + '/public-keys/', v)
+ continue
+ suffix = k.rstrip("/")
+ if not isinstance(v, (str, list)):
+ suffix += "/"
+ vals.append(suffix)
+ url = base_url + '/' + suffix
+ register_helper(register, url, v)
+ register(base_url, '\n'.join(vals) + '\n')
+ register(base_url + '/', '\n'.join(vals) + '\n')
+ elif body is None:
+ register(base_url, 'not found', status_code=404)
+
+ def myreg(*argc, **kwargs):
+ # print("register_url(%s, %s)" % (argc, kwargs))
+ return httpretty.register_uri(httpretty.GET, *argc, **kwargs)
+
+ register_helper(myreg, base_url, data)
+
+
+class TestEc2(test_helpers.HttprettyTestCase):
+ valid_platform_data = {
+ 'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412',
+ 'uuid_source': 'dmi',
+ 'serial': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412',
+ }
+
+ def setUp(self):
+ super(TestEc2, self).setUp()
+ self.metadata_addr = ec2.DataSourceEc2.metadata_urls[0]
+ self.api_ver = '2009-04-04'
+
+ @property
+ def metadata_url(self):
+ return '/'.join([self.metadata_addr, self.api_ver, 'meta-data', ''])
+
+ @property
+ def userdata_url(self):
+ return '/'.join([self.metadata_addr, self.api_ver, 'user-data'])
+
+ def _patch_add_cleanup(self, mpath, *args, **kwargs):
+ p = mock.patch(mpath, *args, **kwargs)
+ p.start()
+ self.addCleanup(p.stop)
+
+ def _setup_ds(self, sys_cfg, platform_data, md, ud=None):
+ distro = {}
+ paths = helpers.Paths({})
+ if sys_cfg is None:
+ sys_cfg = {}
+ ds = ec2.DataSourceEc2(sys_cfg=sys_cfg, distro=distro, paths=paths)
+ if platform_data is not None:
+ self._patch_add_cleanup(
+ "cloudinit.sources.DataSourceEc2._collect_platform_data",
+ return_value=platform_data)
+
+ if md:
+ register_mock_metaserver(self.metadata_url, md)
+ register_mock_metaserver(self.userdata_url, ud)
+
+ return ds
+
+ @httpretty.activate
+ def test_valid_platform_with_strict_true(self):
+ """Valid platform data should return true with strict_id true."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
+ md=DEFAULT_METADATA)
+ ret = ds.get_data()
+ self.assertEqual(True, ret)
+
+ @httpretty.activate
+ def test_valid_platform_with_strict_false(self):
+ """Valid platform data should return true with strict_id false."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
+ md=DEFAULT_METADATA)
+ ret = ds.get_data()
+ self.assertEqual(True, ret)
+
+ @httpretty.activate
+ def test_unknown_platform_with_strict_true(self):
+ """Unknown platform data with strict_id true should return False."""
+ uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a'
+ ds = self._setup_ds(
+ platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''},
+ sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
+ md=DEFAULT_METADATA)
+ ret = ds.get_data()
+ self.assertEqual(False, ret)
+
+ @httpretty.activate
+ def test_unknown_platform_with_strict_false(self):
+ """Unknown platform data with strict_id false should return True."""
+ uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a'
+ ds = self._setup_ds(
+ platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''},
+ sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
+ md=DEFAULT_METADATA)
+ ret = ds.get_data()
+ self.assertEqual(True, ret)
+
+
+# vi: ts=4 expandtab
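
For reference, the register_mock_metaserver helper above can be exercised
like this (illustrative; assumes httpretty is active, and index ordering
follows dict iteration):

    register_mock_metaserver(
        'http://169.254.169.254/2009-04-04/meta-data',
        {'instance-id': 'i-abc', 'mac': '00:16:3e:00:00:00'})
    # GET .../meta-data (and .../meta-data/) -> 'instance-id\nmac\n'
    # GET .../meta-data/instance-id          -> 'i-abc'
    # GET .../meta-data/mac                  -> '00:16:3e:00:00:00'
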
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 6fd1341d..ad608bec 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -72,11 +72,11 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.ds = DataSourceGCE.DataSourceGCE(
settings.CFG_BUILTIN, None,
helpers.Paths({}))
- self.m_platform_reports_gce = mock.patch(
- 'cloudinit.sources.DataSourceGCE.platform_reports_gce',
- return_value=True)
- self.m_platform_reports_gce.start()
- self.addCleanup(self.m_platform_reports_gce.stop)
+        ppatch = mock.patch(
+            'cloudinit.sources.DataSourceGCE.platform_reports_gce')
+ self.m_platform_reports_gce = ppatch.start()
+ self.m_platform_reports_gce.return_value = True
+ self.addCleanup(ppatch.stop)
super(TestDataSourceGCE, self).setUp()
def test_connection(self):
@@ -163,9 +163,12 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.assertEqual(True, r)
self.assertEqual('bar', self.ds.availability_zone)
- def test_get_data_returns_false_if_not_on_gce(self):
+ @mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher")
+ def test_get_data_returns_false_if_not_on_gce(self, m_fetcher):
self.m_platform_reports_gce.return_value = False
- self.assertEqual(False, self.ds.get_data())
+ ret = self.ds.get_data()
+ self.assertEqual(False, ret)
+ m_fetcher.assert_not_called()
# vi: ts=4 expandtab
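
The setUp change above follows a useful idiom: keep the object returned by
patcher.start() (the MagicMock) rather than the patcher itself, so each test
can reconfigure return_value directly while addCleanup() still stops the
patch. A generic sketch (the patch target name is illustrative):

    ppatch = mock.patch('some.module.some_function')
    self.m_some_function = ppatch.start()
    self.m_some_function.return_value = True
    self.addCleanup(ppatch.stop)   # unconditionally undo after each test
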
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
new file mode 100644
index 00000000..65d83ad7
--- /dev/null
+++ b/tests/unittests/test_datasource/test_scaleway.py
@@ -0,0 +1,262 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+import httpretty
+import requests
+
+from cloudinit import helpers
+from cloudinit import settings
+from cloudinit.sources import DataSourceScaleway
+
+from ..helpers import mock, HttprettyTestCase, TestCase
+
+
+class DataResponses(object):
+ """
+ Possible responses of the API endpoint
+ 169.254.42.42/user_data/cloud-init and
+ 169.254.42.42/vendor_data/cloud-init.
+ """
+
+ FAKE_USER_DATA = '#!/bin/bash\necho "user-data"'
+
+ @staticmethod
+ def rate_limited(method, uri, headers):
+ return 429, headers, ''
+
+ @staticmethod
+ def api_error(method, uri, headers):
+ return 500, headers, ''
+
+ @classmethod
+ def get_ok(cls, method, uri, headers):
+ return 200, headers, cls.FAKE_USER_DATA
+
+ @staticmethod
+ def empty(method, uri, headers):
+ """
+ No user data for this server.
+ """
+ return 404, headers, ''
+
+
+class MetadataResponses(object):
+ """
+ Possible responses of the metadata API.
+ """
+
+ FAKE_METADATA = {
+ 'id': '00000000-0000-0000-0000-000000000000',
+ 'hostname': 'scaleway.host',
+ 'ssh_public_keys': [{
+ 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+ 'fingerprint': '2048 06:ae:... login (RSA)'
+ }, {
+ 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+ 'fingerprint': '2048 06:ff:... login2 (RSA)'
+ }]
+ }
+
+ @classmethod
+ def get_ok(cls, method, uri, headers):
+ return 200, headers, json.dumps(cls.FAKE_METADATA)
+
+
+class TestOnScaleway(TestCase):
+
+ def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline):
+ mock, faked = fake_dmi
+ mock.return_value = 'Scaleway' if faked else 'Whatever'
+
+ mock, faked = fake_file_exists
+ mock.return_value = faked
+
+ mock, faked = fake_cmdline
+ mock.return_value = \
+ 'initrd=initrd showopts scaleway nousb' if faked \
+ else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic'
+
+ @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch('os.path.exists')
+ @mock.patch('cloudinit.util.read_dmi_data')
+ def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists,
+ m_get_cmdline):
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, False)
+ )
+ self.assertFalse(DataSourceScaleway.on_scaleway())
+
+ # When not on Scaleway, get_data() returns False.
+ datasource = DataSourceScaleway.DataSourceScaleway(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ self.assertFalse(datasource.get_data())
+
+ @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch('os.path.exists')
+ @mock.patch('cloudinit.util.read_dmi_data')
+ def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists,
+ m_get_cmdline):
+ """
+ dmidecode returns "Scaleway".
+ """
+ # dmidecode returns "Scaleway"
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, True),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, False)
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+ @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch('os.path.exists')
+ @mock.patch('cloudinit.util.read_dmi_data')
+ def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists,
+ m_get_cmdline):
+ """
+ /var/run/scaleway exists.
+ """
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, True),
+ fake_cmdline=(m_get_cmdline, False)
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+ @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch('os.path.exists')
+ @mock.patch('cloudinit.util.read_dmi_data')
+ def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists,
+ m_get_cmdline):
+ """
+ "scaleway" in /proc/cmdline.
+ """
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, True)
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+
+def get_source_address_adapter(*args, **kwargs):
+ """
+    The Scaleway user/vendor data API requires calls to come from a
+    privileged source port.
+
+    If the unittests are run as non-root, the user doesn't have permission
+    to bind to ports below 1024.
+
+    This function drops the bind to a privileged address, since the HTTP
+    call is mocked by httpretty anyway.
+ """
+ kwargs.pop('source_address')
+ return requests.adapters.HTTPAdapter(*args, **kwargs)
+
+
+class TestDataSourceScaleway(HttprettyTestCase):
+
+ def setUp(self):
+ self.datasource = DataSourceScaleway.DataSourceScaleway(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ super(TestDataSourceScaleway, self).setUp()
+
+ self.metadata_url = \
+ DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url']
+ self.userdata_url = \
+ DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url']
+ self.vendordata_url = \
+ DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url']
+
+ @httpretty.activate
+ @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
+ get_source_address_adapter)
+ @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch('time.sleep', return_value=None)
+ def test_metadata_ok(self, sleep, m_get_cmdline):
+ """
+ get_data() returns metadata, user data and vendor data.
+ """
+ m_get_cmdline.return_value = 'scaleway'
+
+ # Make user data API return a valid response
+ httpretty.register_uri(httpretty.GET, self.metadata_url,
+ body=MetadataResponses.get_ok)
+ httpretty.register_uri(httpretty.GET, self.userdata_url,
+ body=DataResponses.get_ok)
+ httpretty.register_uri(httpretty.GET, self.vendordata_url,
+ body=DataResponses.get_ok)
+ self.datasource.get_data()
+
+ self.assertEqual(self.datasource.get_instance_id(),
+ MetadataResponses.FAKE_METADATA['id'])
+ self.assertEqual(self.datasource.get_public_ssh_keys(), [
+ elem['key'] for elem in
+ MetadataResponses.FAKE_METADATA['ssh_public_keys']
+ ])
+ self.assertEqual(self.datasource.get_hostname(),
+ MetadataResponses.FAKE_METADATA['hostname'])
+ self.assertEqual(self.datasource.get_userdata_raw(),
+ DataResponses.FAKE_USER_DATA)
+ self.assertEqual(self.datasource.get_vendordata_raw(),
+ DataResponses.FAKE_USER_DATA)
+ self.assertIsNone(self.datasource.availability_zone)
+ self.assertIsNone(self.datasource.region)
+ self.assertEqual(sleep.call_count, 0)
+
+ @httpretty.activate
+ @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
+ get_source_address_adapter)
+ @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch('time.sleep', return_value=None)
+ def test_metadata_404(self, sleep, m_get_cmdline):
+ """
+        get_data() returns metadata, but neither user data nor vendor data.
+ """
+ m_get_cmdline.return_value = 'scaleway'
+
+ # Make user and vendor data APIs return HTTP/404, which means there is
+ # no user / vendor data for the server.
+ httpretty.register_uri(httpretty.GET, self.metadata_url,
+ body=MetadataResponses.get_ok)
+ httpretty.register_uri(httpretty.GET, self.userdata_url,
+ body=DataResponses.empty)
+ httpretty.register_uri(httpretty.GET, self.vendordata_url,
+ body=DataResponses.empty)
+ self.datasource.get_data()
+ self.assertIsNone(self.datasource.get_userdata_raw())
+ self.assertIsNone(self.datasource.get_vendordata_raw())
+ self.assertEqual(sleep.call_count, 0)
+
+ @httpretty.activate
+ @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
+ get_source_address_adapter)
+ @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch('time.sleep', return_value=None)
+ def test_metadata_rate_limit(self, sleep, m_get_cmdline):
+ """
+ get_data() is rate limited two times by the metadata API when fetching
+ user data.
+ """
+ m_get_cmdline.return_value = 'scaleway'
+
+ httpretty.register_uri(httpretty.GET, self.metadata_url,
+ body=MetadataResponses.get_ok)
+ httpretty.register_uri(httpretty.GET, self.vendordata_url,
+ body=DataResponses.empty)
+
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url,
+ responses=[
+ httpretty.Response(body=DataResponses.rate_limited),
+ httpretty.Response(body=DataResponses.rate_limited),
+ httpretty.Response(body=DataResponses.get_ok),
+ ]
+ )
+ self.datasource.get_data()
+ self.assertEqual(self.datasource.get_userdata_raw(),
+ DataResponses.FAKE_USER_DATA)
+ self.assertEqual(sleep.call_count, 2)
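
TestOnScaleway above encodes three independent detection signals. A hedged
sketch of on_scaleway() consistent with those tests (the DMI key name is an
assumption; see cloudinit/sources/DataSourceScaleway.py for the real code):

    import os
    from cloudinit import util

    def on_scaleway():
        # Signal 1: DMI vendor string (assumed key: system-manufacturer).
        if util.read_dmi_data('system-manufacturer') == 'Scaleway':
            return True
        # Signal 2: marker file left by the Scaleway environment.
        if os.path.exists('/var/run/scaleway'):
            return True
        # Signal 3: 'scaleway' on the kernel command line.
        if 'scaleway' in util.get_cmdline():
            return True
        return False
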
diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py
index 9ded4f6c..1d02f7bd 100644
--- a/tests/unittests/test_distros/test_create_users.py
+++ b/tests/unittests/test_distros/test_create_users.py
@@ -38,6 +38,8 @@ class MyBaseDistro(distros.Distro):
raise NotImplementedError()
+@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False)
+@mock.patch("cloudinit.distros.util.subp")
class TestCreateUser(TestCase):
def setUp(self):
super(TestCase, self).setUp()
@@ -53,8 +55,7 @@ class TestCreateUser(TestCase):
logcmd[i + 1] = 'REDACTED'
return mock.call(args, logstring=logcmd)
- @mock.patch("cloudinit.distros.util.subp")
- def test_basic(self, m_subp):
+ def test_basic(self, m_subp, m_is_snappy):
user = 'foouser'
self.dist.create_user(user)
self.assertEqual(
@@ -62,8 +63,7 @@ class TestCreateUser(TestCase):
[self._useradd2call([user, '-m']),
mock.call(['passwd', '-l', user])])
- @mock.patch("cloudinit.distros.util.subp")
- def test_no_home(self, m_subp):
+ def test_no_home(self, m_subp, m_is_snappy):
user = 'foouser'
self.dist.create_user(user, no_create_home=True)
self.assertEqual(
@@ -71,8 +71,7 @@ class TestCreateUser(TestCase):
[self._useradd2call([user, '-M']),
mock.call(['passwd', '-l', user])])
- @mock.patch("cloudinit.distros.util.subp")
- def test_system_user(self, m_subp):
+ def test_system_user(self, m_subp, m_is_snappy):
# system user should have no home and get --system
user = 'foouser'
self.dist.create_user(user, system=True)
@@ -81,8 +80,7 @@ class TestCreateUser(TestCase):
[self._useradd2call([user, '--system', '-M']),
mock.call(['passwd', '-l', user])])
- @mock.patch("cloudinit.distros.util.subp")
- def test_explicit_no_home_false(self, m_subp):
+ def test_explicit_no_home_false(self, m_subp, m_is_snappy):
user = 'foouser'
self.dist.create_user(user, no_create_home=False)
self.assertEqual(
@@ -90,16 +88,14 @@ class TestCreateUser(TestCase):
[self._useradd2call([user, '-m']),
mock.call(['passwd', '-l', user])])
- @mock.patch("cloudinit.distros.util.subp")
- def test_unlocked(self, m_subp):
+ def test_unlocked(self, m_subp, m_is_snappy):
user = 'foouser'
self.dist.create_user(user, lock_passwd=False)
self.assertEqual(
m_subp.call_args_list,
[self._useradd2call([user, '-m'])])
- @mock.patch("cloudinit.distros.util.subp")
- def test_set_password(self, m_subp):
+ def test_set_password(self, m_subp, m_is_snappy):
user = 'foouser'
password = 'passfoo'
self.dist.create_user(user, passwd=password)
@@ -109,8 +105,7 @@ class TestCreateUser(TestCase):
mock.call(['passwd', '-l', user])])
@mock.patch("cloudinit.distros.util.is_group")
- @mock.patch("cloudinit.distros.util.subp")
- def test_group_added(self, m_subp, m_is_group):
+ def test_group_added(self, m_is_group, m_subp, m_is_snappy):
m_is_group.return_value = False
user = 'foouser'
self.dist.create_user(user, groups=['group1'])
@@ -121,8 +116,7 @@ class TestCreateUser(TestCase):
self.assertEqual(m_subp.call_args_list, expected)
@mock.patch("cloudinit.distros.util.is_group")
- @mock.patch("cloudinit.distros.util.subp")
- def test_only_new_group_added(self, m_subp, m_is_group):
+ def test_only_new_group_added(self, m_is_group, m_subp, m_is_snappy):
ex_groups = ['existing_group']
groups = ['group1', ex_groups[0]]
m_is_group.side_effect = lambda m: m in ex_groups
@@ -135,8 +129,8 @@ class TestCreateUser(TestCase):
self.assertEqual(m_subp.call_args_list, expected)
@mock.patch("cloudinit.distros.util.is_group")
- @mock.patch("cloudinit.distros.util.subp")
- def test_create_groups_with_whitespace_string(self, m_subp, m_is_group):
+ def test_create_groups_with_whitespace_string(
+ self, m_is_group, m_subp, m_is_snappy):
        # groups supported as a comma-delimited string even with whitespace
m_is_group.return_value = False
user = 'foouser'
diff --git a/tests/unittests/test_distros/test_debian.py b/tests/unittests/test_distros/test_debian.py
new file mode 100644
index 00000000..2330ad52
--- /dev/null
+++ b/tests/unittests/test_distros/test_debian.py
@@ -0,0 +1,82 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from ..helpers import (CiTestCase, mock)
+
+from cloudinit.distros.debian import apply_locale
+from cloudinit import util
+
+
+@mock.patch("cloudinit.distros.debian.util.subp")
+class TestDebianApplyLocale(CiTestCase):
+ def test_no_rerun(self, m_subp):
+ """If system has defined locale, no re-run is expected."""
+ spath = self.tmp_path("default-locale")
+ m_subp.return_value = (None, None)
+ locale = 'en_US.UTF-8'
+ util.write_file(spath, 'LANG=%s\n' % locale, omode="w")
+ apply_locale(locale, sys_path=spath)
+ m_subp.assert_not_called()
+
+ def test_rerun_if_different(self, m_subp):
+ """If system has different locale, locale-gen should be called."""
+ spath = self.tmp_path("default-locale")
+ m_subp.return_value = (None, None)
+ locale = 'en_US.UTF-8'
+ util.write_file(spath, 'LANG=fr_FR.UTF-8', omode="w")
+ apply_locale(locale, sys_path=spath)
+ self.assertEqual(
+ [['locale-gen', locale],
+ ['update-locale', '--locale-file=' + spath, 'LANG=%s' % locale]],
+ [p[0][0] for p in m_subp.call_args_list])
+
+ def test_rerun_if_no_file(self, m_subp):
+ """If system has no locale file, locale-gen should be called."""
+ spath = self.tmp_path("default-locale")
+ m_subp.return_value = (None, None)
+ locale = 'en_US.UTF-8'
+ apply_locale(locale, sys_path=spath)
+ self.assertEqual(
+ [['locale-gen', locale],
+ ['update-locale', '--locale-file=' + spath, 'LANG=%s' % locale]],
+ [p[0][0] for p in m_subp.call_args_list])
+
+ def test_rerun_on_unset_system_locale(self, m_subp):
+ """If system has unset locale, locale-gen should be called."""
+ m_subp.return_value = (None, None)
+ spath = self.tmp_path("default-locale")
+ locale = 'en_US.UTF-8'
+ util.write_file(spath, 'LANG=', omode="w")
+ apply_locale(locale, sys_path=spath)
+ self.assertEqual(
+ [['locale-gen', locale],
+ ['update-locale', '--locale-file=' + spath, 'LANG=%s' % locale]],
+ [p[0][0] for p in m_subp.call_args_list])
+
+ def test_rerun_on_mismatched_keys(self, m_subp):
+ """If key is LC_ALL and system has only LANG, rerun is expected."""
+ m_subp.return_value = (None, None)
+ spath = self.tmp_path("default-locale")
+ locale = 'en_US.UTF-8'
+ util.write_file(spath, 'LANG=', omode="w")
+ apply_locale(locale, sys_path=spath, keyname='LC_ALL')
+ self.assertEqual(
+ [['locale-gen', locale],
+ ['update-locale', '--locale-file=' + spath,
+ 'LC_ALL=%s' % locale]],
+ [p[0][0] for p in m_subp.call_args_list])
+
+ def test_falseish_locale_raises_valueerror(self, m_subp):
+ """locale as None or "" is invalid and should raise ValueError."""
+
+ with self.assertRaises(ValueError) as ctext_m:
+ apply_locale(None)
+ m_subp.assert_not_called()
+
+ self.assertEqual(
+ 'Failed to provide locale value.', str(ctext_m.exception))
+
+ with self.assertRaises(ValueError) as ctext_m:
+ apply_locale("")
+ m_subp.assert_not_called()
+ self.assertEqual(
+ 'Failed to provide locale value.', str(ctext_m.exception))
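
Each rerun case above asserts the same two-command sequence passed to
util.subp; rendered as the expected call vectors (sys_path is a temp file in
these tests, conventionally /etc/default/locale on a real system):

    [['locale-gen', 'en_US.UTF-8'],
     ['update-locale', '--locale-file=/etc/default/locale',
      'LANG=en_US.UTF-8']]
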
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index be9a8318..2f505d93 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -92,10 +92,9 @@ iface lo inet loopback
auto eth0
iface eth0 inet static
- address 192.168.1.5
+ address 192.168.1.5/24
broadcast 192.168.1.0
gateway 192.168.1.254
- netmask 255.255.255.0
auto eth1
iface eth1 inet dhcp
@@ -156,7 +155,7 @@ network:
ethernets:
eth7:
addresses:
- - 192.168.1.5/255.255.255.0
+ - 192.168.1.5/24
gateway4: 192.168.1.254
eth9:
dhcp4: true
@@ -477,7 +476,9 @@ NETWORKING=yes
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=none
+DEFROUTE=yes
DEVICE=eth0
+GATEWAY=192.168.1.254
IPADDR=192.168.1.5
NETMASK=255.255.255.0
NM_CONTROLLED=no
@@ -626,9 +627,11 @@ IPV6_AUTOCONF=no
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=none
+DEFROUTE=yes
DEVICE=eth0
IPV6ADDR=2607:f0d0:1002:0011::2/64
IPV6INIT=yes
+IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index f5694b26..8ccfe55c 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -39,9 +39,11 @@ RC_FOUND = 0
RC_NOT_FOUND = 1
DS_NONE = 'None'
+P_CHASSIS_ASSET_TAG = "sys/class/dmi/id/chassis_asset_tag"
P_PRODUCT_NAME = "sys/class/dmi/id/product_name"
P_PRODUCT_SERIAL = "sys/class/dmi/id/product_serial"
P_PRODUCT_UUID = "sys/class/dmi/id/product_uuid"
+P_SEED_DIR = "var/lib/cloud/seed"
P_DSID_CFG = "etc/cloud/ds-identify.cfg"
MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}
@@ -160,6 +162,30 @@ class TestDsIdentify(CiTestCase):
_print_run_output(rc, out, err, cfg, files)
return rc, out, err, cfg, files
+ def test_wb_print_variables(self):
+ """_print_info reports an array of discovered variables to stderr."""
+ data = VALID_CFG['Azure-dmi-detection']
+ _, _, err, _, _ = self._call_via_dict(data)
+ expected_vars = [
+ 'DMI_PRODUCT_NAME', 'DMI_SYS_VENDOR', 'DMI_PRODUCT_SERIAL',
+ 'DMI_PRODUCT_UUID', 'PID_1_PRODUCT_NAME', 'DMI_CHASSIS_ASSET_TAG',
+ 'FS_LABELS', 'KERNEL_CMDLINE', 'VIRT', 'UNAME_KERNEL_NAME',
+ 'UNAME_KERNEL_RELEASE', 'UNAME_KERNEL_VERSION', 'UNAME_MACHINE',
+ 'UNAME_NODENAME', 'UNAME_OPERATING_SYSTEM', 'DSNAME', 'DSLIST',
+ 'MODE', 'ON_FOUND', 'ON_MAYBE', 'ON_NOTFOUND']
+ for var in expected_vars:
+ self.assertIn('{0}='.format(var), err)
+
+    def test_azure_dmi_detection_from_chassis_asset_tag(self):
+        """Azure datasource is detected from the DMI chassis-asset-tag."""
+ self._test_ds_found('Azure-dmi-detection')
+
+ def test_azure_seed_file_detection(self):
+ """Azure datasource is detected due to presence of a seed file.
+
+ The seed file tested is /var/lib/cloud/seed/azure/ovf-env.xml."""
+ self._test_ds_found('Azure-seed-detection')
+
def test_aws_ec2_hvm(self):
"""EC2: hvm instances use dmi serial and uuid starting with 'ec2'."""
self._test_ds_found('Ec2-hvm')
@@ -220,6 +246,20 @@ class TestDsIdentify(CiTestCase):
mydata['files'][cfgpath] = 'datasource_list: ["Ec2", "None"]\n'
self._check_via_dict(mydata, rc=RC_FOUND, dslist=['Ec2', DS_NONE])
+ def test_aliyun_identified(self):
+ """Test that Aliyun cloud is identified by product id."""
+ self._test_ds_found('AliYun')
+
+ def test_aliyun_over_ec2(self):
+ """Even if all other factors identified Ec2, AliYun should be used."""
+ mydata = copy.deepcopy(VALID_CFG['Ec2-xen'])
+ self._test_ds_found('AliYun')
+ prod_name = VALID_CFG['AliYun']['files'][P_PRODUCT_NAME]
+ mydata['files'][P_PRODUCT_NAME] = prod_name
+ policy = "search,found=first,maybe=none,notfound=disabled"
+ self._check_via_dict(mydata, rc=RC_FOUND, dslist=['AliYun', DS_NONE],
+ policy_dmi=policy)
+
def blkid_out(disks=None):
"""Convert a list of disk dictionaries into blkid content."""
@@ -254,6 +294,23 @@ def _print_run_output(rc, out, err, cfg, files):
VALID_CFG = {
+ 'AliYun': {
+ 'ds': 'AliYun',
+ 'files': {P_PRODUCT_NAME: 'Alibaba Cloud ECS\n'},
+ },
+ 'Azure-dmi-detection': {
+ 'ds': 'Azure',
+ 'files': {
+ P_CHASSIS_ASSET_TAG: '7783-7084-3265-9085-8269-3286-77\n',
+ }
+ },
+ 'Azure-seed-detection': {
+ 'ds': 'Azure',
+ 'files': {
+ P_CHASSIS_ASSET_TAG: 'No-match\n',
+ os.path.join(P_SEED_DIR, 'azure', 'ovf-env.xml'): 'present\n',
+ }
+ },
'Ec2-hvm': {
'ds': 'Ec2',
'mocks': [{'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}],
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
index 916a0d7a..8a6d49ed 100644
--- a/tests/unittests/test_handler/test_handler_disk_setup.py
+++ b/tests/unittests/test_handler/test_handler_disk_setup.py
@@ -3,7 +3,7 @@
import random
from cloudinit.config import cc_disk_setup
-from ..helpers import ExitStack, mock, TestCase
+from ..helpers import CiTestCase, ExitStack, mock, TestCase
class TestIsDiskUsed(TestCase):
@@ -174,32 +174,32 @@ class TestUpdateFsSetupDevices(TestCase):
return_value=('/dev/xdb1', False))
@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None)
@mock.patch('cloudinit.config.cc_disk_setup.util.subp', return_value=('', ''))
-class TestMkfsCommandHandling(TestCase):
+class TestMkfsCommandHandling(CiTestCase):
+
+ with_logs = True
def test_with_cmd(self, subp, *args):
"""mkfs honors cmd and logs warnings when extra_opts or overwrite are
provided."""
- with self.assertLogs(
- 'cloudinit.config.cc_disk_setup') as logs:
- cc_disk_setup.mkfs({
- 'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s',
- 'filesystem': 'ext4',
- 'device': '/dev/xdb1',
- 'label': 'with_cmd',
- 'extra_opts': ['should', 'generate', 'warning'],
- 'overwrite': 'should generate warning too'
- })
+ cc_disk_setup.mkfs({
+ 'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s',
+ 'filesystem': 'ext4',
+ 'device': '/dev/xdb1',
+ 'label': 'with_cmd',
+ 'extra_opts': ['should', 'generate', 'warning'],
+ 'overwrite': 'should generate warning too'
+ })
self.assertIn(
- 'WARNING:cloudinit.config.cc_disk_setup:fs_setup:extra_opts ' +
+ 'extra_opts ' +
'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' +
'/dev/xdb1',
- logs.output)
+ self.logs.getvalue())
self.assertIn(
- 'WARNING:cloudinit.config.cc_disk_setup:fs_setup:overwrite ' +
+ 'overwrite ' +
'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' +
'/dev/xdb1',
- logs.output)
+ self.logs.getvalue())
subp.assert_called_once_with(
'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True)
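
The conversion above relies on the CiTestCase.with_logs facility added in
helpers.py: a handler using the '%(levelname)s: %(message)s' formatter
captures records into self.logs. The resulting test idiom (a sketch; the
function under test is hypothetical):

    class MyTest(CiTestCase):

        with_logs = True

        def test_warns(self):
            do_something_noisy()   # hypothetical code that logs a warning
            self.assertIn('WARNING: ', self.logs.getvalue())
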
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
index bc4277b7..7f278646 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -3,7 +3,7 @@
from cloudinit.config import cc_ntp
from cloudinit.sources import DataSourceNone
from cloudinit import (distros, helpers, cloud, util)
-from ..helpers import FilesystemMockingTestCase, mock
+from ..helpers import FilesystemMockingTestCase, mock, skipIf
import os
@@ -16,6 +16,13 @@ servers {{servers}}
pools {{pools}}
"""
+try:
+ import jsonschema
+ assert jsonschema # avoid pyflakes error F401: import unused
+ _missing_jsonschema_dep = False
+except ImportError:
+ _missing_jsonschema_dep = True
+
class TestNtp(FilesystemMockingTestCase):
@@ -55,7 +62,7 @@ class TestNtp(FilesystemMockingTestCase):
def test_ntp_rename_ntp_conf(self):
"""When NTP_CONF exists, rename_ntp moves it."""
ntpconf = self.tmp_path("ntp.conf", self.new_root)
- os.mknod(ntpconf)
+ util.write_file(ntpconf, "")
with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf):
cc_ntp.rename_ntp_conf()
self.assertFalse(os.path.exists(ntpconf))
@@ -209,7 +216,121 @@ class TestNtp(FilesystemMockingTestCase):
"""When no ntp section is defined handler logs a warning and noops."""
cc_ntp.handle('cc_ntp', {}, None, None, [])
self.assertEqual(
- 'Skipping module named cc_ntp, not present or disabled by cfg\n',
+ 'DEBUG: Skipping module named cc_ntp, '
+ 'not present or disabled by cfg\n',
self.logs.getvalue())
+ def test_ntp_handler_schema_validation_allows_empty_ntp_config(self):
+ """Ntp schema validation allows for an empty ntp: configuration."""
+ invalid_config = {'ntp': {}}
+ distro = 'ubuntu'
+ cc = self._get_cloud(distro)
+ ntp_conf = os.path.join(self.new_root, 'ntp.conf')
+ with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
+ stream.write(NTP_TEMPLATE)
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
+ self.assertNotIn('Invalid config:', self.logs.getvalue())
+ with open(ntp_conf) as stream:
+ content = stream.read()
+ default_pools = [
+ "{0}.{1}.pool.ntp.org".format(x, distro)
+ for x in range(0, cc_ntp.NR_POOL_SERVERS)]
+ self.assertEqual(
+ "servers []\npools {0}\n".format(default_pools),
+ content)
+
+ @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency")
+ def test_ntp_handler_schema_validation_warns_non_string_item_type(self):
+ """Ntp schema validation warns of non-strings in pools or servers.
+
+        Schema validation is not strict, so ntp config is still rendered.
+ """
+ invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}}
+ cc = self._get_cloud('ubuntu')
+ ntp_conf = os.path.join(self.new_root, 'ntp.conf')
+ with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
+ stream.write(NTP_TEMPLATE)
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
+ self.assertIn(
+ "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n"
+ "ntp.servers.1: None is not of type 'string'",
+ self.logs.getvalue())
+ with open(ntp_conf) as stream:
+ content = stream.read()
+ self.assertEqual("servers ['valid', None]\npools [123]\n", content)
+
+ @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency")
+ def test_ntp_handler_schema_validation_warns_of_non_array_type(self):
+ """Ntp schema validation warns of non-array pools or servers types.
+
+        Schema validation is not strict, so ntp config is still rendered.
+ """
+ invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}}
+ cc = self._get_cloud('ubuntu')
+ ntp_conf = os.path.join(self.new_root, 'ntp.conf')
+ with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
+ stream.write(NTP_TEMPLATE)
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
+ self.assertIn(
+ "Invalid config:\nntp.pools: 123 is not of type 'array'\n"
+ "ntp.servers: 'non-array' is not of type 'array'",
+ self.logs.getvalue())
+ with open(ntp_conf) as stream:
+ content = stream.read()
+ self.assertEqual("servers non-array\npools 123\n", content)
+
+ @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency")
+ def test_ntp_handler_schema_validation_warns_invalid_key_present(self):
+ """Ntp schema validation warns of invalid keys present in ntp config.
+
+        Schema validation is not strict, so ntp config is still rendered.
+ """
+ invalid_config = {
+ 'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}}
+ cc = self._get_cloud('ubuntu')
+ ntp_conf = os.path.join(self.new_root, 'ntp.conf')
+ with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
+ stream.write(NTP_TEMPLATE)
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
+ self.assertIn(
+ "Invalid config:\nntp: Additional properties are not allowed "
+ "('invalidkey' was unexpected)",
+ self.logs.getvalue())
+ with open(ntp_conf) as stream:
+ content = stream.read()
+ self.assertEqual(
+ "servers []\npools ['0.mycompany.pool.ntp.org']\n",
+ content)
+
+ @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency")
+ def test_ntp_handler_schema_validation_warns_of_duplicates(self):
+ """Ntp schema validation warns of duplicates in servers or pools.
+
+        Schema validation is not strict, so ntp config is still rendered.
+ """
+ invalid_config = {
+ 'ntp': {'pools': ['0.mypool.org', '0.mypool.org'],
+ 'servers': ['10.0.0.1', '10.0.0.1']}}
+ cc = self._get_cloud('ubuntu')
+ ntp_conf = os.path.join(self.new_root, 'ntp.conf')
+ with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
+ stream.write(NTP_TEMPLATE)
+ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+ cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
+ self.assertIn(
+ "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org'] has "
+ "non-unique elements\nntp.servers: ['10.0.0.1', '10.0.0.1'] has "
+ "non-unique elements",
+ self.logs.getvalue())
+ with open(ntp_conf) as stream:
+ content = stream.read()
+ self.assertEqual(
+ "servers ['10.0.0.1', '10.0.0.1']\n"
+ "pools ['0.mypool.org', '0.mypool.org']\n",
+ content)
+
# vi: ts=4 expandtab
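
The schema tests above share one property worth noting: validation is
advisory. A bad value produces a logged warning but the module still renders
the template. An illustrative call, grounded in
test_ntp_handler_schema_validation_warns_of_non_array_type:

    cc_ntp.handle('cc_ntp', {'ntp': {'pools': 123}}, cc, None, [])
    # self.logs now contains:
    #   "Invalid config:\nntp.pools: 123 is not of type 'array'"
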
diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/test_handler/test_handler_write_files.py
index fb252d1d..1129e77d 100644
--- a/tests/unittests/test_handler/test_handler_write_files.py
+++ b/tests/unittests/test_handler/test_handler_write_files.py
@@ -1,10 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config.cc_write_files import write_files
+from cloudinit.config.cc_write_files import write_files, decode_perms
from cloudinit import log as logging
from cloudinit import util
-from ..helpers import FilesystemMockingTestCase
+from ..helpers import CiTestCase, FilesystemMockingTestCase
import base64
import gzip
@@ -49,13 +49,13 @@ class TestWriteFiles(FilesystemMockingTestCase):
expected = "hello world\n"
filename = "/tmp/my.file"
write_files(
- "test_simple", [{"content": expected, "path": filename}], LOG)
+ "test_simple", [{"content": expected, "path": filename}])
self.assertEqual(util.load_file(filename), expected)
def test_yaml_binary(self):
self.patchUtils(self.tmp)
data = util.load_yaml(YAML_TEXT)
- write_files("testname", data['write_files'], LOG)
+ write_files("testname", data['write_files'])
for path, content in YAML_CONTENT_EXPECTED.items():
self.assertEqual(util.load_file(path), content)
@@ -87,7 +87,7 @@ class TestWriteFiles(FilesystemMockingTestCase):
files.append(cur)
expected.append((cur['path'], data))
- write_files("test_decoding", files, LOG)
+ write_files("test_decoding", files)
for path, content in expected:
self.assertEqual(util.load_file(path, decode=False), content)
@@ -98,6 +98,33 @@ class TestWriteFiles(FilesystemMockingTestCase):
self.assertEqual(len(expected), flen_expected)
+class TestDecodePerms(CiTestCase):
+
+ with_logs = True
+
+ def test_none_returns_default(self):
+ """If None is passed as perms, then default should be returned."""
+ default = object()
+ found = decode_perms(None, default)
+ self.assertEqual(default, found)
+
+ def test_integer(self):
+ """A valid integer should return itself."""
+ found = decode_perms(0o755, None)
+ self.assertEqual(0o755, found)
+
+ def test_valid_octal_string(self):
+ """A string should be read as octal."""
+ found = decode_perms("644", None)
+ self.assertEqual(0o644, found)
+
+ def test_invalid_octal_string_returns_default_and_warns(self):
+ """A string with invalid octal should warn and return default."""
+ found = decode_perms("999", None)
+ self.assertIsNone(found)
+ self.assertIn("WARNING: Undecodable", self.logs.getvalue())
+
+
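+# For reference, a minimal sketch of the decode_perms contract exercised
+# above (an illustration of the expected behavior, not the cc_write_files
+# implementation itself):
+#
+#   decode_perms(None, default)   -> default   (no perms given)
+#   decode_perms(0o755, default)  -> 0o755     (integers pass through)
+#   decode_perms("644", default)  -> 0o644     (strings parsed as octal)
+#   decode_perms("999", default)  -> default   (undecodable; logs a warning)
+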
def _gzip_bytes(data):
buf = six.BytesIO()
fp = None
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
new file mode 100644
index 00000000..eda4802a
--- /dev/null
+++ b/tests/unittests/test_handler/test_schema.py
@@ -0,0 +1,232 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.config.schema import (
+ CLOUD_CONFIG_HEADER, SchemaValidationError, get_schema_doc,
+ validate_cloudconfig_file, validate_cloudconfig_schema,
+ main)
+from cloudinit.util import write_file
+
+from ..helpers import CiTestCase, mock, skipIf
+
+from copy import copy
+from six import StringIO
+from textwrap import dedent
+
+try:
+ import jsonschema
+ assert jsonschema # avoid pyflakes error F401: import unused
+ _missing_jsonschema_dep = False
+except ImportError:
+ _missing_jsonschema_dep = True
+
+
+class SchemaValidationErrorTest(CiTestCase):
+ """Test validate_cloudconfig_schema"""
+
+ def test_schema_validation_error_expects_schema_errors(self):
+ """SchemaValidationError is initialized from schema_errors."""
+ errors = (('key.path', 'unexpected key "junk"'),
+ ('key2.path', '"-123" is not a valid "hostname" format'))
+ exception = SchemaValidationError(schema_errors=errors)
+ self.assertIsInstance(exception, Exception)
+ self.assertEqual(exception.schema_errors, errors)
+ self.assertEqual(
+ 'Cloud config schema errors: key.path: unexpected key "junk", '
+ 'key2.path: "-123" is not a valid "hostname" format',
+ str(exception))
+ self.assertTrue(isinstance(exception, ValueError))
+
+
+class ValidateCloudConfigSchemaTest(CiTestCase):
+ """Tests for validate_cloudconfig_schema."""
+
+ with_logs = True
+
+ @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency")
+ def test_validateconfig_schema_non_strict_emits_warnings(self):
+ """When strict is False validate_cloudconfig_schema emits warnings."""
+ schema = {'properties': {'p1': {'type': 'string'}}}
+ validate_cloudconfig_schema({'p1': -1}, schema, strict=False)
+ self.assertIn(
+ "Invalid config:\np1: -1 is not of type 'string'\n",
+ self.logs.getvalue())
+
+ @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency")
+ def test_validateconfig_schema_emits_warning_on_missing_jsonschema(self):
+ """Warning from validate_cloudconfig_schema when missing jsonschema."""
+ schema = {'properties': {'p1': {'type': 'string'}}}
+ with mock.patch.dict('sys.modules', **{'jsonschema': ImportError()}):
+ validate_cloudconfig_schema({'p1': -1}, schema, strict=True)
+ self.assertIn(
+ 'Ignoring schema validation. python-jsonschema is not present',
+ self.logs.getvalue())
+
+ @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency")
+ def test_validateconfig_schema_strict_raises_errors(self):
+ """When strict is True validate_cloudconfig_schema raises errors."""
+ schema = {'properties': {'p1': {'type': 'string'}}}
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_schema({'p1': -1}, schema, strict=True)
+ self.assertEqual(
+ "Cloud config schema errors: p1: -1 is not of type 'string'",
+ str(context_mgr.exception))
+
+ @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency")
+ def test_validateconfig_schema_honors_formats(self):
+ """With strict True, validate_cloudconfig_schema errors on format."""
+ schema = {
+ 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}}
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_schema({'p1': '-1'}, schema, strict=True)
+ self.assertEqual(
+ "Cloud config schema errors: p1: '-1' is not a 'hostname'",
+ str(context_mgr.exception))
+
+
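+# A usage sketch of the behavior covered above (illustration only, not the
+# authoritative API docs): with jsonschema available, non-strict validation
+# logs warnings while strict validation raises.
+#
+#   schema = {'properties': {'p1': {'type': 'string'}}}
+#   validate_cloudconfig_schema({'p1': -1}, schema, strict=False)  # warns
+#   validate_cloudconfig_schema({'p1': -1}, schema, strict=True)   # raises
+#                                               # SchemaValidationError
+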
+class ValidateCloudConfigFileTest(CiTestCase):
+ """Tests for validate_cloudconfig_file."""
+
+ def setUp(self):
+ super(ValidateCloudConfigFileTest, self).setUp()
+ self.config_file = self.tmp_path('cloudcfg.yaml')
+
+ def test_validateconfig_file_error_on_absent_file(self):
+ """On absent config_path, validate_cloudconfig_file errors."""
+ with self.assertRaises(RuntimeError) as context_mgr:
+ validate_cloudconfig_file('/not/here', {})
+ self.assertEqual(
+ 'Configfile /not/here does not exist',
+ str(context_mgr.exception))
+
+ def test_validateconfig_file_error_on_invalid_header(self):
+ """On invalid header, validate_cloudconfig_file errors.
+
+ A SchemaValidationError is raised when the file doesn't begin with
+ CLOUD_CONFIG_HEADER.
+ """
+ write_file(self.config_file, '#junk')
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, {})
+ self.assertEqual(
+ 'Cloud config schema errors: header: File {0} needs to begin with '
+ '"{1}"'.format(self.config_file, CLOUD_CONFIG_HEADER.decode()),
+ str(context_mgr.exception))
+
+ def test_validateconfig_file_error_on_non_yaml_format(self):
+ """On non-yaml format, validate_cloudconfig_file errors."""
+ write_file(self.config_file, '#cloud-config\n{}}')
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, {})
+ self.assertIn(
+ 'schema errors: format: File {0} is not valid yaml.'.format(
+ self.config_file),
+ str(context_mgr.exception))
+
+ @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency")
+    def test_validateconfig_file_strictly_validates_schema(self):
+ """validate_cloudconfig_file raises errors on invalid schema."""
+ schema = {
+ 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}}
+ write_file(self.config_file, '#cloud-config\np1: "-1"')
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, schema)
+ self.assertEqual(
+ "Cloud config schema errors: p1: '-1' is not a 'hostname'",
+ str(context_mgr.exception))
+
+
+class GetSchemaDocTest(CiTestCase):
+ """Tests for get_schema_doc."""
+
+ def setUp(self):
+ super(GetSchemaDocTest, self).setUp()
+ self.required_schema = {
+ 'title': 'title', 'description': 'description', 'id': 'id',
+ 'name': 'name', 'frequency': 'frequency',
+ 'distros': ['debian', 'rhel']}
+
+ def test_get_schema_doc_returns_restructured_text(self):
+ """get_schema_doc returns restructured text for a cloudinit schema."""
+ full_schema = copy(self.required_schema)
+ full_schema.update(
+ {'properties': {
+ 'prop1': {'type': 'array', 'description': 'prop-description',
+ 'items': {'type': 'int'}}}})
+ self.assertEqual(
+ dedent("""
+ name
+ ---
+ **Summary:** title
+
+ description
+
+ **Internal name:** ``id``
+
+ **Module frequency:** frequency
+
+ **Supported distros:** debian, rhel
+
+ **Config schema**:
+ **prop1:** (array of int) prop-description\n\n"""),
+ get_schema_doc(full_schema))
+
+ def test_get_schema_doc_returns_restructured_text_with_examples(self):
+ """get_schema_doc returns indented examples when present in schema."""
+ full_schema = copy(self.required_schema)
+ full_schema.update(
+ {'examples': {'ex1': [1, 2, 3]},
+ 'properties': {
+ 'prop1': {'type': 'array', 'description': 'prop-description',
+ 'items': {'type': 'int'}}}})
+ self.assertIn(
+ dedent("""
+ **Config schema**:
+ **prop1:** (array of int) prop-description
+
+ **Examples**::
+
+ ex1"""),
+ get_schema_doc(full_schema))
+
+ def test_get_schema_doc_raises_key_errors(self):
+ """get_schema_doc raises KeyErrors on missing keys."""
+ for key in self.required_schema:
+ invalid_schema = copy(self.required_schema)
+ invalid_schema.pop(key)
+ with self.assertRaises(KeyError) as context_mgr:
+ get_schema_doc(invalid_schema)
+ self.assertIn(key, str(context_mgr.exception))
+
+
+class MainTest(CiTestCase):
+
+ def test_main_missing_args(self):
+ """Main exits non-zero and reports an error on missing parameters."""
+ with mock.patch('sys.argv', ['mycmd']):
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ self.assertEqual(1, main(), 'Expected non-zero exit code')
+ self.assertEqual(
+ 'Expected either --config-file argument or --doc\n',
+ m_stderr.getvalue())
+
+ def test_main_prints_docs(self):
+ """When --doc parameter is provided, main generates documentation."""
+ myargs = ['mycmd', '--doc']
+ with mock.patch('sys.argv', myargs):
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, main(), 'Expected 0 exit code')
+ self.assertIn('\nNTP\n---\n', m_stdout.getvalue())
+
+ def test_main_validates_config_file(self):
+ """When --config-file parameter is provided, main validates schema."""
+ myyaml = self.tmp_path('my.yaml')
+ myargs = ['mycmd', '--config-file', myyaml]
+ with open(myyaml, 'wb') as stream:
+ stream.write(b'#cloud-config\nntp:') # shortest ntp schema
+ with mock.patch('sys.argv', myargs):
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, main(), 'Expected 0 exit code')
+ self.assertIn(
+ 'Valid cloud-config file {0}'.format(myyaml), m_stdout.getvalue())
+
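+# The main() covered above backs the cloudconfig-schema helper; invocations
+# matching these tests would be (assumed command name):
+#
+#   cloudconfig-schema --doc                  # print schema documentation
+#   cloudconfig-schema --config-file my.yaml  # validate a cloud-config file
+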
+# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 167ed01e..e49abcc4 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -1,6 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import net
+from cloudinit.net import _natural_sort_key
from cloudinit.net import cmdline
from cloudinit.net import eni
from cloudinit.net import netplan
@@ -149,20 +150,19 @@ ONBOOT=yes
TYPE=Ethernet
USERCTL=no
""".lstrip()),
- ('etc/sysconfig/network-scripts/route-eth0',
- """
-# Created by cloud-init on instance boot automatically, do not edit.
-#
-ADDRESS0=0.0.0.0
-GATEWAY0=172.19.3.254
-NETMASK0=0.0.0.0
-""".lstrip()),
('etc/resolv.conf',
"""
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
""".lstrip()),
+ ('etc/NetworkManager/conf.d/99-cloud-init.conf',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+[main]
+dns = none
+""".lstrip()),
('etc/udev/rules.d/70-persistent-net.rules',
"".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
@@ -224,6 +224,13 @@ USERCTL=no
;
nameserver 172.19.0.12
""".lstrip()),
+ ('etc/NetworkManager/conf.d/99-cloud-init.conf',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+[main]
+dns = none
+""".lstrip()),
('etc/udev/rules.d/70-persistent-net.rules',
"".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
@@ -291,7 +298,7 @@ DEVICE=eth0
GATEWAY=172.19.3.254
HWADDR=fa:16:3e:ed:9a:59
IPADDR=172.19.1.34
-IPV6ADDR=2001:DB8::10
+IPV6ADDR=2001:DB8::10/64
IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
IPV6INIT=yes
IPV6_DEFAULTGW=2001:DB8::1
@@ -307,6 +314,13 @@ USERCTL=no
;
nameserver 172.19.0.12
""".lstrip()),
+ ('etc/NetworkManager/conf.d/99-cloud-init.conf',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+[main]
+dns = none
+""".lstrip()),
('etc/udev/rules.d/70-persistent-net.rules',
"".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
@@ -341,17 +355,15 @@ iface lo inet loopback
auto eth0
iface eth0 inet static
- address 1.2.3.12
+ address 1.2.3.12/29
broadcast 1.2.3.15
dns-nameservers 69.9.160.191 69.9.191.4
gateway 1.2.3.9
- netmask 255.255.255.248
auto eth1
iface eth1 inet static
- address 10.248.2.4
+ address 10.248.2.4/29
broadcast 10.248.2.7
- netmask 255.255.255.248
""".lstrip()
NETWORK_CONFIGS = {
@@ -410,6 +422,28 @@ NETWORK_CONFIGS = {
via: 65.61.151.37
set-name: eth99
""").rstrip(' '),
+ 'expected_sysconfig': {
+ 'ifcfg-eth1': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=eth1
+ HWADDR=cf:d6:af:48:e8:80
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no"""),
+ 'ifcfg-eth99': textwrap.dedent("""\
+ BOOTPROTO=dhcp
+ DEFROUTE=yes
+ DEVICE=eth99
+ GATEWAY=65.61.151.37
+ HWADDR=c0:d6:9f:2c:e8:80
+ IPADDR=192.168.21.3
+ NETMASK=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no"""),
+ },
'yaml': textwrap.dedent("""
version: 1
config:
@@ -470,6 +504,62 @@ NETWORK_CONFIGS = {
- {'type': 'dhcp6'}
""").rstrip(' '),
},
+ 'v4_and_v6_static': {
+ 'expected_eni': textwrap.dedent("""\
+ auto lo
+ iface lo inet loopback
+
+ auto iface0
+ iface iface0 inet static
+ address 192.168.14.2/24
+ mtu 9000
+
+ # control-alias iface0
+ iface iface0 inet6 static
+ address 2001:1::1/64
+ mtu 1500
+ """).rstrip(' '),
+ 'expected_netplan': textwrap.dedent("""
+ network:
+ version: 2
+ ethernets:
+ iface0:
+ addresses:
+ - 192.168.14.2/24
+ - 2001:1::1/64
+ mtu: 9000
+ mtu6: 1500
+ """).rstrip(' '),
+ 'yaml': textwrap.dedent("""\
+ version: 1
+ config:
+ - type: 'physical'
+ name: 'iface0'
+ subnets:
+ - type: static
+ address: 192.168.14.2/24
+ mtu: 9000
+ - type: static
+ address: 2001:1::1/64
+ mtu: 1500
+ """).rstrip(' '),
+ 'expected_sysconfig': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=iface0
+ IPADDR=192.168.14.2
+ IPV6ADDR=2001:1::1/64
+ IPV6INIT=yes
+ NETMASK=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ MTU=9000
+ IPV6_MTU=1500
+ """),
+ },
+ },
'all': {
'expected_eni': ("""\
auto lo
@@ -511,12 +601,26 @@ iface bond0 inet6 dhcp
auto br0
iface br0 inet static
address 192.168.14.2/24
+ bridge_ageing 250
+ bridge_bridgeprio 22
+ bridge_fd 1
+ bridge_gcint 2
+ bridge_hello 1
+ bridge_maxage 10
+ bridge_pathcost eth3 50
+ bridge_pathcost eth4 75
+ bridge_portprio eth3 28
+ bridge_portprio eth4 14
bridge_ports eth3 eth4
bridge_stp off
+ bridge_waitport 1 eth3
+ bridge_waitport 2 eth4
# control-alias br0
iface br0 inet6 static
address 2001:1::1/64
+ post-up route add -A inet6 default gw 2001:4800:78ff:1b::1 || true
+ pre-down route del -A inet6 default gw 2001:4800:78ff:1b::1 || true
auto bond0.200
iface bond0.200 inet dhcp
@@ -642,6 +746,18 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
interfaces:
- eth3
- eth4
+ parameters:
+ ageing-time: 250
+ forward-delay: 1
+ hello-time: 1
+ max-age: 10
+ path-cost:
+ eth3: 50
+ eth4: 75
+ priority: 22
+ routes:
+ - to: ::/0
+ via: 2001:4800:78ff:1b::1
vlans:
bond0.200:
dhcp4: true
@@ -664,6 +780,119 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
- sacchromyces.maas
- brettanomyces.maas
""").rstrip(' '),
+ 'expected_sysconfig': {
+ 'ifcfg-bond0': textwrap.dedent("""\
+ BONDING_MASTER=yes
+ BONDING_OPTS="mode=active-backup """
+ """xmit_hash_policy=layer3+4 """
+ """miimon=100"
+ BONDING_SLAVE0=eth1
+ BONDING_SLAVE1=eth2
+ BOOTPROTO=dhcp
+ DEVICE=bond0
+ DHCPV6C=yes
+ IPV6INIT=yes
+ MACADDR=aa:bb:cc:dd:ee:ff
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Bond
+ USERCTL=no"""),
+ 'ifcfg-bond0.200': textwrap.dedent("""\
+ BOOTPROTO=dhcp
+ DEVICE=bond0.200
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ PHYSDEV=bond0
+ TYPE=Ethernet
+ USERCTL=no
+ VLAN=yes"""),
+ 'ifcfg-br0': textwrap.dedent("""\
+ AGEING=250
+ BOOTPROTO=none
+ DEFROUTE=yes
+ DEVICE=br0
+ IPADDR=192.168.14.2
+ IPV6ADDR=2001:1::1/64
+ IPV6INIT=yes
+ IPV6_DEFAULTGW=2001:4800:78ff:1b::1
+ NETMASK=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ PRIO=22
+ STP=off
+ TYPE=Bridge
+ USERCTL=no"""),
+ 'ifcfg-eth0': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=eth0
+ HWADDR=c0:d6:9f:2c:e8:80
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no"""),
+ 'ifcfg-eth0.101': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEFROUTE=yes
+ DEVICE=eth0.101
+ GATEWAY=192.168.0.1
+ IPADDR=192.168.0.2
+ IPADDR1=192.168.2.10
+ MTU=1500
+ NETMASK=255.255.255.0
+ NETMASK1=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ PHYSDEV=eth0
+ TYPE=Ethernet
+ USERCTL=no
+ VLAN=yes"""),
+ 'ifcfg-eth1': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=eth1
+ HWADDR=aa:d6:9f:2c:e8:80
+ MASTER=bond0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ SLAVE=yes
+ TYPE=Ethernet
+ USERCTL=no"""),
+ 'ifcfg-eth2': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=eth2
+ HWADDR=c0:bb:9f:2c:e8:80
+ MASTER=bond0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ SLAVE=yes
+ TYPE=Ethernet
+ USERCTL=no"""),
+ 'ifcfg-eth3': textwrap.dedent("""\
+ BOOTPROTO=none
+ BRIDGE=br0
+ DEVICE=eth3
+ HWADDR=66:bb:9f:2c:e8:80
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no"""),
+ 'ifcfg-eth4': textwrap.dedent("""\
+ BOOTPROTO=none
+ BRIDGE=br0
+ DEVICE=eth4
+ HWADDR=98:bb:9f:2c:e8:80
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no"""),
+ 'ifcfg-eth5': textwrap.dedent("""\
+ BOOTPROTO=dhcp
+ DEVICE=eth5
+ HWADDR=98:bb:9f:2c:e8:8a
+ NM_CONTROLLED=no
+ ONBOOT=no
+ TYPE=Ethernet
+ USERCTL=no""")
+ },
'yaml': textwrap.dedent("""
version: 1
config:
@@ -752,14 +981,32 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
forwarding: 1
# basically anything in /proc/sys/net/ipv6/conf/.../
params:
- bridge_stp: 'off'
- bridge_fd: 0
+ bridge_ageing: 250
+ bridge_bridgeprio: 22
+ bridge_fd: 1
+ bridge_gcint: 2
+ bridge_hello: 1
+ bridge_maxage: 10
bridge_maxwait: 0
+ bridge_pathcost:
+ - eth3 50
+ - eth4 75
+ bridge_portprio:
+ - eth3 28
+ - eth4 14
+ bridge_stp: 'off'
+ bridge_waitport:
+ - 1 eth3
+ - 2 eth4
subnets:
- type: static
address: 192.168.14.2/24
- type: static
address: 2001:1::1/64 # default to /64
+ routes:
+ - gateway: 2001:4800:78ff:1b::1
+ netmask: '::'
+ network: '::'
# A global nameserver.
- type: nameserver
address: 8.8.8.8
@@ -778,9 +1025,308 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
gateway: 11.0.0.1
metric: 3
""").lstrip(),
- }
+ },
+ 'bond': {
+ 'yaml': textwrap.dedent("""
+ version: 1
+ config:
+ - type: physical
+ name: bond0s0
+ mac_address: "aa:bb:cc:dd:e8:00"
+ - type: physical
+ name: bond0s1
+ mac_address: "aa:bb:cc:dd:e8:01"
+ - type: bond
+ name: bond0
+ mac_address: "aa:bb:cc:dd:e8:ff"
+ bond_interfaces:
+ - bond0s0
+ - bond0s1
+ params:
+ bond-mode: active-backup
+ bond_miimon: 100
+ bond-xmit-hash-policy: "layer3+4"
+ subnets:
+ - type: static
+ address: 192.168.0.2/24
+ gateway: 192.168.0.1
+ routes:
+ - gateway: 192.168.0.3
+ netmask: 255.255.255.0
+ network: 10.1.3.0
+ - type: static
+ address: 192.168.1.2/24
+ - type: static
+ address: 2001:1::1/92
+ """),
+ 'expected_sysconfig': {
+ 'ifcfg-bond0': textwrap.dedent("""\
+ BONDING_MASTER=yes
+ BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100"
+ BONDING_SLAVE0=bond0s0
+ BONDING_SLAVE1=bond0s1
+ BOOTPROTO=none
+ DEFROUTE=yes
+ DEVICE=bond0
+ GATEWAY=192.168.0.1
+ MACADDR=aa:bb:cc:dd:e8:ff
+ IPADDR=192.168.0.2
+ IPADDR1=192.168.1.2
+ IPV6ADDR=2001:1::1/92
+ IPV6INIT=yes
+ NETMASK=255.255.255.0
+ NETMASK1=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Bond
+ USERCTL=no
+ """),
+ 'ifcfg-bond0s0': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=bond0s0
+ HWADDR=aa:bb:cc:dd:e8:00
+ MASTER=bond0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ SLAVE=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ 'route6-bond0': textwrap.dedent("""\
+ """),
+ 'route-bond0': textwrap.dedent("""\
+ ADDRESS0=10.1.3.0
+ GATEWAY0=192.168.0.3
+ NETMASK0=255.255.255.0
+ """),
+ 'ifcfg-bond0s1': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=bond0s1
+ HWADDR=aa:bb:cc:dd:e8:01
+ MASTER=bond0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ SLAVE=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ },
+ },
+ 'vlan': {
+ 'yaml': textwrap.dedent("""
+ version: 1
+ config:
+ - type: physical
+ name: en0
+ mac_address: "aa:bb:cc:dd:e8:00"
+ - type: vlan
+ name: en0.99
+ vlan_link: en0
+ vlan_id: 99
+ subnets:
+ - type: static
+ address: '192.168.2.2/24'
+ - type: static
+ address: '192.168.1.2/24'
+ gateway: 192.168.1.1
+ - type: static
+ address: 2001:1::bbbb/96
+ routes:
+ - gateway: 2001:1::1
+ netmask: '::'
+ network: '::'
+ """),
+ 'expected_sysconfig': {
+ 'ifcfg-en0': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=en0
+ HWADDR=aa:bb:cc:dd:e8:00
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no"""),
+ 'ifcfg-en0.99': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEFROUTE=yes
+ DEVICE=en0.99
+ GATEWAY=192.168.1.1
+ IPADDR=192.168.2.2
+ IPADDR1=192.168.1.2
+ IPV6ADDR=2001:1::bbbb/96
+ IPV6INIT=yes
+ IPV6_DEFAULTGW=2001:1::1
+ NETMASK=255.255.255.0
+ NETMASK1=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ PHYSDEV=en0
+ TYPE=Ethernet
+ USERCTL=no
+ VLAN=yes"""),
+ },
+ },
+ 'bridge': {
+ 'yaml': textwrap.dedent("""
+ version: 1
+ config:
+ - type: physical
+ name: eth0
+ mac_address: "52:54:00:12:34:00"
+ subnets:
+ - type: static
+ address: 2001:1::100/96
+ - type: physical
+ name: eth1
+ mac_address: "52:54:00:12:34:01"
+ subnets:
+ - type: static
+ address: 2001:1::101/96
+ - type: bridge
+ name: br0
+ bridge_interfaces:
+ - eth0
+ - eth1
+ params:
+ bridge_stp: 'off'
+ bridge_bridgeprio: 22
+ subnets:
+ - type: static
+ address: 192.168.2.2/24"""),
+ 'expected_sysconfig': {
+ 'ifcfg-br0': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=br0
+ IPADDR=192.168.2.2
+ NETMASK=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ PRIO=22
+ STP=off
+ TYPE=Bridge
+ USERCTL=no
+ """),
+ 'ifcfg-eth0': textwrap.dedent("""\
+ BOOTPROTO=none
+ BRIDGE=br0
+ DEVICE=eth0
+ HWADDR=52:54:00:12:34:00
+ IPV6ADDR=2001:1::100/96
+ IPV6INIT=yes
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ 'ifcfg-eth1': textwrap.dedent("""\
+ BOOTPROTO=none
+ BRIDGE=br0
+ DEVICE=eth1
+ HWADDR=52:54:00:12:34:01
+ IPV6ADDR=2001:1::101/96
+ IPV6INIT=yes
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ },
+ },
+ 'manual': {
+ 'yaml': textwrap.dedent("""
+ version: 1
+ config:
+ - type: physical
+ name: eth0
+ mac_address: "52:54:00:12:34:00"
+ subnets:
+ - type: static
+ address: 192.168.1.2/24
+ control: manual
+ - type: physical
+ name: eth1
+ mtu: 1480
+ mac_address: "52:54:00:12:34:aa"
+ subnets:
+ - type: manual
+ - type: physical
+ name: eth2
+ mac_address: "52:54:00:12:34:ff"
+ subnets:
+ - type: manual
+ control: manual
+ """),
+ 'expected_eni': textwrap.dedent("""\
+ auto lo
+ iface lo inet loopback
+
+ # control-manual eth0
+ iface eth0 inet static
+ address 192.168.1.2/24
+
+ auto eth1
+ iface eth1 inet manual
+ mtu 1480
+
+ # control-manual eth2
+ iface eth2 inet manual
+ """),
+ 'expected_netplan': textwrap.dedent("""\
+
+ network:
+ version: 2
+ ethernets:
+ eth0:
+ addresses:
+ - 192.168.1.2/24
+ match:
+ macaddress: '52:54:00:12:34:00'
+ set-name: eth0
+ eth1:
+ match:
+ macaddress: 52:54:00:12:34:aa
+ mtu: 1480
+ set-name: eth1
+ eth2:
+ match:
+ macaddress: 52:54:00:12:34:ff
+ set-name: eth2
+ """),
+ 'expected_sysconfig': {
+ 'ifcfg-eth0': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=eth0
+ HWADDR=52:54:00:12:34:00
+ IPADDR=192.168.1.2
+ NETMASK=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=no
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ 'ifcfg-eth1': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=eth1
+ HWADDR=52:54:00:12:34:aa
+ MTU=1480
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ 'ifcfg-eth2': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=eth2
+ HWADDR=52:54:00:12:34:ff
+ NM_CONTROLLED=no
+ ONBOOT=no
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ },
+ },
}
+
CONFIG_V1_EXPLICIT_LOOPBACK = {
'version': 1,
'config': [{'name': 'eth0', 'type': 'physical',
@@ -790,39 +1336,231 @@ CONFIG_V1_EXPLICIT_LOOPBACK = {
]}
-def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net,
- mock_sys_dev_path):
- mock_get_devicelist.return_value = ['eth1000']
- dev_characteristics = {
- 'eth1000': {
- "bridge": False,
- "carrier": False,
- "dormant": False,
- "operstate": "down",
- "address": "07-1C-C6-75-A4-BE",
- }
+CONFIG_V1_SIMPLE_SUBNET = {
+ 'version': 1,
+ 'config': [{'mac_address': '52:54:00:12:34:00',
+ 'name': 'interface0',
+ 'subnets': [{'address': '10.0.2.15',
+ 'gateway': '10.0.2.2',
+ 'netmask': '255.255.255.0',
+ 'type': 'static'}],
+ 'type': 'physical'}]}
+
+
+DEFAULT_DEV_ATTRS = {
+ 'eth1000': {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "07-1C-C6-75-A4-BE",
+ "device/driver": None,
+ "device/device": None,
}
+}
+
+
+def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net,
+ mock_sys_dev_path, dev_attrs=None):
+ if not dev_attrs:
+ dev_attrs = DEFAULT_DEV_ATTRS
+
+ mock_get_devicelist.return_value = dev_attrs.keys()
def fake_read(devname, path, translate=None,
on_enoent=None, on_keyerror=None,
on_einval=None):
- return dev_characteristics[devname][path]
+ return dev_attrs[devname][path]
mock_read_sys_net.side_effect = fake_read
def sys_dev_path(devname, path=""):
- return tmp_dir + devname + "/" + path
+ return tmp_dir + "/" + devname + "/" + path
- for dev in dev_characteristics:
+ for dev in dev_attrs:
os.makedirs(os.path.join(tmp_dir, dev))
with open(os.path.join(tmp_dir, dev, 'operstate'), 'w') as fh:
- fh.write("down")
+ fh.write(dev_attrs[dev]['operstate'])
+ os.makedirs(os.path.join(tmp_dir, dev, "device"))
+ for key in ['device/driver']:
+ if key in dev_attrs[dev] and dev_attrs[dev][key]:
+ target = dev_attrs[dev][key]
+ link = os.path.join(tmp_dir, dev, key)
+ print('symlink %s -> %s' % (link, target))
+ os.symlink(target, link)
mock_sys_dev_path.side_effect = sys_dev_path
+class TestGenerateFallbackConfig(CiTestCase):
+
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_device_driver(self, mock_get_devicelist, mock_read_sys_net,
+ mock_sys_dev_path):
+ devices = {
+ 'eth0': {
+ 'bridge': False, 'carrier': False, 'dormant': False,
+ 'operstate': 'down', 'address': '00:11:22:33:44:55',
+ 'device/driver': 'hv_netsvc', 'device/device': '0x3'},
+ 'eth1': {
+ 'bridge': False, 'carrier': False, 'dormant': False,
+ 'operstate': 'down', 'address': '00:11:22:33:44:55',
+ 'device/driver': 'mlx4_core', 'device/device': '0x7'},
+ }
+
+ tmp_dir = self.tmp_dir()
+ _setup_test(tmp_dir, mock_get_devicelist,
+ mock_read_sys_net, mock_sys_dev_path,
+ dev_attrs=devices)
+
+ network_cfg = net.generate_fallback_config(config_driver=True)
+ ns = network_state.parse_net_config_data(network_cfg,
+ skip_broken=False)
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ # don't set rulepath so eni writes them
+ renderer = eni.Renderer(
+ {'eni_path': 'interfaces', 'netrules_path': 'netrules'})
+ renderer.render_network_state(ns, render_dir)
+
+ self.assertTrue(os.path.exists(os.path.join(render_dir,
+ 'interfaces')))
+ with open(os.path.join(render_dir, 'interfaces')) as fh:
+ contents = fh.read()
+ print(contents)
+ expected = """
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet dhcp
+"""
+ self.assertEqual(expected.lstrip(), contents.lstrip())
+
+ self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules')))
+ with open(os.path.join(render_dir, 'netrules')) as fh:
+ contents = fh.read()
+ print(contents)
+ expected_rule = [
+ 'SUBSYSTEM=="net"',
+ 'ACTION=="add"',
+ 'DRIVERS=="hv_netsvc"',
+ 'ATTR{address}=="00:11:22:33:44:55"',
+ 'NAME="eth0"',
+ ]
+ self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip())
+
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_device_driver_blacklist(self, mock_get_devicelist,
+ mock_read_sys_net, mock_sys_dev_path):
+ devices = {
+ 'eth1': {
+ 'bridge': False, 'carrier': False, 'dormant': False,
+ 'operstate': 'down', 'address': '00:11:22:33:44:55',
+ 'device/driver': 'hv_netsvc', 'device/device': '0x3'},
+ 'eth0': {
+ 'bridge': False, 'carrier': False, 'dormant': False,
+ 'operstate': 'down', 'address': '00:11:22:33:44:55',
+ 'device/driver': 'mlx4_core', 'device/device': '0x7'},
+ }
+
+ tmp_dir = self.tmp_dir()
+ _setup_test(tmp_dir, mock_get_devicelist,
+ mock_read_sys_net, mock_sys_dev_path,
+ dev_attrs=devices)
+
+ blacklist = ['mlx4_core']
+ network_cfg = net.generate_fallback_config(blacklist_drivers=blacklist,
+ config_driver=True)
+ ns = network_state.parse_net_config_data(network_cfg,
+ skip_broken=False)
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ # don't set rulepath so eni writes them
+ renderer = eni.Renderer(
+ {'eni_path': 'interfaces', 'netrules_path': 'netrules'})
+ renderer.render_network_state(ns, render_dir)
+
+ self.assertTrue(os.path.exists(os.path.join(render_dir,
+ 'interfaces')))
+ with open(os.path.join(render_dir, 'interfaces')) as fh:
+ contents = fh.read()
+ print(contents)
+ expected = """
+auto lo
+iface lo inet loopback
+
+auto eth1
+iface eth1 inet dhcp
+"""
+ self.assertEqual(expected.lstrip(), contents.lstrip())
+
+ self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules')))
+ with open(os.path.join(render_dir, 'netrules')) as fh:
+ contents = fh.read()
+ print(contents)
+ expected_rule = [
+ 'SUBSYSTEM=="net"',
+ 'ACTION=="add"',
+ 'DRIVERS=="hv_netsvc"',
+ 'ATTR{address}=="00:11:22:33:44:55"',
+ 'NAME="eth1"',
+ ]
+ self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip())
+
+
class TestSysConfigRendering(CiTestCase):
+ scripts_dir = '/etc/sysconfig/network-scripts'
+ header = ('# Created by cloud-init on instance boot automatically, '
+ 'do not edit.\n#\n')
+
+ def _render_and_read(self, network_config=None, state=None, dir=None):
+ if dir is None:
+ dir = self.tmp_dir()
+
+ if network_config:
+ ns = network_state.parse_net_config_data(network_config)
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ renderer = sysconfig.Renderer()
+ renderer.render_network_state(ns, dir)
+ return dir2dict(dir)
+
+ def _compare_files_to_expected(self, expected, found):
+ orig_maxdiff = self.maxDiff
+ expected_d = dict(
+ (os.path.join(self.scripts_dir, k), util.load_shell_content(v))
+ for k, v in expected.items())
+
+ # only compare the files in scripts_dir
+ scripts_found = dict(
+ (k, util.load_shell_content(v)) for k, v in found.items()
+ if k.startswith(self.scripts_dir))
+ try:
+ self.maxDiff = None
+ self.assertEqual(expected_d, scripts_found)
+ finally:
+ self.maxDiff = orig_maxdiff
+
+ def _assert_headers(self, found):
+ missing = [f for f in found
+ if (f.startswith(self.scripts_dir) and
+ not found[f].startswith(self.header))]
+ if missing:
+ raise AssertionError("Missing headers in: %s" % missing)
+
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
@@ -950,6 +1688,32 @@ USERCTL=no
with open(os.path.join(render_dir, fn)) as fh:
self.assertEqual(expected_content, fh.read())
+ def test_network_config_v1_samples(self):
+ ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET)
+ render_dir = self.tmp_path("render")
+ os.makedirs(render_dir)
+ renderer = sysconfig.Renderer()
+ renderer.render_network_state(ns, render_dir)
+ found = dir2dict(render_dir)
+ nspath = '/etc/sysconfig/network-scripts/'
+ self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ expected = """\
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=none
+DEFROUTE=yes
+DEVICE=interface0
+GATEWAY=10.0.2.2
+HWADDR=52:54:00:12:34:00
+IPADDR=10.0.2.15
+NETMASK=255.255.255.0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+"""
+ self.assertEqual(expected, found[nspath + 'ifcfg-interface0'])
+
def test_config_with_explicit_loopback(self):
ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
render_dir = self.tmp_path("render")
@@ -971,6 +1735,48 @@ USERCTL=no
"""
self.assertEqual(expected, found[nspath + 'ifcfg-eth0'])
+ def test_bond_config(self):
+ entry = NETWORK_CONFIGS['bond']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._assert_headers(found)
+
+ def test_vlan_config(self):
+ entry = NETWORK_CONFIGS['vlan']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._assert_headers(found)
+
+ def test_bridge_config(self):
+ entry = NETWORK_CONFIGS['bridge']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._assert_headers(found)
+
+ def test_manual_config(self):
+ entry = NETWORK_CONFIGS['manual']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._assert_headers(found)
+
+ def test_all_config(self):
+ entry = NETWORK_CONFIGS['all']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._assert_headers(found)
+
+ def test_small_config(self):
+ entry = NETWORK_CONFIGS['small']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._assert_headers(found)
+
+ def test_v4_and_v6_static_config(self):
+ entry = NETWORK_CONFIGS['v4_and_v6_static']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._assert_headers(found)
+
class TestEniNetRendering(CiTestCase):
@@ -992,9 +1798,7 @@ class TestEniNetRendering(CiTestCase):
os.makedirs(render_dir)
renderer = eni.Renderer(
- {'links_path_prefix': None,
- 'eni_path': 'interfaces', 'netrules_path': None,
- })
+ {'eni_path': 'interfaces', 'netrules_path': None})
renderer.render_network_state(ns, render_dir)
self.assertTrue(os.path.exists(os.path.join(render_dir,
@@ -1366,6 +2170,13 @@ class TestNetplanRoundTrip(CiTestCase):
entry['expected_netplan'].splitlines(),
files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ def testsimple_render_v4_and_v6_static(self):
+ entry = NETWORK_CONFIGS['v4_and_v6_static']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self.assertEqual(
+ entry['expected_netplan'].splitlines(),
+ files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
def testsimple_render_all(self):
entry = NETWORK_CONFIGS['all']
files = self._render_and_read(network_config=yaml.load(entry['yaml']))
@@ -1373,10 +2184,17 @@ class TestNetplanRoundTrip(CiTestCase):
entry['expected_netplan'].splitlines(),
files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ def testsimple_render_manual(self):
+ entry = NETWORK_CONFIGS['manual']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self.assertEqual(
+ entry['expected_netplan'].splitlines(),
+ files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
class TestEniRoundTrip(CiTestCase):
def _render_and_read(self, network_config=None, state=None, eni_path=None,
- links_prefix=None, netrules_path=None, dir=None):
+ netrules_path=None, dir=None):
if dir is None:
dir = self.tmp_dir()
@@ -1391,8 +2209,7 @@ class TestEniRoundTrip(CiTestCase):
eni_path = 'etc/network/interfaces'
renderer = eni.Renderer(
- config={'eni_path': eni_path, 'links_path_prefix': links_prefix,
- 'netrules_path': netrules_path})
+ config={'eni_path': eni_path, 'netrules_path': netrules_path})
renderer.render_network_state(ns, dir)
return dir2dict(dir)
@@ -1425,6 +2242,27 @@ class TestEniRoundTrip(CiTestCase):
entry['expected_eni'].splitlines(),
files['/etc/network/interfaces'].splitlines())
+ def testsimple_render_v4_and_v6_static(self):
+ entry = NETWORK_CONFIGS['v4_and_v6_static']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self.assertEqual(
+ entry['expected_eni'].splitlines(),
+ files['/etc/network/interfaces'].splitlines())
+
+ def testsimple_render_manual(self):
+ """Test rendering of 'manual' for 'type' and 'control'.
+
+ 'type: manual' in a subnet is odd, but it is the way that was used
+ to declare that a network device should get a mtu set on it even
+ if there were no addresses to configure. Also strange is the fact
+ that in order to apply that MTU the ifupdown device must be set
+ to 'auto', or the MTU would not be set."""
+ entry = NETWORK_CONFIGS['manual']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self.assertEqual(
+ entry['expected_eni'].splitlines(),
+ files['/etc/network/interfaces'].splitlines())
+
def test_routes_rendered(self):
# as reported in bug 1649652
conf = [
@@ -1516,6 +2354,118 @@ class TestNetRenderers(CiTestCase):
priority=['sysconfig', 'eni'])
+class TestGetInterfaces(CiTestCase):
+ _data = {'bonds': ['bond1'],
+ 'bridges': ['bridge1'],
+ 'vlans': ['bond1.101'],
+ 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1',
+ 'bond1.101', 'lo', 'eth1'],
+ 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
+ 'enp0s2': 'aa:aa:aa:aa:aa:02',
+ 'bond1': 'aa:aa:aa:aa:aa:01',
+ 'bond1.101': 'aa:aa:aa:aa:aa:01',
+ 'bridge1': 'aa:aa:aa:aa:aa:03',
+ 'bridge1-nic': 'aa:aa:aa:aa:aa:03',
+ 'lo': '00:00:00:00:00:00',
+ 'greptap0': '00:00:00:00:00:00',
+ 'eth1': 'aa:aa:aa:aa:aa:01',
+ 'tun0': None},
+ 'drivers': {'enp0s1': 'virtio_net',
+ 'enp0s2': 'e1000',
+ 'bond1': None,
+ 'bond1.101': None,
+ 'bridge1': None,
+ 'bridge1-nic': None,
+ 'lo': None,
+ 'greptap0': None,
+ 'eth1': 'mlx4_core',
+ 'tun0': None}}
+ data = {}
+
+ def _se_get_devicelist(self):
+ return list(self.data['devices'])
+
+ def _se_device_driver(self, name):
+ return self.data['drivers'][name]
+
+ def _se_device_devid(self, name):
+ return '0x%s' % sorted(list(self.data['drivers'].keys())).index(name)
+
+ def _se_get_interface_mac(self, name):
+ return self.data['macs'][name]
+
+ def _se_is_bridge(self, name):
+ return name in self.data['bridges']
+
+ def _se_is_vlan(self, name):
+ return name in self.data['vlans']
+
+ def _se_interface_has_own_mac(self, name):
+ return name in self.data['own_macs']
+
+ def _mock_setup(self):
+ self.data = copy.deepcopy(self._data)
+ self.data['devices'] = set(list(self.data['macs'].keys()))
+ mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
+ 'interface_has_own_mac', 'is_vlan', 'device_driver',
+ 'device_devid')
+ self.mocks = {}
+ for n in mocks:
+ m = mock.patch('cloudinit.net.' + n,
+ side_effect=getattr(self, '_se_' + n))
+ self.addCleanup(m.stop)
+ self.mocks[n] = m.start()
+
+ def test_gi_includes_duplicate_macs(self):
+ self._mock_setup()
+ ret = net.get_interfaces()
+
+ self.assertIn('enp0s1', self._se_get_devicelist())
+ self.assertIn('eth1', self._se_get_devicelist())
+ found = [ent for ent in ret if 'aa:aa:aa:aa:aa:01' in ent]
+ self.assertEqual(len(found), 2)
+
+ def test_gi_excludes_any_without_mac_address(self):
+ self._mock_setup()
+ ret = net.get_interfaces()
+
+ self.assertIn('tun0', self._se_get_devicelist())
+ found = [ent for ent in ret if 'tun0' in ent]
+ self.assertEqual(len(found), 0)
+
+ def test_gi_excludes_stolen_macs(self):
+ self._mock_setup()
+ ret = net.get_interfaces()
+ self.mocks['interface_has_own_mac'].assert_has_calls(
+ [mock.call('enp0s1'), mock.call('bond1')], any_order=True)
+ expected = [
+ ('enp0s2', 'aa:aa:aa:aa:aa:02', 'e1000', '0x5'),
+ ('enp0s1', 'aa:aa:aa:aa:aa:01', 'virtio_net', '0x4'),
+ ('eth1', 'aa:aa:aa:aa:aa:01', 'mlx4_core', '0x6'),
+ ('lo', '00:00:00:00:00:00', None, '0x8'),
+ ('bridge1-nic', 'aa:aa:aa:aa:aa:03', None, '0x3'),
+ ]
+ self.assertEqual(sorted(expected), sorted(ret))
+
+ def test_gi_excludes_bridges(self):
+ self._mock_setup()
+        # add a device 'b1', report every device as having its own mac,
+        # and set everything other than 'b1' to be a bridge.
+        # Then expect b1 is the only thing left.
+ self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1'
+ self.data['drivers']['b1'] = None
+ self.data['devices'].add('b1')
+ self.data['bonds'] = []
+ self.data['own_macs'] = self.data['devices']
+ self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"]
+ ret = net.get_interfaces()
+ self.assertEqual([('b1', 'aa:aa:aa:aa:aa:b1', None, '0x0')], ret)
+ self.mocks['is_bridge'].assert_has_calls(
+ [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'),
+ mock.call('b1')],
+ any_order=True)
+
+
class TestGetInterfacesByMac(CiTestCase):
_data = {'bonds': ['bond1'],
'bridges': ['bridge1'],
@@ -1627,6 +2577,19 @@ class TestGetInterfacesByMac(CiTestCase):
self.assertEqual('lo', ret[empty_mac])
+class TestInterfacesSorting(CiTestCase):
+
+ def test_natural_order(self):
+ data = ['ens5', 'ens6', 'ens3', 'ens20', 'ens13', 'ens2']
+ self.assertEqual(
+ sorted(data, key=_natural_sort_key),
+ ['ens2', 'ens3', 'ens5', 'ens6', 'ens13', 'ens20'])
+ data2 = ['enp2s0', 'enp2s3', 'enp0s3', 'enp0s13', 'enp0s8', 'enp1s2']
+ self.assertEqual(
+ sorted(data2, key=_natural_sort_key),
+ ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3'])
+
+
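+# _natural_sort_key orders names with embedded integers numerically, as the
+# test above expects. A minimal sketch of such a key function (an assumption
+# about its shape, not a copy of cloudinit.net):
+#
+#   def natural_key(name):      # sketch; assumes `import re`
+#       return [int(tok) if tok.isdigit() else tok
+#               for tok in re.split(r'(\d+)', name)]
+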
def _gzip_data(data):
with io.BytesIO() as iobuf:
gzfp = gzip.GzipFile(mode="wb", fileobj=iobuf)
@@ -1634,4 +2597,229 @@ def _gzip_data(data):
gzfp.close()
return iobuf.getvalue()
+
+class TestRenameInterfaces(CiTestCase):
+
+ @mock.patch('cloudinit.util.subp')
+ def test_rename_all(self, mock_subp):
+ renames = [
+ ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'),
+ ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'),
+ ]
+ current_info = {
+ 'ens3': {
+ 'downable': True,
+ 'device_id': '0x3',
+ 'driver': 'virtio_net',
+ 'mac': '00:11:22:33:44:55',
+ 'name': 'ens3',
+ 'up': False},
+ 'ens5': {
+ 'downable': True,
+ 'device_id': '0x5',
+ 'driver': 'virtio_net',
+ 'mac': '00:11:22:33:44:aa',
+ 'name': 'ens5',
+ 'up': False},
+ }
+ net._rename_interfaces(renames, current_info=current_info)
+ print(mock_subp.call_args_list)
+ mock_subp.assert_has_calls([
+ mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'],
+ capture=True),
+ mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'],
+ capture=True),
+ ])
+
+ @mock.patch('cloudinit.util.subp')
+ def test_rename_no_driver_no_device_id(self, mock_subp):
+ renames = [
+ ('00:11:22:33:44:55', 'interface0', None, None),
+ ('00:11:22:33:44:aa', 'interface1', None, None),
+ ]
+ current_info = {
+ 'eth0': {
+ 'downable': True,
+ 'device_id': None,
+ 'driver': None,
+ 'mac': '00:11:22:33:44:55',
+ 'name': 'eth0',
+ 'up': False},
+ 'eth1': {
+ 'downable': True,
+ 'device_id': None,
+ 'driver': None,
+ 'mac': '00:11:22:33:44:aa',
+ 'name': 'eth1',
+ 'up': False},
+ }
+ net._rename_interfaces(renames, current_info=current_info)
+ print(mock_subp.call_args_list)
+ mock_subp.assert_has_calls([
+ mock.call(['ip', 'link', 'set', 'eth0', 'name', 'interface0'],
+ capture=True),
+ mock.call(['ip', 'link', 'set', 'eth1', 'name', 'interface1'],
+ capture=True),
+ ])
+
+ @mock.patch('cloudinit.util.subp')
+ def test_rename_all_bounce(self, mock_subp):
+ renames = [
+ ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'),
+ ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'),
+ ]
+ current_info = {
+ 'ens3': {
+ 'downable': True,
+ 'device_id': '0x3',
+ 'driver': 'virtio_net',
+ 'mac': '00:11:22:33:44:55',
+ 'name': 'ens3',
+ 'up': True},
+ 'ens5': {
+ 'downable': True,
+ 'device_id': '0x5',
+ 'driver': 'virtio_net',
+ 'mac': '00:11:22:33:44:aa',
+ 'name': 'ens5',
+ 'up': True},
+ }
+ net._rename_interfaces(renames, current_info=current_info)
+ print(mock_subp.call_args_list)
+ mock_subp.assert_has_calls([
+ mock.call(['ip', 'link', 'set', 'ens3', 'down'], capture=True),
+ mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'],
+ capture=True),
+ mock.call(['ip', 'link', 'set', 'ens5', 'down'], capture=True),
+ mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'],
+ capture=True),
+ mock.call(['ip', 'link', 'set', 'interface0', 'up'], capture=True),
+ mock.call(['ip', 'link', 'set', 'interface2', 'up'], capture=True)
+ ])
+
+ @mock.patch('cloudinit.util.subp')
+ def test_rename_duplicate_macs(self, mock_subp):
+ renames = [
+ ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'),
+ ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'),
+ ]
+ current_info = {
+ 'eth0': {
+ 'downable': True,
+ 'device_id': '0x3',
+ 'driver': 'hv_netsvc',
+ 'mac': '00:11:22:33:44:55',
+ 'name': 'eth0',
+ 'up': False},
+ 'eth1': {
+ 'downable': True,
+ 'device_id': '0x5',
+ 'driver': 'mlx4_core',
+ 'mac': '00:11:22:33:44:55',
+ 'name': 'eth1',
+ 'up': False},
+ }
+ net._rename_interfaces(renames, current_info=current_info)
+ print(mock_subp.call_args_list)
+ mock_subp.assert_has_calls([
+ mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'],
+ capture=True),
+ ])
+
+ @mock.patch('cloudinit.util.subp')
+ def test_rename_duplicate_macs_driver_no_devid(self, mock_subp):
+ renames = [
+ ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', None),
+ ('00:11:22:33:44:55', 'vf1', 'mlx4_core', None),
+ ]
+ current_info = {
+ 'eth0': {
+ 'downable': True,
+ 'device_id': '0x3',
+ 'driver': 'hv_netsvc',
+ 'mac': '00:11:22:33:44:55',
+ 'name': 'eth0',
+ 'up': False},
+ 'eth1': {
+ 'downable': True,
+ 'device_id': '0x5',
+ 'driver': 'mlx4_core',
+ 'mac': '00:11:22:33:44:55',
+ 'name': 'eth1',
+ 'up': False},
+ }
+ net._rename_interfaces(renames, current_info=current_info)
+ print(mock_subp.call_args_list)
+ mock_subp.assert_has_calls([
+ mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'],
+ capture=True),
+ ])
+
+ @mock.patch('cloudinit.util.subp')
+ def test_rename_multi_mac_dups(self, mock_subp):
+ renames = [
+ ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'),
+ ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'),
+ ('00:11:22:33:44:55', 'vf2', 'mlx4_core', '0x7'),
+ ]
+ current_info = {
+ 'eth0': {
+ 'downable': True,
+ 'device_id': '0x3',
+ 'driver': 'hv_netsvc',
+ 'mac': '00:11:22:33:44:55',
+ 'name': 'eth0',
+ 'up': False},
+ 'eth1': {
+ 'downable': True,
+ 'device_id': '0x5',
+ 'driver': 'mlx4_core',
+ 'mac': '00:11:22:33:44:55',
+ 'name': 'eth1',
+ 'up': False},
+ 'eth2': {
+ 'downable': True,
+ 'device_id': '0x7',
+ 'driver': 'mlx4_core',
+ 'mac': '00:11:22:33:44:55',
+ 'name': 'eth2',
+ 'up': False},
+ }
+ net._rename_interfaces(renames, current_info=current_info)
+ print(mock_subp.call_args_list)
+ mock_subp.assert_has_calls([
+ mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'],
+ capture=True),
+ mock.call(['ip', 'link', 'set', 'eth2', 'name', 'vf2'],
+ capture=True),
+ ])
+
+ @mock.patch('cloudinit.util.subp')
+ def test_rename_macs_case_insensitive(self, mock_subp):
+ """_rename_interfaces must support upper or lower case macs."""
+ renames = [
+ ('aa:aa:aa:aa:aa:aa', 'en0', None, None),
+ ('BB:BB:BB:BB:BB:BB', 'en1', None, None),
+ ('cc:cc:cc:cc:cc:cc', 'en2', None, None),
+ ('DD:DD:DD:DD:DD:DD', 'en3', None, None),
+ ]
+ current_info = {
+ 'eth0': {'downable': True, 'mac': 'AA:AA:AA:AA:AA:AA',
+ 'name': 'eth0', 'up': False},
+ 'eth1': {'downable': True, 'mac': 'bb:bb:bb:bb:bb:bb',
+ 'name': 'eth1', 'up': False},
+ 'eth2': {'downable': True, 'mac': 'cc:cc:cc:cc:cc:cc',
+ 'name': 'eth2', 'up': False},
+ 'eth3': {'downable': True, 'mac': 'DD:DD:DD:DD:DD:DD',
+ 'name': 'eth3', 'up': False},
+ }
+ net._rename_interfaces(renames, current_info=current_info)
+
+ expected = [
+ mock.call(['ip', 'link', 'set', 'eth%d' % i, 'name', 'en%d' % i],
+ capture=True)
+ for i in range(len(renames))]
+ mock_subp.assert_has_calls(expected)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py
index 31324204..55f15b55 100644
--- a/tests/unittests/test_runs/test_simple_run.py
+++ b/tests/unittests/test_runs/test_simple_run.py
@@ -16,24 +16,6 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
self.patchOS(root)
self.patchUtils(root)
- def _pp_root(self, root, repatch=True):
- for (dirpath, dirnames, filenames) in os.walk(root):
- print(dirpath)
- for f in filenames:
- joined = os.path.join(dirpath, f)
- if os.path.islink(joined):
- print("f %s - (symlink)" % (f))
- else:
- print("f %s" % (f))
- for d in dirnames:
- joined = os.path.join(dirpath, d)
- if os.path.islink(joined):
- print("d %s - (symlink)" % (d))
- else:
- print("d %s" % (d))
- if repatch:
- self._patchIn(root)
-
def test_none_ds(self):
new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, new_root)
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 014aa6a3..f38a664c 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -20,6 +20,9 @@ except ImportError:
import mock
+BASH = util.which('bash')
+
+
class FakeSelinux(object):
def __init__(self, match_what):
@@ -362,6 +365,9 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
self.addCleanup(shutil.rmtree, self.new_root)
self.patchOS(self.new_root)
self.patchUtils(self.new_root)
+ p = mock.patch("cloudinit.util.is_container", return_value=False)
+ self.addCleanup(p.stop)
+ self._m_is_container = p.start()
def _create_sysfs_parent_directory(self):
util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id'))
@@ -450,6 +456,26 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
self._create_sysfs_file(sysfs_key, dmi_value)
self.assertEqual(expected, util.read_dmi_data(dmi_key))
+ def test_container_returns_none(self):
+ """In a container read_dmi_data should always return None."""
+
+ # first verify we get the value if not in container
+ self._m_is_container.return_value = False
+ key, val = ("system-product-name", "my_product")
+ self._create_sysfs_file('product_name', val)
+ self.assertEqual(val, util.read_dmi_data(key))
+
+ # then verify in container returns None
+ self._m_is_container.return_value = True
+ self.assertIsNone(util.read_dmi_data(key))
+
+ def test_container_returns_none_on_unknown(self):
+ """In a container even bogus keys return None."""
+ self._m_is_container.return_value = True
+ self._create_sysfs_file('product_name', "should-be-ignored")
+ self.assertIsNone(util.read_dmi_data("bogus"))
+ self.assertIsNone(util.read_dmi_data("system-product-name"))
+
class TestMultiLog(helpers.FilesystemMockingTestCase):
@@ -544,17 +570,17 @@ class TestReadSeeded(helpers.TestCase):
class TestSubp(helpers.TestCase):
- stdin2err = ['bash', '-c', 'cat >&2']
+ stdin2err = [BASH, '-c', 'cat >&2']
stdin2out = ['cat']
utf8_invalid = b'ab\xaadef'
utf8_valid = b'start \xc3\xa9 end'
utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
- printenv = ['bash', '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--']
+ printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--']
def printf_cmd(self, *args):
# bash's printf supports \xaa. So does /usr/bin/printf
# but by using bash, we remove dependency on another program.
- return(['bash', '-c', 'printf "$@"', 'printf'] + list(args))
+ return([BASH, '-c', 'printf "$@"', 'printf'] + list(args))
def test_subp_handles_utf8(self):
# The given bytes contain utf-8 accented characters as seen in e.g.
@@ -781,4 +807,20 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
self.reRoot(root_d)
self.assertTrue(util.system_is_snappy())
+
+class TestLoadShellContent(helpers.TestCase):
+ def test_comments_handled_correctly(self):
+ """Shell comments should be allowed in the content."""
+ self.assertEqual(
+ {'key1': 'val1', 'key2': 'val2', 'key3': 'val3 #tricky'},
+ util.load_shell_content('\n'.join([
+ "#top of file comment",
+ "key1=val1 #this is a comment",
+ "# second comment",
+ 'key2="val2" # inlin comment'
+ '#badkey=wark',
+ 'key3="val3 #tricky"',
+ ''])))
+
+
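+# load_shell_content parses simple KEY=value shell assignments into a dict,
+# dropping whole-line and trailing comments while preserving '#' inside
+# quoted values, e.g. (illustrative):
+#
+#   util.load_shell_content('key3="val3 #tricky"') == {'key3': 'val3 #tricky'}
+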
# vi: ts=4 expandtab
diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd
index ccc10b40..ff9153ad 100755
--- a/tools/build-on-freebsd
+++ b/tools/build-on-freebsd
@@ -8,6 +8,7 @@ fail() { echo "FAILED:" "$@" 1>&2; exit 1; }
# Check dependencies:
depschecked=/tmp/c-i.dependencieschecked
pkgs="
+ bash
dmidecode
e2fsprogs
py27-Jinja2
@@ -16,7 +17,7 @@ pkgs="
py27-configobj
py27-jsonpatch
py27-jsonpointer
- py27-oauth
+ py27-oauthlib
py27-prettytable
py27-requests
py27-serial
@@ -35,9 +36,6 @@ touch $depschecked
python setup.py build
python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd
-# Install the correct config file:
-cp config/cloud.cfg-freebsd /etc/cloud/cloud.cfg
-
# Enable cloud-init in /etc/rc.conf:
sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf
echo 'cloudinit_enable="YES"' >> /etc/rc.conf
diff --git a/tools/cloudconfig-schema b/tools/cloudconfig-schema
new file mode 100755
index 00000000..32f0d61e
--- /dev/null
+++ b/tools/cloudconfig-schema
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""cloudconfig-schema
+
+Validate existing files against cloud-config schema or provide supported schema
+documentation.
+"""
+
+import os
+import sys
+
+
+def call_entry_point(name):
+ (istr, dot, ent) = name.rpartition('.')
+ try:
+ __import__(istr)
+ except ImportError:
+    # if that import failed, check dirname(__file__)/..
+ # to support ./bin/program with modules in .
+ _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ sys.path.insert(0, _tdir)
+ try:
+ __import__(istr)
+ except ImportError as e:
+ sys.stderr.write("Unable to find %s: %s\n" % (name, e))
+ sys.exit(2)
+
+ sys.exit(getattr(sys.modules[istr], ent)())
+
+
+if __name__ == '__main__':
+ call_entry_point("cloudinit.config.schema.main")
+
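+# call_entry_point("cloudinit.config.schema.main") imports the module part
+# of the dotted name, retrying with the source tree on sys.path when the
+# package is not installed, then exits with the return value of main().
+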
+# vi: ts=4 expandtab syntax=python
diff --git a/tools/ds-identify b/tools/ds-identify
index 74d26537..33bd2991 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -85,6 +85,7 @@ DI_MAIN=${DI_MAIN:-main}
DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}"
DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}"
+DI_DMI_CHASSIS_ASSET_TAG=""
DI_DMI_PRODUCT_NAME=""
DI_DMI_SYS_VENDOR=""
DI_DMI_PRODUCT_SERIAL=""
@@ -110,7 +111,8 @@ DI_DSNAME=""
# this has to match the builtin list in cloud-init, it is what will
# be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
-CloudSigma CloudStack DigitalOcean Ec2 GCE OpenNebula OpenStack OVF SmartOS"
+CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \
+OVF SmartOS Scaleway"
DI_DSLIST=""
DI_MODE=""
DI_ON_FOUND=""
@@ -258,6 +260,12 @@ read_kernel_cmdline() {
DI_KERNEL_CMDLINE="$cmdline"
}
+read_dmi_chassis_asset_tag() {
+ cached "${DI_DMI_CHASSIS_ASSET_TAG}" && return
+ get_dmi_field chassis_asset_tag
+ DI_DMI_CHASSIS_ASSET_TAG="$_RET"
+}
+
read_dmi_sys_vendor() {
cached "${DI_DMI_SYS_VENDOR}" && return
get_dmi_field sys_vendor
@@ -385,6 +393,14 @@ read_pid1_product_name() {
DI_PID_1_PRODUCT_NAME="$product_name"
}
+dmi_chassis_asset_tag_matches() {
+ is_container && return 1
+ case "${DI_DMI_CHASSIS_ASSET_TAG}" in
+ $1) return 0;;
+ esac
+ return 1
+}
+
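+# (dscheck_Azure below relies on this helper: Azure instances expose the
+# fixed chassis asset tag "7783-7084-3265-9085-8269-3286-77".)
+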
dmi_product_name_matches() {
is_container && return 1
case "${DI_DMI_PRODUCT_NAME}" in
@@ -401,11 +417,6 @@ dmi_product_serial_matches() {
return 1
}
-dmi_product_name_is() {
- is_container && return 1
- [ "${DI_DMI_PRODUCT_NAME}" = "$1" ]
-}
-
dmi_sys_vendor_is() {
is_container && return 1
[ "${DI_DMI_SYS_VENDOR}" = "$1" ]
@@ -477,7 +488,7 @@ dscheck_CloudStack() {
dscheck_CloudSigma() {
# http://paste.ubuntu.com/23624795/
- dmi_product_name_is "CloudSigma" && return $DS_FOUND
+ dmi_product_name_matches "CloudSigma" && return $DS_FOUND
return $DS_NOT_FOUND
}
@@ -544,6 +555,9 @@ dscheck_NoCloud() {
case " ${DI_KERNEL_CMDLINE} " in
*\ ds=nocloud*) return ${DS_FOUND};;
esac
+ case " ${DI_DMI_PRODUCT_SERIAL} " in
+ *\ ds=nocloud*) return ${DS_FOUND};;
+ esac
for d in nocloud nocloud-net; do
check_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
done
@@ -653,6 +667,8 @@ dscheck_Azure() {
# UUID="112D211272645f72" LABEL="rd_rdfe_stable.161212-1209"
# TYPE="udf">/dev/sr0</device>
#
+ local azure_chassis="7783-7084-3265-9085-8269-3286-77"
+ dmi_chassis_asset_tag_matches "${azure_chassis}" && return $DS_FOUND
check_seed_dir azure ovf-env.xml && return ${DS_FOUND}
[ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND}
@@ -785,7 +801,7 @@ dscheck_Ec2() {
}
dscheck_GCE() {
- if dmi_product_name_is "Google Compute Engine"; then
+ if dmi_product_name_matches "Google Compute Engine"; then
return ${DS_FOUND}
fi
# product name is not guaranteed (LP: #1674861)
@@ -806,10 +822,10 @@ dscheck_OpenStack() {
return ${DS_NOT_FOUND}
fi
local nova="OpenStack Nova" compute="OpenStack Compute"
- if dmi_product_name_is "$nova"; then
+ if dmi_product_name_matches "$nova"; then
return ${DS_FOUND}
fi
- if dmi_product_name_is "$compute"; then
+ if dmi_product_name_matches "$compute"; then
# RDO installed nova (LP: #1675349).
return ${DS_FOUND}
fi
@@ -821,10 +837,11 @@ dscheck_OpenStack() {
}
dscheck_AliYun() {
- # aliyun is not enabled by default (LP: #1638931)
- # so if we are here, it is because the datasource_list was
- # set to include it. Thus, 'maybe'.
- return $DS_MAYBE
+ check_seed_dir "AliYun" meta-data user-data && return ${DS_FOUND}
+ if dmi_product_name_matches "Alibaba Cloud ECS"; then
+ return $DS_FOUND
+ fi
+ return $DS_NOT_FOUND
}
dscheck_AltCloud() {
@@ -879,6 +896,22 @@ dscheck_None() {
return ${DS_NOT_FOUND}
}
+dscheck_Scaleway() {
+ if [ "${DI_DMI_SYS_VENDOR}" = "Scaleway" ]; then
+ return $DS_FOUND
+ fi
+
+ case " ${DI_KERNEL_CMDLINE} " in
+ *\ scaleway\ *) return ${DS_FOUND};;
+ esac
+
+ if [ -f ${PATH_ROOT}/var/run/scaleway ]; then
+ return ${DS_FOUND}
+ fi
+
+ return ${DS_NOT_FOUND}
+}
+
collect_info() {
read_virt
read_pid1_product_name
@@ -887,6 +920,7 @@ collect_info() {
read_config
read_datasource_list
read_dmi_sys_vendor
+ read_dmi_chassis_asset_tag
read_dmi_product_name
read_dmi_product_serial
read_dmi_product_uuid
@@ -901,7 +935,7 @@ print_info() {
_print_info() {
local n="" v="" vars=""
vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL"
- vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME"
+ vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG"
vars="$vars FS_LABELS KERNEL_CMDLINE VIRT"
vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION"
vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM"
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index f185dbf2..a5d14ab7 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -262,8 +262,8 @@ class MetaDataHandler(object):
except ValueError:
raise WebException(hclient.BAD_REQUEST,
"%s: not an integer" % mybe_key)
- except KeyError:
- raise WebException(hclient.BAD_REQUEST,
+ except IndexError:
+ raise WebException(hclient.NOT_FOUND,
"Unknown key id %r" % mybe_key)
# Extract the possible sub-params
result = traverse(nparams[1:], {
diff --git a/tools/net-convert.py b/tools/net-convert.py
index b2db8adf..68559cbf 100755
--- a/tools/net-convert.py
+++ b/tools/net-convert.py
@@ -75,7 +75,7 @@ def main():
r_cls = sysconfig.Renderer
r = r_cls()
- r.render_network_state(ns, target=args.directory)
+ r.render_network_state(network_state=ns, target=args.directory)
if __name__ == '__main__':
diff --git a/tools/read-dependencies b/tools/read-dependencies
index f4349055..2a648680 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -1,43 +1,239 @@
#!/usr/bin/env python
+"""List pip dependencies or system package dependencies for cloud-init."""
# You might be tempted to rewrite this as a shell script, but you
# would be surprised to discover that things like 'egrep' or 'sed' may
# differ between Linux and *BSD.
+try:
+ from argparse import ArgumentParser
+except ImportError:
+ raise RuntimeError(
+ 'Could not import python-argparse. Please install python-argparse '
+ 'package to continue')
+
+import json
import os
import re
-import sys
import subprocess
+import sys
+
+DEFAULT_REQUIREMENTS = 'requirements.txt'
+
+# Map the appropriate package dir needed for each distro choice
+DISTRO_PKG_TYPE_MAP = {
+ 'centos': 'redhat',
+ 'redhat': 'redhat',
+ 'debian': 'debian',
+ 'ubuntu': 'debian',
+ 'opensuse': 'suse',
+ 'suse': 'suse'
+}
+
+DISTRO_INSTALL_PKG_CMD = {
+ 'centos': ['yum', 'install', '--assumeyes'],
+ 'redhat': ['yum', 'install', '--assumeyes'],
+ 'debian': ['apt', 'install', '-y'],
+ 'ubuntu': ['apt', 'install', '-y'],
+ 'opensuse': ['zypper', 'install'],
+ 'suse': ['zypper', 'install']
+}
+
+
+# List of base system packages required to enable ci automation
+CI_SYSTEM_BASE_PKGS = {
+ 'common': ['make', 'sudo', 'tar'],
+ 'redhat': ['python-tox'],
+ 'centos': ['python-tox'],
+ 'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'],
+ 'debian': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild']}
+
+
+# JSON definition of distro-specific package dependencies
+DISTRO_PKG_DEPS_PATH = "packages/pkg-deps.json"
+
+
+def get_parser():
+ """Return an argument parser for this command."""
+ parser = ArgumentParser(description=__doc__)
+ parser.add_argument(
+ '-r', '--requirements-file', type=str, dest='req_files',
+ action='append', default=None,
+ help='pip-style requirements file [default=%s]' % DEFAULT_REQUIREMENTS)
+ parser.add_argument(
+ '-d', '--distro', type=str, choices=DISTRO_PKG_TYPE_MAP.keys(),
+ help='The name of the distro to generate package deps for.')
+ parser.add_argument(
+ '--dry-run', action='store_true', default=False, dest='dry_run',
+ help='Dry run the install, making no package changes.')
+ parser.add_argument(
+ '-s', '--system-pkg-names', action='store_true', default=False,
+ dest='system_pkg_names',
+ help='Generate distribution-specific system package names.')
+ parser.add_argument(
+ '-i', '--install', action='store_true', default=False,
+ dest='install',
+ help='When specified, install the required system packages.')
+ parser.add_argument(
+ '-t', '--test-distro', action='store_true', default=False,
+ dest='test_distro',
+ help='Additionally install continuous integration system packages '
+ 'required for build and test automation.')
+ parser.add_argument(
+ '-v', '--python-version', type=str, dest='python_version', default=None,
+ choices=["2", "3"],
+ help='Override the version of python we want to generate system '
+ 'package dependencies for. Defaults to the version of python '
+ 'this script is called with')
+ return parser
+
+
+def get_package_deps_from_json(topdir, distro):
+ """Get a dict of build and runtime package requirements for a distro.
+
+ @param topdir: The root directory in which to search for the
+ DISTRO_PKG_DEPS_PATH json blob of package requirements information.
+ @param distro: The specific distribution shortname to pull dependencies
+ for.
+ @return: Dict containing "requires", "build-requires" and "rename" lists
+ for a given distribution.
+ """
+ with open(os.path.join(topdir, DISTRO_PKG_DEPS_PATH), 'r') as stream:
+ deps = json.loads(stream.read())
+ if distro is None:
+ return {}
+ return deps[DISTRO_PKG_TYPE_MAP[distro]]
+
+
+def parse_pip_requirements(requirements_path):
+ """Return the pip requirement names from pip-style requirements_path."""
+ dep_names = []
+ with open(requirements_path, "r") as fp:
+ for line in fp:
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+
+ # remove pip-style markers
+ dep = line.split(';')[0]
+
+ # remove version requirements
+ if re.search('[>=.<]+', dep):
+ dep_names.append(re.split(r'[>=.<]+', dep)[0].strip())
+ else:
+ dep_names.append(dep)
+ return dep_names
+
+
+def translate_pip_to_system_pkg(pip_requires, renames, python_ver):
+ """Translate pip package names to distro-specific package names.
+
+ @param pip_requires: List of versionless pip package names to translate.
+ @param renames: Dict containing special case renames from pip name to system
+ package name for the distro.
+ @param python_ver: Optional python version string "2" or "3". When None,
+ use the python version that is calling this script via sys.version_info.
+ """
+ if python_ver is None:
+ python_ver = str(sys.version_info[0])
+ if python_ver == "2":
+ prefix = "python-"
+ else:
+ prefix = "python3-"
+ standard_pkg_name = "{0}{1}"
+ translated_names = []
+ for pip_name in pip_requires:
+ pip_name = pip_name.lower()
+ # Find a rename if present for the distro package and python version
+ rename = renames.get(pip_name, {}).get(python_ver, None)
+ if rename:
+ translated_names.append(rename)
+ else:
+ translated_names.append(
+ standard_pkg_name.format(prefix, pip_name))
+ return translated_names
+
+
+def main():
+ parser = get_parser()
+ args = parser.parse_args()
+ if 'CLOUD_INIT_TOP_D' in os.environ:
+ topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
+ else:
+ topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-if 'CLOUD_INIT_TOP_D' in os.environ:
- topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
-else:
- topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+ if args.test_distro:
+ # Give us all the system deps we need for continuous integration
+ if args.req_files:
+ sys.stderr.write(
+ "Parameter --test-distro overrides --requirements-file. Use "
+ "one or the other.\n")
+ sys.exit(1)
+ args.req_files = [os.path.join(topd, DEFAULT_REQUIREMENTS),
+ os.path.join(topd, 'test-' + DEFAULT_REQUIREMENTS)]
+ args.install = True
+ if args.req_files is None:
+ args.req_files = [os.path.join(topd, DEFAULT_REQUIREMENTS)]
+ if not os.path.isfile(args.req_files[0]):
+ sys.stderr.write("Unable to locate '%s' file that should "
+ "exist in cloud-init root directory." %
+ args.req_files[0])
+ sys.exit(1)
-for fname in ("setup.py", "requirements.txt"):
- if not os.path.isfile(os.path.join(topd, fname)):
- sys.stderr.write("Unable to locate '%s' file that should "
- "exist in cloud-init root directory." % fname)
+ bad_files = [r for r in args.req_files if not os.path.isfile(r)]
+ if bad_files:
+ sys.stderr.write(
+ "Unable to find requirements files: %s\n" % ','.join(bad_files))
sys.exit(1)
-if len(sys.argv) > 1:
- reqfile = sys.argv[1]
-else:
- reqfile = "requirements.txt"
+ pip_pkg_names = set()
+ for req_path in args.req_files:
+ pip_pkg_names.update(set(parse_pip_requirements(req_path)))
+ deps_from_json = get_package_deps_from_json(topd, args.distro)
+ renames = deps_from_json.get('renames', {})
+ translated_pip_names = translate_pip_to_system_pkg(
+ pip_pkg_names, renames, args.python_version)
+ all_deps = []
+ if args.distro:
+ all_deps.extend(
+ translated_pip_names + deps_from_json['requires'] +
+ deps_from_json['build-requires'])
+ else:
+ if args.system_pkg_names:
+ all_deps = translated_pip_names
+ else:
+ all_deps = list(pip_pkg_names)
+ if args.install:
+ pkg_install(all_deps, args.distro, args.test_distro, args.dry_run)
+ else:
+ print('\n'.join(all_deps))
-with open(os.path.join(topd, reqfile), "r") as fp:
- for line in fp:
- line = line.strip()
- if not line or line.startswith("#"):
- continue
- # remove pip-style markers
- dep = line.split(';')[0]
+def pkg_install(pkg_list, distro, test_distro=False, dry_run=False):
+ """Install a list of packages using the DISTRO_INSTALL_PKG_CMD."""
+ if test_distro:
+ pkg_list = list(pkg_list) + CI_SYSTEM_BASE_PKGS['common']
+ distro_base_pkgs = CI_SYSTEM_BASE_PKGS.get(distro, [])
+ pkg_list += distro_base_pkgs
+ print('Installing deps: {0}{1}'.format(
+ '(dryrun) ' if dry_run else '', ' '.join(pkg_list)))
+ install_cmd = []
+ if dry_run:
+ install_cmd.append('echo')
+ if os.geteuid() != 0:
+ install_cmd.append('sudo')
+ install_cmd.extend(DISTRO_INSTALL_PKG_CMD[distro])
+ if distro in ['centos', 'redhat']:
+ # CentOS and Redhat need epel-release to access oauthlib and jsonschema
+ subprocess.check_call(install_cmd + ['epel-release'])
+ if distro in ['suse', 'opensuse', 'redhat', 'centos']:
+ pkg_list.append('rpm-build')
+ subprocess.check_call(install_cmd + pkg_list)
- # remove version requirements
- dep = re.split("[>=.<]*", dep)[0].strip()
- print(dep)
-sys.exit(0)
+if __name__ == "__main__":
+ parser = get_parser()
+ args = parser.parse_args()
+ sys.exit(main(args.distro))
# vi: ts=4 expandtab
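For reference, a few sample invocations of the rewritten tool, using only the options defined in get_parser() above (the distro names are illustrative):

    # list pip dependency names from requirements.txt (the previous behaviour)
    ./tools/read-dependencies
    # translate pip names to system package names and append the distro's
    # requires/build-requires from packages/pkg-deps.json
    ./tools/read-dependencies --distro ubuntu
    # dry-run the full continuous-integration dependency install for CentOS
    ./tools/read-dependencies --distro centos --test-distro --dry-run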
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
new file mode 100755
index 00000000..e624541a
--- /dev/null
+++ b/tools/render-cloudcfg
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+
+import argparse
+import os
+import sys
+
+if "avoid-pep8-E402-import-not-top-of-file":
+ _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ sys.path.insert(0, _tdir)
+ from cloudinit import templater
+ from cloudinit import util
+ from cloudinit.atomic_helper import write_file
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ variants = ["bsd", "centos", "fedora", "rhel", "ubuntu", "unknown"]
+ platform = util.system_info()
+ parser.add_argument(
+ "--variant", default=platform['variant'], action="store",
+ help="define the variant.", choices=variants)
+ parser.add_argument(
+ "template", nargs="?", action="store",
+ default='./config/cloud.cfg.tmpl',
+ help="Path to the cloud.cfg template")
+ parser.add_argument(
+ "output", nargs="?", action="store", default="-",
+ help="Output file. Use '-' to write to stdout")
+
+ args = parser.parse_args()
+
+ with open(args.template, 'r') as fh:
+ contents = fh.read()
+ tpl_params = {'variant': args.variant}
+ contents = (templater.render_string(contents, tpl_params)).rstrip() + "\n"
+ util.load_yaml(contents)
+ if args.output == "-":
+ sys.stdout.write(contents)
+ else:
+ write_file(args.output, contents, omode="w")
+
+if __name__ == '__main__':
+ main()
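Sample invocations of the new renderer, assuming it is run from the top of the source tree (the variant names come from the choices list above):

    # render the template for CentOS and write the result to a file
    ./tools/render-cloudcfg --variant centos config/cloud.cfg.tmpl /tmp/cloud.cfg
    # render for the variant detected via util.system_info() and print to stdout
    ./tools/render-cloudcfg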
diff --git a/tools/run-centos b/tools/run-centos
new file mode 100755
index 00000000..d44d5145
--- /dev/null
+++ b/tools/run-centos
@@ -0,0 +1,271 @@
+#!/bin/bash
+# This file is part of cloud-init. See LICENSE file for license information.
+
+set -u
+
+VERBOSITY=0
+TEMP_D=""
+KEEP=false
+CONTAINER=""
+
+error() { echo "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+errorrc() { local r=$?; error "$@" "ret=$r"; return $r; }
+
+Usage() {
+ cat <<EOF
+Usage: ${0##*/} [ options ] version
+
+ This utility makes it easier to run unit tests and to build rpm and
+ source rpm packages inside an LXC container of the specified CentOS version.
+
+ version is major release number (6 or 7)
+
+ options:
+ -a | --artifact keep .rpm artifacts
+ -k | --keep keep container after tests
+ -r | --rpm build .rpm
+ -s | --srpm build .src.rpm
+ -u | --unittest run unit tests
+
+ Example:
+ * ${0##*/} --rpm --srpm --unittest 6
+EOF
+}
+
+bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; return 1; }
+cleanup() {
+ if [ -n "$CONTAINER" -a "$KEEP" = "false" ]; then
+ delete_container "$CONTAINER"
+ fi
+ [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
+}
+
+debug() {
+ local level=${1}; shift;
+ [ "${level}" -gt "${VERBOSITY}" ] && return
+ error "${@}"
+}
+
+
+inside_as() {
+ # inside_as(container_name, user, cmd[, args])
+ # executes cmd with args inside container as user in users home dir.
+ local name="$1" user="$2"
+ shift 2
+ if [ "$user" = "root" ]; then
+ inside "$name" "$@"
+ return
+ fi
+ local stuffed="" b64=""
+ stuffed=$(getopt --shell sh --options "" -- -- "$@")
+ stuffed=${stuffed# -- }
+ b64=$(printf "%s\n" "$stuffed" | base64 --wrap=0)
+ inside "$name" su "$user" -c \
+ 'cd; eval set -- "$(echo '$b64' | base64 --decode)" && exec "$@"'
+}
+
+inside_as_cd() {
+ local name="$1" user="$2" dir="$3"
+ shift 3
+ inside_as "$name" "$user" sh -c 'cd "$0" && exec "$@"' "$dir" "$@"
+}
+
+inside() {
+ local name="$1"
+ shift
+ lxc exec "$name" -- "$@"
+}
+
+inject_cloud_init(){
+ # take current cloud-init git dir and put it inside $name at
+ # ~$user/cloud-init.
+ local name="$1" user="$2" top_d="" dname="" pstat=""
+ top_d=$(git rev-parse --show-toplevel) || {
+ errorrc "Failed to get git top level in $PWD";
+ return
+ }
+ dname=$(basename "${top_d}") || return
+ debug 1 "collecting ${top_d} ($dname) into user $user in $name."
+ tar -C "${top_d}/.." -cpf - "$dname" |
+ inside_as "$name" "$user" sh -ec '
+ dname=$1
+ rm -Rf "$dname"
+ tar -xpf -
+ [ "$dname" = "cloud-init" ] || mv "$dname" cloud-init' \
+ extract "$dname"
+ [ "${PIPESTATUS[*]}" = "0 0" ] || {
+ error "Failed to push tarball of '$top_d' into $name" \
+ " for user $user (dname=$dname)"
+ return 1
+ }
+ return 0
+}
+
+prep() {
+ # we need some very basic things not present in the container.
+ # - git
+ # - tar (CentOS 6 lxc container does not have it)
+ # - python-argparse (or python3)
+ local needed="" pair="" pkg="" cmd="" needed=""
+ for pair in tar:tar git:git; do
+ pkg=${pair#*:}
+ cmd=${pair%%:*}
+ command -v $cmd >/dev/null 2>&1 || needed="${needed} $pkg"
+ done
+ if ! command -v python3 >/dev/null 2>&1; then
+ python -c "import argparse" >/dev/null 2>&1 ||
+ needed="${needed} python-argparse"
+ fi
+ needed=${needed# }
+ if [ -z "$needed" ]; then
+ error "No prep packages needed"
+ return 0
+ fi
+ error "Installing prep packages: ${needed}"
+ yum install --assumeyes ${needed}
+}
+
+start_container() {
+ local src="$1" name="$2"
+ debug 1 "starting container $name from '$src'"
+ lxc launch "$src" "$name" || {
+ errorrc "Failed to start container '$name' from '$src'";
+ return
+ }
+ CONTAINER=$name
+
+ local out="" ret=""
+ debug 1 "waiting for networking"
+ out=$(inside "$name" sh -c '
+ i=0
+ while [ $i -lt 60 ]; do
+ getent hosts mirrorlist.centos.org && exit 0
+ sleep 2
+ i=$(($i+1))
+ done
+ exit 1' 2>&1)
+ ret=$?
+ if [ $ret -ne 0 ]; then
+ error "Waiting for network in container '$name' failed. [$ret]"
+ error "$out"
+ return $ret
+ fi
+
+ if [ ! -z "${http_proxy-}" ]; then
+ debug 1 "configuring proxy ${http_proxy}"
+ inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
+ fi
+}
+
+delete_container() {
+ debug 1 "removing container $1 [--keep to keep]"
+ lxc delete --force "$1"
+}
+
+main() {
+ local short_opts="ahkrsuv"
+ local long_opts="artifact,help,keep,rpm,srpm,unittest,verbose"
+ local getopt_out=""
+ getopt_out=$(getopt --name "${0##*/}" \
+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+ eval set -- "${getopt_out}" ||
+ { bad_Usage; return; }
+
+ local cur="" next=""
+ local artifact="" keep="" rpm="" srpm="" unittest="" version=""
+
+ while [ $# -ne 0 ]; do
+ cur="${1:-}"; next="${2:-}";
+ case "$cur" in
+ -a|--artifact) artifact=1;;
+ -h|--help) Usage ; exit 0;;
+ -k|--keep) KEEP=true;;
+ -r|--rpm) rpm=1;;
+ -s|--srpm) srpm=1;;
+ -u|--unittest) unittest=1;;
+ -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
+ --) shift; break;;
+ esac
+ shift;
+ done
+
+ [ $# -eq 1 ] || { bad_Usage "ERROR: Must provide version!"; return; }
+ version="$1"
+ case "$version" in
+ 6|7) :;;
+ *) error "Expected version of 6 or 7, not '$version'"; return;;
+ esac
+
+ TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
+ fail "failed to make tempdir"
+ trap cleanup EXIT
+
+ # program starts here
+ local uuid="" name="" user="ci-test" cdir=""
+ cdir="/home/$user/cloud-init"
+ uuid=$(uuidgen -t) || { error "no uuidgen"; return 1; }
+ name="cloud-init-centos-${uuid%%-*}"
+
+ start_container "images:centos/$version" "$name"
+
+ # prep the container (install very basic dependencies)
+ inside "$name" bash -s prep <"$0" ||
+ { errorrc "Failed to prep container $name"; return; }
+
+ # add the user
+ inside "$name" useradd "$user"
+
+ debug 1 "inserting cloud-init"
+ inject_cloud_init "$name" "$user" || {
+ errorrc "FAIL: injecting cloud-init into $name failed."
+ return
+ }
+
+ inside_as_cd "$name" root "$cdir" \
+ ./tools/read-dependencies --distro=centos --test-distro || {
+ errorrc "FAIL: failed to install dependencies with read-dependencies"
+ return
+ }
+
+ local errors=0
+ inside_as_cd "$name" "$user" "$cdir" \
+ sh -ec "git checkout .; git status" ||
+ { errorrc "git checkout failed."; errors=$(($errors+1)); }
+
+ if [ -n "$unittest" ]; then
+ debug 1 "running unit tests."
+ inside_as_cd "$name" "$user" "$cdir" nosetests tests/unittests ||
+ { errorrc "nosetests failed."; errors=$(($errors+1)); }
+ fi
+
+ if [ -n "$srpm" ]; then
+ debug 1 "building srpm."
+ inside_as_cd "$name" "$user" "$cdir" ./packages/brpm --srpm ||
+ { errorrc "brpm --srpm."; errors=$(($errors+1)); }
+ fi
+
+ if [ -n "$rpm" ]; then
+ debug 1 "building rpm."
+ inside_as_cd "$name" "$user" "$cdir" ./packages/brpm ||
+ { errorrc "brpm failed."; errors=$(($errors+1)); }
+ fi
+
+ if [ -n "$artifact" ]; then
+ for built_rpm in $(inside "$name" sh -c "echo $cdir/*.rpm"); do
+ lxc file pull "$name/$built_rpm" .
+ done
+ fi
+
+ if [ "$errors" != "0" ]; then
+ error "there were $errors errors."
+ return 1
+ fi
+ return 0
+}
+
+if [ "${1:-}" = "prep" ]; then
+ shift
+ prep "$@"
+else
+ main "$@"
+fi
+# vi: ts=4 expandtab
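Typical runs of the new helper, following its Usage text above; this assumes a working lxc client with access to the images: remote:

    # run unit tests and build rpm and srpm packages in a CentOS 7 container
    ./tools/run-centos --rpm --srpm --unittest 7
    # build an rpm on CentOS 6, pull the built artifacts back, keep the container
    ./tools/run-centos --rpm --artifact --keep 6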
diff --git a/tox.ini b/tox.ini
index 6276662d..ef768847 100644
--- a/tox.ini
+++ b/tox.ini
@@ -21,7 +21,11 @@ setenv =
LC_ALL = en_US.utf-8
[testenv:pylint]
-deps = pylint==1.7.1
+deps =
+ # requirements
+ pylint==1.7.1
+ # test-requirements because unit tests are now present in cloudinit tree
+ -r{toxinidir}/test-requirements.txt
commands = {envpython} -m pylint {posargs:cloudinit}
[testenv:py3]
@@ -29,7 +33,7 @@ basepython = python3
deps = -r{toxinidir}/test-requirements.txt
commands = {envpython} -m nose {posargs:--with-coverage \
--cover-erase --cover-branches --cover-inclusive \
- --cover-package=cloudinit tests/unittests}
+ --cover-package=cloudinit tests/unittests cloudinit}
[testenv:py27]
basepython = python2.7
@@ -98,11 +102,15 @@ deps = pyflakes
[testenv:tip-pylint]
commands = {envpython} -m pylint {posargs:cloudinit}
-deps = pylint
+deps =
+ # requirements
+ pylint
+ # test-requirements
+ -r{toxinidir}/test-requirements.txt
[testenv:citest]
basepython = python3
commands = {envpython} -m tests.cloud_tests {posargs}
passenv = HOME
deps =
- pylxd==2.1.3
+ pylxd==2.2.4
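The updated environments can be exercised as usual, for example:

    tox -e pylint   # now also installs test-requirements.txt, so tests in the cloudinit tree import cleanly
    tox -e py3      # coverage now spans the test modules moved into the cloudinit tree
    tox -e citest   # integration harness, now against pylxd 2.2.4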