-rw-r--r--  .gitignore | 4
-rw-r--r--  .pylintrc | 29
-rw-r--r--  HACKING.rst | 3
-rw-r--r--  cloudinit/atomic_helper.py | 12
-rw-r--r--  cloudinit/cloud.py | 3
-rw-r--r--  cloudinit/cmd/main.py | 17
-rw-r--r--  cloudinit/config/__init__.py | 2
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 11
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 30
-rw-r--r--  cloudinit/config/cc_fan.py | 4
-rw-r--r--  cloudinit/config/cc_growpart.py | 6
-rw-r--r--  cloudinit/config/cc_mounts.py | 12
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 2
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 7
-rwxr-xr-x  cloudinit/config/cc_set_passwords.py | 3
-rw-r--r--  cloudinit/config/cc_snap_config.py | 4
-rw-r--r--  cloudinit/config/cc_snappy.py | 6
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 24
-rwxr-xr-x  cloudinit/distros/__init__.py | 31
-rw-r--r--  cloudinit/distros/arch.py | 6
-rw-r--r--  cloudinit/distros/debian.py | 5
-rw-r--r--  cloudinit/distros/freebsd.py | 8
-rw-r--r--  cloudinit/distros/gentoo.py | 11
-rw-r--r--  cloudinit/distros/parsers/hosts.py | 4
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py | 6
-rw-r--r--  cloudinit/distros/rhel.py | 4
-rwxr-xr-x  cloudinit/distros/ug_util.py | 16
-rw-r--r--  cloudinit/ec2_utils.py | 7
-rw-r--r--  cloudinit/gpg.py | 2
-rw-r--r--  cloudinit/handlers/__init__.py | 2
-rw-r--r--  cloudinit/helpers.py | 14
-rw-r--r--  cloudinit/net/__init__.py | 7
-rw-r--r--  cloudinit/net/network_state.py | 8
-rw-r--r--  cloudinit/reporting/handlers.py | 4
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 4
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 22
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py | 2
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 7
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 4
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 2
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 10
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 5
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 10
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 4
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 10
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 4
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 4
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 2
-rw-r--r--  cloudinit/sources/__init__.py | 4
-rw-r--r--  cloudinit/sources/helpers/azure.py | 2
-rw-r--r--  cloudinit/sources/helpers/digitalocean.py | 64
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py | 8
-rw-r--r--  cloudinit/stages.py | 33
-rw-r--r--  cloudinit/templater.py | 8
-rw-r--r--  cloudinit/url_helper.py | 11
-rw-r--r--  cloudinit/user_data.py | 11
-rw-r--r--  cloudinit/util.py | 33
-rw-r--r--  cloudinit/warnings.py | 4
-rw-r--r--  doc/examples/cloud-config-chef.txt | 72
-rw-r--r--  doc/examples/cloud-config-resolv-conf.txt | 4
-rw-r--r--  doc/examples/cloud-config-update-apt.txt | 7
-rw-r--r--  doc/merging.rst | 201
-rw-r--r--  doc/rtd/topics/examples.rst | 30
-rw-r--r--  doc/rtd/topics/merging.rst | 200
-rw-r--r--  doc/rtd/topics/modules.rst | 1
-rwxr-xr-x  setup.py | 4
-rw-r--r--  snapcraft.yaml | 21
-rw-r--r--  templates/sources.list.debian.tmpl | 6
-rw-r--r--  tests/cloud_tests/__main__.py | 2
-rw-r--r--  tests/cloud_tests/args.py | 2
-rw-r--r--  tests/cloud_tests/collect.py | 2
-rw-r--r--  tests/cloud_tests/configs/examples/install_run_chef_recipes.yaml | 77
-rw-r--r--  tests/cloud_tests/testcases/examples/install_run_chef_recipes.py | 17
-rw-r--r--  tests/cloud_tests/verify.py | 9
-rw-r--r--  tests/unittests/test_datasource/test_altcloud.py | 123
-rw-r--r--  tests/unittests/test_datasource/test_digitalocean.py | 46
-rw-r--r--  tests/unittests/test_handler/test_handler_yum_add_repo.py | 56
-rw-r--r--  tests/unittests/test_net.py | 30
-rw-r--r--  tests/unittests/test_util.py | 33
-rw-r--r--  tools/21-cloudinit.conf | 2
-rwxr-xr-x  tools/ds-identify | 23
-rwxr-xr-x  tools/mock-meta.py | 6

83 files changed, 909 insertions(+), 649 deletions(-)
diff --git a/.gitignore b/.gitignore
index 3946ec76..b0500a68 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,7 @@ __pycache__
.tox
.coverage
doc/rtd_html
+parts
+prime
+stage
+*.snap
diff --git a/.pylintrc b/.pylintrc
index b8cda03c..b160ce7b 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -6,14 +6,35 @@ jobs=4
[MESSAGES CONTROL]
-# Errors only
-disable=C, F, I, R, W
+# Errors and warnings with some filtered:
+# W0105(pointless-string-statement)
+# W0107(unnecessary-pass)
+# W0201(attribute-defined-outside-init)
+# W0212(protected-access)
+# W0221(arguments-differ)
+# W0222(signature-differs)
+# W0223(abstract-method)
+# W0231(super-init-not-called)
+# W0311(bad-indentation)
+# W0511(fixme)
+# W0602(global-variable-not-assigned)
+# W0603(global-statement)
+# W0611(unused-import)
+# W0612(unused-variable)
+# W0613(unused-argument)
+# W0621(redefined-outer-name)
+# W0622(redefined-builtin)
+# W0631(undefined-loop-variable)
+# W0703(broad-except)
+# W1401(anomalous-backslash-in-string)
+
+disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0612, W0613, W0621, W0622, W0631, W0703, W1401
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
-output-format=colorized
+output-format=parseable
# Just the errors please, no full report
reports=no
@@ -25,7 +46,7 @@ reports=no
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=six.moves,pkg_resources
+ignored-modules=six.moves,pkg_resources,httplib,http.client
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
diff --git a/HACKING.rst b/HACKING.rst
index caee7ac1..93e3f424 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -13,6 +13,9 @@ Do these things once
If you have already signed it as an individual, your Launchpad user will be listed in the `contributor-agreement-canonical`_ group. Unfortunately there is no easy way to check if an organization or company you are doing work for has signed. If you are unsure or have questions, email `Scott Moser <mailto:scott.moser@canonical.com>`_ or ping smoser in ``#cloud-init`` channel via freenode.
+ When prompted for 'Project contact' or 'Canonical Project Manager' enter
+ 'Scott Moser'.
+
* Clone the upstream `repository`_ on Launchpad::
git clone https://git.launchpad.net/cloud-init
diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py
index fb2df8d5..587b9945 100644
--- a/cloudinit/atomic_helper.py
+++ b/cloudinit/atomic_helper.py
@@ -2,13 +2,23 @@
import json
import os
+import stat
import tempfile
_DEF_PERMS = 0o644
-def write_file(filename, content, mode=_DEF_PERMS, omode="wb"):
+def write_file(filename, content, mode=_DEF_PERMS,
+ omode="wb", copy_mode=False):
# open filename in mode 'omode', write content, set permissions to 'mode'
+
+ if copy_mode:
+ try:
+ file_stat = os.stat(filename)
+ mode = stat.S_IMODE(file_stat.st_mode)
+ except OSError:
+ pass
+
tf = None
try:
tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
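
The copy_mode flag added above lets a caller keep an existing file's permission bits rather than forcing the 0o644 default. A minimal sketch of the behavior, with a hypothetical path and content:

    from cloudinit.atomic_helper import write_file

    # With copy_mode=True, write_file() reuses the target's current
    # permission bits (stat.S_IMODE(os.stat(path).st_mode)); if the
    # target does not exist yet, it falls back to the 0o644 default.
    write_file("/etc/ssh/sshd_config", b"PasswordAuthentication no\n",
               copy_mode=True)

The cc_set_passwords hunk later in this diff relies on exactly this to avoid loosening the mode of sshd_config when rewriting it.
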
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index b93a42ea..d8a9fc86 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -56,7 +56,8 @@ class Cloud(object):
def get_template_filename(self, name):
fn = self.paths.template_tpl % (name)
if not os.path.isfile(fn):
- LOG.warn("No template found at %s for template named %s", fn, name)
+ LOG.warning("No template found at %s for template named %s",
+ fn, name)
return None
return fn
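
Most hunks in this commit follow the two patterns visible above: Logger.warn() is a deprecated alias of Logger.warning(), and string interpolation is handed to the logging framework as lazy %-style arguments instead of being done eagerly. A short illustration:

    import logging

    LOG = logging.getLogger(__name__)

    # Old: deprecated alias, and the message is formatted even when
    # the record is filtered out by the log level.
    LOG.warn("No template found at %s" % "/some/path")

    # New: supported spelling; the logger formats the message only if
    # the record is actually emitted.
    LOG.warning("No template found at %s", "/some/path")
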
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index fd221323..26cc2654 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -405,7 +405,8 @@ def main_init(name, args):
errfmt_orig = errfmt
(outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
if outfmt_orig != outfmt or errfmt_orig != errfmt:
- LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
+ LOG.warning("Stdout, stderr changing to (%s, %s)",
+ outfmt, errfmt)
(outfmt, errfmt) = util.fixup_output(mods.cfg, name)
except Exception:
util.logexc(LOG, "Failed to re-adjust output redirection!")
@@ -427,15 +428,15 @@ def di_report_warn(datasource, cfg):
dicfg = cfg.get('di_report', {})
if not isinstance(dicfg, dict):
- LOG.warn("di_report config not a dictionary: %s", dicfg)
+ LOG.warning("di_report config not a dictionary: %s", dicfg)
return
dslist = dicfg.get('datasource_list')
if dslist is None:
- LOG.warn("no 'datasource_list' found in di_report.")
+ LOG.warning("no 'datasource_list' found in di_report.")
return
elif not isinstance(dslist, list):
- LOG.warn("di_report/datasource_list not a list: %s", dslist)
+ LOG.warning("di_report/datasource_list not a list: %s", dslist)
return
# ds.__module__ is like cloudinit.sources.DataSourceName
@@ -444,8 +445,8 @@ def di_report_warn(datasource, cfg):
if modname.startswith(sources.DS_PREFIX):
modname = modname[len(sources.DS_PREFIX):]
else:
- LOG.warn("Datasource '%s' came from unexpected module '%s'.",
- datasource, modname)
+ LOG.warning("Datasource '%s' came from unexpected module '%s'.",
+ datasource, modname)
if modname in dslist:
LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s",
@@ -571,10 +572,10 @@ def main_single(name, args):
mod_args,
mod_freq)
if failures:
- LOG.warn("Ran %s but it failed!", mod_name)
+ LOG.warning("Ran %s but it failed!", mod_name)
return 1
elif not which_ran:
- LOG.warn("Did not run %s, does it exist?", mod_name)
+ LOG.warning("Did not run %s, does it exist?", mod_name)
return 1
else:
# Guess it worked
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
index 57e2a44d..0ef9a748 100644
--- a/cloudinit/config/__init__.py
+++ b/cloudinit/config/__init__.py
@@ -37,7 +37,7 @@ def fixup_module(mod, def_freq=PER_INSTANCE):
else:
freq = mod.frequency
if freq and freq not in FREQUENCIES:
- LOG.warn("Module %s has an unknown frequency %s", mod, freq)
+ LOG.warning("Module %s has an unknown frequency %s", mod, freq)
if not hasattr(mod, 'distros'):
setattr(mod, 'distros', [])
if not hasattr(mod, 'osfamilies'):
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 06804e85..7e751776 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -347,8 +347,8 @@ def dpkg_reconfigure(packages, target=None):
unhandled.append(pkg)
if len(unhandled):
- LOG.warn("The following packages were installed and preseeded, "
- "but cannot be unconfigured: %s", unhandled)
+ LOG.warning("The following packages were installed and preseeded, "
+ "but cannot be unconfigured: %s", unhandled)
if len(to_config):
util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
@@ -441,7 +441,7 @@ def rename_apt_lists(new_mirrors, target=None):
os.rename(filename, newname)
except OSError:
# since this is a best effort task, warn with but don't fail
- LOG.warn("Failed to rename apt list:", exc_info=True)
+ LOG.warning("Failed to rename apt list:", exc_info=True)
def mirror_to_placeholder(tmpl, mirror, placeholder):
@@ -449,7 +449,7 @@ def mirror_to_placeholder(tmpl, mirror, placeholder):
replace the specified mirror in a template with a placeholder string
Checks for existence of the expected mirror and warns if not found"""
if mirror not in tmpl:
- LOG.warn("Expected mirror '%s' not found in: %s", mirror, tmpl)
+ LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl)
return tmpl.replace(mirror, placeholder)
@@ -525,7 +525,8 @@ def generate_sources_list(cfg, release, mirrors, cloud):
if not template_fn:
template_fn = cloud.get_template_filename('sources.list')
if not template_fn:
- LOG.warn("No template found, not rendering /etc/apt/sources.list")
+ LOG.warning("No template found, "
+ "not rendering /etc/apt/sources.list")
return
tmpl = util.load_file(template_fn)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index f39f0815..f49386e3 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -181,7 +181,7 @@ def update_fs_setup_devices(disk_setup, tformer):
# update it with the response from 'tformer'
for definition in disk_setup:
if not isinstance(definition, dict):
- LOG.warn("entry in disk_setup not a dict: %s", definition)
+ LOG.warning("entry in disk_setup not a dict: %s", definition)
continue
origname = definition.get('device')
@@ -279,7 +279,7 @@ def is_device_valid(name, partition=False):
try:
d_type = device_type(name)
except Exception:
- LOG.warn("Query against device %s failed" % name)
+ LOG.warning("Query against device %s failed", name)
return False
if partition and d_type == 'part':
@@ -372,7 +372,7 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None,
if not raw_device_used:
return (device, False)
- LOG.warn("Failed to find device during available device search.")
+ LOG.warning("Failed to find device during available device search.")
return (None, False)
@@ -638,7 +638,7 @@ def purge_disk(device):
if d['type'] not in ["disk", "crypt"]:
wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
try:
- LOG.info("Purging filesystem on /dev/%s" % d['name'])
+ LOG.info("Purging filesystem on /dev/%s", d['name'])
util.subp(wipefs_cmd)
except Exception:
raise Exception("Failed FS purge of /dev/%s" % d['name'])
@@ -700,7 +700,7 @@ def exec_mkpart_gpt(device, layout):
[SGDISK_CMD,
'-t', '{}:{}'.format(index, partition_type), device])
except Exception:
- LOG.warn("Failed to partition device %s" % device)
+ LOG.warning("Failed to partition device %s", device)
raise
read_parttbl(device)
@@ -736,7 +736,7 @@ def mkpart(device, definition):
# ensure that we get a real device rather than a symbolic link
device = os.path.realpath(device)
- LOG.debug("Checking values for %s definition" % device)
+ LOG.debug("Checking values for %s definition", device)
overwrite = definition.get('overwrite', False)
layout = definition.get('layout', False)
table_type = definition.get('table_type', 'mbr')
@@ -766,7 +766,7 @@ def mkpart(device, definition):
LOG.debug("Checking if device is safe to partition")
if not overwrite and (is_disk_used(device) or is_filesystem(device)):
- LOG.debug("Skipping partitioning on configured device %s" % device)
+ LOG.debug("Skipping partitioning on configured device %s", device)
return
LOG.debug("Checking for device size")
@@ -774,7 +774,7 @@ def mkpart(device, definition):
LOG.debug("Calculating partition layout")
part_definition = get_partition_layout(table_type, device_size, layout)
- LOG.debug(" Layout is: %s" % part_definition)
+ LOG.debug(" Layout is: %s", part_definition)
LOG.debug("Creating partition table on %s", device)
exec_mkpart(table_type, device, part_definition)
@@ -799,7 +799,7 @@ def lookup_force_flag(fs):
if fs.lower() in flags:
return flags[fs]
- LOG.warn("Force flag for %s is unknown." % fs)
+ LOG.warning("Force flag for %s is unknown.", fs)
return ''
@@ -858,7 +858,7 @@ def mkfs(fs_cfg):
LOG.debug("Device %s has required file system", device)
return
else:
- LOG.warn("Destroying filesystem on %s", device)
+ LOG.warning("Destroying filesystem on %s", device)
else:
LOG.debug("Device %s is cleared for formatting", device)
@@ -883,14 +883,14 @@ def mkfs(fs_cfg):
return
if not reuse and fs_replace and device:
- LOG.debug("Replacing file system on %s as instructed." % device)
+ LOG.debug("Replacing file system on %s as instructed.", device)
if not device:
LOG.debug("No device available that matches request. "
"Skipping fs creation for %s", fs_cfg)
return
elif not partition or str(partition).lower() == 'none':
- LOG.debug("Using the raw device to place filesystem %s on" % label)
+ LOG.debug("Using the raw device to place filesystem %s on", label)
else:
LOG.debug("Error in device identification handling.")
@@ -901,7 +901,7 @@ def mkfs(fs_cfg):
# Make sure the device is defined
if not device:
- LOG.warn("Device is not known: %s", device)
+ LOG.warning("Device is not known: %s", device)
return
# Check that we can create the FS
@@ -923,8 +923,8 @@ def mkfs(fs_cfg):
mkfs_cmd = util.which("mk%s" % fs_type)
if not mkfs_cmd:
- LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type,
- fs_type)
+ LOG.warning("Cannot create fstype '%s'. No mkfs.%s command",
+ fs_type, fs_type)
return
fs_cmd = [mkfs_cmd, device]
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index f0cda3d5..0a135bbe 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -64,7 +64,7 @@ def stop_update_start(service, config_file, content, systemd=False):
try:
return util.subp(cmd, capture=True)
except util.ProcessExecutionError as e:
- LOG.warn("failed: %s (%s): %s", service, cmd, e)
+ LOG.warning("failed: %s (%s): %s", service, cmd, e)
return False
stop_failed = not run(cmds['stop'], msg='stop %s' % service)
@@ -74,7 +74,7 @@ def stop_update_start(service, config_file, content, systemd=False):
ret = run(cmds['start'], msg='start %s' % service)
if ret and stop_failed:
- LOG.warn("success: %s started", service)
+ LOG.warning("success: %s started", service)
if 'enable' in cmds:
ret = run(cmds['enable'], msg='enable %s' % service)
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 089693e8..d2bc6e6c 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -252,9 +252,13 @@ def devent2dev(devent):
container = util.is_container()
# Ensure the path is a block device.
- if (dev == "/dev/root" and not os.path.exists(dev) and not container):
+ if (dev == "/dev/root" and not container):
dev = util.rootdev_from_cmdline(util.get_cmdline())
if dev is None:
+        if os.path.exists("/dev/root"):
+            # if /dev/root exists, but we failed to convert
+            # that to a "real" /dev/ path device, then return it.
+            return "/dev/root"
raise ValueError("Unable to find device '/dev/root'")
return dev
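
The reordering above resolves a mount source reported as /dev/root via the kernel command line before giving up. A simplified stand-in for util.rootdev_from_cmdline (the real helper also maps LABEL=/UUID= specs to /dev/disk/by-* paths):

    def rootdev_from_cmdline(cmdline):
        # Find the root= token, e.g. "root=/dev/vda1" or "root=vda1".
        for tok in cmdline.split():
            if tok.startswith("root="):
                dev = tok[len("root="):]
                return dev if dev.startswith("/dev/") else "/dev/" + dev
        return None

    print(rootdev_from_cmdline("BOOT_IMAGE=/vmlinuz root=vda1 ro"))
    # -> /dev/vda1
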
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 5b630f8b..f14a4fc5 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -216,8 +216,9 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
else:
pinfo[k] = v
- LOG.debug("suggest %(size)s swap for %(mem)s memory with '%(avail)s'"
- " disk given max=%(max_in)s [max=%(max)s]'" % pinfo)
+ LOG.debug("suggest %s swap for %s memory with '%s'"
+ " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'],
+ pinfo['avail'], pinfo['max_in'], pinfo['max'])
return size
@@ -266,7 +267,7 @@ def handle_swapcfg(swapcfg):
return None or (filename, size)
"""
if not isinstance(swapcfg, dict):
- LOG.warn("input for swap config was not a dict.")
+ LOG.warning("input for swap config was not a dict.")
return None
fname = swapcfg.get('filename', '/swap.img')
@@ -289,7 +290,8 @@ def handle_swapcfg(swapcfg):
return fname
LOG.debug("swap file %s existed, but not in /proc/swaps", fname)
except Exception:
- LOG.warn("swap file %s existed. Error reading /proc/swaps", fname)
+ LOG.warning("swap file %s existed. Error reading /proc/swaps",
+ fname)
return fname
try:
@@ -300,7 +302,7 @@ def handle_swapcfg(swapcfg):
return setup_swapfile(fname=fname, size=size, maxsize=maxsize)
except Exception as e:
- LOG.warn("failed to setup swap: %s", e)
+ LOG.warning("failed to setup swap: %s", e)
return None
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 9c5cd1fe..2548d1f1 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -77,7 +77,7 @@ def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
params['options'] = {}
params['flags'] = flags
- LOG.debug("Writing resolv.conf from template %s" % template_fn)
+ LOG.debug("Writing resolv.conf from template %s", template_fn)
templater.render_to_file(template_fn, target_fname, params)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 50316214..50ff9e35 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -252,7 +252,8 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):
for cur_pos, ent in enumerate(configs):
if isinstance(ent, dict):
if "content" not in ent:
- LOG.warn("No 'content' entry in config entry %s", cur_pos + 1)
+ LOG.warning("No 'content' entry in config entry %s",
+ cur_pos + 1)
continue
content = ent['content']
filename = ent.get("filename", def_fname)
@@ -262,7 +263,7 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):
filename = filename.strip()
if not filename:
- LOG.warn("Entry %s has an empty filename", cur_pos + 1)
+ LOG.warning("Entry %s has an empty filename", cur_pos + 1)
continue
filename = os.path.join(cfg_dir, filename)
@@ -389,7 +390,7 @@ def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
try:
lines.append(str(parse_remotes_line(line, name=name)))
except ValueError as e:
- LOG.warn("failed loading remote %s: %s [%s]", name, line, e)
+ LOG.warning("failed loading remote %s: %s [%s]", name, line, e)
if footer is not None:
lines.append(footer)
return '\n'.join(lines) + "\n"
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index eb0bdab0..bb24d57f 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -215,7 +215,8 @@ def handle(_name, cfg, cloud, log, args):
pw_auth))
lines = [str(l) for l in new_lines]
- util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))
+ util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines),
+ copy_mode=True)
try:
cmd = cloud.distro.init_cmd # Default service
diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py
index db511661..fe0cc73e 100644
--- a/cloudinit/config/cc_snap_config.py
+++ b/cloudinit/config/cc_snap_config.py
@@ -5,8 +5,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""
-Snappy
-------
+Snap Config
+-----------
**Summary:** snap_config module allows configuration of snapd.
This module uses the same ``snappy`` namespace for configuration but
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 6ea81b84..a9682f19 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -283,8 +283,8 @@ def handle(name, cfg, cloud, log, args):
render_snap_op(**pkg_op)
except Exception as e:
fails.append((pkg_op, e,))
- LOG.warn("'%s' failed for '%s': %s",
- pkg_op['op'], pkg_op['name'], e)
+ LOG.warning("'%s' failed for '%s': %s",
+ pkg_op['op'], pkg_op['name'], e)
# Default to disabling SSH
ssh_enabled = mycfg.get('ssh_enabled', "auto")
@@ -303,7 +303,7 @@ def handle(name, cfg, cloud, log, args):
LOG.debug("Enabling SSH, password authentication requested")
ssh_enabled = True
elif ssh_enabled not in (True, False):
- LOG.warn("Unknown value '%s' in ssh_enabled", ssh_enabled)
+ LOG.warning("Unknown value '%s' in ssh_enabled", ssh_enabled)
disable_enable_ssh(ssh_enabled)
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index ef8535ed..6a42f499 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -32,7 +32,10 @@ entry, the config entry will be skipped.
import os
-import configobj
+try:
+ from configparser import ConfigParser
+except ImportError:
+ from ConfigParser import ConfigParser
import six
from cloudinit import util
@@ -52,8 +55,8 @@ def _format_repo_value(val):
return str(int(val))
if isinstance(val, (list, tuple)):
# Can handle 'lists' in certain cases
- # See: http://bit.ly/Qqrf1t
- return "\n ".join([_format_repo_value(v) for v in val])
+ # See: https://linux.die.net/man/5/yum.conf
+ return "\n".join([_format_repo_value(v) for v in val])
if not isinstance(val, six.string_types):
return str(val)
return val
@@ -62,16 +65,19 @@ def _format_repo_value(val):
# TODO(harlowja): move to distro?
# See man yum.conf
def _format_repository_config(repo_id, repo_config):
- to_be = configobj.ConfigObj()
- to_be[repo_id] = {}
+ to_be = ConfigParser()
+ to_be.add_section(repo_id)
# Do basic translation of the items -> values
for (k, v) in repo_config.items():
# For now assume that people using this know
# the format of yum and don't verify keys/values further
- to_be[repo_id][k] = _format_repo_value(v)
- lines = to_be.write()
- lines.insert(0, "# Created by cloud-init on %s" % (util.time_rfc2822()))
- return "\n".join(lines)
+ to_be.set(repo_id, k, _format_repo_value(v))
+ to_be_stream = six.StringIO()
+ to_be.write(to_be_stream)
+ to_be_stream.seek(0)
+ lines = to_be_stream.readlines()
+ lines.insert(0, "# Created by cloud-init on %s\n" % (util.time_rfc2822()))
+ return "".join(lines)
def handle(name, cfg, _cloud, log, _args):
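
The configobj dependency is dropped here in favor of the standard-library ConfigParser, rendered through an in-memory stream. Roughly, with made-up repository values:

    import io
    from configparser import ConfigParser  # the hunk falls back to the
                                            # Python 2 ConfigParser module

    parser = ConfigParser()
    parser.add_section("epel-testing")
    parser.set("epel-testing", "enabled", "1")
    parser.set("epel-testing", "baseurl",
               "http://download.example.com/pub/epel/testing/5/$basearch")

    buf = io.StringIO()  # the hunk uses six.StringIO for py2/py3
    parser.write(buf)
    print("# Created by cloud-init\n" + buf.getvalue())
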
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 803ac74e..28650b88 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -143,9 +143,9 @@ class Distro(object):
def _apply_network_from_network_config(self, netconfig, bring_up=True):
distro = self.__class__
- LOG.warn("apply_network_config is not currently implemented "
- "for distribution '%s'. Attempting to use apply_network",
- distro)
+ LOG.warning("apply_network_config is not currently implemented "
+ "for distribution '%s'. Attempting to use apply_network",
+ distro)
header = '\n'.join([
"# Converted from network_config for distro %s" % distro,
"# Implementation of _write_network_config is needed."
@@ -335,7 +335,8 @@ class Distro(object):
try:
(_out, err) = util.subp(cmd)
if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+ LOG.warning("Running %s resulted in stderr output: %s",
+ cmd, err)
return True
except util.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
@@ -358,7 +359,7 @@ class Distro(object):
Add a user to the system using standard GNU tools
"""
if util.is_user(name):
- LOG.info("User %s already exists, skipping." % name)
+ LOG.info("User %s already exists, skipping.", name)
return
if 'create_groups' in kwargs:
@@ -520,9 +521,9 @@ class Distro(object):
keys = list(keys.values())
if keys is not None:
if not isinstance(keys, (tuple, list, set)):
- LOG.warn("Invalid type '%s' detected for"
- " 'ssh_authorized_keys', expected list,"
- " string, dict, or set.", type(keys))
+ LOG.warning("Invalid type '%s' detected for"
+ " 'ssh_authorized_keys', expected list,"
+ " string, dict, or set.", type(keys))
else:
keys = set(keys) or []
ssh_util.setup_user_keys(keys, name, options=None)
@@ -595,7 +596,7 @@ class Distro(object):
"#includedir %s" % (path), '']
sudoers_contents = "\n".join(lines)
util.append_file(sudo_base, sudoers_contents)
- LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base))
+ LOG.debug("Added '#includedir %s' to %s", path, sudo_base)
except IOError as e:
util.logexc(LOG, "Failed to write %s", sudo_base)
raise e
@@ -647,11 +648,11 @@ class Distro(object):
# Check if the group exists, and add it if it doesn't
if util.is_group(name):
- LOG.warn("Skipping creation of existing group '%s'" % name)
+ LOG.warning("Skipping creation of existing group '%s'", name)
else:
try:
util.subp(group_add_cmd)
- LOG.info("Created new group %s" % name)
+ LOG.info("Created new group %s", name)
except Exception:
util.logexc(LOG, "Failed to create group %s", name)
@@ -659,12 +660,12 @@ class Distro(object):
if len(members) > 0:
for member in members:
if not util.is_user(member):
- LOG.warn("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
+ LOG.warning("Unable to add group member '%s' to group '%s'"
+ "; user does not exist.", member, name)
continue
util.subp(['usermod', '-a', '-G', name, member])
- LOG.info("Added user '%s' to group '%s'" % (member, name))
+ LOG.info("Added user '%s' to group '%s'", member, name)
def _get_package_mirror_info(mirror_info, data_source=None,
@@ -708,7 +709,7 @@ def _get_package_mirror_info(mirror_info, data_source=None,
if found:
results[name] = found
- LOG.debug("filtered distro mirror info: %s" % results)
+ LOG.debug("filtered distro mirror info: %s", results)
return results
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 64b8c1fb..75d46201 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -83,7 +83,8 @@ class Distro(distros.Distro):
try:
(_out, err) = util.subp(cmd)
if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+ LOG.warning("Running %s resulted in stderr output: %s",
+ cmd, err)
except util.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
@@ -94,7 +95,8 @@ class Distro(distros.Distro):
try:
(_out, err) = util.subp(cmd)
if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+ LOG.warning("Running %s resulted in stderr output: %s",
+ cmd, err)
return True
except util.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 3f0f9d53..d06d46a6 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -143,8 +143,7 @@ class Distro(distros.Distro):
pkgs = []
e = os.environ.copy()
- # See: http://tiny.cc/kg91fw
- # Or: http://tiny.cc/mh91fw
+ # See: http://manpages.ubuntu.com/manpages/xenial/man7/debconf.7.html
e['DEBIAN_FRONTEND'] = 'noninteractive'
wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER)
@@ -224,6 +223,6 @@ def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
except Exception:
msg = bmsg + " %s exists, but could not be read." % path
- LOG.warn(msg)
+ LOG.warning(msg)
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index a70ee45b..183e4452 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -148,7 +148,7 @@ class Distro(distros.Distro):
def create_group(self, name, members):
group_add_cmd = ['pw', '-n', name]
if util.is_group(name):
- LOG.warn("Skipping creation of existing group '%s'", name)
+ LOG.warning("Skipping creation of existing group '%s'", name)
else:
try:
util.subp(group_add_cmd)
@@ -160,8 +160,8 @@ class Distro(distros.Distro):
if len(members) > 0:
for member in members:
if not util.is_user(member):
- LOG.warn("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
+ LOG.warning("Unable to add group member '%s' to group '%s'"
+ "; user does not exist.", member, name)
continue
try:
util.subp(['pw', 'usermod', '-n', name, '-G', member])
@@ -369,7 +369,7 @@ class Distro(distros.Distro):
# OS. This is just fine.
(_out, err) = util.subp(cmd, rcs=[0, 1])
if len(err):
- LOG.warn("Error running %s: %s", cmd, err)
+ LOG.warning("Error running %s: %s", cmd, err)
def install_packages(self, pkglist):
self.update_package_sources()
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 83fb56ff..0ad2f032 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -96,8 +96,8 @@ class Distro(distros.Distro):
try:
(_out, err) = util.subp(cmd)
if len(err):
- LOG.warn("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning("Running %s resulted in stderr output: %s",
+ cmd, err)
except util.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed",
cmd)
@@ -121,7 +121,8 @@ class Distro(distros.Distro):
try:
(_out, err) = util.subp(cmd)
if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+ LOG.warning("Running %s resulted in stderr output: %s",
+ cmd, err)
return True
except util.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
@@ -138,8 +139,8 @@ class Distro(distros.Distro):
try:
(_out, err) = util.subp(cmd)
if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd,
- err)
+ LOG.warning("Running %s resulted in stderr output: %s",
+ cmd, err)
except util.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
index 87f164be..64444581 100644
--- a/cloudinit/distros/parsers/hosts.py
+++ b/cloudinit/distros/parsers/hosts.py
@@ -10,8 +10,8 @@ from cloudinit.distros.parsers import chop_comment
# See: man hosts
-# or http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts
-# or http://tinyurl.com/6lmox3
+# or https://linux.die.net/man/5/hosts
+# or https://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/configtuning-configfiles.html # noqa
class HostsConf(object):
def __init__(self, text):
self._text = text
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index d1f8a042..a62055ae 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -81,9 +81,9 @@ class ResolvConf(object):
if len(new_ns) == len(current_ns):
return current_ns
if len(current_ns) >= 3:
- LOG.warn("ignoring nameserver %r: adding would "
- "exceed the maximum of "
- "'3' name servers (see resolv.conf(5))" % (ns))
+ LOG.warning("ignoring nameserver %r: adding would "
+ "exceed the maximum of "
+ "'3' name servers (see resolv.conf(5))", ns)
return current_ns[:3]
self._remove_option('nameserver')
for n in new_ns:
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 372c7d0f..1fecb619 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -28,7 +28,7 @@ def _make_sysconfig_bool(val):
class Distro(distros.Distro):
- # See: http://tiny.cc/6r99fw
+ # See: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/sec-Network_Configuration_Using_sysconfig_Files.html # noqa
clock_conf_fn = "/etc/sysconfig/clock"
locale_conf_fn = '/etc/sysconfig/i18n'
systemd_locale_conf_fn = '/etc/locale.conf'
@@ -130,8 +130,8 @@ class Distro(distros.Distro):
rhel_util.update_sysconfig_file(out_fn, host_cfg)
def _select_hostname(self, hostname, fqdn):
- # See: http://bit.ly/TwitgL
# Should be fqdn if we can use it
+ # See: https://www.centos.org/docs/5/html/Deployment_Guide-en-US/ch-sysconfig.html#s2-sysconfig-network # noqa
if fqdn:
return fqdn
return hostname
diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py
index 53a0eafb..9378dd78 100755
--- a/cloudinit/distros/ug_util.py
+++ b/cloudinit/distros/ug_util.py
@@ -214,8 +214,8 @@ def normalize_users_groups(cfg, distro):
'name': old_user,
}
if not isinstance(old_user, dict):
- LOG.warn(("Format for 'user' key must be a string or "
- "dictionary and not %s"), type_utils.obj_name(old_user))
+ LOG.warning(("Format for 'user' key must be a string or dictionary"
+ " and not %s"), type_utils.obj_name(old_user))
old_user = {}
# If no old user format, then assume the distro
@@ -227,9 +227,9 @@ def normalize_users_groups(cfg, distro):
try:
distro_user_config = distro.get_default_user()
except NotImplementedError:
- LOG.warn(("Distro has not implemented default user "
- "access. No distribution provided default user"
- " will be normalized."))
+ LOG.warning(("Distro has not implemented default user "
+ "access. No distribution provided default user"
+ " will be normalized."))
# Merge the old user (which may just be an empty dict when not
# present with the distro provided default user configuration so
@@ -239,9 +239,9 @@ def normalize_users_groups(cfg, distro):
base_users = cfg.get('users', [])
if not isinstance(base_users, (list, dict) + six.string_types):
- LOG.warn(("Format for 'users' key must be a comma separated string"
- " or a dictionary or a list and not %s"),
- type_utils.obj_name(base_users))
+ LOG.warning(("Format for 'users' key must be a comma separated string"
+ " or a dictionary or a list and not %s"),
+ type_utils.obj_name(base_users))
base_users = []
if old_user:
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 13691549..723d6bd6 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -38,8 +38,8 @@ class MetadataLeafDecoder(object):
# Assume it's json, unless it fails parsing...
return json.loads(blob)
except (ValueError, TypeError) as e:
- LOG.warn("Field %s looked like a json object, but it was"
- " not: %s", field, e)
+ LOG.warning("Field %s looked like a json object, but it"
+ " was not: %s", field, e)
if blob.find("\n") != -1:
return blob.splitlines()
return blob
@@ -125,7 +125,8 @@ class MetadataMaterializer(object):
joined.update(child_contents)
for field in leaf_contents.keys():
if field in joined:
- LOG.warn("Duplicate key found in results from %s", base_url)
+ LOG.warning("Duplicate key found in results from %s",
+ base_url)
else:
joined[field] = leaf_contents[field]
return joined
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index 70c620de..d58d73e0 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -43,7 +43,7 @@ def delete_key(key):
util.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
capture=True)
except util.ProcessExecutionError as error:
- LOG.warn('Failed delete key "%s": %s', key, error)
+ LOG.warning('Failed delete key "%s": %s', key, error)
def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'):
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 1362db6e..c3576c04 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -246,7 +246,7 @@ def fixup_handler(mod, def_freq=PER_INSTANCE):
else:
freq = mod.frequency
if freq and freq not in FREQUENCIES:
- LOG.warn("Handler %s has an unknown frequency %s", mod, freq)
+ LOG.warning("Handler %s has an unknown frequency %s", mod, freq)
return mod
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 7435d58d..f01021aa 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -126,11 +126,11 @@ class FileSemaphores(object):
# this case could happen if the migrator module hadn't run yet
# but the item had run before we did canon_sem_name.
if cname != name and os.path.exists(self._get_path(name, freq)):
- LOG.warn("%s has run without canonicalized name [%s].\n"
- "likely the migrator has not yet run. "
- "It will run next boot.\n"
- "run manually with: cloud-init single --name=migrator"
- % (name, cname))
+ LOG.warning("%s has run without canonicalized name [%s].\n"
+ "likely the migrator has not yet run. "
+ "It will run next boot.\n"
+ "run manually with: cloud-init single --name=migrator",
+ name, cname)
return True
return False
@@ -375,8 +375,8 @@ class Paths(object):
def get_ipath(self, name=None):
ipath = self._get_ipath(name)
if not ipath:
- LOG.warn(("No per instance data available, "
- "is there an datasource/iid set?"))
+ LOG.warning(("No per instance data available, "
+ "is there a datasource/iid set?"))
return None
else:
return ipath
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 346be5d3..a072a8d6 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -86,6 +86,11 @@ def is_bridge(devname):
return os.path.exists(sys_dev_path(devname, "bridge"))
+def is_vlan(devname):
+ uevent = str(read_sys_net_safe(devname, "uevent"))
+ return 'DEVTYPE=vlan' in uevent.splitlines()
+
+
def is_connected(devname):
# is_connected isn't really as simple as that. 2 is
# 'physically connected'. 3 is 'not connected'. but a wlan interface will
@@ -393,6 +398,8 @@ def get_interfaces_by_mac():
continue
if is_bridge(name):
continue
+ if is_vlan(name):
+ continue
mac = get_interface_mac(name)
# some devices may not have a mac (tun0)
if not mac:
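
The new is_vlan() keys off the DEVTYPE field the kernel exposes in a device's uevent file, so 802.1q sub-interfaces (which share the parent NIC's MAC) are now skipped when building the MAC-to-name map. A standalone sketch of the same check, with an example interface name:

    def is_vlan(devname):
        # /sys/class/net/<dev>/uevent contains "DEVTYPE=vlan" for
        # VLAN sub-interfaces such as eth0.100.
        try:
            with open("/sys/class/net/%s/uevent" % devname) as f:
                return "DEVTYPE=vlan" in f.read().splitlines()
        except (IOError, OSError):
            return False

    print(is_vlan("eth0.100"))
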
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 692b6007..db3c3579 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -242,8 +242,8 @@ class NetworkStateInterpreter(object):
if not skip_broken:
raise
else:
- LOG.warn("Skipping invalid command: %s", command,
- exc_info=True)
+ LOG.warning("Skipping invalid command: %s", command,
+ exc_info=True)
LOG.debug(self.dump_network_state())
def parse_config_v2(self, skip_broken=True):
@@ -262,8 +262,8 @@ class NetworkStateInterpreter(object):
if not skip_broken:
raise
else:
- LOG.warn("Skipping invalid command: %s", command,
- exc_info=True)
+ LOG.warning("Skipping invalid command: %s", command,
+ exc_info=True)
LOG.debug(self.dump_network_state())
@ensure_command_keys(['name'])
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index b90bc191..4066076c 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -37,7 +37,7 @@ class LogHandler(ReportingHandler):
try:
level = getattr(logging, level.upper())
except Exception:
- LOG.warn("invalid level '%s', using WARN", input_level)
+ LOG.warning("invalid level '%s', using WARN", input_level)
level = logging.WARN
self.level = level
@@ -82,7 +82,7 @@ class WebHookHandler(ReportingHandler):
timeout=self.timeout,
retries=self.retries, ssl_details=self.ssl_details)
except Exception:
- LOG.warn("failed posting event: %s" % event.as_string())
+ LOG.warning("failed posting event: %s", event.as_string())
available_handlers = DictRegistry()
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 8528fa10..ed1d691a 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -181,7 +181,7 @@ class DataSourceAltCloud(sources.DataSource):
try:
cmd = CMD_PROBE_FLOPPY
(cmd_out, _err) = util.subp(cmd)
- LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
+ LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
except ProcessExecutionError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
return False
@@ -196,7 +196,7 @@ class DataSourceAltCloud(sources.DataSource):
cmd = CMD_UDEVADM_SETTLE
cmd.append('--exit-if-exists=' + floppy_dev)
(cmd_out, _err) = util.subp(cmd)
- LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
+ LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
except ProcessExecutionError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
return False
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 48a3e1df..04358b73 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -116,7 +116,7 @@ class DataSourceAzureNet(sources.DataSource):
# the metadata and "bounce" the network to force DDNS to update via
# dhclient
azure_hostname = self.metadata.get('local-hostname')
- LOG.debug("Hostname in metadata is {}".format(azure_hostname))
+ LOG.debug("Hostname in metadata is %s", azure_hostname)
hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
with temporary_hostname(azure_hostname, self.ds_cfg,
@@ -132,7 +132,7 @@ class DataSourceAzureNet(sources.DataSource):
cfg=cfg,
prev_hostname=previous_hostname)
except Exception as e:
- LOG.warn("Failed publishing hostname: %s", e)
+ LOG.warning("Failed publishing hostname: %s", e)
util.logexc(LOG, "handling set_hostname failed")
def get_metadata_from_agent(self):
@@ -168,7 +168,7 @@ class DataSourceAzureNet(sources.DataSource):
func=wait_for_files,
args=(fp_files,))
if len(missing):
- LOG.warn("Did not find files, but going on: %s", missing)
+ LOG.warning("Did not find files, but going on: %s", missing)
metadata = {}
metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
@@ -199,7 +199,7 @@ class DataSourceAzureNet(sources.DataSource):
except BrokenAzureDataSource as exc:
raise exc
except util.MountFailedError:
- LOG.warn("%s was not mountable", cdev)
+ LOG.warning("%s was not mountable", cdev)
continue
(md, self.userdata_raw, cfg, files) = ret
@@ -331,8 +331,8 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
log_pre="Azure ephemeral disk: ")
if missing:
- LOG.warn("ephemeral device '%s' did not appear after %d seconds.",
- devpath, maxwait)
+ LOG.warning("ephemeral device '%s' did not appear after %d seconds.",
+ devpath, maxwait)
return
result = False
@@ -342,7 +342,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
else:
result, msg = can_dev_be_reformatted(devpath)
- LOG.debug("reformattable=%s: %s" % (result, msg))
+ LOG.debug("reformattable=%s: %s", result, msg)
if not result:
return
@@ -355,7 +355,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
LOG.debug(bmsg + " removed.")
except Exception as e:
# python3 throws FileNotFoundError, python2 throws OSError
- LOG.warn(bmsg + ": remove failed! (%s)" % e)
+ LOG.warning(bmsg + ": remove failed! (%s)", e)
else:
LOG.debug(bmsg + " did not exist.")
return
@@ -405,7 +405,7 @@ def pubkeys_from_crt_files(flist):
errors.append(fname)
if errors:
- LOG.warn("failed to convert the crt files to pubkey: %s", errors)
+ LOG.warning("failed to convert the crt files to pubkey: %s", errors)
return pubkeys
@@ -427,8 +427,8 @@ def wait_for_files(flist, maxwait=60, naplen=.5, log_pre=""):
time.sleep(naplen)
waited += naplen
- LOG.warn("%sStill missing files after %s seconds: %s",
- log_pre, maxwait, need)
+ LOG.warning("%sStill missing files after %s seconds: %s",
+ log_pre, maxwait, need)
return need
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index ffc23e3d..19df16b1 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -43,7 +43,7 @@ class DataSourceCloudSigma(sources.DataSource):
LOG.debug("detected hypervisor as %s", sys_product_name)
return 'cloudsigma' in sys_product_name.lower()
- LOG.warn("failed to query dmi data for system product name")
+ LOG.warning("failed to query dmi data for system product name")
return False
def get_data(self):
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index b0ab275c..0188d894 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -178,9 +178,10 @@ def get_default_gateway():
def get_dhclient_d():
# find lease files directory
- supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp"]
+ supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp",
+ "/var/lib/NetworkManager"]
for d in supported_dirs:
- if os.path.exists(d):
+ if os.path.exists(d) and len(os.listdir(d)) > 0:
LOG.debug("Using %s lease directory", d)
return d
return None
@@ -207,8 +208,8 @@ def get_latest_lease():
def get_vr_address():
# Get the address of the virtual router via dhcp leases
- # see http://bit.ly/T76eKC for documentation on the virtual router.
# If no virtual router is detected, fallback on default gateway.
+ # See http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/4.8/virtual_machines/user-data.html # noqa
lease_file = get_latest_lease()
if not lease_file:
LOG.debug("No lease file found, using default gateway")
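
The lease-directory probe now also considers NetworkManager's lease location, and a candidate directory only counts if it exists and is non-empty. Sketch:

    import os

    def get_dhclient_d(dirs=("/var/lib/dhclient", "/var/lib/dhcp",
                             "/var/lib/NetworkManager")):
        # skip directories that exist but hold no lease files
        for d in dirs:
            if os.path.isdir(d) and os.listdir(d):
                return d
        return None
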
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 46dd89e0..ef374f3f 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -127,7 +127,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
try:
self.vendordata_raw = sources.convert_vendordata(vd)
except ValueError as e:
- LOG.warn("Invalid content in vendor-data: %s", e)
+ LOG.warning("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
# network_config is an /etc/network/interfaces formatted file and is
@@ -190,7 +190,7 @@ def on_first_boot(data, distro=None, network=True):
if network:
net_conf = data.get("network_config", '')
if net_conf and distro:
- LOG.warn("Updating network interfaces from config drive")
+ LOG.warning("Updating network interfaces from config drive")
distro.apply_network(net_conf)
write_injected_files(data.get('files'))
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index d052c4c3..5e7e66be 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -51,7 +51,7 @@ class DataSourceDigitalOcean(sources.DataSource):
if not is_do:
return False
- LOG.info("Running on digital ocean. droplet_id=%s" % droplet_id)
+ LOG.info("Running on digital ocean. droplet_id=%s", droplet_id)
ipv4LL_nic = None
if self.use_ip4LL:
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 6f01a139..2f9c7edf 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -125,7 +125,7 @@ class DataSourceEc2(sources.DataSource):
if len(filtered):
mdurls = filtered
else:
- LOG.warn("Empty metadata url list! using default list")
+ LOG.warning("Empty metadata url list! using default list")
mdurls = self.metadata_urls
urls = []
@@ -232,7 +232,7 @@ def read_strict_mode(cfgval, default):
try:
return parse_strict_mode(cfgval)
except ValueError as e:
- LOG.warn(e)
+ LOG.warning(e)
return default
@@ -270,7 +270,7 @@ def warn_if_necessary(cfgval, cfg):
try:
mode, sleep = parse_strict_mode(cfgval)
except ValueError as e:
- LOG.warn(e)
+ LOG.warning(e)
return
if mode == "false":
@@ -304,8 +304,8 @@ def identify_platform():
if result:
return result
except Exception as e:
- LOG.warn("calling %s with %s raised exception: %s",
- checker, data, e)
+ LOG.warning("calling %s with %s raised exception: %s",
+ checker, data, e)
def _collect_platform_data():
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 637c9505..e9afda9c 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -98,7 +98,7 @@ class DataSourceGCE(sources.DataSource):
if not running_on_gce:
LOG.debug(msg, mkey)
else:
- LOG.warn(msg, mkey)
+ LOG.warning(msg, mkey)
return False
self.metadata[mkey] = value
@@ -116,7 +116,8 @@ class DataSourceGCE(sources.DataSource):
self.metadata['user-data'] = b64decode(
self.metadata['user-data'])
else:
- LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
+ LOG.warning('unknown user-data-encoding: %s, ignoring',
+ encoding)
return running_on_gce
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 41179b02..77df5a51 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -71,7 +71,7 @@ class DataSourceMAAS(sources.DataSource):
except MAASSeedDirNone:
pass
except MAASSeedDirMalformed as exc:
- LOG.warn("%s was malformed: %s" % (self.seed_dir, exc))
+ LOG.warning("%s was malformed: %s", self.seed_dir, exc)
raise
# If there is no metadata_url, then we're not configured
@@ -107,7 +107,7 @@ class DataSourceMAAS(sources.DataSource):
try:
self.vendordata_raw = sources.convert_vendordata(vd)
except ValueError as e:
- LOG.warn("Invalid content in vendor-data: %s", e)
+ LOG.warning("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
def wait_for_metadata_service(self, url):
@@ -126,7 +126,7 @@ class DataSourceMAAS(sources.DataSource):
if timeout in mcfg:
timeout = int(mcfg.get("timeout", timeout))
except Exception:
- LOG.warn("Failed to get timeout, using %s" % timeout)
+ LOG.warning("Failed to get timeout, using %s", timeout)
starttime = time.time()
if url.endswith("/"):
@@ -190,8 +190,8 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
else:
md[path] = util.decode_binary(resp.contents)
else:
- LOG.warn(("Fetching from %s resulted in"
- " an invalid http code %s"), url, resp.code)
+ LOG.warning(("Fetching from %s resulted in"
+ " an invalid http code %s"), url, resp.code)
except url_helper.UrlError as e:
if e.code == 404 and not optional:
raise MAASSeedDirMalformed(
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 5924b828..c68f6b8c 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -104,8 +104,8 @@ class DataSourceNoCloud(sources.DataSource):
pp2d_kwargs)
except ValueError as e:
if dev in label_list:
- LOG.warn("device %s with label=%s not a"
- "valid seed.", dev, label)
+ LOG.warning("device %s with label=%s not a "
+ "valid seed.", dev, label)
continue
mydata = _merge_new_seed(mydata, seeded)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index d70784ac..f20c9a65 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -225,12 +225,12 @@ def get_max_wait_from_cfg(cfg):
try:
max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait))
except ValueError:
- LOG.warn("Failed to get '%s', using %s",
- max_wait_cfg_option, default_max_wait)
+ LOG.warning("Failed to get '%s', using %s",
+ max_wait_cfg_option, default_max_wait)
if max_wait <= 0:
- LOG.warn("Invalid value '%s' for '%s', using '%s' instead",
- max_wait, max_wait_cfg_option, default_max_wait)
+ LOG.warning("Invalid value '%s' for '%s', using '%s' instead",
+ max_wait, max_wait_cfg_option, default_max_wait)
max_wait = default_max_wait
return max_wait
@@ -355,7 +355,7 @@ def transport_iso9660(require_iso=True):
try:
(fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype)
except util.MountFailedError:
- LOG.debug("%s not mountable as iso9660" % fullp)
+ LOG.debug("%s not mountable as iso9660", fullp)
continue
if contents is not False:
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index cd75e6ea..5fdac192 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -64,7 +64,7 @@ class DataSourceOpenNebula(sources.DataSource):
except BrokenContextDiskDir as exc:
raise exc
except util.MountFailedError:
- LOG.warn("%s was not mountable" % cdev)
+ LOG.warning("%s was not mountable", cdev)
if results:
seed = cdev
@@ -381,7 +381,7 @@ def read_context_disk_dir(source_dir, asuser=None):
try:
results['userdata'] = util.b64d(results['userdata'])
except TypeError:
- LOG.warn("Failed base64 decoding of userdata")
+ LOG.warning("Failed base64 decoding of userdata")
# generate static /etc/network/interfaces
# only if there are any required context variables
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index e1ea21f8..f0a6bfce 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -73,7 +73,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
if len(filtered):
urls = filtered
else:
- LOG.warn("Empty metadata url list! using default list")
+ LOG.warning("Empty metadata url list! using default list")
urls = [DEF_MD_URL]
md_urls = []
@@ -137,7 +137,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
try:
self.vendordata_raw = sources.convert_vendordata(vd)
except ValueError as e:
- LOG.warn("Invalid content in vendor-data: %s", e)
+ LOG.warning("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
return True
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 5e668947..6c6902fd 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -555,7 +555,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
val = base64.b64decode(val.encode()).decode()
# Bogus input produces different errors in Python 2 and 3
except (TypeError, binascii.Error):
- LOG.warn("Failed base64 decoding key '%s': %s", key, val)
+ LOG.warning("Failed base64 decoding key '%s': %s", key, val)
if strip:
val = val.strip()
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 5c99437e..c3ce36d6 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -237,8 +237,8 @@ class DataSource(object):
if candidate in valid:
return candidate
else:
- LOG.warn("invalid dsmode '%s', using default=%s",
- candidate, default)
+ LOG.warning("invalid dsmode '%s', using default=%s",
+ candidate, default)
return default
return default
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index f32dac9a..6e01aa47 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -289,7 +289,7 @@ class WALinuxAgentShim(object):
LOG.debug("Unable to find endpoint in dhclient logs. "
" Falling back to check lease files")
if fallback_lease_file is None:
- LOG.warn("No fallback lease file was specified.")
+ LOG.warning("No fallback lease file was specified.")
value = None
else:
LOG.debug("Looking for endpoint in lease file %s",
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index 72f7bde4..257989e8 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -23,11 +23,8 @@ def assign_ipv4_link_local(nic=None):
"""
if not nic:
- for cdev in sorted(cloudnet.get_devicelist()):
- if cloudnet.is_physical(cdev):
- nic = cdev
- LOG.debug("assigned nic '%s' for link-local discovery", nic)
- break
+ nic = get_link_local_nic()
+ LOG.debug("selected interface '%s' for reading metadata", nic)
if not nic:
raise RuntimeError("unable to find interfaces to access the"
@@ -57,6 +54,13 @@ def assign_ipv4_link_local(nic=None):
return nic
+def get_link_local_nic():
+ nics = [f for f in cloudnet.get_devicelist() if cloudnet.is_physical(f)]
+ if not nics:
+ return None
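+    # prefer the interface the kernel enumerated first (lowest ifindex)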
+ return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
+
+
def del_ipv4_link_local(nic=None):
"""Remove the ip4LL address. While this is not necessary, the ip4LL
address is extraneous and confusing to users.
@@ -107,15 +111,12 @@ def convert_network_configuration(config, dns_servers):
}
"""
- def _get_subnet_part(pcfg, nameservers=None):
+ def _get_subnet_part(pcfg):
subpart = {'type': 'static',
'control': 'auto',
'address': pcfg.get('ip_address'),
'gateway': pcfg.get('gateway')}
- if nameservers:
- subpart['dns_nameservers'] = nameservers
-
if ":" in pcfg.get('ip_address'):
subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'),
pcfg.get('cidr'))
@@ -124,27 +125,31 @@ def convert_network_configuration(config, dns_servers):
return subpart
- all_nics = []
- for k in ('public', 'private'):
- if k in config:
- all_nics.extend(config[k])
-
- macs_to_nics = cloudnet.get_interfaces_by_mac()
nic_configs = []
+ macs_to_nics = cloudnet.get_interfaces_by_mac()
+ LOG.debug("nic mapping: %s", macs_to_nics)
- for nic in all_nics:
+ for n in config:
+ nic = config[n][0]
+ LOG.debug("considering %s", nic)
mac_address = nic.get('mac')
+ if mac_address not in macs_to_nics:
+ raise RuntimeError("Did not find network interface on system "
+ "with mac '%s'. Cannot apply configuration: %s"
+ % (mac_address, nic))
+
sysfs_name = macs_to_nics.get(mac_address)
nic_type = nic.get('type', 'unknown')
- # Note: the entry 'public' above contains a list, but
- # the list will only ever have one nic inside it per digital ocean.
- # If it ever had more than one nic, then this code would
- # assign all 'public' the same name.
- if_name = NIC_MAP.get(nic_type, sysfs_name)
- LOG.debug("mapped %s interface to %s, assigning name of %s",
- mac_address, sysfs_name, if_name)
+ if_name = NIC_MAP.get(nic_type, sysfs_name)
+ if if_name != sysfs_name:
+ LOG.debug("Found %s interface '%s' on '%s', assigned name of '%s'",
+ nic_type, mac_address, sysfs_name, if_name)
+ else:
+ msg = ("Found interface '%s' on '%s', which is not a public "
+ "or private interface. Using default system naming.")
+ LOG.debug(msg, mac_address, sysfs_name)
ncfg = {'type': 'physical',
'mac_address': mac_address,
@@ -157,13 +162,8 @@ def convert_network_configuration(config, dns_servers):
continue
sub_part = _get_subnet_part(raw_subnet)
- if nic_type == 'public' and 'anchor' not in netdef:
- # add DNS resolvers to the public interfaces only
- sub_part = _get_subnet_part(raw_subnet, dns_servers)
- else:
- # remove the gateway any non-public interfaces
- if 'gateway' in sub_part:
- del sub_part['gateway']
+ if netdef in ('private', 'anchor_ipv4', 'anchor_ipv6'):
+ del sub_part['gateway']
subnets.append(sub_part)
@@ -171,6 +171,10 @@ def convert_network_configuration(config, dns_servers):
nic_configs.append(ncfg)
LOG.debug("nic '%s' configuration: %s", if_name, ncfg)
+ if dns_servers:
+ LOG.debug("added dns servers: %s", dns_servers)
+ nic_configs.append({'type': 'nameserver', 'address': dns_servers})
+
return {'version': 1, 'config': nic_configs}
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 61cd36bd..26f3168d 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -21,7 +21,7 @@ from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
-# For reference: http://tinyurl.com/laora4c
+# See https://docs.openstack.org/user-guide/cli-config-drive.html
LOG = logging.getLogger(__name__)
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 14293f3c..602af078 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -43,9 +43,9 @@ class ConfigFile(ConfigSource, dict):
# "sensitive" settings shall not be logged
if canLog:
- logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val))
+ logger.debug("ADDED KEY-VAL :: '%s' = '%s'", key, val)
else:
- logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key)
+ logger.debug("ADDED KEY-VAL :: '%s' = '*****************'", key)
self[key] = val
@@ -60,7 +60,7 @@ class ConfigFile(ConfigSource, dict):
Keyword arguments:
filename - The full path to the config file.
"""
- logger.info('Parsing the config file %s.' % filename)
+ logger.info('Parsing the config file %s.', filename)
config = configparser.ConfigParser()
config.optionxform = str
@@ -69,7 +69,7 @@ class ConfigFile(ConfigSource, dict):
self.clear()
for category in config.sections():
- logger.debug("FOUND CATEGORY = '%s'" % category)
+ logger.debug("FOUND CATEGORY = '%s'", category)
for (key, value) in config.items(category):
self._insertKey(category + '|' + key, value)
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 12165433..f7191b09 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -163,8 +163,8 @@ class Init(object):
except OSError as e:
error = e
- LOG.warn("Failed changing perms on '%s'. tried: %s. %s",
- log_file, ','.join(perms), error)
+ LOG.warning("Failed changing perms on '%s'. tried: %s. %s",
+ log_file, ','.join(perms), error)
def read_cfg(self, extra_fns=None):
# None check so that we don't keep on re-loading if empty
@@ -447,9 +447,9 @@ class Init(object):
mod_locs, looked_locs = importer.find_module(
mod_name, [''], ['list_types', 'handle_part'])
if not mod_locs:
- LOG.warn("Could not find a valid user-data handler"
- " named %s in file %s (searched %s)",
- mod_name, fname, looked_locs)
+ LOG.warning("Could not find a valid user-data handler"
+ " named %s in file %s (searched %s)",
+ mod_name, fname, looked_locs)
continue
mod = importer.import_module(mod_locs[0])
mod = handlers.fixup_handler(mod)
@@ -568,7 +568,8 @@ class Init(object):
if not isinstance(vdcfg, dict):
vdcfg = {'enabled': False}
- LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)
+ LOG.warning("invalid 'vendor_data' setting. resetting to: %s",
+ vdcfg)
enabled = vdcfg.get('enabled')
no_handlers = vdcfg.get('disabled_handlers', None)
@@ -632,10 +633,10 @@ class Init(object):
return
try:
- LOG.debug("applying net config names for %s" % netcfg)
+ LOG.debug("applying net config names for %s", netcfg)
self.distro.apply_network_config_names(netcfg)
except Exception as e:
- LOG.warn("Failed to rename devices: %s", e)
+ LOG.warning("Failed to rename devices: %s", e)
if (self.datasource is not NULL_DATA_SOURCE and
not self.is_new_instance()):
@@ -651,9 +652,9 @@ class Init(object):
"likely broken: %s", e)
return
except NotImplementedError:
- LOG.warn("distro '%s' does not implement apply_network_config. "
- "networking may not be configured properly.",
- self.distro)
+ LOG.warning("distro '%s' does not implement apply_network_config. "
+ "networking may not be configured properly.",
+ self.distro)
return
@@ -737,15 +738,15 @@ class Modules(object):
if not mod_name:
continue
if freq and freq not in FREQUENCIES:
- LOG.warn(("Config specified module %s"
- " has an unknown frequency %s"), raw_name, freq)
+ LOG.warning(("Config specified module %s"
+ " has an unknown frequency %s"), raw_name, freq)
# Reset it so when ran it will get set to a known value
freq = None
mod_locs, looked_locs = importer.find_module(
mod_name, ['', type_utils.obj_name(config)], ['handle'])
if not mod_locs:
- LOG.warn("Could not find module named %s (searched %s)",
- mod_name, looked_locs)
+ LOG.warning("Could not find module named %s (searched %s)",
+ mod_name, looked_locs)
continue
mod = config.fixup_module(importer.import_module(mod_locs[0]))
mostly_mods.append([mod, raw_name, freq, run_args])
@@ -877,7 +878,7 @@ def _pkl_load(fname):
pickle_contents = util.load_file(fname, decode=False)
except Exception as e:
if os.path.isfile(fname):
- LOG.warn("failed loading pickle in %s: %s" % (fname, e))
+ LOG.warning("failed loading pickle in %s: %s", fname, e)
pass
# This is allowed so just return nothing successfully loaded...
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index 648cd218..b3ea64e4 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -103,14 +103,14 @@ def detect_template(text):
raise ValueError("Unknown template rendering type '%s' requested"
% template_type)
if template_type == 'jinja' and not JINJA_AVAILABLE:
- LOG.warn("Jinja not available as the selected renderer for"
- " desired template, reverting to the basic renderer.")
+ LOG.warning("Jinja not available as the selected renderer for"
+ " desired template, reverting to the basic renderer.")
return ('basic', basic_render, rest)
elif template_type == 'jinja' and JINJA_AVAILABLE:
return ('jinja', jinja_render, rest)
if template_type == 'cheetah' and not CHEETAH_AVAILABLE:
- LOG.warn("Cheetah not available as the selected renderer for"
- " desired template, reverting to the basic renderer.")
+ LOG.warning("Cheetah not available as the selected renderer for"
+ " desired template, reverting to the basic renderer.")
return ('basic', basic_render, rest)
elif template_type == 'cheetah' and CHEETAH_AVAILABLE:
return ('cheetah', cheetah_render, rest)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 2f6a158e..d2b92e6a 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -155,8 +155,8 @@ def _get_ssl_args(url, ssl_details):
scheme = urlparse(url).scheme
if scheme == 'https' and ssl_details:
if not SSL_ENABLED:
- LOG.warn("SSL is not supported in requests v%s, "
- "cert. verification can not occur!", _REQ_VER)
+ LOG.warning("SSL is not supported in requests v%s, "
+ "cert. verification can not occur!", _REQ_VER)
else:
if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
ssl_args['verify'] = ssl_details['ca_certs']
@@ -415,14 +415,15 @@ class OauthUrlHelper(object):
return
if 'date' not in exception.headers:
- LOG.warn("Missing header 'date' in %s response", exception.code)
+ LOG.warning("Missing header 'date' in %s response",
+ exception.code)
return
date = exception.headers['date']
try:
remote_time = time.mktime(parsedate(date))
except Exception as e:
- LOG.warn("Failed to convert datetime '%s': %s", date, e)
+ LOG.warning("Failed to convert datetime '%s': %s", date, e)
return
skew = int(remote_time - time.time())
@@ -430,7 +431,7 @@ class OauthUrlHelper(object):
old_skew = self.skew_data.get(host, 0)
if abs(old_skew - skew) > self.skew_change_limit:
self.update_skew_file(host, skew)
- LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
+ LOG.warning("Setting oauth clockskew for %s to %d", host, skew)
self.skew_data[host] = skew
return
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index cfe5aa2f..88cb7f84 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -109,8 +109,9 @@ class UserDataProcessor(object):
ctype_orig = None
was_compressed = True
except util.DecompressionError as e:
- LOG.warn("Failed decompressing payload from %s of length"
- " %s due to: %s", ctype_orig, len(payload), e)
+ LOG.warning("Failed decompressing payload from %s of"
+ " length %s due to: %s",
+ ctype_orig, len(payload), e)
continue
# Attempt to figure out the payloads content-type
@@ -228,9 +229,9 @@ class UserDataProcessor(object):
if resp.ok():
content = resp.contents
else:
- LOG.warn(("Fetching from %s resulted in"
- " a invalid http code of %s"),
- include_url, resp.code)
+ LOG.warning(("Fetching from %s resulted in"
+ " a invalid http code of %s"),
+ include_url, resp.code)
if content is not None:
new_msg = convert_string(content)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 17abdf81..22af99dd 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -96,11 +96,11 @@ def _lsb_release(target=None):
data[fmap[fname]] = val.strip()
missing = [k for k in fmap.values() if k not in data]
if len(missing):
- LOG.warn("Missing fields in lsb_release --all output: %s",
- ','.join(missing))
+ LOG.warning("Missing fields in lsb_release --all output: %s",
+ ','.join(missing))
except ProcessExecutionError as err:
- LOG.warn("Unable to get lsb_release --all: %s", err)
+ LOG.warning("Unable to get lsb_release --all: %s", err)
data = dict((v, "UNAVAILABLE") for v in fmap.values())
return data
@@ -590,7 +590,7 @@ def system_info():
'release': platform.release(),
'python': platform.python_version(),
'uname': platform.uname(),
- 'dist': platform.linux_distribution(),
+ 'dist': platform.linux_distribution(), # pylint: disable=W1505
}
@@ -865,7 +865,7 @@ def read_file_or_url(url, timeout=5, retries=10,
url = "file://%s" % url
if url.lower().startswith("file://"):
if data:
- LOG.warn("Unable to post data to file resource %s", url)
+ LOG.warning("Unable to post data to file resource %s", url)
file_path = url[len("file://"):]
try:
contents = load_file(file_path, decode=False)
@@ -1279,7 +1279,7 @@ def get_cmdline():
# replace nulls with space and drop trailing null
cmdline = contents.replace("\x00", " ")[:-1]
except Exception as e:
- LOG.warn("failed reading /proc/1/cmdline: %s", e)
+ LOG.warning("failed reading /proc/1/cmdline: %s", e)
cmdline = ""
else:
try:
@@ -1400,7 +1400,7 @@ def logexc(log, msg, *args):
# or even desirable to have that much junk
# coming out to a non-debug stream
if msg:
- log.warn(msg, *args)
+ log.warning(msg, *args)
# Debug gets the full trace. However, nose has a bug whereby its
# logcapture plugin doesn't properly handle the case where there is no
# actual exception. To avoid tracebacks during the test suite then, we'll
@@ -1688,7 +1688,7 @@ def chmod(path, mode):
os.chmod(path, real_mode)
-def write_file(filename, content, mode=0o644, omode="wb"):
+def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False):
"""
Writes a file with the given content and sets the file mode as specified.
    Restores the SELinux context if possible.
@@ -1698,6 +1698,14 @@ def write_file(filename, content, mode=0o644, omode="wb"):
@param mode: The filesystem mode to set on the file.
@param omode: The open mode used when opening the file (w, wb, a, etc.)
"""
+
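+    # when copy_mode is set, try to preserve the mode of an existing target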
+ if copy_mode:
+ try:
+ file_stat = os.stat(filename)
+ mode = stat.S_IMODE(file_stat.st_mode)
+ except OSError:
+ pass
+
ensure_dir(os.path.dirname(filename))
if 'b' in omode.lower():
content = encode_text(content)
@@ -2336,8 +2344,8 @@ def read_dmi_data(key):
if dmidecode_path:
return _call_dmidecode(key, dmidecode_path)
- LOG.warn("did not find either path %s or dmidecode command",
- DMI_SYS_PATH)
+ LOG.warning("did not find either path %s or dmidecode command",
+ DMI_SYS_PATH)
return None
@@ -2396,9 +2404,10 @@ def rootdev_from_cmdline(cmdline):
if found.startswith("LABEL="):
return "/dev/disk/by-label/" + found[len("LABEL="):]
if found.startswith("UUID="):
- return "/dev/disk/by-uuid/" + found[len("UUID="):]
+ return "/dev/disk/by-uuid/" + found[len("UUID="):].lower()
if found.startswith("PARTUUID="):
- disks_path = "/dev/disk/by-partuuid/" + found[len("PARTUUID="):]
+ disks_path = ("/dev/disk/by-partuuid/" +
+ found[len("PARTUUID="):].lower())
if os.path.exists(disks_path):
return disks_path
results = find_devs_with(found)
diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py
index 3206d4e9..f9f7a63c 100644
--- a/cloudinit/warnings.py
+++ b/cloudinit/warnings.py
@@ -130,10 +130,10 @@ def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
os.path.join(_get_warn_dir(cfg), name),
topline + "\n".join(fmtlines) + "\n" + topline)
- LOG.warn(topline + "\n".join(fmtlines) + "\n" + closeline)
+ LOG.warning(topline + "\n".join(fmtlines) + "\n" + closeline)
if sleep:
- LOG.debug("sleeping %d seconds for warning '%s'" % (sleep, name))
+ LOG.debug("sleeping %d seconds for warning '%s'", sleep, name)
time.sleep(sleep)
# vi: ts=4 expandtab
diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt
index 75d78a15..3cb62006 100644
--- a/doc/examples/cloud-config-chef.txt
+++ b/doc/examples/cloud-config-chef.txt
@@ -5,46 +5,50 @@
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
#
-# This example assumes the instance is 12.04 (precise)
+# This example assumes the instance is 16.04 (xenial)
# The default is to install from packages.
-# Key from http://apt.opscode.com/packages@opscode.com.gpg.key
+# Key from https://packages.chef.io/chef.asc
apt:
- sources:
- - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.9 (GNU/Linux)
+ source1:
+ source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1.4.12 (Darwin)
+ Comment: GPGTools - http://gpgtools.org
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
- lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
- DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
- wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
- EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
- w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
- AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
- QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
- Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
- 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
- Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
- zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
- DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
- 0GLl8EkfA8uhluM=
- =zKAm
- -----END PGP PUBLIC KEY BLOCK-----
+ mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
+ twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
+ dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
+ JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
+ ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
+ XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
+ DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
+ sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
+ Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
+ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
+ CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+ +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
+ PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
+ CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
+ AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
+ Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
+ SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
+ OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
+ Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
+ IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
+ twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
+ DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
+ WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
+ 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
+ dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
+ MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
+ 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
+ zA==
+ =IxPr
+ -----END PGP PUBLIC KEY BLOCK-----
chef:
diff --git a/doc/examples/cloud-config-resolv-conf.txt b/doc/examples/cloud-config-resolv-conf.txt
index 37ffc91a..c4843f54 100644
--- a/doc/examples/cloud-config-resolv-conf.txt
+++ b/doc/examples/cloud-config-resolv-conf.txt
@@ -5,9 +5,9 @@
#
# Ensure that your yaml is valid and pass this as user-data when starting
# the instance. Also be sure that your cloud.cfg file includes this
-# configuration module in the appropirate section.
+# configuration module in the appropriate section.
#
-manage-resolv-conf: true
+manage_resolv_conf: true
resolv_conf:
nameservers: ['8.8.4.4', '8.8.8.8']
diff --git a/doc/examples/cloud-config-update-apt.txt b/doc/examples/cloud-config-update-apt.txt
index a83ce3f7..647241ca 100644
--- a/doc/examples/cloud-config-update-apt.txt
+++ b/doc/examples/cloud-config-update-apt.txt
@@ -1,7 +1,8 @@
#cloud-config
-# Update apt database on first boot
-# (ie run apt-get update)
+# Update apt database on first boot (run 'apt-get update').
+# Note: if packages are given, or package_upgrade is true, then the
+# update will be done independently of this setting.
#
-# Default: true
+# Default: false
# Aliases: apt_update
package_update: false
diff --git a/doc/merging.rst b/doc/merging.rst
deleted file mode 100644
index bf49b909..00000000
--- a/doc/merging.rst
+++ /dev/null
@@ -1,201 +0,0 @@
-Overview
-========
-
-This was implemented because it has been a common feature request that there be
-a way to specify how cloud-config yaml "dictionaries" provided as user-data are
-merged together when there are multiple yamls to merge together (say when
-performing an #include).
-
-Since previously the merging algorithm was very simple and would only overwrite
-and not append lists, or strings, and so on it was decided to create a new and
-improved way to merge dictionaries (and there contained objects) together in a
-way that is customizable, thus allowing for users who provide cloud-config
-user-data to determine exactly how there objects will be merged.
-
-For example.
-
-.. code-block:: yaml
-
- #cloud-config (1)
- run_cmd:
- - bash1
- - bash2
-
- #cloud-config (2)
- run_cmd:
- - bash3
- - bash4
-
-The previous way of merging the following 2 objects would result in a final
-cloud-config object that contains the following.
-
-.. code-block:: yaml
-
- #cloud-config (merged)
- run_cmd:
- - bash3
- - bash4
-
-Typically this is not what users want, instead they would likely prefer:
-
-.. code-block:: yaml
-
- #cloud-config (merged)
- run_cmd:
- - bash1
- - bash2
- - bash3
- - bash4
-
-This way makes it easier to combine the various cloud-config objects you have
-into a more useful list, thus reducing duplication that would have had to
-occur in the previous method to accomplish the same result.
-
-Customizability
-===============
-
-Since the above merging algorithm may not always be the desired merging
-algorithm (like how the previous merging algorithm was not always the preferred
-one) the concept of customizing how merging can be done was introduced through
-a new concept call 'merge classes'.
-
-A merge class is a class defintion which provides functions that can be used
-to merge a given type with another given type.
-
-An example of one of these merging classes is the following:
-
-.. code-block:: python
-
- class Merger(object):
- def __init__(self, merger, opts):
- self._merger = merger
- self._overwrite = 'overwrite' in opts
-
- # This merging algorithm will attempt to merge with
- # another dictionary, on encountering any other type of object
- # it will not merge with said object, but will instead return
- # the original value
- #
- # On encountering a dictionary, it will create a new dictionary
- # composed of the original and the one to merge with, if 'overwrite'
- # is enabled then keys that exist in the original will be overwritten
- # by keys in the one to merge with (and associated values). Otherwise
- # if not in overwrite mode the 2 conflicting keys themselves will
- # be merged.
- def _on_dict(self, value, merge_with):
- if not isinstance(merge_with, (dict)):
- return value
- merged = dict(value)
- for (k, v) in merge_with.items():
- if k in merged:
- if not self._overwrite:
- merged[k] = self._merger.merge(merged[k], v)
- else:
- merged[k] = v
- else:
- merged[k] = v
- return merged
-
-As you can see there is a '_on_dict' method here that will be given a source
-value and a value to merge with. The result will be the merged object. This
-code itself is called by another merging class which 'directs' the merging to
-happen by analyzing the types of the objects to merge and attempting to find a
-know object that will merge that type. I will avoid pasting that here, but it
-can be found in the `mergers/__init__.py` file (see `LookupMerger` and
-`UnknownMerger`).
-
-So following the typical cloud-init way of allowing source code to be
-downloaded and used dynamically, it is possible for users to inject there own
-merging files to handle specific types of merging as they choose (the basic
-ones included will handle lists, dicts, and strings). Note how each merge can
-have options associated with it which affect how the merging is performed, for
-example a dictionary merger can be told to overwrite instead of attempt to
-merge, or a string merger can be told to append strings instead of discarding
-other strings to merge with.
-
-How to activate
-===============
-
-There are a few ways to activate the merging algorithms, and to customize them
-for your own usage.
-
-1. The first way involves the usage of MIME messages in cloud-init to specify
- multipart documents (this is one way in which multiple cloud-config is
- joined together into a single cloud-config). Two new headers are looked
- for, both of which can define the way merging is done (the first header to
- exist wins). These new headers (in lookup order) are 'Merge-Type' and
- 'X-Merge-Type'. The value should be a string which will satisfy the new
- merging format defintion (see below for this format).
-
-2. The second way is actually specifying the merge-type in the body of the
- cloud-config dictionary. There are 2 ways to specify this, either as a
- string or as a dictionary (see format below). The keys that are looked up
- for this definition are the following (in order), 'merge_how',
- 'merge_type'.
-
-String format
--------------
-
-The string format that is expected is the following.
-
-::
-
- classname1(option1,option2)+classname2(option3,option4)....
-
-The class name there will be connected to class names used when looking for the
-class that can be used to merge and options provided will be given to the class
-on construction of that class.
-
-For example, the default string that is used when none is provided is the
-following:
-
-::
-
- list()+dict()+str()
-
-Dictionary format
------------------
-
-In cases where a dictionary can be used to specify the same information as the
-string format (ie option #2 of above) it can be used, for example.
-
-.. code-block:: python
-
- {'merge_how': [{'name': 'list', 'settings': ['extend']},
- {'name': 'dict', 'settings': []},
- {'name': 'str', 'settings': ['append']}]}
-
-This would be the equivalent format for default string format but in dictionary
-form instead of string form.
-
-Specifying multiple types and its effect
-========================================
-
-Now you may be asking yourself, if I specify a merge-type header or dictionary
-for every cloud-config that I provide, what exactly happens?
-
-The answer is that when merging, a stack of 'merging classes' is kept, the
-first one on that stack is the default merging classes, this set of mergers
-will be used when the first cloud-config is merged with the initial empty
-cloud-config dictionary. If the cloud-config that was just merged provided a
-set of merging classes (via the above formats) then those merging classes will
-be pushed onto the stack. Now if there is a second cloud-config to be merged
-then the merging classes from the cloud-config before the first will be used
-(not the default) and so on. This way a cloud-config can decide how it will
-merge with a cloud-config dictionary coming after it.
-
-Other uses
-==========
-
-In addition to being used for merging user-data sections, the default merging
-algorithm for merging 'conf.d' yaml files (which form an initial yaml config
-for cloud-init) was also changed to use this mechanism so its full
-benefits (and customization) can also be used there as well. Other places that
-used the previous merging are also, similarly, now extensible (metadata
-merging, for example).
-
-Note, however, that merge algorithms are not used *across* types of
-configuration. As was the case before merging was implemented,
-user-data will overwrite conf.d configuration without merging.
-
-.. vi: textwidth=78
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst
index a110721c..c30d2263 100644
--- a/doc/rtd/topics/examples.rst
+++ b/doc/rtd/topics/examples.rst
@@ -93,6 +93,13 @@ Install arbitrary packages
:language: yaml
:linenos:
+Update apt database on first boot
+=================================
+
+.. literalinclude:: ../../examples/cloud-config-update-apt.txt
+ :language: yaml
+ :linenos:
+
Run apt or yum upgrade
======================
@@ -149,6 +156,27 @@ Register RedHat Subscription
:language: yaml
:linenos:
-.. _chef: http://www.opscode.com/chef/
+Configure data sources
+======================
+
+.. literalinclude:: ../../examples/cloud-config-datasources.txt
+ :language: yaml
+ :linenos:
+
+Create partitions and filesystems
+=================================
+
+.. literalinclude:: ../../examples/cloud-config-disk-setup.txt
+ :language: yaml
+ :linenos:
+
+Grow partitions
+===============
+
+.. literalinclude:: ../../examples/cloud-config-growpart.txt
+ :language: yaml
+ :linenos:
+
+.. _chef: http://www.chef.io/chef/
.. _puppet: http://puppetlabs.com/
.. vi: textwidth=78
diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst
index eca118f5..2f927a47 100644
--- a/doc/rtd/topics/merging.rst
+++ b/doc/rtd/topics/merging.rst
@@ -2,5 +2,203 @@
Merging User-Data Sections
**************************
-.. include:: ../../merging.rst
+Overview
+========
+
+This was implemented because it has been a common feature request that there
+be a way to specify how the cloud-config yaml "dictionaries" provided as
+user-data are merged together when multiple yamls must be combined (say, when
+performing an #include).
+
+The previous merging algorithm was very simple: it would only overwrite, and
+never append, lists, strings, and so on. It was therefore decided to create a
+new and improved way to merge dictionaries (and their contained objects)
+together in a way that is customizable, allowing users who provide
+cloud-config user-data to determine exactly how their objects will be merged.
+
+For example:
+
+.. code-block:: yaml
+
+    #cloud-config (1)
+    run_cmd:
+     - bash1
+     - bash2
+
+    #cloud-config (2)
+    run_cmd:
+     - bash3
+     - bash4
+
+The previous way of merging the two objects above would result in a final
+cloud-config object that contains the following.
+
+.. code-block:: yaml
+
+    #cloud-config (merged)
+    run_cmd:
+     - bash3
+     - bash4
+
+Typically this is not what users want; instead they would likely prefer:
+
+.. code-block:: yaml
+
+    #cloud-config (merged)
+    run_cmd:
+     - bash1
+     - bash2
+     - bash3
+     - bash4
+
+This makes it easier to combine the various cloud-config objects you have
+into a more useful list, reducing the duplication the previous method
+required to accomplish the same result.
+
+Customizability
+===============
+
+Because the above merging algorithm may not always be desired (just as the
+previous merging algorithm was not always the preferred one), the concept of
+customized merging was introduced through 'merge classes'.
+
+A merge class is a class definition which provides functions that can be used
+to merge a given type with another given type.
+
+An example of one of these merging classes is the following:
+
+.. code-block:: python
+
+    class Merger(object):
+        def __init__(self, merger, opts):
+            self._merger = merger
+            self._overwrite = 'overwrite' in opts
+
+        # This merging algorithm will attempt to merge with another
+        # dictionary; on encountering any other type of object it will not
+        # merge with that object, but will instead return the original value.
+        #
+        # On encountering a dictionary, it will create a new dictionary
+        # composed of the original and the one to merge with. If 'overwrite'
+        # is enabled, keys that exist in the original will be overwritten by
+        # the keys (and associated values) in the one to merge with;
+        # otherwise the two conflicting values themselves will be merged.
+        def _on_dict(self, value, merge_with):
+            if not isinstance(merge_with, dict):
+                return value
+            merged = dict(value)
+            for (k, v) in merge_with.items():
+                if k in merged:
+                    if not self._overwrite:
+                        merged[k] = self._merger.merge(merged[k], v)
+                    else:
+                        merged[k] = v
+                else:
+                    merged[k] = v
+            return merged
+
+As you can see, there is a '_on_dict' method here that will be given a source
+value and a value to merge with; the result will be the merged object. This
+code itself is called by another merging class which 'directs' the merging to
+happen by analyzing the types of the objects to merge and attempting to find
+a known merger that can merge that type. That code is not pasted here, but it
+can be found in the `mergers/__init__.py` file (see `LookupMerger` and
+`UnknownMerger`).
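+
+For illustration only, a stripped-down sketch of such a 'directing' merger
+(the real `LookupMerger` in `mergers/__init__.py` differs in its details)
+might look like this:
+
+.. code-block:: python
+
+    class SimpleLookupMerger(object):
+        """Dispatch a merge to a '_on_<typename>' method, if one exists."""
+
+        def __init__(self, mergers):
+            # mergers: objects providing methods such as _on_dict/_on_str
+            self._mergers = mergers
+
+        def merge(self, source, merge_with):
+            # look for a merger method named after the source value's type
+            meth_name = '_on_%s' % type(source).__name__
+            for merger in self._mergers:
+                meth = getattr(merger, meth_name, None)
+                if meth:
+                    return meth(source, merge_with)
+            # no merger knows this type; keep the original value
+            return source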
+
+So, following the typical cloud-init way of allowing source code to be
+downloaded and used dynamically, it is possible for users to inject their own
+merging files to handle specific types of merging as they choose (the basic
+ones included will handle lists, dicts, and strings). Note that each merger
+can have options associated with it which affect how the merging is
+performed; for example, a dictionary merger can be told to overwrite instead
+of attempting to merge, or a string merger can be told to append strings
+instead of discarding other strings to merge with.
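+
+As another illustrative sketch (not the shipped implementation), a string
+merger honoring an 'append' option could be written as:
+
+.. code-block:: python
+
+    class StrMerger(object):
+        def __init__(self, merger, opts):
+            self._merger = merger
+            self._append = 'append' in opts
+
+        def _on_str(self, value, merge_with):
+            # without 'append', keep the original string and discard the
+            # string being merged in
+            if not self._append:
+                return value
+            return value + str(merge_with)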
+
+How to activate
+===============
+
+There are a few ways to activate the merging algorithms, and to customize them
+for your own usage.
+
+1. The first way involves the usage of MIME messages in cloud-init to specify
+   multipart documents (this is one way in which multiple cloud-configs are
+   joined together into a single cloud-config). Two new headers are looked
+   for, either of which can define the way merging is done (the first header
+   found wins). These new headers (in lookup order) are 'Merge-Type' and
+   'X-Merge-Type'. The value should be a string which satisfies the merging
+   format definition (see below for this format, and the example after this
+   list).
+
+2. The second way is to specify the merge-type in the body of the
+   cloud-config dictionary. There are two ways to specify this, either as a
+   string or as a dictionary (see the formats below). The keys that are
+   looked up for this definition are (in order): 'merge_how', 'merge_type'.
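+
+For example, a single part of a MIME multipart user-data document could
+request non-default merging with a header such as the following (the value
+uses the string format described in the next section, with the 'extend' and
+'append' settings that also appear in the dictionary example further below):
+
+::
+
+    Content-Type: text/cloud-config
+    Merge-Type: list(extend)+dict()+str(append)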
+
+String format
+-------------
+
+The string format that is expected is the following.
+
+::
+
+    classname1(option1,option2)+classname2(option3,option4)....
+
+Each class name will be matched against the class names used when looking up
+the class that can perform the merge, and the options provided will be given
+to that class when it is constructed.
+
+For example, the default string that is used when none is provided is the
+following:
+
+::
+
+    list()+dict()+str()
+
+Dictionary format
+-----------------
+
+A dictionary can be used to specify the same information as the string format
+(i.e. the second option above), for example:
+
+.. code-block:: python
+
+    {'merge_how': [{'name': 'list', 'settings': ['extend']},
+                   {'name': 'dict', 'settings': []},
+                   {'name': 'str', 'settings': ['append']}]}
+
+This is the dictionary equivalent of the string format; note that, unlike the
+default string shown earlier, this example also enables the 'extend' and
+'append' settings.
+
+Specifying multiple types and its effect
+========================================
+
+Now you may be asking yourself: if I specify a merge-type header or
+dictionary for every cloud-config that I provide, what exactly happens?
+
+The answer is that when merging, a stack of 'merging classes' is kept. The
+first entry on that stack is the default set of merging classes, and this set
+is used when the first cloud-config is merged with the initial empty
+cloud-config dictionary. If the cloud-config that was just merged provided a
+set of merging classes (via the above formats), then those merging classes
+are pushed onto the stack. If there is a second cloud-config to be merged,
+the merging classes provided by the previous cloud-config are used (not the
+default), and so on. This way, a cloud-config can decide how it will merge
+with a cloud-config dictionary coming after it.
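+
+As a small worked example, assume two cloud-configs arrive in this order:
+
+::
+
+    #cloud-config (A)
+    # merged into the initial empty config using the default mergers
+    merge_how: 'list(extend)+dict()+str()'
+    run_cmd:
+     - bash1
+
+    #cloud-config (B)
+    # merged using the mergers that A pushed onto the stack
+    run_cmd:
+     - bash2
+
+Here B is merged using the 'list(extend)' merger that A declared, so the
+resulting 'run_cmd' contains both bash1 and bash2.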
+
+Other uses
+==========
+
+In addition to being used for merging user-data sections, the default merging
+algorithm for merging 'conf.d' yaml files (which form an initial yaml config
+for cloud-init) was also changed to use this mechanism, so its full benefits
+(and customization) are available there as well. Other places that used the
+previous merging are similarly now extensible (metadata merging, for
+example).
+
+Note, however, that merge algorithms are not used *across* types of
+configuration. As was the case before merging was implemented,
+user-data will overwrite conf.d configuration without merging.
+
.. vi: textwidth=78
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index a3ead4f1..c963c09a 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -44,6 +44,7 @@ Modules
.. automodule:: cloudinit.config.cc_set_hostname
.. automodule:: cloudinit.config.cc_set_passwords
.. automodule:: cloudinit.config.cc_snappy
+.. automodule:: cloudinit.config.cc_snap_config
.. automodule:: cloudinit.config.cc_spacewalk
.. automodule:: cloudinit.config.cc_ssh
.. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints
diff --git a/setup.py b/setup.py
index e6693c90..32a44d94 100755
--- a/setup.py
+++ b/setup.py
@@ -138,9 +138,7 @@ class InitsysInstallData(install):
self.init_system = self.init_system.split(",")
if len(self.init_system) == 0:
- raise DistutilsArgError(
- ("You must specify one of (%s) when"
- " specifying init system(s)!") % (", ".join(INITSYS_TYPES)))
+ self.init_system = ['systemd']
bad = [f for f in self.init_system if f not in INITSYS_TYPES]
if len(bad) != 0:
diff --git a/snapcraft.yaml b/snapcraft.yaml
new file mode 100644
index 00000000..24e8e74d
--- /dev/null
+++ b/snapcraft.yaml
@@ -0,0 +1,21 @@
+name: cloud-init
+version: master
+summary: Init scripts for cloud instances
+description: |
+ Cloud instances need special scripts to run during initialisation to
+ retrieve and install ssh keys and to let the user run various scripts.
+
+grade: stable
+confinement: classic
+
+apps:
+  cloud-init:
+    # LP: #1669306
+    command: usr/bin/python3 $SNAP/bin/cloud-init
+    plugs: [network]
+
+parts:
+  cloud-init:
+    plugin: python
+    source-type: git
+    source: https://git.launchpad.net/cloud-init
diff --git a/templates/sources.list.debian.tmpl b/templates/sources.list.debian.tmpl
index c8043f76..d64ace4d 100644
--- a/templates/sources.list.debian.tmpl
+++ b/templates/sources.list.debian.tmpl
@@ -26,7 +26,5 @@ deb-src {{mirror}} {{codename}}-updates main contrib non-free
## N.B. software from this repository may not have been tested as
## extensively as that contained in the main release, although it includes
## newer versions of some applications which may provide useful features.
-{#
-deb http://backports.debian.org/debian-backports {{codename}}-backports main contrib non-free
-deb-src http://backports.debian.org/debian-backports {{codename}}-backports main contrib non-free
--#}
+deb {{mirror}} {{codename}}-backports main contrib non-free
+deb-src {{mirror}} {{codename}}-backports main contrib non-free
diff --git a/tests/cloud_tests/__main__.py b/tests/cloud_tests/__main__.py
index ef7d1878..ed654ad3 100644
--- a/tests/cloud_tests/__main__.py
+++ b/tests/cloud_tests/__main__.py
@@ -38,7 +38,7 @@ def run(args):
finally:
# TODO: make this configurable via environ or cmdline
if failed:
- LOG.warn('some tests failed, leaving data in %s', args.data_dir)
+ LOG.warning('some tests failed, leaving data in %s', args.data_dir)
else:
shutil.rmtree(args.data_dir)
return failed
diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py
index b68cc98e..371b0444 100644
--- a/tests/cloud_tests/args.py
+++ b/tests/cloud_tests/args.py
@@ -94,7 +94,7 @@ def normalize_create_args(args):
if os.path.exists(config.name_to_path(args.name)):
msg = 'test: {} already exists'.format(args.name)
if args.force:
- LOG.warn('%s but ignoring due to --force', msg)
+ LOG.warning('%s but ignoring due to --force', msg)
else:
LOG.error(msg)
return None
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
index 68b47d7a..02fc0e52 100644
--- a/tests/cloud_tests/collect.py
+++ b/tests/cloud_tests/collect.py
@@ -45,7 +45,7 @@ def collect_test_data(args, snapshot, os_name, test_name):
# if test is not enabled, skip and return 0 failures
if not test_config.get('enabled', False):
- LOG.warn('test config %s is not enabled, skipping', test_name)
+ LOG.warning('test config %s is not enabled, skipping', test_name)
return ({}, 0)
# create test instance
diff --git a/tests/cloud_tests/configs/examples/install_run_chef_recipes.yaml b/tests/cloud_tests/configs/examples/install_run_chef_recipes.yaml
index 3cd28dfe..0bec305e 100644
--- a/tests/cloud_tests/configs/examples/install_run_chef_recipes.yaml
+++ b/tests/cloud_tests/configs/examples/install_run_chef_recipes.yaml
@@ -1,46 +1,50 @@
#
# From cloud config examples on cloudinit.readthedocs.io
#
-# 2016-11-17: Disabled as test suite fails this long running test currently
+# 2017-03-31: Disabled as it depends on a third-party apt repository
#
enabled: False
cloud_config: |
#cloud-config
- # Key from http://apt.opscode.com/packages@opscode.com.gpg.key
+ # Key from https://packages.chef.io/chef.asc
apt:
- sources:
- - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.9 (GNU/Linux)
+ source1:
+ source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1.4.12 (Darwin)
+ Comment: GPGTools - http://gpgtools.org
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
- lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
- DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
- wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
- EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
- w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
- AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
- QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
- Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
- 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
- Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
- zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
- DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
- 0GLl8EkfA8uhluM=
- =zKAm
- -----END PGP PUBLIC KEY BLOCK-----
+ mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
+ twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
+ dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
+ JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
+ ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
+ XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
+ DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
+ sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
+ Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
+ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
+ CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+ +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
+ PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
+ CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
+ AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
+ Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
+ SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
+ OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
+ Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
+ IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
+ twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
+ DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
+ WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
+ 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
+ dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
+ MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
+ 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
+ zA==
+ =IxPr
+ -----END PGP PUBLIC KEY BLOCK-----
chef:
@@ -91,4 +95,9 @@ cloud_config: |
# Useful for troubleshooting cloud-init issues
output: {all: '| tee -a /var/log/cloud-init-output.log'}
+collect_scripts:
+  chef_installed: |
+    #!/bin/sh
+    dpkg-query -W -f '${Status}\n' chef
+
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py
new file mode 100644
index 00000000..b36486f0
--- /dev/null
+++ b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py
@@ -0,0 +1,17 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""cloud-init Integration Test Verify Script"""
+from tests.cloud_tests.testcases import base
+
+
+class TestChefExample(base.CloudTestCase):
+    """Test chef module"""
+
+    def test_chef_basic(self):
+        """Test chef installed"""
+        out = self.get_data_file('chef_installed')
+        self.assertIn('install ok', out)
+
+    # FIXME: Add more tests, and/or replace with comprehensive module tests
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
index ef7d4e21..2a63550e 100644
--- a/tests/cloud_tests/verify.py
+++ b/tests/cloud_tests/verify.py
@@ -45,9 +45,9 @@ def verify_data(base_dir, tests):
}
for failure in res[test_name]['failures']:
- LOG.warn('test case: %s failed %s.%s with: %s',
- test_name, failure['class'], failure['function'],
- failure['error'])
+ LOG.warning('test case: %s failed %s.%s with: %s',
+ test_name, failure['class'], failure['function'],
+ failure['error'])
return res
@@ -80,7 +80,8 @@ def verify(args):
if len(fail_list) == 0:
LOG.info('test: %s passed all tests', test_name)
else:
- LOG.warn('test: %s failed %s tests', test_name, len(fail_list))
+ LOG.warning('test: %s failed %s tests', test_name,
+ len(fail_list))
failed += len(fail_list)
# dump results
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
index b0ad86ab..63a2b04d 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -10,6 +10,7 @@
This test file exercises the code in sources DataSourceAltCloud.py
'''
+import mock
import os
import shutil
import tempfile
@@ -18,10 +19,7 @@ from cloudinit import helpers
from cloudinit import util
from unittest import TestCase
-# Get the cloudinit.sources.DataSourceAltCloud import items needed.
-import cloudinit.sources.DataSourceAltCloud
-from cloudinit.sources.DataSourceAltCloud import DataSourceAltCloud
-from cloudinit.sources.DataSourceAltCloud import read_user_data_callback
+import cloudinit.sources.DataSourceAltCloud as dsac
OS_UNAME_ORIG = getattr(os, 'uname')
@@ -32,17 +30,17 @@ def _write_cloud_info_file(value):
with a cloud backend identifier ImageFactory when building
an image with ImageFactory.
'''
- cifile = open(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 'w')
+ cifile = open(dsac.CLOUD_INFO_FILE, 'w')
cifile.write(value)
cifile.close()
- os.chmod(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 0o664)
+ os.chmod(dsac.CLOUD_INFO_FILE, 0o664)
def _remove_cloud_info_file():
'''
Remove the test CLOUD_INFO_FILE
'''
- os.remove(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE)
+ os.remove(dsac.CLOUD_INFO_FILE)
def _write_user_data_files(mount_dir, value):
@@ -122,7 +120,7 @@ class TestGetCloudType(TestCase):
Forcing read_dmi_data return to match a RHEVm system: RHEV Hypervisor
'''
util.read_dmi_data = _dmi_data('RHEV')
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual('RHEV', dsrc.get_cloud_type())
def test_vsphere(self):
@@ -131,7 +129,7 @@ class TestGetCloudType(TestCase):
Forcing read_dmi_data return to match a vSphere system: RHEV Hypervisor
'''
util.read_dmi_data = _dmi_data('VMware Virtual Platform')
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual('VSPHERE', dsrc.get_cloud_type())
def test_unknown(self):
@@ -140,7 +138,7 @@ class TestGetCloudType(TestCase):
Forcing read_dmi_data return to match an unrecognized return.
'''
util.read_dmi_data = _dmi_data('Unrecognized Platform')
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual('UNKNOWN', dsrc.get_cloud_type())
@@ -154,8 +152,7 @@ class TestGetDataCloudInfoFile(TestCase):
self.paths = helpers.Paths({'cloud_dir': '/tmp'})
self.cloud_info_file = tempfile.mkstemp()[1]
self.dmi_data = util.read_dmi_data
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
- self.cloud_info_file
+ dsac.CLOUD_INFO_FILE = self.cloud_info_file
def tearDown(self):
# Reset
@@ -167,14 +164,13 @@ class TestGetDataCloudInfoFile(TestCase):
pass
util.read_dmi_data = self.dmi_data
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
+ dsac.CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
def test_rhev(self):
'''Success Test module get_data() forcing RHEV.'''
_write_cloud_info_file('RHEV')
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: True
self.assertEqual(True, dsrc.get_data())
@@ -182,7 +178,7 @@ class TestGetDataCloudInfoFile(TestCase):
'''Success Test module get_data() forcing VSPHERE.'''
_write_cloud_info_file('VSPHERE')
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: True
self.assertEqual(True, dsrc.get_data())
@@ -190,7 +186,7 @@ class TestGetDataCloudInfoFile(TestCase):
'''Failure Test module get_data() forcing RHEV.'''
_write_cloud_info_file('RHEV')
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: False
self.assertEqual(False, dsrc.get_data())
@@ -198,7 +194,7 @@ class TestGetDataCloudInfoFile(TestCase):
'''Failure Test module get_data() forcing VSPHERE.'''
_write_cloud_info_file('VSPHERE')
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: False
self.assertEqual(False, dsrc.get_data())
@@ -206,7 +202,7 @@ class TestGetDataCloudInfoFile(TestCase):
'''Failure Test module get_data() forcing unrecognized.'''
_write_cloud_info_file('unrecognized')
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.get_data())
@@ -219,7 +215,7 @@ class TestGetDataNoCloudInfoFile(TestCase):
'''Set up.'''
self.paths = helpers.Paths({'cloud_dir': '/tmp'})
self.dmi_data = util.read_dmi_data
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ dsac.CLOUD_INFO_FILE = \
'no such file'
# We have a different code path for arm to deal with LP1243287
# We have to switch arch to x86_64 to avoid test failure
@@ -227,7 +223,7 @@ class TestGetDataNoCloudInfoFile(TestCase):
def tearDown(self):
# Reset
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ dsac.CLOUD_INFO_FILE = \
'/etc/sysconfig/cloud-info'
util.read_dmi_data = self.dmi_data
# Return back to original arch
@@ -237,7 +233,7 @@ class TestGetDataNoCloudInfoFile(TestCase):
'''Test No cloud info file module get_data() forcing RHEV.'''
util.read_dmi_data = _dmi_data('RHEV Hypervisor')
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: True
self.assertEqual(True, dsrc.get_data())
@@ -245,7 +241,7 @@ class TestGetDataNoCloudInfoFile(TestCase):
'''Test No cloud info file module get_data() forcing VSPHERE.'''
util.read_dmi_data = _dmi_data('VMware Virtual Platform')
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: True
self.assertEqual(True, dsrc.get_data())
@@ -253,7 +249,7 @@ class TestGetDataNoCloudInfoFile(TestCase):
'''Test No cloud info file module get_data() forcing unrecognized.'''
util.read_dmi_data = _dmi_data('Unrecognized Platform')
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.get_data())
@@ -261,11 +257,14 @@ class TestUserDataRhevm(TestCase):
'''
Test to exercise method: DataSourceAltCloud.user_data_rhevm()
'''
+ cmd_pass = ['true']
+ cmd_fail = ['false']
+ cmd_not_found = ['bogus bad command']
+
def setUp(self):
'''Set up.'''
self.paths = helpers.Paths({'cloud_dir': '/tmp'})
self.mount_dir = tempfile.mkdtemp()
-
_write_user_data_files(self.mount_dir, 'test user data')
def tearDown(self):
@@ -279,61 +278,44 @@ class TestUserDataRhevm(TestCase):
except OSError:
pass
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
- cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
- ['/sbin/modprobe', 'floppy']
- cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
- ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
+ dsac.CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
+ dsac.CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
+ dsac.CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle',
+ '--quiet', '--timeout=5']
def test_mount_cb_fails(self):
'''Test user_data_rhevm() where mount_cb fails.'''
- cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
- ['echo', 'modprobe floppy']
-
- dsrc = DataSourceAltCloud({}, None, self.paths)
-
+ dsac.CMD_PROBE_FLOPPY = self.cmd_pass
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_modprobe_fails(self):
'''Test user_data_rhevm() where modprobe fails.'''
- cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
- ['ls', 'modprobe floppy']
-
- dsrc = DataSourceAltCloud({}, None, self.paths)
-
+ dsac.CMD_PROBE_FLOPPY = self.cmd_fail
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_no_modprobe_cmd(self):
'''Test user_data_rhevm() with no modprobe command.'''
- cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \
- ['bad command', 'modprobe floppy']
-
- dsrc = DataSourceAltCloud({}, None, self.paths)
-
+ dsac.CMD_PROBE_FLOPPY = self.cmd_not_found
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_udevadm_fails(self):
'''Test user_data_rhevm() where udevadm fails.'''
- cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
- ['ls', 'udevadm floppy']
-
- dsrc = DataSourceAltCloud({}, None, self.paths)
-
+ dsac.CMD_UDEVADM_SETTLE = self.cmd_fail
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_no_udevadm_cmd(self):
'''Test user_data_rhevm() with no udevadm command.'''
- cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \
- ['bad command', 'udevadm floppy']
-
- dsrc = DataSourceAltCloud({}, None, self.paths)
-
+ dsac.CMD_UDEVADM_SETTLE = self.cmd_not_found
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
@@ -359,17 +341,30 @@ class TestUserDataVsphere(TestCase):
except OSError:
pass
- cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
+ dsac.CLOUD_INFO_FILE = \
'/etc/sysconfig/cloud-info'
- def test_user_data_vsphere(self):
+ @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
+ @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
+ def test_user_data_vsphere_no_cdrom(self, m_mount_cb, m_find_devs_with):
'''Test user_data_vsphere() with no CDROM devices found.'''
- cloudinit.sources.DataSourceAltCloud.MEDIA_DIR = self.mount_dir
+ m_find_devs_with.return_value = []
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
+ self.assertEqual(False, dsrc.user_data_vsphere())
+ self.assertEqual(0, m_mount_cb.call_count)
- dsrc = DataSourceAltCloud({}, None, self.paths)
+ @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
+ @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
+ def test_user_data_vsphere_mcb_fail(self, m_mount_cb, m_find_devs_with):
+ '''Test user_data_vsphere() where mount_cb fails.'''
+ m_find_devs_with.return_value = ["/dev/mock/cdrom"]
+ m_mount_cb.side_effect = util.MountFailedError("Unable to mount")
+ dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_vsphere())
+ self.assertEqual(1, m_find_devs_with.call_count)
+ self.assertEqual(1, m_mount_cb.call_count)
class TestReadUserDataCallback(TestCase):
@@ -398,7 +393,7 @@ class TestReadUserDataCallback(TestCase):
'''Test read_user_data_callback() with both files.'''
self.assertEqual('test user data',
- read_user_data_callback(self.mount_dir))
+ dsac.read_user_data_callback(self.mount_dir))
def test_callback_dc(self):
'''Test read_user_data_callback() with only DC file.'''
@@ -408,7 +403,7 @@ class TestReadUserDataCallback(TestCase):
non_dc_file=True)
self.assertEqual('test user data',
- read_user_data_callback(self.mount_dir))
+ dsac.read_user_data_callback(self.mount_dir))
def test_callback_non_dc(self):
'''Test read_user_data_callback() with only non-DC file.'''
@@ -418,13 +413,13 @@ class TestReadUserDataCallback(TestCase):
non_dc_file=False)
self.assertEqual('test user data',
- read_user_data_callback(self.mount_dir))
+ dsac.read_user_data_callback(self.mount_dir))
def test_callback_none(self):
'''Test read_user_data_callback() no files are found.'''
_remove_user_data_files(self.mount_dir)
- self.assertEqual(None, read_user_data_callback(self.mount_dir))
+ self.assertEqual(None, dsac.read_user_data_callback(self.mount_dir))
def force_arch(arch=None):
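The refactor above replaces the ad-hoc probe commands ('echo ...', 'ls ...', 'bad command ...') with the class-level cmd_pass/cmd_fail/cmd_not_found triples, which model the three probe outcomes directly. A minimal sketch of why those particular commands work, assuming a POSIX system with true(1) and false(1) on PATH:

    import subprocess

    # 'true' always exits 0, 'false' always exits nonzero, and a name
    # containing a space can never resolve to an executable, so the
    # three lists model success, failure, and command-not-found.
    for cmd in (['true'], ['false'], ['bogus bad command']):
        try:
            rc = subprocess.call(cmd)
            print(cmd, '->', 'pass' if rc == 0 else 'fail')
        except OSError:
            print(cmd, '->', 'not found')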
diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py
index 61d6e001..a11166a9 100644
--- a/tests/unittests/test_datasource/test_digitalocean.py
+++ b/tests/unittests/test_datasource/test_digitalocean.py
@@ -197,7 +197,8 @@ class TestNetworkConvert(TestCase):
@mock.patch('cloudinit.net.get_interfaces_by_mac')
def _get_networking(self, m_get_by_mac):
m_get_by_mac.return_value = {
- '04:01:57:d1:9e:01': 'ens1', '04:01:57:d1:9e:02': 'ens2',
+ '04:01:57:d1:9e:01': 'ens1',
+ '04:01:57:d1:9e:02': 'ens2',
'b8:ae:ed:75:5f:9a': 'enp0s25',
'ae:cc:08:7c:88:00': 'meta2p1'}
netcfg = digitalocean.convert_network_configuration(
@@ -208,18 +209,33 @@ class TestNetworkConvert(TestCase):
def test_networking_defined(self):
netcfg = self._get_networking()
self.assertIsNotNone(netcfg)
+ dns_defined = False
- for nic_def in netcfg.get('config'):
- print(json.dumps(nic_def, indent=3))
- n_type = nic_def.get('type')
- n_subnets = nic_def.get('type')
- n_name = nic_def.get('name')
- n_mac = nic_def.get('mac_address')
+ for part in netcfg.get('config'):
+ n_type = part.get('type')
+ print("testing part ", n_type, "\n", json.dumps(part, indent=3))
+
+ if n_type == 'nameserver':
+ n_address = part.get('address')
+ self.assertIsNotNone(n_address)
+ self.assertEqual(len(n_address), 3)
+
+ dns_resolvers = DO_META["dns"]["nameservers"]
+ for x in n_address:
+ self.assertIn(x, dns_resolvers)
+ dns_defined = True
- self.assertIsNotNone(n_type)
- self.assertIsNotNone(n_subnets)
- self.assertIsNotNone(n_name)
- self.assertIsNotNone(n_mac)
+ else:
+ n_subnets = part.get('subnets')
+ n_name = part.get('name')
+ n_mac = part.get('mac_address')
+
+ self.assertIsNotNone(n_type)
+ self.assertIsNotNone(n_subnets)
+ self.assertIsNotNone(n_name)
+ self.assertIsNotNone(n_mac)
+
+ self.assertTrue(dns_defined)
def _get_nic_definition(self, int_type, expected_name):
"""helper function to return if_type (i.e. public) and the expected
@@ -260,12 +276,6 @@ class TestNetworkConvert(TestCase):
self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
self.assertEqual('physical', nic_def.get('type'))
- def _check_dns_nameservers(self, subn_def):
- self.assertIn('dns_nameservers', subn_def)
- expected_nameservers = DO_META['dns']['nameservers']
- nic_nameservers = subn_def.get('dns_nameservers')
- self.assertEqual(expected_nameservers, nic_nameservers)
-
def test_public_interface_ipv6(self):
"""test public ipv6 addressing"""
(nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
@@ -280,7 +290,6 @@ class TestNetworkConvert(TestCase):
self.assertEqual(cidr_notated_address, subn_def.get('address'))
self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway'))
- self._check_dns_nameservers(subn_def)
def test_public_interface_ipv4(self):
"""test public ipv4 addressing"""
@@ -293,7 +302,6 @@ class TestNetworkConvert(TestCase):
self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway'))
- self._check_dns_nameservers(subn_def)
def test_public_interface_anchor_ipv4(self):
"""test public ipv4 addressing"""
diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py
index 3feba86c..4815bdb6 100644
--- a/tests/unittests/test_handler/test_handler_yum_add_repo.py
+++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py
@@ -5,10 +5,13 @@ from cloudinit import util
from .. import helpers
-import configobj
+try:
+ from configparser import ConfigParser
+except ImportError:
+ from ConfigParser import ConfigParser
import logging
import shutil
-from six import BytesIO
+from six import StringIO
import tempfile
LOG = logging.getLogger(__name__)
@@ -54,9 +57,9 @@ class TestConfig(helpers.FilesystemMockingTestCase):
}
self.patchUtils(self.tmp)
cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
- contents = util.load_file("/etc/yum.repos.d/epel_testing.repo",
- decode=False)
- contents = configobj.ConfigObj(BytesIO(contents))
+ contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
+ parser = ConfigParser()
+ parser.readfp(StringIO(contents))
expected = {
'epel_testing': {
'name': 'Extra Packages for Enterprise Linux 5 - Testing',
@@ -67,6 +70,47 @@ class TestConfig(helpers.FilesystemMockingTestCase):
'gpgcheck': '1',
}
}
- self.assertEqual(expected, dict(contents))
+ for section in expected:
+ self.assertTrue(parser.has_section(section),
+ "Contains section {}".format(section))
+ for k, v in expected[section].items():
+ self.assertEqual(parser.get(section, k), v)
+
+ def test_write_config_array(self):
+ cfg = {
+ 'yum_repos': {
+ 'puppetlabs-products': {
+ 'name': 'Puppet Labs Products El 6 - $basearch',
+ 'baseurl':
+ 'http://yum.puppetlabs.com/el/6/products/$basearch',
+ 'gpgkey': [
+ 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs',
+ 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet',
+ ],
+ 'enabled': True,
+ 'gpgcheck': True,
+ }
+ }
+ }
+ self.patchUtils(self.tmp)
+ cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
+ contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo")
+ parser = ConfigParser()
+ parser.readfp(StringIO(contents))
+ expected = {
+ 'puppetlabs_products': {
+ 'name': 'Puppet Labs Products El 6 - $basearch',
+ 'baseurl': 'http://yum.puppetlabs.com/el/6/products/$basearch',
+ 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n'
+ 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet',
+ 'enabled': '1',
+ 'gpgcheck': '1',
+ }
+ }
+ for section in expected:
+ self.assertTrue(parser.has_section(section),
+ "Contains section {}".format(section))
+ for k, v in expected[section].items():
+ self.assertEqual(parser.get(section, k), v)
# vi: ts=4 expandtab
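Switching from configobj to the stdlib ConfigParser changes how list-valued options read back: ConfigParser folds indented continuation lines into a single newline-joined string, which is why the expected 'gpgkey' above is one '\n'-separated value rather than a list. A self-contained sketch of that round-trip (readfp mirrors the test code; later Pythons prefer read_file):

    try:
        from configparser import ConfigParser  # Python 3
    except ImportError:
        from ConfigParser import ConfigParser  # Python 2
    from six import StringIO

    contents = (
        "[demo_repo]\n"
        "name = Demo Repo\n"
        "gpgkey = file:///key-one\n"
        "\tfile:///key-two\n"   # indented continuation line
        "enabled = 1\n"
    )
    parser = ConfigParser()
    parser.readfp(StringIO(contents))
    assert parser.get('demo_repo', 'gpgkey') == \
        'file:///key-one\nfile:///key-two'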
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 9cc5e4ab..89e75369 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -1463,13 +1463,16 @@ class TestNetRenderers(CiTestCase):
class TestGetInterfacesByMac(CiTestCase):
_data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1',
- 'bridge1-nic', 'tun0'],
+ 'bridge1-nic', 'tun0', 'bond1.101'],
'bonds': ['bond1'],
'bridges': ['bridge1'],
- 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1'],
+ 'vlans': ['bond1.101'],
+ 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1',
+ 'bond1.101'],
'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
'enp0s2': 'aa:aa:aa:aa:aa:02',
'bond1': 'aa:aa:aa:aa:aa:01',
+ 'bond1.101': 'aa:aa:aa:aa:aa:01',
'bridge1': 'aa:aa:aa:aa:aa:03',
'bridge1-nic': 'aa:aa:aa:aa:aa:03',
'tun0': None}}
@@ -1484,13 +1487,16 @@ class TestGetInterfacesByMac(CiTestCase):
def _se_is_bridge(self, name):
return name in self.data['bridges']
+ def _se_is_vlan(self, name):
+ return name in self.data['vlans']
+
def _se_interface_has_own_mac(self, name):
return name in self.data['own_macs']
def _mock_setup(self):
self.data = copy.deepcopy(self._data)
mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
- 'interface_has_own_mac')
+ 'interface_has_own_mac', 'is_vlan')
self.mocks = {}
for n in mocks:
m = mock.patch('cloudinit.net.' + n,
@@ -1536,6 +1542,24 @@ class TestGetInterfacesByMac(CiTestCase):
mock.call('b1')],
any_order=True)
+ def test_excludes_vlans(self):
+ self._mock_setup()
+ # add a device 'b1', make all return they have their "own mac",
+ # set everything other than 'b1' to be a vlan.
+ # then expect b1 is the only thing left.
+ self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1'
+ self.data['devices'].append('b1')
+ self.data['bonds'] = []
+ self.data['bridges'] = []
+ self.data['own_macs'] = self.data['devices']
+ self.data['vlans'] = [f for f in self.data['devices'] if f != "b1"]
+ ret = net.get_interfaces_by_mac()
+ self.assertEqual({'aa:aa:aa:aa:aa:b1': 'b1'}, ret)
+ self.mocks['is_vlan'].assert_has_calls(
+ [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'),
+ mock.call('b1')],
+ any_order=True)
+
def _gzip_data(data):
with io.BytesIO() as iobuf:
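The new test_excludes_vlans case pins down a filtering rule: a VLAN sub-interface such as 'bond1.101' always shares its parent's MAC, so it must be skipped or it would collide with the parent in the MAC-to-name map. An illustrative reimplementation of the rule (the real logic lives in cloudinit.net.get_interfaces_by_mac, which additionally raises on genuine duplicates):

    def interfaces_by_mac(devices, macs, own_macs, vlans):
        # Keep only devices that own their MAC and are not VLANs.
        ret = {}
        for name in devices:
            if name not in own_macs or name in vlans:
                continue
            mac = macs.get(name)
            if mac:
                ret.setdefault(mac, name)
        return ret

    data = {'devices': ['enp0s1', 'bond1.101'],
            'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
                     'bond1.101': 'aa:aa:aa:aa:aa:01'},
            'own_macs': ['enp0s1', 'bond1.101'],
            'vlans': ['bond1.101']}
    assert interfaces_by_mac(**data) == {'aa:aa:aa:aa:aa:01': 'enp0s1'}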
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index ab74311e..5d21b4b7 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -103,8 +103,8 @@ class TestWriteFile(helpers.TestCase):
self.assertTrue(os.path.isdir(dirname))
self.assertTrue(os.path.isfile(path))
- def test_custom_mode(self):
- """Verify custom mode works properly."""
+ def test_explicit_mode(self):
+ """Verify explicit file mode works properly."""
path = os.path.join(self.tmp, "NewFile.txt")
contents = "Hey there"
@@ -115,6 +115,35 @@ class TestWriteFile(helpers.TestCase):
file_stat = os.stat(path)
self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode))
+ def test_copy_mode_no_existing(self):
+ """Verify that file is created with mode 0o644 if copy_mode
+ is true and there is no prior existing file."""
+ path = os.path.join(self.tmp, "NewFile.txt")
+ contents = "Hey there"
+
+ util.write_file(path, contents, copy_mode=True)
+
+ self.assertTrue(os.path.exists(path))
+ self.assertTrue(os.path.isfile(path))
+ file_stat = os.stat(path)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+
+ def test_copy_mode_with_existing(self):
+ """Verify that file is created using mode of existing file
+ if copy_mode is true."""
+ path = os.path.join(self.tmp, "NewFile.txt")
+ contents = "Hey there"
+
+ open(path, 'w').close()
+ os.chmod(path, 0o666)
+
+ util.write_file(path, contents, copy_mode=True)
+
+ self.assertTrue(os.path.exists(path))
+ self.assertTrue(os.path.isfile(path))
+ file_stat = os.stat(path)
+ self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode))
+
def test_custom_omode(self):
"""Verify custom omode works properly."""
path = os.path.join(self.tmp, "NewFile.txt")
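The two copy_mode tests above fix the contract for the new write_file flag: reuse the mode of any existing file, otherwise fall back to the 0o644 default. A rough sketch of that behavior (illustrative only; the real implementation is cloudinit.util.write_file):

    import os
    import stat

    def write_file_sketch(path, contents, copy_mode=False, mode=0o644):
        if copy_mode:
            try:
                # Preserve the mode of a pre-existing file.
                mode = stat.S_IMODE(os.stat(path).st_mode)
            except OSError:
                pass  # no existing file: keep the 0o644 default
        with open(path, 'w') as fh:
            fh.write(contents)
        os.chmod(path, mode)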
diff --git a/tools/21-cloudinit.conf b/tools/21-cloudinit.conf
index c65325c1..150d800f 100644
--- a/tools/21-cloudinit.conf
+++ b/tools/21-cloudinit.conf
@@ -3,4 +3,4 @@
# comment out the following line to allow CLOUDINIT messages through.
# Doing so means you'll also get CLOUDINIT messages in /var/log/syslog
-& ~
+& stop
diff --git a/tools/ds-identify b/tools/ds-identify
index 5d390ef7..a43b1291 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -92,7 +92,7 @@ DI_DMI_PRODUCT_UUID=""
DI_FS_LABELS=""
DI_KERNEL_CMDLINE=""
DI_VIRT=""
-DI_PID_1_PLATFORM=""
+DI_PID_1_PRODUCT_NAME=""
DI_UNAME_KERNEL_NAME=""
DI_UNAME_KERNEL_RELEASE=""
@@ -362,9 +362,9 @@ read_datasource_list() {
return 0
}
-read_pid1_platform() {
- local oifs="$IFS" out="" tok="" key="" val="" platform="${UNAVAILABLE}"
- cached "${DI_PID_1_PLATFORM}" && return
+read_pid1_product_name() {
+ local oifs="$IFS" out="" tok="" key="" val="" product_name="${UNAVAILABLE}"
+ cached "${DI_PID_1_PRODUCT_NAME}" && return
[ -r "${PATH_PROC_1_ENVIRON}" ] || return
out=$(tr '\0' '\n' <"${PATH_PROC_1_ENVIRON}")
IFS="$CR"; set -- $out; IFS="$oifs"
@@ -372,9 +372,9 @@ read_pid1_platform() {
key=${tok%%=*}
[ "$key" != "$tok" ] || continue
val=${tok#*=}
- [ "$key" = "platform" ] && platform="$val" && break
+ [ "$key" = "product_name" ] && product_name="$val" && break
done
- DI_PID_1_PLATFORM="$platform"
+ DI_PID_1_PRODUCT_NAME="$product_name"
}
dmi_product_name_matches() {
@@ -552,13 +552,14 @@ check_configdrive_v2() {
# look in /config-drive <vlc>/seed/config_drive for a directory
# openstack/YYYY-MM-DD format with a file meta_data.json
local d=""
- for d in /config-drive "${PATH_VAR_LIB_CLOUD}/seed/config_drive"; do
+ local vlc_config_drive_path="${PATH_VAR_LIB_CLOUD}/seed/config_drive"
+ for d in /config-drive $vlc_config_drive_path; do
set +f; set -- "$d/openstack/"2???-??-??/meta_data.json; set -f;
[ -f "$1" ] && return ${DS_FOUND}
done
# at least one cloud (softlayer) seeds config drive with only 'latest'.
local lpath="openstack/latest/meta_data.json"
- if [ -e "${PATH_VAR_LIB_CLOUD}/$lpath" ]; then
+ if [ -e "$vlc_config_drive_path/$lpath" ]; then
debug 1 "config drive seeded directory had only 'latest'"
return ${DS_FOUND}
fi
@@ -804,7 +805,7 @@ dscheck_OpenStack() {
# RDO installed nova (LP: #1675349).
return ${DS_FOUND}
fi
- if [ "${DI_PID_1_PLATFORM}" = "$nova" ]; then
+ if [ "${DI_PID_1_PRODUCT_NAME}" = "$nova" ]; then
return ${DS_FOUND}
fi
@@ -872,7 +873,7 @@ dscheck_None() {
collect_info() {
read_virt
- read_pid1_platform
+ read_pid1_product_name
read_kernel_cmdline
read_uname_info
read_config
@@ -892,7 +893,7 @@ print_info() {
_print_info() {
local n="" v="" vars=""
vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL"
- vars="$vars DMI_PRODUCT_UUID PID_1_PLATFORM"
+ vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME"
vars="$vars FS_LABELS KERNEL_CMDLINE VIRT"
vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION"
vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM"
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index 95fc4659..82816e8a 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -293,9 +293,9 @@ class MetaDataHandler(object):
else:
return "%s" % (PLACEMENT_CAPABILITIES.get(pentry, ''))
else:
- log.warn(("Did not implement action %s, "
- "returning empty response: %r"),
- action, NOT_IMPL_RESPONSE)
+ log.warning(("Did not implement action %s, "
+ "returning empty response: %r"),
+ action, NOT_IMPL_RESPONSE)
return NOT_IMPL_RESPONSE
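The final hunk swaps log.warn for log.warning: warn() is a deprecated alias in Python 3's logging module, so the new spelling keeps mock-meta quiet under deprecation warnings without changing output. A minimal illustration:

    import logging

    logging.basicConfig(level=logging.WARNING)
    log = logging.getLogger('mock-meta')
    # warning() is the supported spelling; warn() is a deprecated alias.
    log.warning('Did not implement action %s, returning: %r', 'demo', '')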