Diffstat (limited to 'cloudinit/util.py')
-rw-r--r--  cloudinit/util.py  161
1 files changed, 126 insertions, 35 deletions
diff --git a/cloudinit/util.py b/cloudinit/util.py
index dc3c5639..c961bbc7 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -43,15 +43,16 @@ import subprocess
import sys
import tempfile
import time
-import types
import urlparse
import yaml
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import mergers
from cloudinit import safeyaml
from cloudinit import url_helper
+from cloudinit import type_utils
from cloudinit import version
from cloudinit.settings import (CFG_BUILTIN)
@@ -219,11 +220,12 @@ def fork_cb(child_cb, *args):
os._exit(0) # pylint: disable=W0212
except:
logexc(LOG, ("Failed forking and"
- " calling callback %s"), obj_name(child_cb))
+ " calling callback %s"),
+ type_utils.obj_name(child_cb))
os._exit(1) # pylint: disable=W0212
else:
LOG.debug("Forked child %s who will run callback %s",
- fid, obj_name(child_cb))
+ fid, type_utils.obj_name(child_cb))
def is_true(val, addons=None):
@@ -537,40 +539,26 @@ def make_url(scheme, host, port=None,
return urlparse.urlunparse(pieces)
-def obj_name(obj):
- if isinstance(obj, (types.TypeType,
- types.ModuleType,
- types.FunctionType,
- types.LambdaType)):
- return str(obj.__name__)
- return obj_name(obj.__class__)
-
-
def mergemanydict(srcs, reverse=False):
if reverse:
srcs = reversed(srcs)
m_cfg = {}
+ merge_how = [mergers.default_mergers()]
for a_cfg in srcs:
if a_cfg:
- m_cfg = mergedict(m_cfg, a_cfg)
+ # Take the last merger as the one that
+ # will define how to merge next...
+ mergers_to_apply = list(merge_how[-1])
+ merger = mergers.construct(mergers_to_apply)
+ m_cfg = merger.merge(m_cfg, a_cfg)
+ # If the config now has new mergers set,
+ # extract them to be used next time...
+ new_mergers = mergers.dict_extract_mergers(m_cfg)
+ if new_mergers:
+ merge_how.append(new_mergers)
return m_cfg
-def mergedict(src, cand):
- """
- Merge values from C{cand} into C{src}.
- If C{src} has a key C{cand} will not override.
- Nested dictionaries are merged recursively.
- """
- if isinstance(src, dict) and isinstance(cand, dict):
- for (k, v) in cand.iteritems():
- if k not in src:
- src[k] = v
- else:
- src[k] = mergedict(src[k], v)
- return src
-
-
@contextlib.contextmanager
def chdir(ndir):
curr = os.getcwd()
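
A rough usage sketch of the reworked mergemanydict() (illustrative only; the sample dicts are made up, and it assumes the default mergers keep the old mergedict() behaviour of not overriding keys that are already present):

    from cloudinit import util

    base = {'a': 1, 'nested': {'x': 1}}
    extra = {'a': 2, 'b': 3, 'nested': {'y': 2}}

    # Earlier sources win on conflicting keys, and nested dictionaries
    # are merged recursively, as the removed mergedict() docstring describes.
    merged = util.mergemanydict([base, extra])
    # merged == {'a': 1, 'b': 3, 'nested': {'x': 1, 'y': 2}}

A source dict can also carry its own merger specification; mergers.dict_extract_mergers() pulls it out, and it then governs how the sources that follow are merged.
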
@@ -713,7 +701,7 @@ def load_yaml(blob, default=None, allowed=(dict,)):
# Yes this will just be caught, but that's ok for now...
raise TypeError(("Yaml load allows %s root types,"
" but got %s instead") %
- (allowed, obj_name(converted)))
+ (allowed, type_utils.obj_name(converted)))
loaded = converted
except (yaml.YAMLError, TypeError, ValueError):
if len(blob) == 0:
@@ -782,7 +770,7 @@ def read_conf_with_confd(cfgfile):
if not isinstance(confd, (str, basestring)):
raise TypeError(("Config file %s contains 'conf_d' "
"with non-string type %s") %
- (cfgfile, obj_name(confd)))
+ (cfgfile, type_utils.obj_name(confd)))
else:
confd = str(confd).strip()
elif os.path.isdir("%s.d" % cfgfile):
@@ -793,7 +781,7 @@ def read_conf_with_confd(cfgfile):
# Conf.d settings override input configuration
confd_cfg = read_conf_d(confd)
- return mergedict(confd_cfg, cfg)
+ return mergemanydict([confd_cfg, cfg])
def read_cc_from_cmdline(cmdline=None):
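
Because confd_cfg is passed first, conf.d values keep taking precedence over the base file, just as with the old mergedict() call. A small sketch, assuming /etc/cloud/cloud.cfg.d exists and the (hypothetical) key below appears in both places:

    from cloudinit import util

    # Hypothetical contents, for illustration only:
    #   /etc/cloud/cloud.cfg          ->  preserve_hostname: false
    #   /etc/cloud/cloud.cfg.d/99.cfg ->  preserve_hostname: true
    cfg = util.read_conf_with_confd('/etc/cloud/cloud.cfg')
    # The conf.d value wins: cfg['preserve_hostname'] is True
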
@@ -948,7 +936,7 @@ def is_resolvable(name):
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
badips.add(sockaddr[0])
- except socket.gaierror:
+ except (socket.gaierror, socket.error):
pass
_DNS_REDIRECT_IP = badips
if badresults:
@@ -961,7 +949,7 @@ def is_resolvable(name):
if addr in _DNS_REDIRECT_IP:
return False
return True
- except socket.gaierror:
+ except (socket.gaierror, socket.error):
return False
@@ -1540,7 +1528,7 @@ def shellify(cmdlist, add_header=True):
else:
raise RuntimeError(("Unable to shellify type %s"
" which is not a list or string")
- % (obj_name(args)))
+ % (type_utils.obj_name(args)))
LOG.debug("Shellified %s commands.", cmds_made)
return content
@@ -1599,7 +1587,7 @@ def get_proc_env(pid):
fn = os.path.join("/proc/", str(pid), "environ")
try:
contents = load_file(fn)
- toks = contents.split("\0")
+ toks = contents.split("\x00")
for tok in toks:
if tok == "":
continue
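
For reference, /proc/<pid>/environ is a NUL-delimited blob of NAME=value entries; a standalone sketch of the same parsing done here, with made-up values:

    contents = 'PATH=/usr/bin\x00HOME=/root\x00LANG=en_US.UTF-8\x00'
    env = {}
    for tok in contents.split('\x00'):
        if tok == '':
            continue
        name, _, val = tok.partition('=')
        env[name] = val
    # env == {'PATH': '/usr/bin', 'HOME': '/root', 'LANG': 'en_US.UTF-8'}
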
@@ -1655,3 +1643,106 @@ def expand_package_list(version_fmt, pkgs):
raise RuntimeError("Invalid package type.")
return pkglist
+
+
+def parse_mount_info(path, mountinfo_lines, log=LOG):
+ """Return the mount information for PATH given the lines from
+ /proc/$$/mountinfo."""
+
+ path_elements = [e for e in path.split('/') if e]
+ devpth = None
+ fs_type = None
+ match_mount_point = None
+ match_mount_point_elements = None
+ for i, line in enumerate(mountinfo_lines):
+ parts = line.split()
+
+ # Completely fail if there is anything in any line that is
+ # unexpected, as continuing to parse past a bad line could
+ # cause an incorrect result to be returned, so it's better to
+ # return nothing than an incorrect result.
+
+ # The minimum number of elements in a valid line is 10.
+ if len(parts) < 10:
+ log.debug("Line %d has two few columns (%d): %s",
+ i + 1, len(parts), line)
+ return None
+
+ mount_point = parts[4]
+ mount_point_elements = [e for e in mount_point.split('/') if e]
+
+ # Ignore mounts deeper than the path in question.
+ if len(mount_point_elements) > len(path_elements):
+ continue
+
+ # Ignore mounts where the common path is not the same.
+ l = min(len(mount_point_elements), len(path_elements))
+ if mount_point_elements[0:l] != path_elements[0:l]:
+ continue
+
+ # Ignore mount points higher than an already seen mount
+ # point.
+ if (match_mount_point_elements is not None and
+ len(match_mount_point_elements) > len(mount_point_elements)):
+ continue
+
+ # Find the '-' which terminates a list of optional columns to
+ # find the filesystem type and the path to the device. See
+ # man 5 proc for the format of this file.
+ try:
+ dash_idx = parts.index('-')
+ except ValueError:
+ log.debug("Did not find column named '-' in line %d: %s",
+ i + 1, line)
+ return None
+
+ # Get the path to the device.
+ try:
+ fs_type = parts[dash_idx + 1]
+ devpth = parts[dash_idx + 2]
+ except IndexError:
+ log.debug("Too few columns after '-' column in line %d: %s",
+ i + 1, line)
+ return None
+
+ match_mount_point = mount_point
+ match_mount_point_elements = mount_point_elements
+
+ if devpth and fs_type and match_mount_point:
+ return (devpth, fs_type, match_mount_point)
+ else:
+ return None
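
An illustrative sketch of what parse_mount_info() returns: it picks the deepest mount point that is a prefix of the given path. The mountinfo lines follow the format documented in man 5 proc, but the devices and mount points are made up:

    from cloudinit import util

    lines = [
        '36 35 98:0 / / rw,noatime shared:1 - btrfs /dev/vda1 rw,subvol=@',
        '40 36 0:35 / /home rw,relatime - ext4 /dev/vda2 rw,data=ordered',
    ]
    util.parse_mount_info('/home/ubuntu', lines)
    # -> ('/dev/vda2', 'ext4', '/home')
    util.parse_mount_info('/var/lib/cloud', lines)
    # -> ('/dev/vda1', 'btrfs', '/')
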
+
+
+def get_mount_info(path, log=LOG):
+ # Use /proc/$$/mountinfo to find the device where path is mounted.
+ # This is done because, with a btrfs filesystem, os.stat(path)
+ # does not return the ID of the device.
+ #
+ # Here, / has a device of 18 (decimal).
+ #
+ # $ stat /
+ # File: '/'
+ # Size: 234 Blocks: 0 IO Block: 4096 directory
+ # Device: 12h/18d Inode: 256 Links: 1
+ # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
+ # Access: 2013-01-13 07:31:04.358011255 +0000
+ # Modify: 2013-01-13 18:48:25.930011255 +0000
+ # Change: 2013-01-13 18:48:25.930011255 +0000
+ # Birth: -
+ #
+ # Find where / is mounted:
+ #
+ # $ mount | grep ' / '
+ # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)
+ #
+ # And the device ID for /dev/vda1 is not 18:
+ #
+ # $ ls -l /dev/vda1
+ # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1
+ #
+ # So use /proc/$$/mountinfo to find the device underlying the
+ # input path.
+ mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
+ lines = load_file(mountinfo_path).splitlines()
+ return parse_mount_info(path, lines, log)
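
A minimal usage sketch: the return value mirrors parse_mount_info(), so it is either a (device, fstype, mount_point) tuple or None, and the actual values depend on the running system.

    from cloudinit import util

    info = util.get_mount_info('/')
    if info:
        devpth, fs_type, mount_point = info
        print("%s is a %s filesystem mounted at %s" %
              (devpth, fs_type, mount_point))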