-rw-r--r--  ChangeLog  12
-rw-r--r--  cloudinit/config/cc_byobu.py  27
-rw-r--r--  cloudinit/config/cc_landscape.py  8
-rw-r--r--  cloudinit/config/cc_salt_minion.py  5
-rw-r--r--  cloudinit/config/cc_set_passwords.py  17
-rw-r--r--  cloudinit/config/cc_ssh.py  17
-rw-r--r--  cloudinit/config/cc_ssh_authkey_fingerprints.py  19
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py  37
-rw-r--r--  cloudinit/config/cc_users_groups.py  66
-rw-r--r--  cloudinit/distros/__init__.py  320
-rw-r--r--  cloudinit/distros/fedora.py  2
-rw-r--r--  cloudinit/distros/ubuntu.py  5
-rw-r--r--  cloudinit/safeyaml.py  32
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py  82
-rw-r--r--  cloudinit/sources/DataSourceEc2.py  16
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py  7
-rw-r--r--  cloudinit/sources/__init__.py  17
-rw-r--r--  cloudinit/user_data.py  2
-rw-r--r--  cloudinit/util.py  49
-rw-r--r--  cloudinit/version.py  2
-rw-r--r--  config/cloud.cfg  10
-rw-r--r--  doc/examples/cloud-config-user-groups.txt  27
-rwxr-xr-x  packages/brpm  41
-rw-r--r--  packages/redhat/cloud-init.spec.in  20
-rwxr-xr-x  sysvinit/cloud-init  4
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py  145
-rw-r--r--  tests/unittests/test_distros/test_user_data_normalize.py  279
-rw-r--r--  tests/unittests/test_util.py  39
-rwxr-xr-x  tools/make-dist-tarball  2
29 files changed, 1082 insertions, 227 deletions
diff --git a/ChangeLog b/ChangeLog
index c5dcd418..d03c0293 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,16 @@
+0.7.1:
+ - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6
+ - config-drive: map hostname to local-hostname (LP: #1061964)
+ - landscape: install landscape-client package if not installed.
+ only take action if cloud-config is present (LP: #1066115)
+ - cc_landscape: restart landscape after install or config (LP: #1070345)
+ - multipart/archive. do not fail on unknown headers in multipart
+ mime or cloud-archive config (LP: #1065116).
0.7.0:
+ - add a 'exception_cb' argument to 'wait_for_url'. If provided, this
+ method will be called back with the exception received and the message.
+ - utilize the 'exception_cb' above to modify the oauth timestamp in
+ DataSourceMAAS requests if a 401 or 403 is received. (LP: #978127)
- catch signals and exit rather than stack tracing
- if logging fails, enable a fallback logger by patching the logging module
- do not 'start networking' in cloud-init-nonet, but add
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 4e2e06bb..92d428b7 100644
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -18,12 +18,17 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit import util
distros = ['ubuntu', 'debian']
-def handle(name, cfg, _cloud, log, args):
+def handle(name, cfg, cloud, log, args):
if len(args) != 0:
value = args[0]
else:
@@ -56,16 +61,20 @@ def handle(name, cfg, _cloud, log, args):
shcmd = ""
if mod_user:
- user = util.get_cfg_option_str(cfg, "user", "ubuntu")
- shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
- shcmd += " || X=$(($X+1)); "
+ (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ (user, _user_config) = ds.extract_default(users)
+ if not user:
+ log.warn(("No default byobu user provided, "
+ "can not launch %s for the default user"), bl_inst)
+ else:
+ shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
+ shcmd += " || X=$(($X+1)); "
if mod_sys:
shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
shcmd += " || X=$(($X+1)); "
- cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
-
- log.debug("Setting byobu to %s", value)
-
- util.subp(cmd, capture=False)
+ if len(shcmd):
+ cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
+ log.debug("Setting byobu to %s", value)
+ util.subp(cmd, capture=False)
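A note on the pattern above: cc_byobu (like cc_set_passwords, cc_ssh and cc_ssh_import_id below) now goes through the shared helpers in cloudinit.distros instead of poking at cfg['users'][0] directly. A minimal sketch of that usage, where cfg and cloud stand in for the arguments every config-module handler receives:

    # Sketch only: cfg/cloud are the handler's normal arguments.
    from cloudinit import distros as ds

    def pick_default_user(cfg, cloud):
        # Normalize 'user'/'users'/'groups' config into dictionaries, then
        # pull out whichever user (if any) is flagged as the default.
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        return user  # None when no default user could be determined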
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 7cfb8296..56ab0ce3 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -59,6 +59,10 @@ def handle(_name, cfg, cloud, log, _args):
raise RuntimeError(("'landscape' key existed in config,"
" but not a dictionary type,"
" is a %s instead"), util.obj_name(ls_cloudcfg))
+ if not ls_cloudcfg:
+ return
+
+ cloud.distro.install_packages(["landscape-client"])
merge_data = [
LSC_BUILTIN_CFG,
@@ -79,8 +83,8 @@ def handle(_name, cfg, cloud, log, _args):
util.write_file(lsc_client_fn, contents.getvalue())
log.debug("Wrote landscape config file to %s", lsc_client_fn)
- if ls_cloudcfg:
- util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
+ util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
+ util.subp(["service", "landscape-client", "restart"])
def merge_together(objs):
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 79ed8807..8a1440d9 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -56,5 +56,6 @@ def handle(name, cfg, cloud, log, _args):
util.write_file(pub_name, salt_cfg['public_key'])
util.write_file(pem_name, salt_cfg['private_key'])
- # Start salt-minion
- util.subp(['service', 'salt-minion', 'start'], capture=False)
+ # restart salt-minion. 'service' will start even if not started. if it
+ # was started, it needs to be restarted for config change.
+ util.subp(['service', 'salt-minion', 'restart'], capture=False)
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index a017e6b6..26c558ad 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -20,6 +20,11 @@
import sys
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit import ssh_util
from cloudinit import util
@@ -50,18 +55,10 @@ def handle(_name, cfg, cloud, log, args):
expire = util.get_cfg_option_bool(chfg, 'expire', expire)
if not plist and password:
- user = cloud.distro.get_default_user()
-
- if 'users' in cfg:
-
- user_zero = cfg['users'][0]
-
- if isinstance(user_zero, dict) and 'name' in user_zero:
- user = user_zero['name']
-
+ (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ (user, _user_config) = ds.extract_default(users)
if user:
plist = "%s:%s" % (user, password)
-
else:
log.warn("No default or defined user to change password for.")
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 0ded62ba..32e48c30 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -21,6 +21,11 @@
import glob
import os
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit import ssh_util
from cloudinit import util
@@ -102,16 +107,8 @@ def handle(_name, cfg, cloud, log, _args):
" %s to file %s"), keytype, keyfile)
try:
- # TODO(utlemming): consolidate this stanza that occurs in:
- # cc_ssh_import_id, cc_set_passwords, maybe cc_users_groups.py
- user = cloud.distro.get_default_user()
-
- if 'users' in cfg:
- user_zero = cfg['users'][0]
-
- if user_zero != "default":
- user = user_zero
-
+ (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ (user, _user_config) = ds.extract_default(users)
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
DISABLE_ROOT_OPTS)
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 23f5755a..8c9a8806 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -21,6 +21,11 @@ import hashlib
from prettytable import PrettyTable
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit import ssh_util
from cloudinit import util
@@ -40,8 +45,10 @@ def _gen_fingerprint(b64_text, hash_meth='md5'):
hasher = hashlib.new(hash_meth)
hasher.update(base64.b64decode(b64_text))
return ":".join(_split_hash(hasher.hexdigest()))
- except TypeError:
+ except (TypeError, ValueError):
# Raised when b64 not really b64...
+ # or when the hash type is not really
+ # a known/supported hash type...
return '?'
@@ -89,8 +96,10 @@ def handle(name, cfg, cloud, log, _args):
log.debug(("Skipping module named %s, "
"logging of ssh fingerprints disabled"), name)
- user_name = util.get_cfg_option_str(cfg, "user", "ubuntu")
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
- extract = ssh_util.extract_authorized_keys
- (auth_key_fn, auth_key_entries) = extract(user_name, cloud.paths)
- _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth)
+ extract_func = ssh_util.extract_authorized_keys
+ (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ for (user_name, _cfg) in users.items():
+ (auth_key_fn, auth_key_entries) = extract_func(user_name, cloud.paths)
+ _pprint_key_entries(user_name, auth_key_fn,
+ auth_key_entries, hash_meth)
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 08fb63c6..83af36e9 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -18,6 +18,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit import util
import pwd
@@ -39,33 +44,27 @@ def handle(_name, cfg, cloud, log, args):
return
# import for cloudinit created users
+ (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
elist = []
- for user_cfg in cfg['users']:
- user = None
+ for (user, user_cfg) in users.items():
import_ids = []
-
- if isinstance(user_cfg, str) and user_cfg == "default":
- user = cloud.distro.get_default_user()
- if not user:
- continue
-
+ if user_cfg['default']:
import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
-
- elif isinstance(user_cfg, dict):
- user = None
- import_ids = []
-
+ else:
try:
- user = user_cfg['name']
import_ids = user_cfg['ssh_import_id']
-
- if import_ids and isinstance(import_ids, str):
- import_ids = str(import_ids).split(',')
-
except:
- log.debug("user %s is not configured for ssh_import" % user)
+ log.debug("User %s is not configured for ssh_import_id", user)
continue
+ try:
+ import_ids = util.uniq_merge(import_ids)
+ import_ids = [str(i) for i in import_ids]
+ except:
+ log.debug("User %s is not correctly configured for ssh_import_id",
+ user)
+ continue
+
if not len(import_ids):
continue
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index 418f3330..bf5b4581 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -16,63 +16,19 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-def handle(name, cfg, cloud, log, _args):
- user_zero = None
-
- if 'groups' in cfg:
- for group in cfg['groups']:
- if isinstance(group, dict):
- for name, values in group.iteritems():
- if isinstance(values, list):
- cloud.distro.create_group(name, values)
- elif isinstance(values, str):
- cloud.distro.create_group(name, values.split(','))
- else:
- cloud.distro.create_group(group, [])
-
- if 'users' in cfg:
- user_zero = None
-
- for user_config in cfg['users']:
-
- # Handle the default user creation
- if 'default' in user_config:
- log.info("Creating default user")
-
- # Create the default user if so defined
- try:
- cloud.distro.add_default_user()
-
- if not user_zero:
- user_zero = cloud.distro.get_default_user()
-
- except NotImplementedError:
-
- if user_zero == name:
- user_zero = None
-
- log.warn("Distro has not implemented default user "
- "creation. No default user will be created")
-
- elif isinstance(user_config, dict) and 'name' in user_config:
-
- name = user_config['name']
- if not user_zero:
- user_zero = name
-
- # Make options friendly for distro.create_user
- new_opts = {}
- if isinstance(user_config, dict):
- for opt in user_config:
- new_opts[opt.replace('-', '_')] = user_config[opt]
-
- cloud.distro.create_user(**new_opts)
-
- else:
- # create user with no configuration
- cloud.distro.create_user(user_config)
+def handle(name, cfg, cloud, _log, _args):
+ (users, groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ for (name, members) in groups.items():
+ cloud.distro.create_group(name, members)
+ for (user, config) in users.items():
+ cloud.distro.create_user(user, **config)
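For reference, a sketch of the shapes the new handler works with; the group and user names here are made up, and the default user is assumed to come from the distro's 'default_user' config:

    cfg = {
        'groups': {'devs': ['alice', 'bob'], 'admins': []},
        'users': ['default', {'name': 'alice', 'ssh-import-id': 'lp:alice'}],
    }
    # After ds.normalize_users_groups(cfg, cloud.distro), roughly:
    #   groups -> {'devs': ['alice', 'bob'], 'admins': []}
    #   users  -> {'<default name>': {..., 'default': True},
    #              'alice': {'ssh_import_id': 'lp:alice', 'default': False}}
    # handle() then simply iterates these, calling create_group() and
    # create_user() on the distro.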
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 549c1612..11a72da1 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -24,9 +24,8 @@
from StringIO import StringIO
import abc
-import grp
+import itertools
import os
-import pwd
import re
from cloudinit import importer
@@ -38,44 +37,13 @@ LOG = logging.getLogger(__name__)
class Distro(object):
-
__metaclass__ = abc.ABCMeta
- default_user = None
- default_user_groups = None
def __init__(self, name, cfg, paths):
self._paths = paths
self._cfg = cfg
self.name = name
- def add_default_user(self):
- # Adds the distro user using the rules:
- # - Password is same as username but is locked
- # - nopasswd sudo access
-
- user = self.get_default_user()
- groups = self.get_default_user_groups()
-
- if not user:
- raise NotImplementedError("No Default user")
-
- user_dict = {
- 'name': user,
- 'plain_text_passwd': user,
- 'home': "/home/%s" % user,
- 'shell': "/bin/bash",
- 'lock_passwd': True,
- 'gecos': "%s%s" % (user[0:1].upper(), user[1:]),
- 'sudo': "ALL=(ALL) NOPASSWD:ALL",
- }
-
- if groups:
- user_dict['groups'] = groups
-
- self.create_user(**user_dict)
-
- LOG.info("Added default '%s' user with passwordless sudo", user)
-
@abc.abstractmethod
def install_packages(self, pkglist):
raise NotImplementedError()
@@ -112,7 +80,7 @@ class Distro(object):
return arch
def _get_arch_package_mirror_info(self, arch=None):
- mirror_info = self.get_option("package_mirrors", None)
+ mirror_info = self.get_option("package_mirrors", [])
if arch == None:
arch = self.get_primary_arch()
return _get_arch_package_mirror_info(mirror_info, arch)
@@ -122,7 +90,6 @@ class Distro(object):
# this resolves the package_mirrors config option
# down to a single dict of {mirror_name: mirror_url}
arch_info = self._get_arch_package_mirror_info(arch)
-
return _get_package_mirror_info(availability_zone=availability_zone,
mirror_info=arch_info)
@@ -205,18 +172,8 @@ class Distro(object):
return True
return False
- def isuser(self, name):
- try:
- if pwd.getpwnam(name):
- return True
- except KeyError:
- return False
-
def get_default_user(self):
- return self.default_user
-
- def get_default_user_groups(self):
- return self.default_user_groups
+ return self.get_option('default_user')
def create_user(self, name, **kwargs):
"""
@@ -273,10 +230,10 @@ class Distro(object):
adduser_cmd.append('-m')
# Create the user
- if self.isuser(name):
+ if util.is_user(name):
LOG.warn("User %s already exists, skipping." % name)
else:
- LOG.debug("Creating name %s" % name)
+ LOG.debug("Adding user named %s", name)
try:
util.subp(adduser_cmd, logstring=x_adduser_cmd)
except Exception as e:
@@ -324,6 +281,39 @@ class Distro(object):
return True
+ def ensure_sudo_dir(self, path, sudo_base='/etc/sudoers'):
+ # Ensure the dir is included and that
+ # it actually exists as a directory
+ sudoers_contents = ''
+ if os.path.exists(sudo_base):
+ sudoers_contents = util.load_file(sudo_base)
+ found_include = False
+ for line in sudoers_contents.splitlines():
+ line = line.strip()
+ include_match = re.search(r"^#includedir\s+(.*)$", line)
+ if not include_match:
+ continue
+ included_dir = include_match.group(1).strip()
+ if not included_dir:
+ continue
+ included_dir = os.path.abspath(included_dir)
+ if included_dir == path:
+ found_include = True
+ break
+ if not found_include:
+ sudoers_contents += "\n#includedir %s\n" % (path)
+ try:
+ if not os.path.exists(sudo_base):
+ util.write_file(sudo_base, sudoers_contents, 0440)
+ else:
+ with open(sudo_base, 'a') as f:
+ f.write(sudoers_contents)
+ LOG.debug("added '#includedir %s' to %s" % (path, sudo_base))
+ except IOError as e:
+ util.logexc(LOG, "Failed to write %s" % sudo_base, e)
+ raise e
+ util.ensure_dir(path, 0755)
+
def write_sudo_rules(self,
user,
rules,
@@ -339,9 +329,10 @@ class Distro(object):
content += "%s %s\n" % (user, rule)
content += "\n"
+ self.ensure_sudo_dir(os.path.dirname(sudo_file))
+
if not os.path.exists(sudo_file):
util.write_file(sudo_file, content, 0440)
-
else:
try:
with open(sudo_file, 'a') as f:
@@ -350,18 +341,11 @@ class Distro(object):
util.logexc(LOG, "Failed to write %s" % sudo_file, e)
raise e
- def isgroup(self, name):
- try:
- if grp.getgrnam(name):
- return True
- except:
- return False
-
def create_group(self, name, members):
group_add_cmd = ['groupadd', name]
# Check if group exists, and then add it doesn't
- if self.isgroup(name):
+ if util.is_group(name):
LOG.warn("Skipping creation of existing group '%s'" % name)
else:
try:
@@ -373,7 +357,7 @@ class Distro(object):
# Add members to the group, if so defined
if len(members) > 0:
for member in members:
- if not self.isuser(member):
+ if not util.is_user(member):
LOG.warn("Unable to add group member '%s' to group '%s'"
"; user does not exist." % (member, name))
continue
@@ -387,6 +371,8 @@ def _get_package_mirror_info(mirror_info, availability_zone=None,
# given a arch specific 'mirror_info' entry (from package_mirrors)
# search through the 'search' entries, and fallback appropriately
# return a dict with only {name: mirror} entries.
+ if not mirror_info:
+ mirror_info = {}
ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" %
"north|northeast|east|southeast|south|southwest|west|northwest")
@@ -431,6 +417,224 @@ def _get_arch_package_mirror_info(package_mirrors, arch):
return default
+# Normalizes an input group configuration
+# which can be a comma separated list of
+# group names, or a list of group names
+# or a python dictionary of group names
+# to a list of members of that group.
+#
+# The output is a dictionary of group
+# names => members of that group which
+# is the standard form used in the rest
+# of cloud-init
+def _normalize_groups(grp_cfg):
+ if isinstance(grp_cfg, (str, basestring, list)):
+ c_grp_cfg = {}
+ for i in util.uniq_merge(grp_cfg):
+ c_grp_cfg[i] = []
+ grp_cfg = c_grp_cfg
+
+ groups = {}
+ if isinstance(grp_cfg, (dict)):
+ for (grp_name, grp_members) in grp_cfg.items():
+ groups[grp_name] = util.uniq_merge_sorted(grp_members)
+ else:
+ raise TypeError(("Group config must be list, dict "
+ " or string types only and not %s") %
+ util.obj_name(grp_cfg))
+ return groups
+
+
+# Normalizes an input user configuration
+# which can be a comma separated list of
+# user names, or a list of string user names
+# or a list of dictionaries with components
+# that define the user config + 'name' (if
+# a 'name' field does not exist then the
+# default user is assumed to 'own' that
+# configuration).
+#
+# The output is a dictionary of user
+# names => user config which is the standard
+# form used in the rest of cloud-init. Note
+# the default user will have a special config
+# entry 'default' which will be marked as true
+# all other users will be marked as false.
+def _normalize_users(u_cfg, def_user_cfg=None):
+ if isinstance(u_cfg, (dict)):
+ ad_ucfg = []
+ for (k, v) in u_cfg.items():
+ if isinstance(v, (bool, int, basestring, str, float)):
+ if util.is_true(v):
+ ad_ucfg.append(str(k))
+ elif isinstance(v, (dict)):
+ v['name'] = k
+ ad_ucfg.append(v)
+ else:
+ raise TypeError(("Unmappable user value type %s"
+ " for key %s") % (util.obj_name(v), k))
+ u_cfg = ad_ucfg
+ elif isinstance(u_cfg, (str, basestring)):
+ u_cfg = util.uniq_merge_sorted(u_cfg)
+
+ users = {}
+ for user_config in u_cfg:
+ if isinstance(user_config, (str, basestring, list)):
+ for u in util.uniq_merge(user_config):
+ if u and u not in users:
+ users[u] = {}
+ elif isinstance(user_config, (dict)):
+ if 'name' in user_config:
+ n = user_config.pop('name')
+ prev_config = users.get(n) or {}
+ users[n] = util.mergemanydict([prev_config,
+ user_config])
+ else:
+ # Assume the default user then
+ prev_config = users.get('default') or {}
+ users['default'] = util.mergemanydict([prev_config,
+ user_config])
+ else:
+ raise TypeError(("User config must be dictionary/list "
+ " or string types only and not %s") %
+ util.obj_name(user_config))
+
+ # Ensure user options are in the right python friendly format
+ if users:
+ c_users = {}
+ for (uname, uconfig) in users.items():
+ c_uconfig = {}
+ for (k, v) in uconfig.items():
+ k = k.replace('-', '_').strip()
+ if k:
+ c_uconfig[k] = v
+ c_users[uname] = c_uconfig
+ users = c_users
+
+ # Fixup the default user into the real
+ # default user name and replace it...
+ def_user = None
+ if users and 'default' in users:
+ def_config = users.pop('default')
+ if def_user_cfg:
+ # Pickup what the default 'real name' is
+ # and any groups that are provided by the
+ # default config
+ def_user_cfg = def_user_cfg.copy()
+ def_user = def_user_cfg.pop('name')
+ def_groups = def_user_cfg.pop('groups', [])
+ # Pickup any config + groups for that user name
+ # that we may have previously extracted
+ parsed_config = users.pop(def_user, {})
+ parsed_groups = parsed_config.get('groups', [])
+ # Now merge our extracted groups with
+ # anything the default config provided
+ users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
+ parsed_config['groups'] = ",".join(users_groups)
+ # The real config for the default user is the
+ # combination of the default user config provided
+ # by the distro, the default user config provided
+ # by the above merging for the user 'default' and
+ # then the parsed config from the user's 'real name'
+ # which does not have to be 'default' (but could be)
+ users[def_user] = util.mergemanydict([def_user_cfg,
+ def_config,
+ parsed_config])
+
+ # Ensure that only the default user that we
+ # found (if any) is actually marked as being
+ # the default user
+ if users:
+ for (uname, uconfig) in users.items():
+ if def_user and uname == def_user:
+ uconfig['default'] = True
+ else:
+ uconfig['default'] = False
+
+ return users
+
+
+# Normalizes a set of user/users and group
+# dictionary configuration into a usable
+# format that the rest of cloud-init can
+# understand using the default user
+# provided by the input distribution (if any)
+# to allow for mapping of the 'default' user.
+#
+# Output is a dictionary of group names -> [member] (list)
+# and a dictionary of user names -> user configuration (dict)
+#
+# If 'user' exists it will override
+# the 'users'[0] entry (if a list) otherwise it will
+# just become an entry in the returned dictionary (no override)
+def normalize_users_groups(cfg, distro):
+ if not cfg:
+ cfg = {}
+ users = {}
+ groups = {}
+ if 'groups' in cfg:
+ groups = _normalize_groups(cfg['groups'])
+
+ # Handle the previous style of doing this...
+ old_user = None
+ if 'user' in cfg and cfg['user']:
+ old_user = str(cfg['user'])
+ if not 'users' in cfg:
+ cfg['users'] = old_user
+ old_user = None
+ if 'users' in cfg:
+ default_user_config = None
+ try:
+ default_user_config = distro.get_default_user()
+ except NotImplementedError:
+ LOG.warn(("Distro has not implemented default user "
+ "access. No default user will be normalized."))
+ base_users = cfg['users']
+ if old_user:
+ if isinstance(base_users, (list)):
+ if len(base_users):
+ # The old user replaces user[0]
+ base_users[0] = {'name': old_user}
+ else:
+ # Just add it on at the end...
+ base_users.append({'name': old_user})
+ elif isinstance(base_users, (dict)):
+ if old_user not in base_users:
+ base_users[old_user] = True
+ elif isinstance(base_users, (str, basestring)):
+ # Just append it on to be re-parsed later
+ base_users += ",%s" % (old_user)
+ users = _normalize_users(base_users, default_user_config)
+ return (users, groups)
+
+
+# Given a user dictionary config it will
+# extract the default user name and user config
+# from that list and return that tuple or
+# return (None, None) if no default user is
+# found in the given input
+def extract_default(users, default_name=None, default_config=None):
+ if not users:
+ users = {}
+
+ def safe_find(entry):
+ config = entry[1]
+ if not config or 'default' not in config:
+ return False
+ else:
+ return config['default']
+
+ tmp_users = users.items()
+ tmp_users = dict(itertools.ifilter(safe_find, tmp_users))
+ if not tmp_users:
+ return (default_name, default_config)
+ else:
+ name = tmp_users.keys()[0]
+ config = tmp_users[name]
+ config.pop('default', None)
+ return (name, config)
+
+
def fetch(name):
locs = importer.find_module(name,
['', __name__],
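A small sketch of extract_default(), which several modules above use to find the one entry flagged as the default; the dictionaries here are hypothetical:

    users = {
        'ubuntu': {'gecos': 'Ubuntu', 'default': True},
        'alice': {'default': False},
    }
    (name, config) = extract_default(users)
    # name   -> 'ubuntu'
    # config -> {'gecos': 'Ubuntu'}   (the 'default' marker is popped)
    # If nothing is flagged as default, the default_name/default_config
    # arguments (None, None) are returned instead.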
diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py
index f65a820d..c777845d 100644
--- a/cloudinit/distros/fedora.py
+++ b/cloudinit/distros/fedora.py
@@ -28,4 +28,4 @@ LOG = logging.getLogger(__name__)
class Distro(rhel.Distro):
- default_user = 'ec2-user'
+ pass
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 4e697f82..c527f248 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -28,7 +28,4 @@ LOG = logging.getLogger(__name__)
class Distro(debian.Distro):
-
- default_user = 'ubuntu'
- default_user_groups = ("adm,audio,cdrom,dialout,floppy,video,"
- "plugdev,dip,netdev,sudo")
+ pass
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
new file mode 100644
index 00000000..eba5d056
--- /dev/null
+++ b/cloudinit/safeyaml.py
@@ -0,0 +1,32 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import yaml
+
+
+class _CustomSafeLoader(yaml.SafeLoader):
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+_CustomSafeLoader.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ _CustomSafeLoader.construct_python_unicode)
+
+
+def load(blob):
+ return(yaml.load(blob, Loader=_CustomSafeLoader))
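Why the custom loader: on Python 2, yaml.dump() of an ASCII unicode value emits a '!!python/unicode' tag that yaml.SafeLoader refuses to load; the constructor above maps that tag back to a plain scalar (this is exactly what the new test_python_unicode unit test exercises). A minimal sketch:

    import yaml
    from cloudinit import safeyaml

    blob = yaml.dump({'greeting': u'hello'})  # may contain !!python/unicode
    print(safeyaml.load(blob))                # roughly {'greeting': u'hello'}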
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index b8154367..4af2e5ae 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -48,6 +48,7 @@ class DataSourceConfigDrive(sources.DataSource):
self.dsmode = 'local'
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
self.version = None
+ self.ec2_metadata = None
def __str__(self):
mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode,
@@ -55,6 +56,74 @@ class DataSourceConfigDrive(sources.DataSource):
mstr += "[source=%s]" % (self.source)
return mstr
+ def _ec2_name_to_device(self, name):
+ if not self.ec2_metadata:
+ return None
+ bdm = self.ec2_metadata.get('block-device-mapping', {})
+ for (ent_name, device) in bdm.items():
+ if name == ent_name:
+ return device
+ return None
+
+ def _os_name_to_device(self, name):
+ device = None
+ try:
+ criteria = 'LABEL=%s' % (name)
+ if name in ['swap']:
+ criteria = 'TYPE=%s' % (name)
+ dev_entries = util.find_devs_with(criteria)
+ if dev_entries:
+ device = dev_entries[0]
+ except util.ProcessExecutionError:
+ pass
+ return device
+
+ def _validate_device_name(self, device):
+ if not device:
+ return None
+ if not device.startswith("/"):
+ device = "/dev/%s" % device
+ if os.path.exists(device):
+ return device
+ # Durn, try adjusting the mapping
+ remapped = self._remap_device(os.path.basename(device))
+ if remapped:
+ LOG.debug("Remapped device name %s => %s", device, remapped)
+ return remapped
+ return None
+
+ def device_name_to_device(self, name):
+ # Translate a 'name' to a 'physical' device
+ if not name:
+ return None
+ # Try the ec2 mapping first
+ names = [name]
+ if name == 'root':
+ names.insert(0, 'ami')
+ if name == 'ami':
+ names.append('root')
+ device = None
+ LOG.debug("Using ec2 metadata lookup to find device %s", names)
+ for n in names:
+ device = self._ec2_name_to_device(n)
+ device = self._validate_device_name(device)
+ if device:
+ break
+ # Try the openstack way second
+ if not device:
+ LOG.debug("Using os lookup to find device %s", names)
+ for n in names:
+ device = self._os_name_to_device(n)
+ device = self._validate_device_name(device)
+ if device:
+ break
+ # Ok give up...
+ if not device:
+ return None
+ else:
+ LOG.debug("Using cfg drive lookup mapped to device %s", device)
+ return device
+
def get_data(self):
found = None
md = {}
@@ -85,6 +154,16 @@ class DataSourceConfigDrive(sources.DataSource):
md = results['metadata']
md = util.mergedict(md, DEFAULT_METADATA)
+ # Perform some metadata 'fixups'
+ #
+ # OpenStack uses the 'hostname' key
+ # while most of cloud-init uses the metadata
+ # 'local-hostname' key instead so if it doesn't
+ # exist we need to make sure its copied over.
+ for (tgt, src) in [('local-hostname', 'hostname')]:
+ if tgt not in md and src in md:
+ md[tgt] = md[src]
+
user_dsmode = results.get('dsmode', None)
if user_dsmode not in VALID_DSMODES + (None,):
LOG.warn("user specified invalid mode: %s" % user_dsmode)
@@ -133,6 +212,7 @@ class DataSourceConfigDrive(sources.DataSource):
self.source = found
self.metadata = md
+ self.ec2_metadata = results.get('ec2-metadata')
self.userdata_raw = results.get('userdata')
self.version = results['cfgdrive_ver']
@@ -217,7 +297,7 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
('metadata',
"openstack/%s/meta_data.json" % version, True, json.loads),
('userdata', "openstack/%s/user_data" % version, False, None),
- ('ec2-metadata', "ec2/latest/metadata.json", False, json.loads),
+ ('ec2-metadata', "ec2/latest/meta-data.json", False, json.loads),
)
results = {'userdata': None}
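In practice the new lookup works like this (cfg_ds being a DataSourceConfigDrive whose metadata has already been read, as in the tests further down):

    dev = cfg_ds.device_name_to_device('ephemeral0')
    # Tries the ec2 'block-device-mapping' first, then an OpenStack-style
    # LABEL=ephemeral0 lookup via util.find_devs_with() ('swap' uses TYPE=swap).
    # Each candidate is validated, remapping sdX -> vdX/xvdX when needed, and
    # None is returned if nothing resolves to an existing device node.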
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index c7ad6d54..3686fa10 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -151,22 +151,6 @@ class DataSourceEc2(sources.DataSource):
self.metadata_address = url2base.get(url)
return bool(url)
- def _remap_device(self, short_name):
- # LP: #611137
- # the metadata service may believe that devices are named 'sda'
- # when the kernel named them 'vda' or 'xvda'
- # we want to return the correct value for what will actually
- # exist in this instance
- mappings = {"sd": ("vd", "xvd")}
- for (nfrom, tlist) in mappings.iteritems():
- if not short_name.startswith(nfrom):
- continue
- for nto in tlist:
- cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
- if os.path.exists(cand):
- return cand
- return None
-
def device_name_to_device(self, name):
# Consult metadata service, that has
# ephemeral0: sdb
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 581e9a4b..e187aec9 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -145,10 +145,10 @@ class DataSourceMAAS(sources.DataSource):
def _except_cb(self, msg, exception):
if not (isinstance(exception, urllib2.HTTPError) and
- exception.code == 403):
+ (exception.code == 403 or exception.code == 401)):
return
if 'date' not in exception.headers:
- LOG.warn("date field not in 403 headers")
+ LOG.warn("date field not in %d headers" % exception.code)
return
date = exception.headers['date']
@@ -336,9 +336,8 @@ if __name__ == "__main__":
'token_secret': args.tsec, 'consumer_secret': args.csec}
if args.config:
- import yaml
with open(args.config) as fp:
- cfg = yaml.safe_load(fp)
+ cfg = util.load_yaml(fp.read())
if 'datasource' in cfg:
cfg = cfg['datasource']['MAAS']
for key in creds.keys():
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 04083d0c..b22369a8 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -23,6 +23,7 @@
from email.mime.multipart import MIMEMultipart
import abc
+import os
from cloudinit import importer
from cloudinit import log as logging
@@ -128,6 +129,22 @@ class DataSource(object):
return keys
+ def _remap_device(self, short_name):
+ # LP: #611137
+ # the metadata service may believe that devices are named 'sda'
+ # when the kernel named them 'vda' or 'xvda'
+ # we want to return the correct value for what will actually
+ # exist in this instance
+ mappings = {"sd": ("vd", "xvd")}
+ for (nfrom, tlist) in mappings.iteritems():
+ if not short_name.startswith(nfrom):
+ continue
+ for nto in tlist:
+ cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
+ if os.path.exists(cand):
+ return cand
+ return None
+
def device_name_to_device(self, _name):
# translate a 'name' to a device
# the primary function at this point is on ec2
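The _remap_device() helper moved here from DataSourceEc2 covers the case where the metadata service names a disk 'sda2' but the kernel exposed it as vda2 or xvda2 (LP: #611137). Roughly:

    # With only /dev/xvda2 present on the instance:
    #   datasource._remap_device('sda2')  ->  '/dev/xvda2'
    # With no vd*/xvd* counterpart:
    #   datasource._remap_device('sda2')  ->  None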
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 803ffc3a..58827e3d 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -224,7 +224,7 @@ class UserDataProcessor(object):
for header in list(ent.keys()):
if header in ('content', 'filename', 'type', 'launch-index'):
continue
- msg.add_header(header, ent['header'])
+ msg.add_header(header, ent[header])
self._attach_part(append_msg, msg)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 33da73eb..f5a7ac12 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -50,6 +50,7 @@ import yaml
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import safeyaml
from cloudinit import url_helper as uhelp
from cloudinit.settings import (CFG_BUILTIN)
@@ -248,6 +249,36 @@ def read_conf(fname):
raise
+# Merges X lists, and then keeps the
+# unique ones, but orders by sort order
+# instead of by the original order
+def uniq_merge_sorted(*lists):
+ return sorted(uniq_merge(*lists))
+
+
+# Merges X lists and then iterates over those
+# and only keeps the unique items (order preserving)
+# and returns that merged and uniqued list as the
+# final result.
+#
+# Note: if any entry is a string it will be
+# split on commas and empty entries will be
+# evicted and merged in accordingly.
+def uniq_merge(*lists):
+ combined_list = []
+ for a_list in lists:
+ if isinstance(a_list, (str, basestring)):
+ a_list = a_list.strip().split(",")
+ # Kickout the empty ones
+ a_list = [a for a in a_list if len(a)]
+ combined_list.extend(a_list)
+ uniq_list = []
+ for i in combined_list:
+ if i not in uniq_list:
+ uniq_list.append(i)
+ return uniq_list
+
+
def clean_filename(fn):
for (k, v) in FN_REPLACEMENTS.iteritems():
fn = fn.replace(k, v)
@@ -612,7 +643,7 @@ def load_yaml(blob, default=None, allowed=(dict,)):
LOG.debug(("Attempting to load yaml from string "
"of length %s with allowed root types %s"),
len(blob), allowed)
- converted = yaml.safe_load(blob)
+ converted = safeyaml.load(blob)
if not isinstance(converted, allowed):
# Yes this will just be caught, but thats ok for now...
raise TypeError(("Yaml load allows %s root types,"
@@ -1104,6 +1135,22 @@ def hash_blob(blob, routine, mlen=None):
return digest
+def is_user(name):
+ try:
+ if pwd.getpwnam(name):
+ return True
+ except KeyError:
+ return False
+
+
+def is_group(name):
+ try:
+ if grp.getgrnam(name):
+ return True
+ except KeyError:
+ return False
+
+
def rename(src, dest):
LOG.debug("Renaming %s to %s", src, dest)
# TODO(harlowja) use a se guard here??
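A quick sketch of the new merge helpers' behaviour (strings are split on commas, empty items dropped, duplicates removed):

    from cloudinit import util

    util.uniq_merge(['adm', 'audio'], 'adm,,video')
    # -> ['adm', 'audio', 'video']   (order preserving)
    util.uniq_merge_sorted('video', 'adm,audio')
    # -> ['adm', 'audio', 'video']   (sorted)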
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 4599910c..12ff620a 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -20,7 +20,7 @@ from distutils import version as vr
def version():
- return vr.StrictVersion("0.7.0")
+ return vr.StrictVersion("0.7.1")
def version_string():
diff --git a/config/cloud.cfg b/config/cloud.cfg
index b3411d11..f6c9065a 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -1,7 +1,9 @@
# The top level settings are used as module
# and system configuration.
-# Implement for Ubuntu only: create the default 'ubuntu' user
+# A set of users which may be applied and/or used by various modules
+# when a 'default' entry is found it will reference the 'default_user'
+# from the distro configuration specified below
users:
- default
@@ -71,6 +73,12 @@ cloud_final_modules:
system_info:
# This will affect which distro class gets used
distro: ubuntu
+ # Default user name + that default users groups (if added/used)
+ default_user:
+ name: Ubuntu
+ lock_passwd: True
+ gecos: Ubuntu
+ groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev]
# Other config here will be given to the distro class and/or path classes
paths:
cloud_dir: /var/lib/cloud/
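With the default_user block above, Distro.get_default_user() (see distros/__init__.py earlier in this diff) now returns that dictionary rather than a hard-coded name:

    distro.get_default_user()
    # -> {'name': 'Ubuntu', 'lock_passwd': True, 'gecos': 'Ubuntu',
    #     'groups': ['adm', 'audio', 'cdrom', ...]}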
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index 1da0d717..de5f321b 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -1,11 +1,11 @@
-# add groups to the system
+# Add groups to the system
# The following example adds the ubuntu group with members foo and bar and
# the group cloud-users.
groups:
- ubuntu: [foo,bar]
- cloud-users
-# add users to the system. Users are added after groups are added.
+# Add users to the system. Users are added after groups are added.
users:
- default
- name: foobar
@@ -81,14 +81,29 @@ users:
# directive.
# system: Create the user as a system user. This means no home directory.
#
-# Default user creation: Ubuntu Only
-# Unless you define users, you will get a Ubuntu user on Ubuntu systems with the
+
+# Default user creation:
+#
+# Unless you define users, you will get a 'ubuntu' user on ubuntu systems with the
# legacy permission (no password sudo, locked user, etc). If however, you want
-# to have the ubuntu user in addition to other users, you need to instruct
+# to have the 'ubuntu' user in addition to other users, you need to instruct
# cloud-init that you also want the default user. To do this use the following
# syntax:
# users:
-# default: True
+# - default
+# - bob
+# - ....
# foobar: ...
#
# users[0] (the first user in users) overrides the user directive.
+#
+# The 'default' user above references the distro's config:
+# system_info:
+# default_user:
+# name: Ubuntu
+# plain_text_passwd: 'ubuntu'
+# home: /home/ubuntu
+# shell: /bin/bash
+# lock_passwd: True
+# gecos: Ubuntu
+# groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev]
diff --git a/packages/brpm b/packages/brpm
index 77de0cf2..e6b03609 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -58,8 +58,7 @@ def get_log_header(version):
a_rev = rev
break
if not a_rev:
- return format_change_line(datetime.now(),
- '??', version)
+ return None
# Extract who made that tag as the header
cmd = ['bzr', 'log', '-r%s' % (a_rev), '--timezone=utc']
@@ -91,7 +90,7 @@ def format_change_line(ds, who, comment=None):
return "* %s" % (d)
-def generate_spec_contents(args, tmpl_fn, arc_fn):
+def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn):
# Figure out the version and revno
cmd = [util.abs_join(find_root(), 'tools', 'read-version')]
@@ -107,6 +106,10 @@ def generate_spec_contents(args, tmpl_fn, arc_fn):
subs['version'] = version
subs['revno'] = revno
subs['release'] = "bzr%s" % (revno)
+ if args.sub_release is not None:
+ subs['subrelease'] = "." + str(args.sub_release)
+ else:
+ subs['subrelease'] = ''
subs['archive_name'] = arc_fn
cmd = [util.abs_join(find_root(), 'tools', 'read-dependencies')]
@@ -127,13 +130,23 @@ def generate_spec_contents(args, tmpl_fn, arc_fn):
# Format a nice changelog (as best as we can)
changelog = util.load_file(util.abs_join(find_root(), 'ChangeLog'))
changelog_lines = []
+ missing_versions = 0
for line in changelog.splitlines():
if not line.strip():
continue
if re.match(r"^\s*[\d][.][\d][.][\d]:\s*", line):
line = line.strip(":")
header = get_log_header(line)
- changelog_lines.append(header)
+ if not header:
+ missing_versions += 1
+ if missing_versions == 1:
+ # Must be using a new 'dev'/'trunk' release
+ changelog_lines.append(format_change_line(datetime.now(), '??'))
+ else:
+ sys.stderr.write(("Changelog version line %s "
+ "does not have a corresponding tag!\n") % (line))
+ else:
+ changelog_lines.append(header)
else:
changelog_lines.append(line)
subs['changelog'] = "\n".join(changelog_lines)
@@ -148,7 +161,9 @@ def generate_spec_contents(args, tmpl_fn, arc_fn):
else:
subs['systemd'] = False
+ subs['defines'] = ["_topdir %s" % (top_dir)]
subs['init_sys'] = args.boot
+ subs['patches'] = [os.path.basename(p) for p in args.patches]
return templater.render_from_file(tmpl_fn, params=subs)
@@ -164,6 +179,17 @@ def main():
" (default: %(default)s)"),
default=False,
action='store_true')
+ parser.add_argument('-s', "--sub-release", dest="sub_release",
+ metavar="RELEASE",
+ help=("a 'internal' release number to concat"
+ " with the bzr version number to form"
+ " the final version number"),
+ type=int,
+ default=None)
+ parser.add_argument("-p", "--patch", dest="patches",
+ help=("include the following patch when building"),
+ default=[],
+ action='append')
args = parser.parse_args()
capture = True
if args.verbose:
@@ -192,16 +218,17 @@ def main():
# Form the spec file to be used
tmpl_fn = util.abs_join(find_root(), 'packages',
'redhat', 'cloud-init.spec.in')
- contents = generate_spec_contents(args, tmpl_fn,
+ contents = generate_spec_contents(args, tmpl_fn, root_dir,
os.path.basename(archive_fn))
spec_fn = util.abs_join(root_dir, 'cloud-init.spec')
util.write_file(spec_fn, contents)
print("Created spec file at %r" % (spec_fn))
+ for p in args.patches:
+ util.copy(p, util.abs_join(arc_dir, os.path.basename(p)))
# Now build it!
print("Running 'rpmbuild' in %r" % (root_dir))
- cmd = ['rpmbuild', '--clean',
- '-ba', spec_fn]
+ cmd = ['rpmbuild', '-ba', spec_fn]
util.subp(cmd, capture=capture)
# Copy the items built to our local dir
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 35b27beb..30bcd050 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -5,9 +5,13 @@
# Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets
# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
+#for $d in $defines
+%define ${d}
+#end for
+
Name: cloud-init
Version: ${version}
-Release: ${release}%{?dist}
+Release: ${release}${subrelease}%{?dist}
Summary: Cloud instance init scripts
Group: System Environment/Base
@@ -36,6 +40,13 @@ Requires: shadow-utils
Requires: ${r}
#end for
+# Custom patches
+#set $size = 0
+#for $p in $patches
+Patch${size}: $p
+#set $size += 1
+#end for
+
#if $sysvinit
Requires(post): chkconfig
Requires(postun): initscripts
@@ -58,6 +69,13 @@ ssh keys and to let the user run various scripts.
%prep
%setup -q -n %{name}-%{version}~${release}
+# Custom patches activation
+#set $size = 0
+#for $p in $patches
+%patch${size} -p1
+#set $size += 1
+#end for
+
%build
%{__python} setup.py build
diff --git a/sysvinit/cloud-init b/sysvinit/cloud-init
index 4b44a615..f8ab5d5f 100755
--- a/sysvinit/cloud-init
+++ b/sysvinit/cloud-init
@@ -25,8 +25,8 @@
### BEGIN INIT INFO
# Provides: cloud-init
-# Required-Start: $local_fs $network $named $remote_fs
-# Should-Start: $time cloud-init-local
+# Required-Start: $local_fs $network $named $remote_fs cloud-init-local
+# Should-Start: $time
# Required-Stop:
# Should-Stop:
# Default-Start: 3 5
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 55573114..00379e03 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -2,10 +2,12 @@ from copy import copy
import json
import os
import os.path
-import shutil
-import tempfile
-from unittest import TestCase
+import mocker
+from mocker import MockerTestCase
+
+from cloudinit import helpers
+from cloudinit import settings
from cloudinit.sources import DataSourceConfigDrive as ds
from cloudinit import util
@@ -60,17 +62,140 @@ CFG_DRIVE_FILES_V2 = {
'openstack/latest/user_data': USER_DATA}
-class TestConfigDriveDataSource(TestCase):
+class TestConfigDriveDataSource(MockerTestCase):
def setUp(self):
super(TestConfigDriveDataSource, self).setUp()
- self.tmp = tempfile.mkdtemp()
+ self.tmp = self.makeDir()
- def tearDown(self):
- try:
- shutil.rmtree(self.tmp)
- except OSError:
- pass
+ def test_ec2_metadata(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ found = ds.read_config_drive_dir(self.tmp)
+ self.assertTrue('ec2-metadata' in found)
+ ec2_md = found['ec2-metadata']
+ self.assertEqual(EC2_META, ec2_md)
+
+ def test_dev_os_remap(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+ None,
+ helpers.Paths({}))
+ found = ds.read_config_drive_dir(self.tmp)
+ cfg_ds.metadata = found['metadata']
+ name_tests = {
+ 'ami': '/dev/vda1',
+ 'root': '/dev/vda1',
+ 'ephemeral0': '/dev/vda2',
+ 'swap': '/dev/vda3',
+ }
+ for name, dev_name in name_tests.items():
+ my_mock = mocker.Mocker()
+ find_mock = my_mock.replace(util.find_devs_with,
+ spec=False, passthrough=False)
+ provided_name = dev_name[len('/dev/'):]
+ provided_name = "s" + provided_name[1:]
+ find_mock(mocker.ARGS)
+ my_mock.result([provided_name])
+ exists_mock = my_mock.replace(os.path.exists,
+ spec=False, passthrough=False)
+ exists_mock(mocker.ARGS)
+ my_mock.result(False)
+ exists_mock(mocker.ARGS)
+ my_mock.result(True)
+ my_mock.replay()
+ device = cfg_ds.device_name_to_device(name)
+ my_mock.restore()
+ self.assertEquals(dev_name, device)
+
+ def test_dev_os_map(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+ None,
+ helpers.Paths({}))
+ found = ds.read_config_drive_dir(self.tmp)
+ os_md = found['metadata']
+ cfg_ds.metadata = os_md
+ name_tests = {
+ 'ami': '/dev/vda1',
+ 'root': '/dev/vda1',
+ 'ephemeral0': '/dev/vda2',
+ 'swap': '/dev/vda3',
+ }
+ for name, dev_name in name_tests.items():
+ my_mock = mocker.Mocker()
+ find_mock = my_mock.replace(util.find_devs_with,
+ spec=False, passthrough=False)
+ find_mock(mocker.ARGS)
+ my_mock.result([dev_name])
+ exists_mock = my_mock.replace(os.path.exists,
+ spec=False, passthrough=False)
+ exists_mock(mocker.ARGS)
+ my_mock.result(True)
+ my_mock.replay()
+ device = cfg_ds.device_name_to_device(name)
+ my_mock.restore()
+ self.assertEquals(dev_name, device)
+
+ def test_dev_ec2_remap(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+ None,
+ helpers.Paths({}))
+ found = ds.read_config_drive_dir(self.tmp)
+ ec2_md = found['ec2-metadata']
+ os_md = found['metadata']
+ cfg_ds.ec2_metadata = ec2_md
+ cfg_ds.metadata = os_md
+ name_tests = {
+ 'ami': '/dev/vda1',
+ 'root': '/dev/vda1',
+ 'ephemeral0': '/dev/vda2',
+ 'swap': '/dev/vda3',
+ None: None,
+ 'bob': None,
+ 'root2k': None,
+ }
+ for name, dev_name in name_tests.items():
+ my_mock = mocker.Mocker()
+ exists_mock = my_mock.replace(os.path.exists,
+ spec=False, passthrough=False)
+ exists_mock(mocker.ARGS)
+ my_mock.result(False)
+ exists_mock(mocker.ARGS)
+ my_mock.result(True)
+ my_mock.replay()
+ device = cfg_ds.device_name_to_device(name)
+ self.assertEquals(dev_name, device)
+ my_mock.restore()
+
+ def test_dev_ec2_map(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+ None,
+ helpers.Paths({}))
+ found = ds.read_config_drive_dir(self.tmp)
+ exists_mock = self.mocker.replace(os.path.exists,
+ spec=False, passthrough=False)
+ exists_mock(mocker.ARGS)
+ self.mocker.count(0, None)
+ self.mocker.result(True)
+ self.mocker.replay()
+ ec2_md = found['ec2-metadata']
+ os_md = found['metadata']
+ cfg_ds.ec2_metadata = ec2_md
+ cfg_ds.metadata = os_md
+ name_tests = {
+ 'ami': '/dev/sda1',
+ 'root': '/dev/sda1',
+ 'ephemeral0': '/dev/sda2',
+ 'swap': '/dev/sda3',
+ None: None,
+ 'bob': None,
+ 'root2k': None,
+ }
+ for name, dev_name in name_tests.items():
+ device = cfg_ds.device_name_to_device(name)
+ self.assertEquals(dev_name, device)
def test_dir_valid(self):
"""Verify a dir is read as such."""
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
new file mode 100644
index 00000000..8f0d8896
--- /dev/null
+++ b/tests/unittests/test_distros/test_user_data_normalize.py
@@ -0,0 +1,279 @@
+from mocker import MockerTestCase
+
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import settings
+
+bcfg = {
+ 'name': 'bob',
+ 'plain_text_passwd': 'ubuntu',
+ 'home': "/home/ubuntu",
+ 'shell': "/bin/bash",
+ 'lock_passwd': True,
+ 'gecos': "Ubuntu",
+ 'groups': ["foo"]
+}
+
+
+class TestUGNormalize(MockerTestCase):
+
+ def _make_distro(self, dtype, def_user=None):
+ cfg = dict(settings.CFG_BUILTIN)
+ cfg['system_info']['distro'] = dtype
+ paths = helpers.Paths(cfg['system_info']['paths'])
+ distro_cls = distros.fetch(dtype)
+ if def_user:
+ cfg['system_info']['default_user'] = def_user.copy()
+ distro = distro_cls(dtype, cfg['system_info'], paths)
+ return distro
+
+ def _norm(self, cfg, distro):
+ return distros.normalize_users_groups(cfg, distro)
+
+ def test_basic_groups(self):
+ distro = self._make_distro('ubuntu')
+ ug_cfg = {
+ 'groups': ['bob'],
+ }
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', groups)
+ self.assertEquals({}, users)
+
+ def test_csv_groups(self):
+ distro = self._make_distro('ubuntu')
+ ug_cfg = {
+ 'groups': 'bob,joe,steve',
+ }
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', groups)
+ self.assertIn('joe', groups)
+ self.assertIn('steve', groups)
+ self.assertEquals({}, users)
+
+ def test_more_groups(self):
+ distro = self._make_distro('ubuntu')
+ ug_cfg = {
+ 'groups': ['bob', 'joe', 'steve']
+ }
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', groups)
+ self.assertIn('joe', groups)
+ self.assertIn('steve', groups)
+ self.assertEquals({}, users)
+
+ def test_member_groups(self):
+ distro = self._make_distro('ubuntu')
+ ug_cfg = {
+ 'groups': {
+ 'bob': ['s'],
+ 'joe': [],
+ 'steve': [],
+ }
+ }
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', groups)
+ self.assertEquals(['s'], groups['bob'])
+ self.assertEquals([], groups['joe'])
+ self.assertIn('joe', groups)
+ self.assertIn('steve', groups)
+ self.assertEquals({}, users)
+
+ def test_users_simple_dict(self):
+ distro = self._make_distro('ubuntu', bcfg)
+ ug_cfg = {
+ 'users': {
+ 'default': True,
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', users)
+ ug_cfg = {
+ 'users': {
+ 'default': 'yes',
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', users)
+ ug_cfg = {
+ 'users': {
+ 'default': '1',
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', users)
+
+ def test_users_simple_dict_no(self):
+ distro = self._make_distro('ubuntu', bcfg)
+ ug_cfg = {
+ 'users': {
+ 'default': False,
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertEquals({}, users)
+ ug_cfg = {
+ 'users': {
+ 'default': 'no',
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertEquals({}, users)
+
+ def test_users_simple_csv(self):
+ distro = self._make_distro('ubuntu')
+ ug_cfg = {
+ 'users': 'joe,bob',
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('joe', users)
+ self.assertIn('bob', users)
+ self.assertEquals({'default': False}, users['joe'])
+ self.assertEquals({'default': False}, users['bob'])
+
+ def test_users_simple(self):
+ distro = self._make_distro('ubuntu')
+ ug_cfg = {
+ 'users': [
+ 'joe',
+ 'bob'
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('joe', users)
+ self.assertIn('bob', users)
+ self.assertEquals({'default': False}, users['joe'])
+ self.assertEquals({'default': False}, users['bob'])
+
+ def test_users_old_user(self):
+ distro = self._make_distro('ubuntu', bcfg)
+ ug_cfg = {
+ 'user': 'zetta',
+ 'users': 'default'
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', users)
+ self.assertIn('zetta', users)
+ self.assertNotIn('default', users)
+ ug_cfg = {
+ 'user': 'zetta',
+ 'users': 'default, joe'
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', users)
+ self.assertIn('joe', users)
+ self.assertIn('zetta', users)
+ self.assertNotIn('default', users)
+ ug_cfg = {
+ 'user': 'zetta',
+ 'users': ['bob', 'joe']
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertNotIn('bob', users)
+ self.assertIn('joe', users)
+ self.assertIn('zetta', users)
+ ug_cfg = {
+ 'user': 'zetta',
+ 'users': {
+ 'bob': True,
+ 'joe': True,
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', users)
+ self.assertIn('joe', users)
+ self.assertIn('zetta', users)
+ ug_cfg = {
+ 'user': 'zetta',
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('zetta', users)
+ ug_cfg = {}
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertEquals({}, users)
+ self.assertEquals({}, groups)
+
+ def test_users_dict_default_additional(self):
+ distro = self._make_distro('ubuntu', bcfg)
+ ug_cfg = {
+ 'users': [
+ {'name': 'default', 'blah': True}
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', users)
+ self.assertEquals(",".join(distro.get_default_user()['groups']),
+ users['bob']['groups'])
+ self.assertEquals(True,
+ users['bob']['blah'])
+ self.assertEquals(True,
+ users['bob']['default'])
+
+ def test_users_dict_extract(self):
+ distro = self._make_distro('ubuntu', bcfg)
+ ug_cfg = {
+ 'users': [
+ 'default',
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', users)
+ (name, config) = distros.extract_default(users)
+ self.assertEquals(name, 'bob')
+ expected_config = {}
+ def_config = None
+ try:
+ def_config = distro.get_default_user()
+ except NotImplementedError:
+ pass
+ if not def_config:
+ def_config = {}
+ expected_config.update(def_config)
+
+ # Ignore these for now
+ expected_config.pop('name', None)
+ expected_config.pop('groups', None)
+ config.pop('groups', None)
+ self.assertEquals(config, expected_config)
+
+ def test_users_dict_default(self):
+ distro = self._make_distro('ubuntu', bcfg)
+ ug_cfg = {
+ 'users': [
+ 'default',
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('bob', users)
+ self.assertEquals(",".join(distro.get_default_user()['groups']),
+ users['bob']['groups'])
+ self.assertEquals(True,
+ users['bob']['default'])
+
+ def test_users_dict_trans(self):
+ distro = self._make_distro('ubuntu')
+ ug_cfg = {
+ 'users': [
+ {'name': 'joe',
+ 'tr-me': True},
+ {'name': 'bob'},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('joe', users)
+ self.assertIn('bob', users)
+ self.assertEquals({'tr_me': True, 'default': False}, users['joe'])
+ self.assertEquals({'default': False}, users['bob'])
+
+ def test_users_dict(self):
+ distro = self._make_distro('ubuntu')
+ ug_cfg = {
+ 'users': [
+ {'name': 'joe'},
+ {'name': 'bob'},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn('joe', users)
+ self.assertIn('bob', users)
+ self.assertEquals({'default': False}, users['joe'])
+ self.assertEquals({'default': False}, users['bob'])
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 15fcbd26..96962b91 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,5 +1,6 @@
import os
import stat
+import yaml
from mocker import MockerTestCase
from unittest import TestCase
@@ -268,4 +269,42 @@ class TestGetCmdline(TestCase):
os.environ['DEBUG_PROC_CMDLINE'] = 'abcd 123'
self.assertEqual(os.environ['DEBUG_PROC_CMDLINE'], util.get_cmdline())
+
+class TestLoadYaml(TestCase):
+ mydefault = "7b03a8ebace993d806255121073fed52"
+
+ def test_simple(self):
+ mydata = {'1': "one", '2': "two"}
+ self.assertEqual(util.load_yaml(yaml.dump(mydata)), mydata)
+
+ def test_nonallowed_returns_default(self):
+ # for now, anything not in the allowed list just returns the default.
+ myyaml = yaml.dump({'1': "one"})
+ self.assertEqual(util.load_yaml(blob=myyaml,
+ default=self.mydefault,
+ allowed=(str,)),
+ self.mydefault)
+
+ def test_bogus_returns_default(self):
+ badyaml = "1\n 2:"
+ self.assertEqual(util.load_yaml(blob=badyaml,
+ default=self.mydefault),
+ self.mydefault)
+
+ def test_unsafe_types(self):
+ # should not load complex types
+ unsafe_yaml = yaml.dump((1, 2, 3,))
+ self.assertEqual(util.load_yaml(blob=unsafe_yaml,
+ default=self.mydefault),
+ self.mydefault)
+
+ def test_python_unicode(self):
+ # complex type of python/unicde is explicitly allowed
+ myobj = {'1': unicode("FOOBAR")}
+ safe_yaml = yaml.dump(myobj)
+ self.assertEqual(util.load_yaml(blob=safe_yaml,
+ default=self.mydefault),
+ myobj)
+
+
# vi: ts=4 expandtab
diff --git a/tools/make-dist-tarball b/tools/make-dist-tarball
index 622283bd..7742caea 100755
--- a/tools/make-dist-tarball
+++ b/tools/make-dist-tarball
@@ -9,7 +9,7 @@ Usage: ${0##*/} version
EOF
}
-topdir="../$PWD"
+topdir="$PWD"
tag=${1}
[ -n "$tag" ] || { Usage 1>&2 ; exit 1; }