diff options
54 files changed, 2726 insertions, 481 deletions
| @@ -1,6 +1,15 @@  0.7.3:   - fix omnibus chef installer (LP: #1182265) [Chris Wing]   - small fix for OVF datasource for iso transport on non-iso9660 filesystem + - determine if upstart version is suitable for +   'initctl reload-configuration' (LP: #1124384).  If so, then invoke it. + - add Azure datasource. + - add support for SuSE / SLES [Juerg Haefliger] + - add a trailing carriage return to chpasswd input, which reportedly +   caused a problem on rhel5 if missing. + - support individual MIME segments to be gzip compressed (LP: #1203203) + - always finalize handlers even if processing failed (LP: #1203368) + - support merging into cloud-config via jsonp. (LP: #1200476)  0.7.2:   - add a debian watch file   - add 'sudo' entry to ubuntu's default user (LP: #1080717) @@ -8,6 +8,10 @@ YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )  CHANGELOG_VERSION=$(shell $(CWD)/tools/read-version)  CODE_VERSION=$(shell python -c "from cloudinit import version; print version.version_string()") +ifeq ($(distro),) +  distro = redhat +endif +  all: test check_version  pep8: @@ -24,9 +28,9 @@ test:  check_version:  	@if [ "$(CHANGELOG_VERSION)" != "$(CODE_VERSION)" ]; then \ -        echo "Error: ChangeLog version $(CHANGELOG_VERSION)" \ -        	  "not equal to code version $(CODE_VERSION)"; exit 2; \ -    else true; fi +	    echo "Error: ChangeLog version $(CHANGELOG_VERSION)" \ +	    "not equal to code version $(CODE_VERSION)"; exit 2; \ +	    else true; fi  2to3:  	2to3 $(PY_FILES) @@ -37,9 +41,9 @@ clean:  yaml:  	@$(CWD)/tools/validate-yaml.py $(YAML_FILES) -		    +  rpm: -	./packages/brpm +	./packages/brpm --distro $(distro)  deb:  	./packages/bddeb @@ -27,3 +27,6 @@ requests  # Boto for ec2  boto + +# For patching pieces of cloud-config together +jsonpatch diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 896cb4d0..3ac22967 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -1,7 
+1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2009-2011 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  #  #    Author: Scott Moser <scott.moser@canonical.com>  #    Author: Juerg Haefliger <juerg.haefliger@hp.com> @@ -50,6 +50,5 @@ def handle(name, cfg, cloud, log, _args):              cmd = ['/bin/sh', tmpf.name]              util.subp(cmd, env=env, capture=False)          except: -            util.logexc(log, -                        ("Failed to run bootcmd module %s"), name) +            util.logexc(log, "Failed to run bootcmd module %s", name)              raise diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index b6e1fd37..4f8c8f80 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -1,8 +1,10 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2011 Canonical Ltd. +#    Copyright (C) 2013 Hewlett-Packard Development Company, L.P.  
#  #    Author: Scott Moser <scott.moser@canonical.com> +#    Author: Juerg Haefliger <juerg.haefliger@hp.com>  #  #    This program is free software: you can redistribute it and/or modify  #    it under the terms of the GNU General Public License version 3, as @@ -121,15 +123,15 @@ class ResizeGrowPart(object):              util.subp(["growpart", '--dry-run', diskdev, partnum])          except util.ProcessExecutionError as e:              if e.exit_code != 1: -                util.logexc(LOG, ("Failed growpart --dry-run for (%s, %s)" % -                                  (diskdev, partnum))) +                util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", +                            diskdev, partnum)                  raise ResizeFailedException(e)              return (before, before)          try:              util.subp(["growpart", diskdev, partnum])          except util.ProcessExecutionError as e: -            util.logexc(LOG, "Failed: growpart %s %s" % (diskdev, partnum)) +            util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)              raise ResizeFailedException(e)          return (before, get_size(partdev)) diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index c873c8a8..2e058ccd 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2011 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  
#  #    Author: Scott Moser <scott.moser@canonical.com>  #    Author: Juerg Haefliger <juerg.haefliger@hp.com> @@ -65,8 +65,8 @@ def handle(name, cfg, cloud, log, args):          tries = int(tries)      except:          tries = 10 -        util.logexc(log, ("Configuration entry 'tries'" -                          " is not an integer, using %s instead"), tries) +        util.logexc(log, "Configuration entry 'tries' is not an integer, " +                    "using %s instead", tries)      if post_list == "all":          post_list = POST_LIST_ALL @@ -85,8 +85,8 @@ def handle(name, cfg, cloud, log, args):          try:              all_keys[n] = util.load_file(path)          except: -            util.logexc(log, ("%s: failed to open, can not" -                              " phone home that data!"), path) +            util.logexc(log, "%s: failed to open, can not phone home that " +                        "data!", path)      submit_keys = {}      for k in post_list: @@ -115,5 +115,5 @@ def handle(name, cfg, cloud, log, args):                                retries=tries, sec_between=3,                                ssl_details=util.fetch_ssl_details(cloud.paths))      except: -        util.logexc(log, ("Failed to post phone home data to" -                          " %s in %s tries"), url, tries) +        util.logexc(log, "Failed to post phone home data to %s in %s tries", +                    url, tries) diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index 8a460f7e..879b62b1 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -1,8 +1,10 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2013 Craig Tracey +#    Copyright (C) 2013 Hewlett-Packard Development Company, L.P.  
#  #    Author: Craig Tracey <craigtracey@gmail.com> +#    Author: Juerg Haefliger <juerg.haefliger@hp.com>  #  #    This program is free software: you can redistribute it and/or modify  #    it under the terms of the GNU General Public License version 3, as @@ -53,7 +55,7 @@ from cloudinit import util  frequency = PER_INSTANCE -distros = ['fedora', 'rhel'] +distros = ['fedora', 'rhel', 'sles']  def generate_resolv_conf(cloud, log, params): diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py index 4bf18516..c771728d 100644 --- a/cloudinit/config/cc_rightscale_userdata.py +++ b/cloudinit/config/cc_rightscale_userdata.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2011 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  #  #    Author: Scott Moser <scott.moser@canonical.com>  #    Author: Juerg Haefliger <juerg.haefliger@hp.com> @@ -64,8 +64,8 @@ def handle(name, _cfg, cloud, log, _args):                         " raw userdata"), name, MY_HOOKNAME)              return      except: -        util.logexc(log, ("Failed to parse query string %s" -                           " into a dictionary"), ud) +        util.logexc(log, "Failed to parse query string %s into a dictionary", +                    ud)          raise      wrote_fns = [] @@ -86,8 +86,8 @@ def handle(name, _cfg, cloud, log, _args):                  wrote_fns.append(fname)          except Exception as e:              captured_excps.append(e) -            util.logexc(log, "%s failed to read %s and write %s", -                        MY_NAME, url, fname) +            util.logexc(log, "%s failed to read %s and write %s", MY_NAME, url, +                        fname)      if wrote_fns:          log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns)) diff --git a/cloudinit/config/cc_set_hostname.py 
b/cloudinit/config/cc_set_hostname.py index 2b32fc94..5d7f4331 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2011 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  #  #    Author: Scott Moser <scott.moser@canonical.com>  #    Author: Juerg Haefliger <juerg.haefliger@hp.com> @@ -32,6 +32,6 @@ def handle(name, cfg, cloud, log, _args):          log.debug("Setting the hostname to %s (%s)", fqdn, hostname)          cloud.distro.set_hostname(hostname, fqdn)      except Exception: -        util.logexc(log, "Failed to set the hostname to %s (%s)", -                    fqdn, hostname) +        util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn, +                    hostname)          raise diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index c6bf62fd..56a36906 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2009-2010 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  
#  #    Author: Scott Moser <scott.moser@canonical.com>  #    Author: Juerg Haefliger <juerg.haefliger@hp.com> @@ -75,14 +75,14 @@ def handle(_name, cfg, cloud, log, args):              plist_in.append("%s:%s" % (u, p))              users.append(u) -        ch_in = '\n'.join(plist_in) +        ch_in = '\n'.join(plist_in) + '\n'          try:              log.debug("Changing password for %s:", users)              util.subp(['chpasswd'], ch_in)          except Exception as e:              errors.append(e) -            util.logexc(log, -                        "Failed to set passwords with chpasswd for %s", users) +            util.logexc(log, "Failed to set passwords with chpasswd for %s", +                        users)          if len(randlist):              blurb = ("Set the following 'random' passwords\n", diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 7ef20d9f..64a5e3cb 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2009-2010 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  
#  #    Author: Scott Moser <scott.moser@canonical.com>  #    Author: Juerg Haefliger <juerg.haefliger@hp.com> @@ -85,8 +85,8 @@ def handle(_name, cfg, cloud, log, _args):                      util.subp(cmd, capture=False)                  log.debug("Generated a key for %s from %s", pair[0], pair[1])              except: -                util.logexc(log, ("Failed generated a key" -                                  " for %s from %s"), pair[0], pair[1]) +                util.logexc(log, "Failed generated a key for %s from %s", +                            pair[0], pair[1])      else:          # if not, generate them          genkeys = util.get_cfg_option_list(cfg, @@ -102,8 +102,8 @@ def handle(_name, cfg, cloud, log, _args):                      with util.SeLinuxGuard("/etc/ssh", recursive=True):                          util.subp(cmd, capture=False)                  except: -                    util.logexc(log, ("Failed generating key type" -                                      " %s to file %s"), keytype, keyfile) +                    util.logexc(log, "Failed generating key type %s to " +                                "file %s", keytype, keyfile)      try:          (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index 83af36e9..50d96e15 100644 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2009-2010 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  
#  #    Author: Scott Moser <scott.moser@canonical.com>  #    Author: Juerg Haefliger <juerg.haefliger@hp.com> @@ -71,8 +71,8 @@ def handle(_name, cfg, cloud, log, args):          try:              import_ssh_ids(import_ids, user, log)          except Exception as exc: -            util.logexc(log, "ssh-import-id failed for: %s %s" % -                            (user, import_ids), exc) +            util.logexc(log, "ssh-import-id failed for: %s %s", user, +                        import_ids)              elist.append(exc)      if len(elist): diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index 52225cd8..e396ba13 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2011 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  #  #    Author: Scott Moser <scott.moser@canonical.com>  #    Author: Juerg Haefliger <juerg.haefliger@hp.com> @@ -38,6 +38,6 @@ def handle(name, cfg, cloud, log, _args):          log.debug("Updating hostname to %s (%s)", fqdn, hostname)          cloud.distro.update_hostname(hostname, fqdn, prev_fn)      except Exception: -        util.logexc(log, "Failed to update the hostname to %s (%s)", -                    fqdn, hostname) +        util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn, +                    hostname)          raise diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 50d52594..249e1b19 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2012 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  #    Copyright (C) 2012 Yahoo! Inc.  
#  #    Author: Scott Moser <scott.moser@canonical.com> @@ -38,7 +38,8 @@ from cloudinit.distros.parsers import hosts  OSFAMILIES = {      'debian': ['debian', 'ubuntu'], -    'redhat': ['fedora', 'rhel'] +    'redhat': ['fedora', 'rhel'], +    'suse': ['sles']  }  LOG = logging.getLogger(__name__) @@ -142,8 +143,8 @@ class Distro(object):          try:              util.subp(['hostname', hostname])          except util.ProcessExecutionError: -            util.logexc(LOG, ("Failed to non-persistently adjust" -                              " the system hostname to %s"), hostname) +            util.logexc(LOG, "Failed to non-persistently adjust the system " +                        "hostname to %s", hostname)      @abc.abstractmethod      def _select_hostname(self, hostname, fqdn): @@ -200,8 +201,8 @@ class Distro(object):              try:                  self._write_hostname(hostname, fn)              except IOError: -                util.logexc(LOG, "Failed to write hostname %s to %s", -                            hostname, fn) +                util.logexc(LOG, "Failed to write hostname %s to %s", hostname, +                            fn)          if (sys_hostname and prev_hostname and              sys_hostname != prev_hostname): @@ -281,15 +282,16 @@ class Distro(object):      def get_default_user(self):          return self.get_option('default_user') -    def create_user(self, name, **kwargs): +    def add_user(self, name, **kwargs):          """ -            Creates users for the system using the GNU passwd tools. This -            will work on an GNU system. This should be overriden on -            distros where useradd is not desirable or not available. +        Add a user to the system using standard GNU tools          """ +        if util.is_user(name): +            LOG.info("User %s already exists, skipping." 
% name) +            return          adduser_cmd = ['useradd', name] -        x_adduser_cmd = ['useradd', name] +        log_adduser_cmd = ['useradd', name]          # Since we are creating users, we want to carefully validate the          # inputs. If something goes wrong, we can end up with a system @@ -306,63 +308,65 @@ class Distro(object):              "selinux_user": '--selinux-user',          } -        adduser_opts_flags = { +        adduser_flags = {              "no_user_group": '--no-user-group',              "system": '--system',              "no_log_init": '--no-log-init', -            "no_create_home": "-M",          } -        redact_fields = ['passwd'] +        redact_opts = ['passwd'] + +        # Check the values and create the command +        for key, val in kwargs.iteritems(): + +            if key in adduser_opts and val and isinstance(val, str): +                adduser_cmd.extend([adduser_opts[key], val]) -        # Now check the value and create the command -        for option in kwargs: -            value = kwargs[option] -            if option in adduser_opts and value \ -                and isinstance(value, str): -                adduser_cmd.extend([adduser_opts[option], value]) -                # Redact certain fields from the logs -                if option in redact_fields: -                    x_adduser_cmd.extend([adduser_opts[option], 'REDACTED']) -                else: -                    x_adduser_cmd.extend([adduser_opts[option], value]) -            elif option in adduser_opts_flags and value: -                adduser_cmd.append(adduser_opts_flags[option])                  # Redact certain fields from the logs -                if option in redact_fields: -                    x_adduser_cmd.append('REDACTED') +                if key in redact_opts: +                    log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])                  else: -                    x_adduser_cmd.append(adduser_opts_flags[option]) +            
        log_adduser_cmd.extend([adduser_opts[key], val]) -        # Default to creating home directory unless otherwise directed -        #  Also, we do not create home directories for system users. -        if "no_create_home" not in kwargs and "system" not in kwargs: -            adduser_cmd.append('-m') +            elif key in adduser_flags and val: +                adduser_cmd.append(adduser_flags[key]) +                log_adduser_cmd.append(adduser_flags[key]) -        # Create the user -        if util.is_user(name): -            LOG.warn("User %s already exists, skipping." % name) +        # Don't create the home directory if directed so or if the user is a +        # system user +        if 'no_create_home' in kwargs or 'system' in kwargs: +            adduser_cmd.append('-M') +            log_adduser_cmd.append('-M')          else: -            LOG.debug("Adding user named %s", name) -            try: -                util.subp(adduser_cmd, logstring=x_adduser_cmd) -            except Exception as e: -                util.logexc(LOG, "Failed to create user %s due to error.", e) -                raise e +            adduser_cmd.append('-m') +            log_adduser_cmd.append('-m') + +        # Run the command +        LOG.debug("Adding user %s", name) +        try: +            util.subp(adduser_cmd, logstring=log_adduser_cmd) +        except Exception as e: +            util.logexc(LOG, "Failed to create user %s", name) +            raise e -        # Set password if plain-text password provided +    def create_user(self, name, **kwargs): +        """ +        Creates users for the system using the GNU passwd tools. This +        will work on an GNU system. This should be overriden on +        distros where useradd is not desirable or not available. 
+        """ + +        # Add the user +        self.add_user(name, **kwargs) + +        # Set password if plain-text password provided and non-empty          if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:              self.set_passwd(name, kwargs['plain_text_passwd'])          # Default locking down the account.  'lock_passwd' defaults to True.          # lock account unless lock_password is False.          if kwargs.get('lock_passwd', True): -            try: -                util.subp(['passwd', '--lock', name]) -            except Exception as e: -                util.logexc(LOG, ("Failed to disable password logins for" -                            "user %s" % name), e) -                raise e +            self.lock_passwd(name)          # Configure sudo access          if 'sudo' in kwargs: @@ -375,17 +379,33 @@ class Distro(object):          return True +    def lock_passwd(self, name): +        """ +        Lock the password of a user, i.e., disable password logins +        """ +        try: +            # Need to use the short option name '-l' instead of '--lock' +            # (which would be more descriptive) since SLES 11 doesn't know +            # about long names. +            util.subp(['passwd', '-l', name]) +        except Exception as e: +            util.logexc(LOG, 'Failed to disable password for user %s', name) +            raise e +      def set_passwd(self, user, passwd, hashed=False):          pass_string = '%s:%s' % (user, passwd)          cmd = ['chpasswd']          if hashed: -            cmd.append('--encrypted') +            # Need to use the short option name '-e' instead of '--encrypted' +            # (which would be more descriptive) since SLES 11 doesn't know +            # about long names. 
+            cmd.append('-e')          try:              util.subp(cmd, pass_string, logstring="chpasswd for %s" % user)          except Exception as e: -            util.logexc(LOG, "Failed to set password for %s" % user) +            util.logexc(LOG, "Failed to set password for %s", user)              raise e          return True @@ -427,7 +447,7 @@ class Distro(object):                      util.append_file(sudo_base, sudoers_contents)                  LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base))              except IOError as e: -                util.logexc(LOG, "Failed to write %s" % sudo_base, e) +                util.logexc(LOG, "Failed to write %s", sudo_base)                  raise e          util.ensure_dir(path, 0750) @@ -478,15 +498,15 @@ class Distro(object):              try:                  util.subp(group_add_cmd)                  LOG.info("Created new group %s" % name) -            except Exception as e: -                util.logexc("Failed to create group %s" % name, e) +            except Exception: +                util.logexc("Failed to create group %s", name)          # Add members to the group, if so defined          if len(members) > 0:              for member in members:                  if not util.is_user(member):                      LOG.warn("Unable to add group member '%s' to group '%s'" -                            "; user does not exist." % (member, name)) +                            "; user does not exist.", member, name)                      continue                  util.subp(['usermod', '-a', '-G', name, member]) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 174da3ab..a022ca60 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2012 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  
#    Copyright (C) 2012 Yahoo! Inc.  #  #    Author: Scott Moser <scott.moser@canonical.com> @@ -23,14 +23,11 @@  import os  from cloudinit import distros - -from cloudinit.distros.parsers.resolv_conf import ResolvConf -from cloudinit.distros.parsers.sys_conf import SysConf -  from cloudinit import helpers  from cloudinit import log as logging  from cloudinit import util +from cloudinit.distros import rhel_util  from cloudinit.settings import PER_INSTANCE  LOG = logging.getLogger(__name__) @@ -67,33 +64,9 @@ class Distro(distros.Distro):      def install_packages(self, pkglist):          self.package_command('install', pkgs=pkglist) -    def _adjust_resolve(self, dns_servers, search_servers): -        try: -            r_conf = ResolvConf(util.load_file(self.resolve_conf_fn)) -            r_conf.parse() -        except IOError: -            util.logexc(LOG, -                        "Failed at parsing %s reverting to an empty instance", -                        self.resolve_conf_fn) -            r_conf = ResolvConf('') -            r_conf.parse() -        if dns_servers: -            for s in dns_servers: -                try: -                    r_conf.add_nameserver(s) -                except ValueError: -                    util.logexc(LOG, "Failed at adding nameserver %s", s) -        if search_servers: -            for s in search_servers: -                try: -                    r_conf.add_search_domain(s) -                except ValueError: -                    util.logexc(LOG, "Failed at adding search domain %s", s) -        util.write_file(self.resolve_conf_fn, str(r_conf), 0644) -      def _write_network(self, settings):          # TODO(harlowja) fix this... 
since this is the ubuntu format -        entries = translate_network(settings) +        entries = rhel_util.translate_network(settings)          LOG.debug("Translated ubuntu style network settings %s into %s",                    settings, entries)          # Make the intermediate format as the rhel format... @@ -112,41 +85,21 @@ class Distro(distros.Distro):                  'MACADDR': info.get('hwaddress'),                  'ONBOOT': _make_sysconfig_bool(info.get('auto')),              } -            self._update_sysconfig_file(net_fn, net_cfg) +            rhel_util.update_sysconfig_file(net_fn, net_cfg)              if 'dns-nameservers' in info:                  nameservers.extend(info['dns-nameservers'])              if 'dns-search' in info:                  searchservers.extend(info['dns-search'])          if nameservers or searchservers: -            self._adjust_resolve(nameservers, searchservers) +            rhel_util.update_resolve_conf_file(self.resolve_conf_fn, +                                               nameservers, searchservers)          if dev_names:              net_cfg = {                  'NETWORKING': _make_sysconfig_bool(True),              } -            self._update_sysconfig_file(self.network_conf_fn, net_cfg) +            rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg)          return dev_names -    def _update_sysconfig_file(self, fn, adjustments, allow_empty=False): -        if not adjustments: -            return -        (exists, contents) = self._read_conf(fn) -        updated_am = 0 -        for (k, v) in adjustments.items(): -            if v is None: -                continue -            v = str(v) -            if len(v) == 0 and not allow_empty: -                continue -            contents[k] = v -            updated_am += 1 -        if updated_am: -            lines = [ -                str(contents), -            ] -            if not exists: -                lines.insert(0, util.make_header()) -            
util.write_file(fn, "\n".join(lines) + "\n", 0644) -      def _dist_uses_systemd(self):          # Fedora 18 and RHEL 7 were the first adopters in their series          (dist, vers) = util.system_info()['dist'][:2] @@ -165,7 +118,7 @@ class Distro(distros.Distro):          locale_cfg = {              'LANG': locale,          } -        self._update_sysconfig_file(out_fn, locale_cfg) +        rhel_util.update_sysconfig_file(out_fn, locale_cfg)      def _write_hostname(self, hostname, out_fn):          if self._dist_uses_systemd(): @@ -174,7 +127,7 @@ class Distro(distros.Distro):              host_cfg = {                  'HOSTNAME': hostname,              } -            self._update_sysconfig_file(out_fn, host_cfg) +            rhel_util.update_sysconfig_file(out_fn, host_cfg)      def _select_hostname(self, hostname, fqdn):          # See: http://bit.ly/TwitgL @@ -198,22 +151,12 @@ class Distro(distros.Distro):              else:                  return default          else: -            (_exists, contents) = self._read_conf(filename) +            (_exists, contents) = rhel_util.read_sysconfig_file(filename)              if 'HOSTNAME' in contents:                  return contents['HOSTNAME']              else:                  return default -    def _read_conf(self, fn): -        exists = False -        try: -            contents = util.load_file(fn).splitlines() -            exists = True -        except IOError: -            contents = [] -        return (exists, -                SysConf(contents)) -      def _bring_up_interfaces(self, device_names):          if device_names and 'all' in device_names:              raise RuntimeError(('Distro %s can not translate ' @@ -237,7 +180,7 @@ class Distro(distros.Distro):              clock_cfg = {                  'ZONE': str(tz),              } -            self._update_sysconfig_file(self.clock_conf_fn, clock_cfg) +            rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)              # This ensures 
that the correct tz will be used for the system              util.copy(tz_file, self.tz_local_fn) @@ -272,90 +215,3 @@ class Distro(distros.Distro):      def update_package_sources(self):          self._runner.run("update-sources", self.package_command,                           ["makecache"], freq=PER_INSTANCE) - - -# This is a util function to translate a ubuntu /etc/network/interfaces 'blob' -# to a rhel equiv. that can then be written to /etc/sysconfig/network-scripts/ -# TODO(harlowja) remove when we have python-netcf active... -def translate_network(settings): -    # Get the standard cmd, args from the ubuntu format -    entries = [] -    for line in settings.splitlines(): -        line = line.strip() -        if not line or line.startswith("#"): -            continue -        split_up = line.split(None, 1) -        if len(split_up) <= 1: -            continue -        entries.append(split_up) -    # Figure out where each iface section is -    ifaces = [] -    consume = {} -    for (cmd, args) in entries: -        if cmd == 'iface': -            if consume: -                ifaces.append(consume) -                consume = {} -            consume[cmd] = args -        else: -            consume[cmd] = args -    # Check if anything left over to consume -    absorb = False -    for (cmd, args) in consume.iteritems(): -        if cmd == 'iface': -            absorb = True -    if absorb: -        ifaces.append(consume) -    # Now translate -    real_ifaces = {} -    for info in ifaces: -        if 'iface' not in info: -            continue -        iface_details = info['iface'].split(None) -        dev_name = None -        if len(iface_details) >= 1: -            dev = iface_details[0].strip().lower() -            if dev: -                dev_name = dev -        if not dev_name: -            continue -        iface_info = {} -        if len(iface_details) >= 3: -            proto_type = iface_details[2].strip().lower() -            # Seems like this can be 
'loopback' which we don't -            # really care about -            if proto_type in ['dhcp', 'static']: -                iface_info['bootproto'] = proto_type -        # These can just be copied over -        for k in ['netmask', 'address', 'gateway', 'broadcast']: -            if k in info: -                val = info[k].strip().lower() -                if val: -                    iface_info[k] = val -        # Name server info provided?? -        if 'dns-nameservers' in info: -            iface_info['dns-nameservers'] = info['dns-nameservers'].split() -        # Name server search info provided?? -        if 'dns-search' in info: -            iface_info['dns-search'] = info['dns-search'].split() -        # Is any mac address spoofing going on?? -        if 'hwaddress' in info: -            hw_info = info['hwaddress'].lower().strip() -            hw_split = hw_info.split(None, 1) -            if len(hw_split) == 2 and hw_split[0].startswith('ether'): -                hw_addr = hw_split[1] -                if hw_addr: -                    iface_info['hwaddress'] = hw_addr -        real_ifaces[dev_name] = iface_info -    # Check for those that should be started on boot via 'auto' -    for (cmd, args) in entries: -        if cmd == 'auto': -            # Seems like auto can be like 'auto eth0 eth0:1' so just get the -            # first part out as the device name -            args = args.split(None) -            if not args: -                continue -            dev_name = args[0].strip().lower() -            if dev_name in real_ifaces: -                real_ifaces[dev_name]['auto'] = True -    return real_ifaces diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py new file mode 100644 index 00000000..1aba58b8 --- /dev/null +++ b/cloudinit/distros/rhel_util.py @@ -0,0 +1,177 @@ +# vi: ts=4 expandtab +# +#    Copyright (C) 2012 Canonical Ltd. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. 
+#    Copyright (C) 2012 Yahoo! Inc. +# +#    Author: Scott Moser <scott.moser@canonical.com> +#    Author: Juerg Haefliger <juerg.haefliger@hp.com> +#    Author: Joshua Harlow <harlowja@yahoo-inc.com> +# +#    This program is free software: you can redistribute it and/or modify +#    it under the terms of the GNU General Public License version 3, as +#    published by the Free Software Foundation. +# +#    This program is distributed in the hope that it will be useful, +#    but WITHOUT ANY WARRANTY; without even the implied warranty of +#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +#    GNU General Public License for more details. +# +#    You should have received a copy of the GNU General Public License +#    along with this program.  If not, see <http://www.gnu.org/licenses/>. +# + +from cloudinit.distros.parsers.resolv_conf import ResolvConf +from cloudinit.distros.parsers.sys_conf import SysConf + +from cloudinit import log as logging +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +# This is a util function to translate Debian based distro interface blobs as +# given in /etc/network/interfaces to an equivalent format for distributions +# that use ifcfg-* style (Red Hat and SUSE). +# TODO(harlowja) remove when we have python-netcf active... 
+def translate_network(settings): +    # Get the standard cmd, args from the ubuntu format +    entries = [] +    for line in settings.splitlines(): +        line = line.strip() +        if not line or line.startswith("#"): +            continue +        split_up = line.split(None, 1) +        if len(split_up) <= 1: +            continue +        entries.append(split_up) +    # Figure out where each iface section is +    ifaces = [] +    consume = {} +    for (cmd, args) in entries: +        if cmd == 'iface': +            if consume: +                ifaces.append(consume) +                consume = {} +            consume[cmd] = args +        else: +            consume[cmd] = args +    # Check if anything left over to consume +    absorb = False +    for (cmd, args) in consume.iteritems(): +        if cmd == 'iface': +            absorb = True +    if absorb: +        ifaces.append(consume) +    # Now translate +    real_ifaces = {} +    for info in ifaces: +        if 'iface' not in info: +            continue +        iface_details = info['iface'].split(None) +        dev_name = None +        if len(iface_details) >= 1: +            dev = iface_details[0].strip().lower() +            if dev: +                dev_name = dev +        if not dev_name: +            continue +        iface_info = {} +        if len(iface_details) >= 3: +            proto_type = iface_details[2].strip().lower() +            # Seems like this can be 'loopback' which we don't +            # really care about +            if proto_type in ['dhcp', 'static']: +                iface_info['bootproto'] = proto_type +        # These can just be copied over +        for k in ['netmask', 'address', 'gateway', 'broadcast']: +            if k in info: +                val = info[k].strip().lower() +                if val: +                    iface_info[k] = val +        # Name server info provided?? 
+        if 'dns-nameservers' in info: +            iface_info['dns-nameservers'] = info['dns-nameservers'].split() +        # Name server search info provided?? +        if 'dns-search' in info: +            iface_info['dns-search'] = info['dns-search'].split() +        # Is any mac address spoofing going on?? +        if 'hwaddress' in info: +            hw_info = info['hwaddress'].lower().strip() +            hw_split = hw_info.split(None, 1) +            if len(hw_split) == 2 and hw_split[0].startswith('ether'): +                hw_addr = hw_split[1] +                if hw_addr: +                    iface_info['hwaddress'] = hw_addr +        real_ifaces[dev_name] = iface_info +    # Check for those that should be started on boot via 'auto' +    for (cmd, args) in entries: +        if cmd == 'auto': +            # Seems like auto can be like 'auto eth0 eth0:1' so just get the +            # first part out as the device name +            args = args.split(None) +            if not args: +                continue +            dev_name = args[0].strip().lower() +            if dev_name in real_ifaces: +                real_ifaces[dev_name]['auto'] = True +    return real_ifaces + + +# Helper function to update a RHEL/SUSE /etc/sysconfig/* file +def update_sysconfig_file(fn, adjustments, allow_empty=False): +    if not adjustments: +        return +    (exists, contents) = read_sysconfig_file(fn) +    updated_am = 0 +    for (k, v) in adjustments.items(): +        if v is None: +            continue +        v = str(v) +        if len(v) == 0 and not allow_empty: +            continue +        contents[k] = v +        updated_am += 1 +    if updated_am: +        lines = [ +            str(contents), +        ] +        if not exists: +            lines.insert(0, util.make_header()) +        util.write_file(fn, "\n".join(lines) + "\n", 0644) + + +# Helper function to read a RHEL/SUSE /etc/sysconfig/* file +def read_sysconfig_file(fn): +    exists = False +    try: +  
      contents = util.load_file(fn).splitlines() +        exists = True +    except IOError: +        contents = [] +    return (exists, SysConf(contents)) + + +# Helper function to update RHEL/SUSE /etc/resolv.conf +def update_resolve_conf_file(fn, dns_servers, search_servers): +    try: +        r_conf = ResolvConf(util.load_file(fn)) +        r_conf.parse() +    except IOError: +        util.logexc(LOG, "Failed at parsing %s reverting to an empty " +                    "instance", fn) +        r_conf = ResolvConf('') +        r_conf.parse() +    if dns_servers: +        for s in dns_servers: +            try: +                r_conf.add_nameserver(s) +            except ValueError: +                util.logexc(LOG, "Failed at adding nameserver %s", s) +    if search_servers: +        for s in search_servers: +            try: +                r_conf.add_search_domain(s) +            except ValueError: +                util.logexc(LOG, "Failed at adding search domain %s", s) +    util.write_file(fn, str(r_conf), 0644) diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py new file mode 100644 index 00000000..904e931a --- /dev/null +++ b/cloudinit/distros/sles.py @@ -0,0 +1,193 @@ +# vi: ts=4 expandtab +# +#    Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +#    Author: Juerg Haefliger <juerg.haefliger@hp.com> +# +#    Leaning very heavily on the RHEL and Debian implementation +# +#    This program is free software: you can redistribute it and/or modify +#    it under the terms of the GNU General Public License version 3, as +#    published by the Free Software Foundation. +# +#    This program is distributed in the hope that it will be useful, +#    but WITHOUT ANY WARRANTY; without even the implied warranty of +#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +#    GNU General Public License for more details. +# +#    You should have received a copy of the GNU General Public License +#    along with this program.  
If not, see <http://www.gnu.org/licenses/>. + +import os + +from cloudinit import distros + +from cloudinit.distros.parsers.hostname import HostnameConf + +from cloudinit import helpers +from cloudinit import log as logging +from cloudinit import util + +from cloudinit.distros import rhel_util +from cloudinit.settings import PER_INSTANCE + +LOG = logging.getLogger(__name__) + + +class Distro(distros.Distro): +    clock_conf_fn = '/etc/sysconfig/clock' +    locale_conf_fn = '/etc/sysconfig/language' +    network_conf_fn = '/etc/sysconfig/network' +    hostname_conf_fn = '/etc/HOSTNAME' +    network_script_tpl = '/etc/sysconfig/network/ifcfg-%s' +    resolve_conf_fn = '/etc/resolv.conf' +    tz_local_fn = '/etc/localtime' +    tz_zone_dir = '/usr/share/zoneinfo' + +    def __init__(self, name, cfg, paths): +        distros.Distro.__init__(self, name, cfg, paths) +        # This will be used to restrict certain +        # calls from repeatly happening (when they +        # should only happen say once per instance...) +        self._runner = helpers.Runners(paths) +        self.osfamily = 'suse' + +    def install_packages(self, pkglist): +        self.package_command('install', args='-l', pkgs=pkglist) + +    def _write_network(self, settings): +        # Convert debian settings to ifcfg format +        entries = rhel_util.translate_network(settings) +        LOG.debug("Translated ubuntu style network settings %s into %s", +                  settings, entries) +        # Make the intermediate format as the suse format... 
+        nameservers = [] +        searchservers = [] +        dev_names = entries.keys() +        for (dev, info) in entries.iteritems(): +            net_fn = self.network_script_tpl % (dev) +            mode = info.get('auto') +            if mode and mode.lower() == 'true': +                mode = 'auto' +            else: +                mode = 'manual' +            net_cfg = { +                'BOOTPROTO': info.get('bootproto'), +                'BROADCAST': info.get('broadcast'), +                'GATEWAY': info.get('gateway'), +                'IPADDR': info.get('address'), +                'LLADDR': info.get('hwaddress'), +                'NETMASK': info.get('netmask'), +                'STARTMODE': mode, +                'USERCONTROL': 'no' +            } +            if dev != 'lo': +                net_cfg['ETHERDEVICE'] = dev +                net_cfg['ETHTOOL_OPTIONS'] = '' +            else: +                net_cfg['FIREWALL'] = 'no' +            rhel_util.update_sysconfig_file(net_fn, net_cfg, True) +            if 'dns-nameservers' in info: +                nameservers.extend(info['dns-nameservers']) +            if 'dns-search' in info: +                searchservers.extend(info['dns-search']) +        if nameservers or searchservers: +            rhel_util.update_resolve_conf_file(self.resolve_conf_fn, +                                               nameservers, searchservers) +        return dev_names + +    def apply_locale(self, locale, out_fn=None): +        if not out_fn: +            out_fn = self.locale_conf_fn +        locale_cfg = { +            'RC_LANG': locale, +        } +        rhel_util.update_sysconfig_file(out_fn, locale_cfg) + +    def _write_hostname(self, hostname, out_fn): +        conf = None +        try: +            # Try to update the previous one +            # so lets see if we can read it first. 
+            conf = self._read_hostname_conf(out_fn) +        except IOError: +            pass +        if not conf: +            conf = HostnameConf('') +        conf.set_hostname(hostname) +        util.write_file(out_fn, str(conf), 0644) + +    def _select_hostname(self, hostname, fqdn): +        # Prefer the short hostname over the long +        # fully qualified domain name +        if not hostname: +            return fqdn +        return hostname + +    def _read_system_hostname(self): +        host_fn = self.hostname_conf_fn +        return (host_fn, self._read_hostname(host_fn)) + +    def _read_hostname_conf(self, filename): +        conf = HostnameConf(util.load_file(filename)) +        conf.parse() +        return conf + +    def _read_hostname(self, filename, default=None): +        hostname = None +        try: +            conf = self._read_hostname_conf(filename) +            hostname = conf.hostname +        except IOError: +            pass +        if not hostname: +            return default +        return hostname + +    def _bring_up_interfaces(self, device_names): +        if device_names and 'all' in device_names: +            raise RuntimeError(('Distro %s can not translate ' +                                'the device name "all"') % (self.name)) +        return distros.Distro._bring_up_interfaces(self, device_names) + +    def set_timezone(self, tz): +        # TODO(harlowja): move this code into +        # the parent distro... 
+        tz_file = os.path.join(self.tz_zone_dir, str(tz)) +        if not os.path.isfile(tz_file): +            raise RuntimeError(("Invalid timezone %s," +                                " no file found at %s") % (tz, tz_file)) +        # Adjust the sysconfig clock zone setting +        clock_cfg = { +            'TIMEZONE': str(tz), +        } +        rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg) +        # This ensures that the correct tz will be used for the system +        util.copy(tz_file, self.tz_local_fn) + +    def package_command(self, command, args=None, pkgs=None): +        if pkgs is None: +            pkgs = [] + +        cmd = ['zypper'] +        # No user interaction possible, enable non-interactive mode +        cmd.append('--non-interactive') + +        # Comand is the operation, such as install +        cmd.append(command) + +        # args are the arguments to the command, not global options +        if args and isinstance(args, str): +            cmd.append(args) +        elif args and isinstance(args, list): +            cmd.extend(args) + +        pkglist = util.expand_package_list('%s-%s', pkgs) +        cmd.extend(pkglist) + +        # Allow the output of this to flow outwards (ie not be captured) +        util.subp(cmd, capture=False) + +    def update_package_sources(self): +        self._runner.run("update-sources", self.package_command, +                         ['refresh'], freq=PER_INSTANCE) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 924463ce..2ddc75f4 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2012 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  #    Copyright (C) 2012 Yahoo! Inc.  
#  #    Author: Scott Moser <scott.moser@canonical.com> @@ -62,6 +62,7 @@ INCLUSION_TYPES_MAP = {      '#part-handler': 'text/part-handler',      '#cloud-boothook': 'text/cloud-boothook',      '#cloud-config-archive': 'text/cloud-config-archive', +    '#cloud-config-jsonp': 'text/cloud-config-jsonp',  }  # Sorted longest first @@ -117,10 +118,9 @@ def run_part(mod, data, filename, payload, frequency, headers):          else:              raise ValueError("Unknown module version %s" % (mod_ver))      except: -        util.logexc(LOG, ("Failed calling handler %s (%s, %s, %s)" -                         " with frequency %s"), -                    mod, content_type, filename, -                    mod_ver, frequency) +        util.logexc(LOG, "Failed calling handler %s (%s, %s, %s) with " +                    "frequency %s", mod, content_type, filename, mod_ver, +                    frequency)  def call_begin(mod, data, frequency): @@ -152,14 +152,13 @@ def walker_handle_handler(pdata, _ctype, _filename, payload):      try:          mod = fixup_handler(importer.import_module(modname))          call_begin(mod, pdata['data'], frequency) -        # Only register and increment -        # after the above have worked (so we don't if it -        # fails) -        handlers.register(mod) +        # Only register and increment after the above have worked, so we don't +        # register if it fails starting. 
+        handlers.register(mod, initialized=True)          pdata['handlercount'] = curcount + 1      except: -        util.logexc(LOG, ("Failed at registering python file: %s" -                          " (part handler %s)"), modfname, curcount) +        util.logexc(LOG, "Failed at registering python file: %s (part " +                    "handler %s)", modfname, curcount)  def _extract_first_or_bytes(blob, size): diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py index bf2899ab..1848ce2c 100644 --- a/cloudinit/handlers/boot_hook.py +++ b/cloudinit/handlers/boot_hook.py @@ -29,6 +29,7 @@ from cloudinit import util  from cloudinit.settings import (PER_ALWAYS)  LOG = logging.getLogger(__name__) +BOOTHOOK_PREFIX = "#cloud-boothook"  class BootHookPartHandler(handlers.Handler): @@ -41,19 +42,15 @@ class BootHookPartHandler(handlers.Handler):      def list_types(self):          return [ -            handlers.type_from_starts_with("#cloud-boothook"), +            handlers.type_from_starts_with(BOOTHOOK_PREFIX),          ]      def _write_part(self, payload, filename):          filename = util.clean_filename(filename) -        payload = util.dos2unix(payload) -        prefix = "#cloud-boothook" -        start = 0 -        if payload.startswith(prefix): -            start = len(prefix) + 1          filepath = os.path.join(self.boothook_dir, filename) -        contents = payload[start:] -        util.write_file(filepath, contents, 0700) +        contents = util.strip_prefix_suffix(util.dos2unix(payload), +                                            prefix=BOOTHOOK_PREFIX) +        util.write_file(filepath, contents.lstrip(), 0700)          return filepath      def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221 @@ -70,5 +67,5 @@ class BootHookPartHandler(handlers.Handler):          except util.ProcessExecutionError:              util.logexc(LOG, "Boothooks script %s execution error", filepath)          except Exception: -        
    util.logexc(LOG, ("Boothooks unknown " -                              "error when running %s"), filepath) +            util.logexc(LOG, "Boothooks unknown error when running %s", +                        filepath) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index c97ca3e8..34a73115 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -20,6 +20,8 @@  #    You should have received a copy of the GNU General Public License  #    along with this program.  If not, see <http://www.gnu.org/licenses/>. +import jsonpatch +  from cloudinit import handlers  from cloudinit import log as logging  from cloudinit import mergers @@ -49,6 +51,14 @@ MERGE_HEADER = 'Merge-Type'  #  # This gets loaded into yaml with final result {'a': 22}  DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()') +CLOUD_PREFIX = "#cloud-config" +JSONP_PREFIX = "#cloud-config-jsonp" + +# The file header -> content types this module will handle. +CC_TYPES = { +    JSONP_PREFIX: handlers.type_from_starts_with(JSONP_PREFIX), +    CLOUD_PREFIX: handlers.type_from_starts_with(CLOUD_PREFIX), +}  class CloudConfigPartHandler(handlers.Handler): @@ -59,9 +69,7 @@ class CloudConfigPartHandler(handlers.Handler):          self.file_names = []      def list_types(self): -        return [ -            handlers.type_from_starts_with("#cloud-config"), -        ] +        return list(CC_TYPES.values())      def _write_cloud_config(self):          if not self.cloud_fn: @@ -78,7 +86,7 @@ class CloudConfigPartHandler(handlers.Handler):          if self.cloud_buf is not None:              # Something was actually gathered....              
lines = [ -                "#cloud-config", +                CLOUD_PREFIX,                  '',              ]              lines.extend(file_lines) @@ -107,13 +115,21 @@ class CloudConfigPartHandler(handlers.Handler):              all_mergers = DEF_MERGERS          return (payload_yaml, all_mergers) +    def _merge_patch(self, payload): +        # JSON doesn't handle comments in this manner, so ensure that +        # if we started with this 'type' that we remove it before +        # attempting to load it as json (which the jsonpatch library will +        # attempt to do). +        payload = payload.lstrip() +        payload = util.strip_prefix_suffix(payload, prefix=JSONP_PREFIX) +        patch = jsonpatch.JsonPatch.from_string(payload) +        LOG.debug("Merging by applying json patch %s", patch) +        self.cloud_buf = patch.apply(self.cloud_buf, in_place=False) +      def _merge_part(self, payload, headers):          (payload_yaml, my_mergers) = self._extract_mergers(payload, headers)          LOG.debug("Merging by applying %s", my_mergers)          merger = mergers.construct(my_mergers) -        if self.cloud_buf is None: -            # First time through, merge with an empty dict... -            self.cloud_buf = {}          self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml)      def _reset(self): @@ -130,7 +146,13 @@ class CloudConfigPartHandler(handlers.Handler):              self._reset()              return          try: -            self._merge_part(payload, headers) +            # First time through, merge with an empty dict... 
+            if self.cloud_buf is None or not self.file_names: +                self.cloud_buf = {} +            if ctype == CC_TYPES[JSONP_PREFIX]: +                self._merge_patch(payload) +            else: +                self._merge_part(payload, headers)              # Ensure filename is ok to store              for i in ("\n", "\r", "\t"):                  filename = filename.replace(i, " ") diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py index b185c374..62289d98 100644 --- a/cloudinit/handlers/shell_script.py +++ b/cloudinit/handlers/shell_script.py @@ -29,6 +29,7 @@ from cloudinit import util  from cloudinit.settings import (PER_ALWAYS)  LOG = logging.getLogger(__name__) +SHELL_PREFIX = "#!"  class ShellScriptPartHandler(handlers.Handler): @@ -38,7 +39,7 @@ class ShellScriptPartHandler(handlers.Handler):      def list_types(self):          return [ -            handlers.type_from_starts_with("#!"), +            handlers.type_from_starts_with(SHELL_PREFIX),          ]      def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221 diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index edd56527..bac4cad2 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -22,6 +22,7 @@  import os +import re  from cloudinit import handlers  from cloudinit import log as logging @@ -30,6 +31,7 @@ from cloudinit import util  from cloudinit.settings import (PER_INSTANCE)  LOG = logging.getLogger(__name__) +UPSTART_PREFIX = "#upstart-job"  class UpstartJobPartHandler(handlers.Handler): @@ -39,7 +41,7 @@ class UpstartJobPartHandler(handlers.Handler):      def list_types(self):          return [ -            handlers.type_from_starts_with("#upstart-job"), +            handlers.type_from_starts_with(UPSTART_PREFIX),          ]      def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221 @@ -66,14 +68,53 @@ class 
UpstartJobPartHandler(handlers.Handler):          path = os.path.join(self.upstart_dir, filename)          util.write_file(path, payload, 0644) -        # FIXME LATER (LP: #1124384) -        # a bug in upstart means that invoking reload-configuration -        # at this stage in boot causes havoc.  So, until that is fixed -        # we will not do that.  However, I'd like to be able to easily -        # test to see if this bug is still present in an image with -        # a newer upstart.  So, a boot hook could easiliy write this file. -        if os.path.exists("/run/cloud-init-upstart-reload"): -            # if inotify support is not present in the root filesystem -            # (overlayroot) then we need to tell upstart to re-read /etc - +        if SUITABLE_UPSTART:              util.subp(["initctl", "reload-configuration"], capture=False) + + +def _has_suitable_upstart(): +    # (LP: #1124384) +    # a bug in upstart means that invoking reload-configuration +    # at this stage in boot causes havoc.  So, try to determine if upstart +    # is installed, and reloading configuration is OK. +    if not os.path.exists("/sbin/initctl"): +        return False +    try: +        (version_out, _err) = util.subp(["initctl", "version"]) +    except: +        util.logexc(LOG, "initctl version failed") +        return False + +    # expecting 'initctl version' to output something like: init (upstart X.Y) +    if re.match("upstart 1.[0-7][)]", version_out): +        return False +    if "upstart 0." 
in version_out: +        return False +    elif "upstart 1.8" in version_out: +        if not os.path.exists("/usr/bin/dpkg-query"): +            return False +        try: +            (dpkg_ver, _err) = util.subp(["dpkg-query", +                                          "--showformat=${Version}", +                                          "--show", "upstart"], rcs=[0, 1]) +        except Exception: +            util.logexc(LOG, "dpkg-query failed") +            return False + +        try: +            good = "1.8-0ubuntu1.2" +            util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good]) +            return True +        except util.ProcessExecutionError as e: +            if e.exit_code is 1: +                pass +            else: +                util.logexc(LOG, "dpkg --compare-versions failed [%s]", +                            e.exit_code) +        except Exception as e: +            util.logexc(LOG, "dpkg --compare-versions failed") +        return False +    else: +        return True + +SUITABLE_UPSTART = _has_suitable_upstart() diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index a4e6fb03..1c46efde 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2012 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  #    Copyright (C) 2012 Yahoo! Inc.  
#  #    Author: Scott Moser <scott.moser@canonical.com> @@ -216,8 +216,8 @@ class ConfigMerger(object):                  if ds_cfg and isinstance(ds_cfg, (dict)):                      d_cfgs.append(ds_cfg)              except: -                util.logexc(LOG, ("Failed loading of datasource" -                                  " config object from %s"), self._ds) +                util.logexc(LOG, "Failed loading of datasource config object " +                            "from %s", self._ds)          return d_cfgs      def _get_env_configs(self): @@ -227,8 +227,8 @@ class ConfigMerger(object):              try:                  e_cfgs.append(util.read_conf(e_fn))              except: -                util.logexc(LOG, ('Failed loading of env. config' -                                  ' from %s'), e_fn) +                util.logexc(LOG, 'Failed loading of env. config from %s', +                            e_fn)          return e_cfgs      def _get_instance_configs(self): @@ -242,8 +242,8 @@ class ConfigMerger(object):              try:                  i_cfgs.append(util.read_conf(cc_fn))              except: -                util.logexc(LOG, ('Failed loading of cloud-config' -                                      ' from %s'), cc_fn) +                util.logexc(LOG, 'Failed loading of cloud-config from %s', +                            cc_fn)          return i_cfgs      def _read_cfg(self): @@ -259,8 +259,8 @@ class ConfigMerger(object):                  try:                      cfgs.append(util.read_conf(c_fn))                  except: -                    util.logexc(LOG, ("Failed loading of configuration" -                                       " from %s"), c_fn) +                    util.logexc(LOG, "Failed loading of configuration from %s", +                                c_fn)          cfgs.extend(self._get_env_configs())          cfgs.extend(self._get_instance_configs()) @@ -281,6 +281,7 @@ class ContentHandlers(object):      def __init__(self):          
self.registered = {} +        self.initialized = []      def __contains__(self, item):          return self.is_registered(item) @@ -291,11 +292,13 @@ class ContentHandlers(object):      def is_registered(self, content_type):          return content_type in self.registered -    def register(self, mod): +    def register(self, mod, initialized=False):          types = set()          for t in mod.list_types():              self.registered[t] = mod              types.add(t) +        if initialized and mod not in self.initialized: +            self.initialized.append(mod)          return types      def _get_handler(self, content_type): diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py index 76591bea..62999b4e 100644 --- a/cloudinit/mergers/m_list.py +++ b/cloudinit/mergers/m_list.py @@ -19,6 +19,7 @@  DEF_MERGE_TYPE = 'replace'  MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace') +  def _has_any(what, *keys):      for k in keys:          if k in what: diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 8cc9e3b4..9f6badae 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -31,11 +31,13 @@ CFG_BUILTIN = {      'datasource_list': [          'NoCloud',          'ConfigDrive', +        'Azure',          'AltCloud',          'OVF',          'MAAS',          'Ec2',          'CloudStack', +        'SmartOS',          # At the end to act as a 'catch' when none of the above work...          'None',      ], diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 64548d43..a834f8eb 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -1,10 +1,11 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2009-2010 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  #    Copyright (C) 2012 Yahoo! Inc.  
#  #    Author: Joe VLcek <JVLcek@RedHat.com> +#    Author: Juerg Haefliger <juerg.haefliger@hp.com>  #  #    This program is free software: you can redistribute it and/or modify  #    it under the terms of the GNU General Public License version 3, as @@ -79,7 +80,7 @@ def read_user_data_callback(mount_dir):          try:              user_data = util.load_file(user_data_file).strip()          except IOError: -            util.logexc(LOG, ('Failed accessing user data file.')) +            util.logexc(LOG, 'Failed accessing user data file.')              return None      return user_data @@ -178,7 +179,7 @@ class DataSourceAltCloud(sources.DataSource):              return False          # No user data found -        util.logexc(LOG, ('Failed accessing user data.')) +        util.logexc(LOG, 'Failed accessing user data.')          return False      def user_data_rhevm(self): @@ -205,12 +206,12 @@ class DataSourceAltCloud(sources.DataSource):              (cmd_out, _err) = util.subp(cmd)              LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))          except ProcessExecutionError, _err: -            util.logexc(LOG, (('Failed command: %s\n%s') % \ -                (' '.join(cmd), _err.message))) +            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), +                        _err.message)              return False          except OSError, _err: -            util.logexc(LOG, (('Failed command: %s\n%s') % \ -                (' '.join(cmd), _err.message))) +            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), +                        _err.message)              return False          floppy_dev = '/dev/fd0' @@ -222,12 +223,12 @@ class DataSourceAltCloud(sources.DataSource):              (cmd_out, _err) = util.subp(cmd)              LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))          except ProcessExecutionError, _err: -            util.logexc(LOG, (('Failed command: %s\n%s') % \ -                (' 
'.join(cmd), _err.message))) +            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), +                        _err.message)              return False          except OSError, _err: -            util.logexc(LOG, (('Failed command: %s\n%s') % \ -                (' '.join(cmd), _err.message))) +            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), +                        _err.message)              return False          try: @@ -236,8 +237,8 @@ class DataSourceAltCloud(sources.DataSource):              if err.errno != errno.ENOENT:                  raise          except util.MountFailedError: -            util.logexc(LOG, ("Failed to mount %s" -                              " when looking for user data"), floppy_dev) +            util.logexc(LOG, "Failed to mount %s when looking for user data", +                        floppy_dev)          self.userdata_raw = return_str          self.metadata = META_DATA_NOT_SUPPORTED @@ -272,8 +273,8 @@ class DataSourceAltCloud(sources.DataSource):                  if err.errno != errno.ENOENT:                      raise              except util.MountFailedError: -                util.logexc(LOG, ("Failed to mount %s" -                                  " when looking for user data"), cdrom_dev) +                util.logexc(LOG, "Failed to mount %s when looking for user " +                            "data", cdrom_dev)          self.userdata_raw = return_str          self.metadata = META_DATA_NOT_SUPPORTED diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py new file mode 100644 index 00000000..d4863429 --- /dev/null +++ b/cloudinit/sources/DataSourceAzure.py @@ -0,0 +1,485 @@ +# vi: ts=4 expandtab +# +#    Copyright (C) 2013 Canonical Ltd. 
+# +#    Author: Scott Moser <scott.moser@canonical.com> +# +#    This program is free software: you can redistribute it and/or modify +#    it under the terms of the GNU General Public License version 3, as +#    published by the Free Software Foundation. +# +#    This program is distributed in the hope that it will be useful, +#    but WITHOUT ANY WARRANTY; without even the implied warranty of +#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +#    GNU General Public License for more details. +# +#    You should have received a copy of the GNU General Public License +#    along with this program.  If not, see <http://www.gnu.org/licenses/>. + +import base64 +import os +import os.path +import time +from xml.dom import minidom + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util + +LOG = logging.getLogger(__name__) + +DS_NAME = 'Azure' +DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} +AGENT_START = ['service', 'walinuxagent', 'start'] +BOUNCE_COMMAND = ("i=$interface; x=0; ifdown $i || x=$?; " +                  "ifup $i || x=$?; exit $x") +BUILTIN_DS_CONFIG = { +    'agent_command': AGENT_START, +    'data_dir': "/var/lib/waagent", +    'set_hostname': True, +    'hostname_bounce': { +        'interface': 'eth0', +        'policy': True, +        'command': BOUNCE_COMMAND, +        'hostname_command': 'hostname', +    } +} +DS_CFG_PATH = ['datasource', DS_NAME] + + +class DataSourceAzureNet(sources.DataSource): +    def __init__(self, sys_cfg, distro, paths): +        sources.DataSource.__init__(self, sys_cfg, distro, paths) +        self.seed_dir = os.path.join(paths.seed_dir, 'azure') +        self.cfg = {} +        self.seed = None +        self.ds_cfg = util.mergemanydict([ +            util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), +            BUILTIN_DS_CONFIG]) + +    def __str__(self): +        root = sources.DataSource.__str__(self) +        return "%s [seed=%s]" % (root, self.seed) + 
+    def get_data(self): +        # azure removes/ejects the cdrom containing the ovf-env.xml +        # file on reboot.  So, in order to successfully reboot we +        # need to look in the datadir and consider that valid +        ddir = self.ds_cfg['data_dir'] + +        candidates = [self.seed_dir] +        candidates.extend(list_possible_azure_ds_devs()) +        if ddir: +            candidates.append(ddir) + +        found = None + +        for cdev in candidates: +            try: +                if cdev.startswith("/dev/"): +                    ret = util.mount_cb(cdev, load_azure_ds_dir) +                else: +                    ret = load_azure_ds_dir(cdev) + +            except NonAzureDataSource: +                continue +            except BrokenAzureDataSource as exc: +                raise exc +            except util.MountFailedError: +                LOG.warn("%s was not mountable" % cdev) +                continue + +            (md, self.userdata_raw, cfg, files) = ret +            self.seed = cdev +            self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) +            self.cfg = cfg +            found = cdev + +            LOG.debug("found datasource in %s", cdev) +            break + +        if not found: +            return False + +        if found == ddir: +            LOG.debug("using files cached in %s", ddir) + +        # now update ds_cfg to reflect contents pass in config +        usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) +        self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg]) +        mycfg = self.ds_cfg + +        # walinux agent writes files world readable, but expects +        # the directory to be protected. 
+        write_files(mycfg['data_dir'], files, dirmode=0700) + +        # handle the hostname 'publishing' +        try: +            handle_set_hostname(mycfg.get('set_hostname'), +                                self.metadata.get('local-hostname'), +                                mycfg['hostname_bounce']) +        except Exception as e: +            LOG.warn("Failed publishing hostname: %s" % e) +            util.logexc(LOG, "handling set_hostname failed") + +        try: +            invoke_agent(mycfg['agent_command']) +        except util.ProcessExecutionError: +            # claim the datasource even if the command failed +            util.logexc(LOG, "agent command '%s' failed.", +                        mycfg['agent_command']) + +        shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml") +        wait_for = [shcfgxml] + +        fp_files = [] +        for pk in self.cfg.get('_pubkeys', []): +            bname = pk['fingerprint'] + ".crt" +            fp_files += [os.path.join(mycfg['data_dir'], bname)] + +        start = time.time() +        missing = wait_for_files(wait_for + fp_files) +        if len(missing): +            LOG.warn("Did not find files, but going on: %s", missing) +        else: +            LOG.debug("waited %.3f seconds for %d files to appear", +                      time.time() - start, len(wait_for)) + +        if shcfgxml in missing: +            LOG.warn("SharedConfig.xml missing, using static instance-id") +        else: +            try: +                self.metadata['instance-id'] = iid_from_shared_config(shcfgxml) +            except ValueError as e: +                LOG.warn("failed to get instance id in %s: %s" % (shcfgxml, e)) + +        pubkeys = pubkeys_from_crt_files(fp_files) + +        self.metadata['public-keys'] = pubkeys + +        return True + +    def get_config_obj(self): +        return self.cfg + + +def handle_set_hostname(enabled, hostname, cfg): +    if not util.is_true(enabled): +        return + 
+    if not hostname: +        LOG.warn("set_hostname was true but no local-hostname") +        return + +    apply_hostname_bounce(hostname=hostname, policy=cfg['policy'], +                          interface=cfg['interface'], +                          command=cfg['command'], +                          hostname_command=cfg['hostname_command']) + + +def apply_hostname_bounce(hostname, policy, interface, command, +                          hostname_command="hostname"): +    # set the hostname to 'hostname' if it is not already set to that. +    # then, if policy is not off, bounce the interface using command +    prev_hostname = util.subp(hostname_command, capture=True)[0].strip() + +    util.subp([hostname_command, hostname]) + +    if util.is_false(policy): +        return + +    if prev_hostname == hostname and policy != "force": +        return + +    env = os.environ.copy() +    env['interface'] = interface + +    if command == "builtin": +        command = BOUNCE_COMMAND + +    util.subp(command, shell=(not isinstance(command, list)), capture=True) + + +def crtfile_to_pubkey(fname): +    pipeline = ('openssl x509 -noout -pubkey < "$0" |' +                'ssh-keygen -i -m PKCS8 -f /dev/stdin') +    (out, _err) = util.subp(['sh', '-c', pipeline, fname], capture=True) +    return out.rstrip() + + +def pubkeys_from_crt_files(flist): +    pubkeys = [] +    errors = [] +    for fname in flist: +        try: +            pubkeys.append(crtfile_to_pubkey(fname)) +        except util.ProcessExecutionError: +            errors.extend(fname) + +    if errors: +        LOG.warn("failed to convert the crt files to pubkey: %s" % errors) + +    return pubkeys + + +def wait_for_files(flist, maxwait=60, naplen=.5): +    need = set(flist) +    waited = 0 +    while waited < maxwait: +        need -= set([f for f in need if os.path.exists(f)]) +        if len(need) == 0: +            return [] +        time.sleep(naplen) +        waited += naplen +    return need + + +def 
write_files(datadir, files, dirmode=None): +    if not datadir: +        return +    if not files: +        files = {} +    util.ensure_dir(datadir, dirmode) +    for (name, content) in files.items(): +        util.write_file(filename=os.path.join(datadir, name), +                        content=content, mode=0600) + + +def invoke_agent(cmd): +    # this is a function itself to simplify patching it for test +    if cmd: +        LOG.debug("invoking agent: %s" % cmd) +        util.subp(cmd, shell=(not isinstance(cmd, list))) +    else: +        LOG.debug("not invoking agent") + + +def find_child(node, filter_func): +    ret = [] +    if not node.hasChildNodes(): +        return ret +    for child in node.childNodes: +        if filter_func(child): +            ret.append(child) +    return ret + + +def load_azure_ovf_pubkeys(sshnode): +    # This parses a 'SSH' node formatted like below, and returns +    # an array of dicts. +    #  [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', +    #    'path': 'where/to/go'}] +    # +    # <SSH><PublicKeys> +    #   <PublicKey><Fingerprint>ABC</FingerPrint><Path>/ABC</Path> +    #   ... 
+    # </PublicKeys></SSH> +    results = find_child(sshnode, lambda n: n.localName == "PublicKeys") +    if len(results) == 0: +        return [] +    if len(results) > 1: +        raise BrokenAzureDataSource("Multiple 'PublicKeys'(%s) in SSH node" % +                                    len(results)) + +    pubkeys_node = results[0] +    pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey") + +    if len(pubkeys) == 0: +        return [] + +    found = [] +    text_node = minidom.Document.TEXT_NODE + +    for pk_node in pubkeys: +        if not pk_node.hasChildNodes(): +            continue +        cur = {'fingerprint': "", 'path': ""} +        for child in pk_node.childNodes: +            if (child.nodeType == text_node or not child.localName): +                continue + +            name = child.localName.lower() + +            if name not in cur.keys(): +                continue + +            if (len(child.childNodes) != 1 or +                child.childNodes[0].nodeType != text_node): +                continue + +            cur[name] = child.childNodes[0].wholeText.strip() +        found.append(cur) + +    return found + + +def single_node_at_path(node, pathlist): +    curnode = node +    for tok in pathlist: +        results = find_child(curnode, lambda n: n.localName == tok) +        if len(results) == 0: +            raise ValueError("missing %s token in %s" % (tok, str(pathlist))) +        if len(results) > 1: +            raise ValueError("found %s nodes of type %s looking for %s" % +                             (len(results), tok, str(pathlist))) +        curnode = results[0] + +    return curnode + + +def read_azure_ovf(contents): +    try: +        dom = minidom.parseString(contents) +    except Exception as e: +        raise NonAzureDataSource("invalid xml: %s" % e) + +    results = find_child(dom.documentElement, +        lambda n: n.localName == "ProvisioningSection") + +    if len(results) == 0: +        raise 
NonAzureDataSource("No ProvisioningSection") +    if len(results) > 1: +        raise BrokenAzureDataSource("found '%d' ProvisioningSection items" % +                                    len(results)) +    provSection = results[0] + +    lpcs_nodes = find_child(provSection, +        lambda n: n.localName == "LinuxProvisioningConfigurationSet") + +    if len(results) == 0: +        raise NonAzureDataSource("No LinuxProvisioningConfigurationSet") +    if len(results) > 1: +        raise BrokenAzureDataSource("found '%d' %ss" % +                                    ("LinuxProvisioningConfigurationSet", +                                     len(results))) +    lpcs = lpcs_nodes[0] + +    if not lpcs.hasChildNodes(): +        raise BrokenAzureDataSource("no child nodes of configuration set") + +    md_props = 'seedfrom' +    md = {'azure_data': {}} +    cfg = {} +    ud = "" +    password = None +    username = None + +    for child in lpcs.childNodes: +        if child.nodeType == dom.TEXT_NODE or not child.localName: +            continue + +        name = child.localName.lower() + +        simple = False +        value = "" +        if (len(child.childNodes) == 1 and +            child.childNodes[0].nodeType == dom.TEXT_NODE): +            simple = True +            value = child.childNodes[0].wholeText + +        attrs = {k: v for k, v in child.attributes.items()} + +        # we accept either UserData or CustomData.  If both are present +        # then behavior is undefined. 
+        if (name == "userdata" or name == "customdata"): +            if attrs.get('encoding') in (None, "base64"): +                ud = base64.b64decode(''.join(value.split())) +            else: +                ud = value +        elif name == "username": +            username = value +        elif name == "userpassword": +            password = value +        elif name == "hostname": +            md['local-hostname'] = value +        elif name == "dscfg": +            if attrs.get('encoding') in (None, "base64"): +                dscfg = base64.b64decode(''.join(value.split())) +            else: +                dscfg = value +            cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})} +        elif name == "ssh": +            cfg['_pubkeys'] = load_azure_ovf_pubkeys(child) +        elif name == "disablesshpasswordauthentication": +            cfg['ssh_pwauth'] = util.is_false(value) +        elif simple: +            if name in md_props: +                md[name] = value +            else: +                md['azure_data'][name] = value + +    defuser = {} +    if username: +        defuser['name'] = username +    if password: +        defuser['password'] = password +        defuser['lock_passwd'] = False + +    if defuser: +        cfg['system_info'] = {'default_user': defuser} + +    if 'ssh_pwauth' not in cfg and password: +        cfg['ssh_pwauth'] = True + +    return (md, ud, cfg) + + +def list_possible_azure_ds_devs(): +    # return a sorted list of devices that might have a azure datasource +    devlist = [] +    for fstype in ("iso9660", "udf"): +        devlist.extend(util.find_devs_with("TYPE=%s" % fstype)) + +    devlist.sort(reverse=True) +    return devlist + + +def load_azure_ds_dir(source_dir): +    ovf_file = os.path.join(source_dir, "ovf-env.xml") + +    if not os.path.isfile(ovf_file): +        raise NonAzureDataSource("No ovf-env file found") + +    with open(ovf_file, "r") as fp: +        contents = fp.read() + +    md, 
ud, cfg = read_azure_ovf(contents) +    return (md, ud, cfg, {'ovf-env.xml': contents}) + + +def iid_from_shared_config(path): +    with open(path, "rb") as fp: +        content = fp.read() +    return iid_from_shared_config_content(content) + + +def iid_from_shared_config_content(content): +    """ +    find INSTANCE_ID in: +    <?xml version="1.0" encoding="utf-8"?> +    <SharedConfig version="1.0.0.0" goalStateIncarnation="1"> +      <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0"> +        <Service name="..." guid="{00000000-0000-0000-0000-000000000000}" /> +    """ +    dom = minidom.parseString(content) +    depnode = single_node_at_path(dom, ["SharedConfig", "Deployment"]) +    return depnode.attributes.get('name').value + + +class BrokenAzureDataSource(Exception): +    pass + + +class NonAzureDataSource(Exception): +    pass + + +# Used to match classes to dependencies +datasources = [ +  (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): +    return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 81c8cda9..08f661e4 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -4,11 +4,13 @@  #    Copyright (C) 2012 Cosmin Luta  #    Copyright (C) 2012 Yahoo! Inc.  #    Copyright (C) 2012 Gerard Dethier +#    Copyright (C) 2013 Hewlett-Packard Development Company, L.P.  
#  #    Author: Cosmin Luta <q4break@gmail.com>  #    Author: Scott Moser <scott.moser@canonical.com>  #    Author: Joshua Harlow <harlowja@yahoo-inc.com>  #    Author: Gerard Dethier <g.dethier@gmail.com> +#    Author: Juerg Haefliger <juerg.haefliger@hp.com>  #  #    This program is free software: you can redistribute it and/or modify  #    it under the terms of the GNU General Public License version 3, as @@ -109,8 +111,8 @@ class DataSourceCloudStack(sources.DataSource):                        int(time.time() - start_time))              return True          except Exception: -            util.logexc(LOG, ('Failed fetching from metadata ' -                              'service %s'), self.metadata_address) +            util.logexc(LOG, 'Failed fetching from metadata service %s', +                        self.metadata_address)              return False      def get_instance_id(self): diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 084abca7..4ef92a56 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2009-2010 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  #    Copyright (C) 2012 Yahoo! Inc.  
#  #    Author: Scott Moser <scott.moser@canonical.com> @@ -119,8 +119,8 @@ class DataSourceNoCloud(sources.DataSource):                      if e.errno != errno.ENOENT:                          raise                  except util.MountFailedError: -                    util.logexc(LOG, ("Failed to mount %s" -                                      " when looking for data"), dev) +                    util.logexc(LOG, "Failed to mount %s when looking for " +                                "data", dev)          # There was no indication on kernel cmdline or data          # in the seeddir suggesting this handler should be used. diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py new file mode 100644 index 00000000..1ce20c10 --- /dev/null +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -0,0 +1,195 @@ +# vi: ts=4 expandtab +# +#    Copyright (C) 2013 Canonical Ltd. +# +#    Author: Ben Howard <ben.howard@canonical.com> +# +#    This program is free software: you can redistribute it and/or modify +#    it under the terms of the GNU General Public License version 3, as +#    published by the Free Software Foundation. +# +#    This program is distributed in the hope that it will be useful, +#    but WITHOUT ANY WARRANTY; without even the implied warranty of +#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +#    GNU General Public License for more details. +# +#    You should have received a copy of the GNU General Public License +#    along with this program.  If not, see <http://www.gnu.org/licenses/>. +# +# +#    Datasource for provisioning on SmartOS. This works on Joyent +#        and public/private Clouds using SmartOS. +# +#    SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests. +#        The meta-data is transmitted via key/value pairs made by +#        requests on the console. For example, to get the hostname, you +#        would send "GET hostname" on /dev/ttyS1. 
+# + + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +import os +import os.path +import serial + + +DEF_TTY_LOC = '/dev/ttyS1' +DEF_TTY_TIMEOUT = 60 +LOG = logging.getLogger(__name__) + +SMARTOS_ATTRIB_MAP = { +    #Cloud-init Key : (SmartOS Key, Strip line endings) +    'local-hostname': ('hostname', True), +    'public-keys': ('root_authorized_keys', True), +    'user-script': ('user-script', False), +    'user-data': ('user-data', False), +    'iptables_disable': ('iptables_disable', True), +    'motd_sys_info': ('motd_sys_info', True), +} + + +class DataSourceSmartOS(sources.DataSource): +    def __init__(self, sys_cfg, distro, paths): +        sources.DataSource.__init__(self, sys_cfg, distro, paths) +        self.seed_dir = os.path.join(paths.seed_dir, 'sdc') +        self.is_smartdc = None +        self.seed = self.sys_cfg.get("serial_device", DEF_TTY_LOC) +        self.seed_timeout = self.sys_cfg.get("serial_timeout", +                                             DEF_TTY_TIMEOUT) + +    def __str__(self): +        root = sources.DataSource.__str__(self) +        return "%s [seed=%s]" % (root, self.seed) + +    def get_data(self): +        md = {} +        ud = "" + +        if not os.path.exists(self.seed): +            LOG.debug("Host does not appear to be on SmartOS") +            return False +        self.seed = self.seed + +        dmi_info = dmi_data() +        if dmi_info is False: +            LOG.debug("No dmidata utility found") +            return False + +        system_uuid, system_type = dmi_info +        if 'smartdc' not in system_type.lower(): +            LOG.debug("Host is not on SmartOS") +            return False +        self.is_smartdc = True +        md['instance-id'] = system_uuid + +        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems(): +            smartos_noun, strip = attribute +            md[ci_noun] = query_data(smartos_noun, self.seed, +                             
        self.seed_timeout, strip=strip) + +        if not md['local-hostname']: +            md['local-hostname'] = system_uuid + +        if md['user-data']: +            ud = md['user-data'] +        else: +            ud = md['user-script'] + +        self.metadata = md +        self.userdata_raw = ud +        return True + +    def get_instance_id(self): +        return self.metadata['instance-id'] + + +def get_serial(seed_device, seed_timeout): +    """This is replaced in unit testing, allowing us to replace +        serial.Serial with a mocked class + +        The timeout value of 60 seconds should never be hit. The value +        is taken from SmartOS own provisioning tools. Since we are reading +        each line individually up until the single ".", the transfer is +        usually very fast (i.e. microseconds) to get the response. +    """ +    if not seed_device: +        raise AttributeError("seed_device value is not set") + +    ser = serial.Serial(seed_device, timeout=seed_timeout) +    if not ser.isOpen(): +        raise SystemError("Unable to open %s" % seed_device) + +    return ser + + +def query_data(noun, seed_device, seed_timeout, strip=False): +    """Makes a request to via the serial console via "GET <NOUN>" + +        In the response, the first line is the status, while subsequent lines +        are is the value. A blank line with a "." is used to indicate end of +        response. 
+    """ + +    if not noun: +        return False + +    ser = get_serial(seed_device, seed_timeout) +    ser.write("GET %s\n" % noun.rstrip()) +    status = str(ser.readline()).rstrip() +    response = [] +    eom_found = False + +    if 'SUCCESS' not in status: +        ser.close() +        return None + +    while not eom_found: +        m = ser.readline() +        if m.rstrip() == ".": +            eom_found = True +        else: +            response.append(m) + +    ser.close() +    if not strip: +        return "".join(response) +    else: +        return "".join(response).rstrip() + +    return None + + +def dmi_data(): +    sys_uuid, sys_type = None, None +    dmidecode_path = util.which('dmidecode') +    if not dmidecode_path: +        return False + +    sys_uuid_cmd = [dmidecode_path, "-s", "system-uuid"] +    try: +        LOG.debug("Getting hostname from dmidecode") +        (sys_uuid, _err) = util.subp(sys_uuid_cmd) +    except Exception as e: +        util.logexc(LOG, "Failed to get system UUID", e) + +    sys_type_cmd = [dmidecode_path, "-s", "system-product-name"] +    try: +        LOG.debug("Determining hypervisor product name via dmidecode") +        (sys_type, _err) = util.subp(sys_type_cmd) +    except Exception as e: +        util.logexc(LOG, "Failed to get system UUID", e) + +    return sys_uuid.lower(), sys_type + + +# Used to match classes to dependencies +datasources = [ +    (DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): +    return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index d8fbacdd..974c0407 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -135,7 +135,8 @@ class DataSource(object):      @property      def availability_zone(self): -        return self.metadata.get('availability-zone') +   
     return self.metadata.get('availability-zone', +                                 self.metadata.get('availability_zone'))      def get_instance_id(self):          if not self.metadata or 'instance-id' not in self.metadata: diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 95133236..70a577bc 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -229,11 +229,9 @@ def extract_authorized_keys(username):          except (IOError, OSError):              # Give up and use a default key filename              auth_key_fn = os.path.join(ssh_dir, 'authorized_keys') -            util.logexc(LOG, ("Failed extracting 'AuthorizedKeysFile'" -                              " in ssh config" -                              " from %r, using 'AuthorizedKeysFile' file" -                              " %r instead"), -                        DEF_SSHD_CFG, auth_key_fn) +            util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh " +                        "config from %r, using 'AuthorizedKeysFile' file " +                        "%r instead", DEF_SSHD_CFG, auth_key_fn)      return (auth_key_fn, parse_authorized_keys(auth_key_fn)) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 543d247f..3e49e8c5 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2012 Canonical Ltd. -#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  #    Copyright (C) 2012 Yahoo! Inc.  
#  #    Author: Scott Moser <scott.moser@canonical.com> @@ -154,9 +154,8 @@ class Init(object):                  try:                      util.chownbyname(log_file, u, g)                  except OSError: -                    util.logexc(LOG, ("Unable to change the ownership" -                                      " of %s to user %s, group %s"), -                                log_file, u, g) +                    util.logexc(LOG, "Unable to change the ownership of %s to " +                                "user %s, group %s", log_file, u, g)      def read_cfg(self, extra_fns=None):          # None check so that we don't keep on re-loading if empty @@ -345,12 +344,13 @@ class Init(object):          cdir = self.paths.get_cpath("handlers")          idir = self._get_ipath("handlers") -        # Add the path to the plugins dir to the top of our list for import -        # instance dir should be read before cloud-dir -        if cdir and cdir not in sys.path: -            sys.path.insert(0, cdir) -        if idir and idir not in sys.path: -            sys.path.insert(0, idir) +        # Add the path to the plugins dir to the top of our list for importing +        # new handlers. 
+        # +        # Note(harlowja): instance dir should be read before cloud-dir +        for d in [cdir, idir]: +            if d and d not in sys.path: +                sys.path.insert(0, d)          # Ensure datasource fetched before activation (just incase)          user_data_msg = self.datasource.get_userdata(True) @@ -358,24 +358,34 @@ class Init(object):          # This keeps track of all the active handlers          c_handlers = helpers.ContentHandlers() -        # Add handlers in cdir -        potential_handlers = util.find_modules(cdir) -        for (fname, mod_name) in potential_handlers.iteritems(): -            try: -                mod_locs = importer.find_module(mod_name, [''], -                                                ['list_types', -                                                 'handle_part']) -                if not mod_locs: -                    LOG.warn(("Could not find a valid user-data handler" -                              " named %s in file %s"), mod_name, fname) -                    continue -                mod = importer.import_module(mod_locs[0]) -                mod = handlers.fixup_handler(mod) -                types = c_handlers.register(mod) -                LOG.debug("Added handler for %s from %s", types, fname) -            except: -                util.logexc(LOG, "Failed to register handler from %s", fname) - +        def register_handlers_in_dir(path): +            # Attempts to register any handler modules under the given path. 
+            if not path or not os.path.isdir(path): +                return +            potential_handlers = util.find_modules(path) +            for (fname, mod_name) in potential_handlers.iteritems(): +                try: +                    mod_locs = importer.find_module(mod_name, [''], +                                                    ['list_types', +                                                     'handle_part']) +                    if not mod_locs: +                        LOG.warn(("Could not find a valid user-data handler" +                                  " named %s in file %s"), mod_name, fname) +                        continue +                    mod = importer.import_module(mod_locs[0]) +                    mod = handlers.fixup_handler(mod) +                    types = c_handlers.register(mod) +                    LOG.debug("Added handler for %s from %s", types, fname) +                except Exception: +                    util.logexc(LOG, "Failed to register handler from %s", +                                fname) + +        # Add any handlers in the cloud-dir +        register_handlers_in_dir(cdir) + +        # Register any other handlers that come from the default set. This +        # is done after the cloud-dir handlers so that the cdir modules can +        # take over the default user-data handler content-types.          
def_handlers = self._default_userdata_handlers()          applied_def_handlers = c_handlers.register_defaults(def_handlers)          if applied_def_handlers: @@ -384,36 +394,51 @@ class Init(object):          # Form our cloud interface          data = self.cloudify() -        # Init the handlers first -        called = [] -        for (_ctype, mod) in c_handlers.iteritems(): -            if mod in called: -                continue -            handlers.call_begin(mod, data, frequency) -            called.append(mod) - -        # Walk the user data -        part_data = { -            'handlers': c_handlers, -            # Any new handlers that are encountered get writen here -            'handlerdir': idir, -            'data': data, -            # The default frequency if handlers don't have one -            'frequency': frequency, -            # This will be used when new handlers are found -            # to help write there contents to files with numbered -            # names... -            'handlercount': 0, -        } -        handlers.walk(user_data_msg, handlers.walker_callback, data=part_data) +        def init_handlers(): +            # Init the handlers first +            for (_ctype, mod) in c_handlers.iteritems(): +                if mod in c_handlers.initialized: +                    # Avoid initing the same module twice (if said module +                    # is registered to more than one content-type). 
+                    continue +                handlers.call_begin(mod, data, frequency) +                c_handlers.initialized.append(mod) + +        def walk_handlers(): +            # Walk the user data +            part_data = { +                'handlers': c_handlers, +                # Any new handlers that are encountered get writen here +                'handlerdir': idir, +                'data': data, +                # The default frequency if handlers don't have one +                'frequency': frequency, +                # This will be used when new handlers are found +                # to help write there contents to files with numbered +                # names... +                'handlercount': 0, +            } +            handlers.walk(user_data_msg, handlers.walker_callback, +                          data=part_data) + +        def finalize_handlers(): +            # Give callbacks opportunity to finalize +            for (_ctype, mod) in c_handlers.iteritems(): +                if mod not in c_handlers.initialized: +                    # Said module was never inited in the first place, so lets +                    # not attempt to finalize those that never got called. 
+                    continue +                c_handlers.initialized.remove(mod) +                try: +                    handlers.call_end(mod, data, frequency) +                except: +                    util.logexc(LOG, "Failed to finalize handler: %s", mod) -        # Give callbacks opportunity to finalize -        called = [] -        for (_ctype, mod) in c_handlers.iteritems(): -            if mod in called: -                continue -            handlers.call_end(mod, data, frequency) -            called.append(mod) +        try: +            init_handlers() +            walk_handlers() +        finally: +            finalize_handlers()          # Perform post-consumption adjustments so that          # modules that run during the init stage reflect diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index df069ff8..d49ea094 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -23,8 +23,10 @@  import os  import email +  from email.mime.base import MIMEBase  from email.mime.multipart import MIMEMultipart +from email.mime.nonmultipart import MIMENonMultipart  from email.mime.text import MIMEText  from cloudinit import handlers @@ -48,6 +50,18 @@ ARCHIVE_TYPES = ["text/cloud-config-archive"]  UNDEF_TYPE = "text/plain"  ARCHIVE_UNDEF_TYPE = "text/cloud-config" +# This seems to hit most of the gzip possible content types. 
+DECOMP_TYPES = [ +    'application/gzip', +    'application/gzip-compressed', +    'application/gzipped', +    'application/x-compress', +    'application/x-compressed', +    'application/x-gunzip', +    'application/x-gzip', +    'application/x-gzip-compressed', +] +  # Msg header used to track attachments  ATTACHMENT_FIELD = 'Number-Attachments' @@ -56,6 +70,17 @@ ATTACHMENT_FIELD = 'Number-Attachments'  EXAMINE_FOR_LAUNCH_INDEX = ["text/cloud-config"] +def _replace_header(msg, key, value): +    del msg[key] +    msg[key] = value + + +def _set_filename(msg, filename): +    del msg['Content-Disposition'] +    msg.add_header('Content-Disposition', +                   'attachment', filename=str(filename)) + +  class UserDataProcessor(object):      def __init__(self, paths):          self.paths = paths @@ -67,6 +92,10 @@ class UserDataProcessor(object):          return accumulating_msg      def _process_msg(self, base_msg, append_msg): + +        def find_ctype(payload): +            return handlers.type_from_starts_with(payload) +          for part in base_msg.walk():              if is_skippable(part):                  continue @@ -74,21 +103,51 @@ class UserDataProcessor(object):              ctype = None              ctype_orig = part.get_content_type()              payload = part.get_payload(decode=True) +            was_compressed = False + +            # When the message states it is of a gzipped content type ensure +            # that we attempt to decode said payload so that the decompressed +            # data can be examined (instead of the compressed data). +            if ctype_orig in DECOMP_TYPES: +                try: +                    payload = util.decomp_gzip(payload, quiet=False) +                    # At this point we don't know what the content-type is +                    # since we just decompressed it. 
+                    ctype_orig = None +                    was_compressed = True +                except util.DecompressionError as e: +                    LOG.warn("Failed decompressing payload from %s of length" +                             " %s due to: %s", ctype_orig, len(payload), e) +                    continue +            # Attempt to figure out the payloads content-type              if not ctype_orig:                  ctype_orig = UNDEF_TYPE -              if ctype_orig in TYPE_NEEDED: -                ctype = handlers.type_from_starts_with(payload) - +                ctype = find_ctype(payload)              if ctype is None:                  ctype = ctype_orig +            # In the case where the data was compressed, we want to make sure +            # that we create a new message that contains the found content +            # type with the uncompressed content since later traversals of the +            # messages will expect a part not compressed. +            if was_compressed: +                maintype, subtype = ctype.split("/", 1) +                n_part = MIMENonMultipart(maintype, subtype) +                n_part.set_payload(payload) +                # Copy various headers from the old part to the new one, +                # but don't include all the headers since some are not useful +                # after decoding and decompression. 
+                if part.get_filename(): +                    _set_filename(n_part, part.get_filename()) +                for h in ('Launch-Index',): +                    if h in part: +                        _replace_header(n_part, h, str(part[h])) +                part = n_part +              if ctype != ctype_orig: -                if CONTENT_TYPE in part: -                    part.replace_header(CONTENT_TYPE, ctype) -                else: -                    part[CONTENT_TYPE] = ctype +                _replace_header(part, CONTENT_TYPE, ctype)              if ctype in INCLUDE_TYPES:                  self._do_include(payload, append_msg) @@ -98,12 +157,9 @@ class UserDataProcessor(object):                  self._explode_archive(payload, append_msg)                  continue -            # Should this be happening, shouldn't +            # TODO(harlowja): Should this be happening, shouldn't              # the part header be modified and not the base? -            if CONTENT_TYPE in base_msg: -                base_msg.replace_header(CONTENT_TYPE, ctype) -            else: -                base_msg[CONTENT_TYPE] = ctype +            _replace_header(base_msg, CONTENT_TYPE, ctype)              self._attach_part(append_msg, part) @@ -138,8 +194,7 @@ class UserDataProcessor(object):      def _process_before_attach(self, msg, attached_id):          if not msg.get_filename(): -            msg.add_header('Content-Disposition', -                           'attachment', filename=PART_FN_TPL % (attached_id)) +            _set_filename(msg, PART_FN_TPL % (attached_id))          self._attach_launch_index(msg)      def _do_include(self, content, append_msg): @@ -217,13 +272,15 @@ class UserDataProcessor(object):                  msg.set_payload(content)              if 'filename' in ent: -                msg.add_header('Content-Disposition', -                               'attachment', filename=ent['filename']) +                _set_filename(msg, ent['filename'])             
 if 'launch-index' in ent:                  msg.add_header('Launch-Index', str(ent['launch-index']))              for header in list(ent.keys()): -                if header in ('content', 'filename', 'type', 'launch-index'): +                if header.lower() in ('content', 'filename', 'type', +                                      'launch-index', 'content-disposition', +                                      ATTACHMENT_FIELD.lower(), +                                      CONTENT_TYPE.lower()):                      continue                  msg.add_header(header, ent[header]) @@ -238,13 +295,13 @@ class UserDataProcessor(object):              outer_msg[ATTACHMENT_FIELD] = '0'          if new_count is not None: -            outer_msg.replace_header(ATTACHMENT_FIELD, str(new_count)) +            _replace_header(outer_msg, ATTACHMENT_FIELD, str(new_count))          fetched_count = 0          try:              fetched_count = int(outer_msg.get(ATTACHMENT_FIELD))          except (ValueError, TypeError): -            outer_msg.replace_header(ATTACHMENT_FIELD, str(fetched_count)) +            _replace_header(outer_msg, ATTACHMENT_FIELD, str(fetched_count))          return fetched_count      def _attach_part(self, outer_msg, part): @@ -276,10 +333,7 @@ def convert_string(raw_data, headers=None):      if "mime-version:" in data[0:4096].lower():          msg = email.message_from_string(data)          for (key, val) in headers.iteritems(): -            if key in msg: -                msg.replace_header(key, val) -            else: -                msg[key] = val +            _replace_header(msg, key, val)      else:          mtype = headers.get(CONTENT_TYPE, NOT_MULTIPART_TYPE)          maintype, subtype = mtype.split("/", 1) diff --git a/cloudinit/util.py b/cloudinit/util.py index b27b3567..8542fe27 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1,7 +1,7 @@  # vi: ts=4 expandtab  #  #    Copyright (C) 2012 Canonical Ltd. 
-#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.  #    Copyright (C) 2012 Yahoo! Inc.  #  #    Author: Scott Moser <scott.moser@canonical.com> @@ -219,8 +219,7 @@ def fork_cb(child_cb, *args):              child_cb(*args)              os._exit(0)  # pylint: disable=W0212          except: -            logexc(LOG, ("Failed forking and" -                         " calling callback %s"), +            logexc(LOG, "Failed forking and calling callback %s",                     type_utils.obj_name(child_cb))              os._exit(1)  # pylint: disable=W0212      else: @@ -1531,6 +1530,14 @@ def shellify(cmdlist, add_header=True):      return content +def strip_prefix_suffix(line, prefix=None, suffix=None): +    if prefix and line.startswith(prefix): +        line = line[len(prefix):] +    if suffix and line.endswith(suffix): +        line = line[:-len(suffix)] +    return line + +  def is_container():      """      Checks to see if this code running in a container of some sort @@ -1744,3 +1751,22 @@ def get_mount_info(path, log=LOG):      mountinfo_path = '/proc/%s/mountinfo' % os.getpid()      lines = load_file(mountinfo_path).splitlines()      return parse_mount_info(path, lines, log) + + +def which(program): +    # Return path of program for execution if found in path +    def is_exe(fpath): +        return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + +    _fpath, _ = os.path.split(program) +    if _fpath: +        if is_exe(program): +            return program +    else: +        for path in os.environ["PATH"].split(os.pathsep): +            path = path.strip('"') +            exe_file = os.path.join(path, program) +            if is_exe(exe_file): +                return exe_file + +    return None diff --git a/cloudinit/version.py b/cloudinit/version.py index 024d5118..4b29a587 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -20,7 +20,7 @@ from 
distutils import version as vr  def version(): -    return vr.StrictVersion("0.7.2") +    return vr.StrictVersion("0.7.3")  def version_string(): diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index fc8c22d4..6544448e 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -42,3 +42,18 @@ datasource:      meta-data:         instance-id: i-87018aed         local-hostname: myhost.internal + +  Azure: +    agent_command: [service, walinuxagent, start] +    set_hostname: True +    hostname_bounce: +      interface: eth0 +      policy: on # [can be 'on', 'off' or 'force'] +    } + +  SmartOS: +    # Smart OS datasource works over a serial console interacting with +    # a server on the other end. By default, the second serial console is the +    # device. SmartOS also uses a serial timeout of 60 seconds. +    serial device: /dev/ttyS1 +    serial timeout: 60 diff --git a/doc/sources/azure/README.rst b/doc/sources/azure/README.rst new file mode 100644 index 00000000..8239d1fa --- /dev/null +++ b/doc/sources/azure/README.rst @@ -0,0 +1,134 @@ +================ +Azure Datasource +================ + +This datasource finds metadata and user-data from the Azure cloud platform. + +Azure Platform +-------------- +The azure cloud-platform provides initial data to an instance via an attached +CD formated in UDF.  That CD contains a 'ovf-env.xml' file that provides some +information.  Additional information is obtained via interaction with the +"endpoint".  The ip address of the endpoint is advertised to the instance +inside of dhcp option 245.  On ubuntu, that can be seen in +/var/lib/dhcp/dhclient.eth0.leases as a colon delimited hex value (example: +``option unknown-245 64:41:60:82;`` is 100.65.96.130) + +walinuxagent +------------ +In order to operate correctly, cloud-init needs walinuxagent to provide much +of the interaction with azure.  
In addition to "provisioning" code, +walinuxagent runs as a long running daemon that handles the +following things: +- generate an x509 certificate and send that to the endpoint + +waagent.conf config +~~~~~~~~~~~~~~~~~~~ +In order to use waagent.conf with cloud-init, the following settings are recommended.  Other values can be changed or set to the defaults. + +  :: + +   # disabling provisioning turns off all 'Provisioning.*' functions +   Provisioning.Enabled=n +   # this is currently not handled by cloud-init, so let walinuxagent do it. +   ResourceDisk.Format=y +   ResourceDisk.MountPoint=/mnt + + +Userdata +-------- +Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init +expects that user-data will be provided as a base64 encoded value inside the +text child of an element named ``UserData`` or ``CustomData`` which is a direct +child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``). +If both ``UserData`` and ``CustomData`` are provided, behavior is undefined as +to which will be selected. + +In the example below, the user-data provided is 'this is my userdata', and the +datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``. +That agent command will take effect as if it were specified in system config. + +Example: + +.. 
code:: + + <wa:ProvisioningSection> +  <wa:Version>1.0</wa:Version> +  <LinuxProvisioningConfigurationSet +     xmlns="http://schemas.microsoft.com/windowsazure" +     xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> +   <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType> +   <HostName>myHost</HostName> +   <UserName>myuser</UserName> +   <UserPassword/> +   <CustomData>dGhpcyBpcyBteSB1c2VyZGF0YQ===</CustomData> +   <dscfg>eyJhZ2VudF9jb21tYW5kIjogWyJzdGFydCIsICJ3YWxpbnV4YWdlbnQiXX0=</dscfg> +   <DisableSshPasswordAuthentication>true</DisableSshPasswordAuthentication> +   <SSH> +    <PublicKeys> +     <PublicKey> +      <Fingerprint>6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7</Fingerprint> +      <Path>this-value-unused</Path> +     </PublicKey> +    </PublicKeys> +   </SSH> +   </LinuxProvisioningConfigurationSet> + </wa:ProvisioningSection> + +Configuration +------------- +Configuration for the datasource can be read from the system config's or set +via the `dscfg` entry in the `LinuxProvisioningConfigurationSet`.  Content in +dscfg node is expected to be base64 encoded yaml content, and it will be +merged into the 'datasource: Azure' entry. + +The '``hostname_bounce: command``' entry can be either the literal string +'builtin' or a command to execute.  The command will be invoked after the +hostname is set, and will have the 'interface' in its environment.  If +``set_hostname`` is not true, then ``hostname_bounce`` will be ignored. + +An example might be: +  command:  ["sh", "-c", "killall dhclient; dhclient $interface"] + +.. code:: + +  datasource: +   agent_command +   Azure: +    agent_command: [service, walinuxagent, start] +    set_hostname: True +    hostname_bounce: +     # the name of the interface to bounce +     interface: eth0 +     # policy can be 'on', 'off' or 'force' +     policy: on +     # the method 'bounce' command. 
+     command: "builtin" +     hostname_command: "hostname" +    } + +hostname +-------- +When the user launches an instance, they provide a hostname for that instance. +The hostname is provided to the instance in the ovf-env.xml file as +``HostName``. + +Whatever value the instance provides in its dhcp request will resolve in the +domain returned in the 'search' request. + +The interesting issue is that a generic image will already have a hostname +configured.  The ubuntu cloud images have 'ubuntu' as the hostname of the +system, and the initial dhcp request on eth0 is not guaranteed to occur after +the datasource code has been run.  So, on first boot, that initial value will +be sent in the dhcp request and *that* value will resolve. + +In order to make the ``HostName`` provided in the ovf-env.xml resolve, a +dhcp request must be made with the new value.  Walinuxagent (in its current +version) handles this by polling the state of hostname and bouncing ('``ifdown +eth0; ifup eth0``' the network interface if it sees that a change has been +made. + +cloud-init handles this by setting the hostname in the DataSource's 'get_data' +method via '``hostname $HostName``', and then bouncing the interface.  This +behavior can be configured or disabled in the datasource config.  See +'Configuration' above. diff --git a/packages/bddeb b/packages/bddeb index 7bf59fcd..15d424f2 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -36,6 +36,7 @@ PKG_MP = {      'prettytable': 'python-prettytable',      'pyyaml': 'python-yaml',      'requests': 'python-requests', +    'jsonpatch': 'python-json-patch',  }  DEBUILD_ARGS = ["-us", "-S", "-uc", "-d"] diff --git a/packages/brpm b/packages/brpm index 53de802c..91a0a0ec 100755 --- a/packages/brpm +++ b/packages/brpm @@ -34,14 +34,28 @@ from cloudinit import util  # this is a translation of the 'requires'  # file pypi package name to a redhat/fedora package name.  
PKG_MP = { -    'argparse': 'python-argparse', -    'boto': 'python-boto', -    'cheetah': 'python-cheetah', -    'configobj': 'python-configobj', -    'oauth': 'python-oauth', -    'prettytable': 'python-prettytable', -    'pyyaml': 'PyYAML', -    'requests': 'python-requests', +    'redhat': { +        'argparse': 'python-argparse', +        'boto': 'python-boto', +        'cheetah': 'python-cheetah', +        'configobj': 'python-configobj', +        'jsonpatch': 'python-jsonpatch', +        'oauth': 'python-oauth', +        'prettytable': 'python-prettytable', +        'pyyaml': 'PyYAML', +        'requests': 'python-requests', +    }, +    'suse': { +        'argparse': 'python-argparse', +        'boto': 'python-boto', +        'cheetah': 'python-cheetah', +        'configobj': 'python-configobj', +        'jsonpatch': 'python-jsonpatch', +        'oauth': 'python-oauth', +        'prettytable': 'python-prettytable', +        'pyyaml': 'python-yaml', +        'requests': 'python-requests', +    }  }  # Subdirectories of the ~/rpmbuild dir @@ -120,7 +134,7 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn):      # Map to known packages      requires = []      for p in pkgs: -        tgt_pkg = PKG_MP.get(p) +        tgt_pkg = PKG_MP[args.distro].get(p)          if not tgt_pkg:              raise RuntimeError(("Do not know how to translate pypi dependency"                                  " %r to a known package") % (p)) @@ -142,10 +156,11 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn):                  missing_versions += 1                  if missing_versions == 1:                      # Must be using a new 'dev'/'trunk' release -                    changelog_lines.append(format_change_line(datetime.now(), '??')) +                    changelog_lines.append(format_change_line(datetime.now(), +                                                              '??'))                  else: -                    sys.stderr.write(("Changelog 
version line %s " -                                      "does not have a corresponding tag!\n") % (line)) +                    sys.stderr.write(("Changelog version line %s does not " +                                      "have a corresponding tag!\n") % (line))              else:                  changelog_lines.append(header)          else: @@ -171,6 +186,10 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn):  def main():      parser = argparse.ArgumentParser() +    parser.add_argument("-d", "--distro", dest="distro", +                        help="select distro (default: %(default)s)", +                        metavar="DISTRO", default='redhat', +                        choices=('redhat', 'suse'))      parser.add_argument("-b", "--boot", dest="boot",                          help="select boot type (default: %(default)s)",                           metavar="TYPE", default='sysvinit', @@ -218,7 +237,7 @@ def main():      # Form the spec file to be used      tmpl_fn = util.abs_join(find_root(), 'packages', -                            'redhat', 'cloud-init.spec.in') +                            args.distro, 'cloud-init.spec.in')      contents = generate_spec_contents(args, tmpl_fn, root_dir,                                        os.path.basename(archive_fn))      spec_fn = util.abs_join(root_dir, 'cloud-init.spec') @@ -237,13 +256,15 @@ def main():      globs.extend(glob.glob("%s/*.rpm" %                             (util.abs_join(root_dir, 'RPMS', 'noarch'))))      globs.extend(glob.glob("%s/*.rpm" % +                           (util.abs_join(root_dir, 'RPMS', 'x86_64')))) +    globs.extend(glob.glob("%s/*.rpm" %                             (util.abs_join(root_dir, 'RPMS'))))      globs.extend(glob.glob("%s/*.rpm" %                             (util.abs_join(root_dir, 'SRPMS'))))      for rpm_fn in globs:          tgt_fn = util.abs_join(os.getcwd(), os.path.basename(rpm_fn))          shutil.move(rpm_fn, tgt_fn) -        print("Wrote out redhat package 
%r" % (tgt_fn)) +        print("Wrote out %s package %r" % (args.distro, tgt_fn))      return 0 diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in new file mode 100644 index 00000000..296505c6 --- /dev/null +++ b/packages/suse/cloud-init.spec.in @@ -0,0 +1,162 @@ +## This is a cheetah template + +# See: http://www.zarb.org/~jasonc/macros.php +# Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets +# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html + +#for $d in $defines +%define ${d} +#end for + +Name:           cloud-init +Version:        ${version} +Release:        ${release}${subrelease}%{?dist} +Summary:        Cloud instance init scripts + +Group:          System/Management +License:        GPLv3 +URL:            http://launchpad.net/cloud-init + +Source0:        ${archive_name} +BuildRoot:      %{_tmppath}/%{name}-%{version}-build + +%if 0%{?suse_version} && 0%{?suse_version} <= 1110 +%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} +%else +BuildArch:      noarch +%endif + +BuildRequires:        fdupes +BuildRequires:        filesystem +BuildRequires:        python-devel +BuildRequires:        python-setuptools +BuildRequires:        python-cheetah + +%if 0%{?suse_version} && 0%{?suse_version} <= 1210 +  %define initsys sysvinit +%else +  %define initsys systemd +%endif + +# System util packages needed +Requires:       iproute2 +Requires:       e2fsprogs +Requires:       net-tools +Requires:       procps + +# Install pypi 'dynamic' requirements +#for $r in $requires +Requires:       ${r} +#end for + +# Custom patches +#set $size = 0 +#for $p in $patches +Patch${size}: $p +#set $size += 1 +#end for + +%description +Cloud-init is a set of init scripts for cloud instances.  Cloud instances +need special scripts to run during initialization to retrieve and install +ssh keys and to let the user run various scripts. 
+ +%prep +%setup -q -n %{name}-%{version}~${release} + +# Custom patches activation +#set $size = 0 +#for $p in $patches +%patch${size} -p1 +#set $size += 1 +#end for + +%build +%{__python} setup.py build + +%install +%{__python} setup.py install \ +            --skip-build --root=%{buildroot} --prefix=%{_prefix} \ +            --record-rpm=INSTALLED_FILES --install-lib=%{python_sitelib} \ +            --init-system=%{initsys} + +# Remove non-SUSE templates +rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.* +rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.* +rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.* + +# Remove cloud-init tests +rm -r %{buildroot}/%{python_sitelib}/tests + +# Move sysvinit scripts to the correct place and create symbolic links +%if %{initsys} == sysvinit +   mkdir -p %{buildroot}/%{_initddir} +   mv %{buildroot}%{_sysconfdir}/rc.d/init.d/* %{buildroot}%{_initddir}/ +   rmdir %{buildroot}%{_sysconfdir}/rc.d/init.d +   rmdir %{buildroot}%{_sysconfdir}/rc.d + +   mkdir -p %{buildroot}/%{_sbindir} +   pushd %{buildroot}/%{_initddir} +   for file in * ; do +      ln -s %{_initddir}/\${file} %{buildroot}/%{_sbindir}/rc\${file} +   done +   popd +%endif + +# Move documentation +mkdir -p %{buildroot}/%{_defaultdocdir} +mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir} +for doc in TODO LICENSE ChangeLog Requires ; do +   cp \${doc} %{buildroot}/%{_defaultdocdir}/cloud-init +done + +# Remove duplicate files +%if 0%{?suse_version} +   %fdupes %{buildroot}/%{python_sitelib} +%endif + +mkdir -p %{buildroot}/var/lib/cloud + +%postun +%insserv_cleanup + +%files + +# Sysvinit scripts +%if %{initsys} == sysvinit +   %attr(0755, root, root) %{_initddir}/cloud-config +   %attr(0755, root, root) %{_initddir}/cloud-final +   %attr(0755, root, root) %{_initddir}/cloud-init-local +   %attr(0755, root, root) %{_initddir}/cloud-init + +   %{_sbindir}/rccloud-* +%endif + +# Program binaries +%{_bindir}/cloud-init* + +# 
There doesn't seem to be an agreed upon place for these +# although it appears the standard says /usr/lib but rpmbuild +# will try /usr/lib64 ?? +/usr/lib/%{name}/uncloud-init +/usr/lib/%{name}/write-ssh-key-fingerprints + +# Docs +%doc %{_defaultdocdir}/cloud-init/* + +# Configs +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg +%dir               %{_sysconfdir}/cloud/cloud.cfg.d +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/README +%dir               %{_sysconfdir}/cloud/templates +%config(noreplace) %{_sysconfdir}/cloud/templates/* + +# Python code is here... +%{python_sitelib}/* + +/var/lib/cloud + +%changelog + +${changelog} diff --git a/templates/hosts.suse.tmpl b/templates/hosts.suse.tmpl new file mode 100644 index 00000000..5d3d57e4 --- /dev/null +++ b/templates/hosts.suse.tmpl @@ -0,0 +1,24 @@ +#* +    This file /etc/cloud/templates/hosts.suse.tmpl is only utilized +    if enabled in cloud-config.  Specifically, in order to enable it +    you need to add the following to config: +      manage_etc_hosts: True +*# +# Your system has configured 'manage_etc_hosts' as True. +# As a result, if you wish for changes to this file to persist +# then you will need to either +# a.) make changes to the master file in /etc/cloud/templates/hosts.suse.tmpl +# b.) 
change or remove the value of 'manage_etc_hosts' in +#     /etc/cloud/cloud.cfg or cloud-config from user-data +# +# The following lines are desirable for IPv4 capable hosts +127.0.0.1 localhost + +# The following lines are desirable for IPv6 capable hosts +::1 localhost ipv6-localhost ipv6-loopback +fe00::0 ipv6-localnet + +ff00::0 ipv6-mcastprefix +ff02::1 ipv6-allnodes +ff02::2 ipv6-allrouters +ff02::3 ipv6-allhosts diff --git a/tests/data/mountinfo_precise_ext4.txt b/tests/data/mountinfo_precise_ext4.txt new file mode 100644 index 00000000..a7a1db67 --- /dev/null +++ b/tests/data/mountinfo_precise_ext4.txt @@ -0,0 +1,24 @@ +15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=16422216k,nr_inodes=4105554,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=6572812k,mode=755 +20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs cgroup rw,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +25 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +26 19 0:19 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 19 0:21 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +24 21 0:18 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset +29 21 0:22 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +30 21 0:23 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +31 21 0:24 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +32 21 0:25 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices 
+33 21 0:26 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +34 21 0:27 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +35 21 0:28 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +36 20 9:0 / /boot rw,relatime - ext4 /dev/md0 rw,data=ordered +37 16 0:29 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +39 28 0:30 / /run/user/foobar/gvfs rw,nosuid,nodev,relatime - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 diff --git a/tests/data/mountinfo_raring_btrfs.txt b/tests/data/mountinfo_raring_btrfs.txt new file mode 100644 index 00000000..c5795636 --- /dev/null +++ b/tests/data/mountinfo_raring_btrfs.txt @@ -0,0 +1,13 @@ +15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=865556k,nr_inodes=216389,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=348196k,mode=755 +20 1 0:16 /@ / rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache +21 15 0:19 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +22 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +23 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +24 19 0:20 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +25 19 0:21 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +26 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +27 20 0:16 /@home /home rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index e020a3ec..c0da0983 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -146,7 +146,8 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):                     ('chmod', 1),        
             ('delete_dir_contents', 1),                     ('del_file', 1), -                   ('sym_link', -1)], +                   ('sym_link', -1), +                   ('copy', -1)],          }          for (mod, funcs) in patch_funcs.items():              for (f, am) in funcs: diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index 9cf28215..b387f13b 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -1,7 +1,6 @@  """Tests of the built-in user data handlers."""  import os -import unittest  from tests.unittests import helpers as test_helpers @@ -35,7 +34,6 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase):                        None, None, None)          self.assertEquals(0, len(os.listdir(up_root))) -    @unittest.skip("until LP: #1124384 fixed")      def test_upstart_frequency_single(self):          # files should be written out when frequency is ! per-instance          new_root = self.makeDir() @@ -47,6 +45,7 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase):              'upstart_dir': "/etc/upstart",          }) +        upstart_job.SUITABLE_UPSTART = True          util.ensure_dir("/run")          util.ensure_dir("/etc/upstart") diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py new file mode 100644 index 00000000..4cd3f213 --- /dev/null +++ b/tests/unittests/test_datasource/test_azure.py @@ -0,0 +1,327 @@ +from cloudinit import helpers +from cloudinit.sources import DataSourceAzure +from tests.unittests.helpers import populate_dir + +import base64 +from mocker import MockerTestCase +import os +import yaml + + +def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): +    if data is None: +        data = {'HostName': 'FOOHOST'} +    if pubkeys is None: +        pubkeys = {} + +    content = """<?xml version="1.0" encoding="utf-8"?> +<Environment 
xmlns="http://schemas.dmtf.org/ovf/environment/1" + xmlns:oe="http://schemas.dmtf.org/ovf/environment/1" + xmlns:wa="http://schemas.microsoft.com/windowsazure" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> + + <wa:ProvisioningSection><wa:Version>1.0</wa:Version> + <LinuxProvisioningConfigurationSet +  xmlns="http://schemas.microsoft.com/windowsazure" +  xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> +  <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType> +    """ +    for key, dval in data.items(): +        if isinstance(dval, dict): +            val = dval.get('text') +            attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v in dval.items() +                                    if k != 'text']) +        else: +            val = dval +            attrs = "" +        content += "<%s%s>%s</%s>\n" % (key, attrs, val, key) + +    if userdata: +        content += "<UserData>%s</UserData>\n" % (base64.b64encode(userdata)) + +    if pubkeys: +        content += "<SSH><PublicKeys>\n" +        for fp, path in pubkeys: +            content += " <PublicKey>" +            content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" % +                        (fp, path)) +            content += "</PublicKey>\n" +        content += "</PublicKeys></SSH>" +    content += """ + </LinuxProvisioningConfigurationSet> + </wa:ProvisioningSection> + <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version> + <PlatformSettings xmlns="http://schemas.microsoft.com/windowsazure" +  xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> + <KmsServerHostname>kms.core.windows.net</KmsServerHostname> + <ProvisionGuestAgent>false</ProvisionGuestAgent> + <GuestAgentPackageName i:nil="true" /> + </PlatformSettings></wa:PlatformSettingsSection> +</Environment> +    """ + +    return content + + +class TestAzureDataSource(MockerTestCase): + +    def setUp(self): +        # makeDir comes from MockerTestCase +        self.tmp = self.makeDir() + +        # patch 
cloud_dir, so our 'seed_dir' is guaranteed empty +        self.paths = helpers.Paths({'cloud_dir': self.tmp}) + +        self.unapply = [] +        super(TestAzureDataSource, self).setUp() + +    def tearDown(self): +        apply_patches([i for i in reversed(self.unapply)]) +        super(TestAzureDataSource, self).tearDown() + +    def apply_patches(self, patches): +        ret = apply_patches(patches) +        self.unapply += ret + +    def _get_ds(self, data): + +        def dsdevs(): +            return data.get('dsdevs', []) + +        def _invoke_agent(cmd): +            data['agent_invoked'] = cmd + +        def _write_files(datadir, files, dirmode): +            data['files'] = {} +            data['datadir'] = datadir +            data['datadir_mode'] = dirmode +            for (fname, content) in files.items(): +                data['files'][fname] = content + +        def _wait_for_files(flist, _maxwait=None, _naplen=None): +            data['waited'] = flist +            return [] + +        def _pubkeys_from_crt_files(flist): +            data['pubkey_files'] = flist +            return ["pubkey_from: %s" % f for f in flist] + +        def _iid_from_shared_config(path): +            data['iid_from_shared_cfg'] = path +            return 'i-my-azure-id' + +        def _apply_hostname_bounce(**kwargs): +            data['apply_hostname_bounce'] = kwargs + +        if data.get('ovfcontent') is not None: +            populate_dir(os.path.join(self.paths.seed_dir, "azure"), +                         {'ovf-env.xml': data['ovfcontent']}) + +        mod = DataSourceAzure + +        if data.get('dsdevs'): +            self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)]) + +        self.apply_patches([(mod, 'invoke_agent', _invoke_agent), +                            (mod, 'write_files', _write_files), +                            (mod, 'wait_for_files', _wait_for_files), +                            (mod, 'pubkeys_from_crt_files', +               
              _pubkeys_from_crt_files), +                            (mod, 'iid_from_shared_config', +                             _iid_from_shared_config), +                            (mod, 'apply_hostname_bounce', +                             _apply_hostname_bounce), ]) + +        dsrc = mod.DataSourceAzureNet( +            data.get('sys_cfg', {}), distro=None, paths=self.paths) + +        return dsrc + +    def test_basic_seed_dir(self): +        odata = {'HostName': "myhost", 'UserName': "myuser"} +        data = {'ovfcontent': construct_valid_ovf_env(data=odata), +                'sys_cfg': {}} + +        dsrc = self._get_ds(data) +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertEqual(dsrc.userdata_raw, "") +        self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName']) +        self.assertTrue('ovf-env.xml' in data['files']) +        self.assertEqual(0700, data['datadir_mode']) +        self.assertEqual(dsrc.metadata['instance-id'], 'i-my-azure-id') + +    def test_user_cfg_set_agent_command_plain(self): +        # set dscfg in via plaintext +        cfg = {'agent_command': "my_command"} +        odata = {'HostName': "myhost", 'UserName': "myuser", +                'dscfg': {'text': yaml.dump(cfg), 'encoding': 'plain'}} +        data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + +        dsrc = self._get_ds(data) +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertEqual(data['agent_invoked'], cfg['agent_command']) + +    def test_user_cfg_set_agent_command(self): +        # set dscfg in via base64 encoded yaml +        cfg = {'agent_command': "my_command"} +        odata = {'HostName': "myhost", 'UserName': "myuser", +                'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), +                          'encoding': 'base64'}} +        data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + +        dsrc = self._get_ds(data) +        ret = dsrc.get_data() +      
  self.assertTrue(ret) +        self.assertEqual(data['agent_invoked'], cfg['agent_command']) + +    def test_sys_cfg_set_agent_command(self): +        sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}} +        data = {'ovfcontent': construct_valid_ovf_env(data={}), +                'sys_cfg': sys_cfg} + +        dsrc = self._get_ds(data) +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertEqual(data['agent_invoked'], '_COMMAND') + +    def test_username_used(self): +        odata = {'HostName': "myhost", 'UserName': "myuser"} +        data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + +        dsrc = self._get_ds(data) +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertEqual(dsrc.cfg['system_info']['default_user']['name'], +                         "myuser") + +    def test_password_given(self): +        odata = {'HostName': "myhost", 'UserName': "myuser", +                 'UserPassword': "mypass"} +        data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + +        dsrc = self._get_ds(data) +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertTrue('default_user' in dsrc.cfg['system_info']) +        defuser = dsrc.cfg['system_info']['default_user'] + +        # default user shoudl be updated for password and username +        # and should not be locked. 
+        self.assertEqual(defuser['name'], odata['UserName']) +        self.assertEqual(defuser['password'], odata['UserPassword']) +        self.assertFalse(defuser['lock_passwd']) + +    def test_userdata_found(self): +        mydata = "FOOBAR" +        odata = {'UserData': base64.b64encode(mydata)} +        data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + +        dsrc = self._get_ds(data) +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertEqual(dsrc.userdata_raw, mydata) + +    def test_no_datasource_expected(self): +        #no source should be found if no seed_dir and no devs +        data = {} +        dsrc = self._get_ds({}) +        ret = dsrc.get_data() +        self.assertFalse(ret) +        self.assertFalse('agent_invoked' in data) + +    def test_cfg_has_pubkeys(self): +        odata = {'HostName': "myhost", 'UserName': "myuser"} +        mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}] +        pubkeys = [(x['fingerprint'], x['path']) for x in mypklist] +        data = {'ovfcontent': construct_valid_ovf_env(data=odata, +                                                      pubkeys=pubkeys)} + +        dsrc = self._get_ds(data) +        ret = dsrc.get_data() +        self.assertTrue(ret) +        for mypk in mypklist: +            self.assertIn(mypk, dsrc.cfg['_pubkeys']) + +    def test_disabled_bounce(self): +        pass + +    def test_apply_bounce_call_1(self): +        # hostname needs to get through to apply_hostname_bounce +        mydata = "FOOBAR" +        odata = {'HostName': 'my-random-hostname'} +        data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + +        self._get_ds(data).get_data() +        self.assertIn('hostname', data['apply_hostname_bounce']) +        self.assertEqual(data['apply_hostname_bounce']['hostname'], +                         odata['HostName']) + +    def test_apply_bounce_call_configurable(self): +        # hostname_bounce should be configurable in datasource 
cfg +        cfg = {'hostname_bounce': {'interface': 'eth1', 'policy': 'off', +                                   'command': 'my-bounce-command', +                                   'hostname_command': 'my-hostname-command'}} +        odata = {'HostName': "xhost", +                'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), +                          'encoding': 'base64'}} +        data = {'ovfcontent': construct_valid_ovf_env(data=odata)} +        self._get_ds(data).get_data() + +        for k in cfg['hostname_bounce']: +            self.assertIn(k, data['apply_hostname_bounce']) + +        for k, v in cfg['hostname_bounce'].items(): +            self.assertEqual(data['apply_hostname_bounce'][k], v) + +    def test_set_hostname_disabled(self): +        # config specifying set_hostname off should not bounce +        cfg = {'set_hostname': False} +        odata = {'HostName': "xhost", +                'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), +                          'encoding': 'base64'}} +        data = {'ovfcontent': construct_valid_ovf_env(data=odata)} +        self._get_ds(data).get_data() + +        self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A") + + +class TestReadAzureOvf(MockerTestCase): +    def test_invalid_xml_raises_non_azure_ds(self): +        invalid_xml = "<foo>" + construct_valid_ovf_env(data={}) +        self.assertRaises(DataSourceAzure.NonAzureDataSource, +            DataSourceAzure.read_azure_ovf, invalid_xml) + +    def test_load_with_pubkeys(self): +        mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}] +        pubkeys = [(x['fingerprint'], x['path']) for x in mypklist] +        content = construct_valid_ovf_env(pubkeys=pubkeys) +        (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content) +        for mypk in mypklist: +            self.assertIn(mypk, cfg['_pubkeys']) + + +class TestReadAzureSharedConfig(MockerTestCase): +    def test_valid_content(self): +        xml = """<?xml version="1.0" 
encoding="utf-8"?> +            <SharedConfig> +             <Deployment name="MY_INSTANCE_ID"> +              <Service name="myservice"/> +              <ServiceInstance name="INSTANCE_ID.0" guid="{abcd-uuid}" /> +             </Deployment> +            <Incarnation number="1"/> +            </SharedConfig>""" +        ret = DataSourceAzure.iid_from_shared_config_content(xml) +        self.assertEqual("MY_INSTANCE_ID", ret) + + +def apply_patches(patches): +    ret = [] +    for (ref, name, replace) in patches: +        if replace is None: +            continue +        orig = getattr(ref, name) +        setattr(ref, name, replace) +        ret.append((ref, name, orig)) +    return ret diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 62fc5358..7328b240 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -22,7 +22,7 @@ class TestNoCloudDataSource(MockerTestCase):      def tearDown(self):          apply_patches([i for i in reversed(self.unapply)]) -        super(TestNoCloudDataSource, self).setUp() +        super(TestNoCloudDataSource, self).tearDown()      def apply_patches(self, patches):          ret = apply_patches(patches) diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py new file mode 100644 index 00000000..6c12f1e2 --- /dev/null +++ b/tests/unittests/test_datasource/test_smartos.py @@ -0,0 +1,191 @@ +# vi: ts=4 expandtab +# +#    Copyright (C) 2013 Canonical Ltd. +# +#    Author: Ben Howard <ben.howard@canonical.com> +# +#    This program is free software: you can redistribute it and/or modify +#    it under the terms of the GNU General Public License version 3, as +#    published by the Free Software Foundation. 
+# +#    This program is distributed in the hope that it will be useful, +#    but WITHOUT ANY WARRANTY; without even the implied warranty of +#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +#    GNU General Public License for more details. +# +#    You should have received a copy of the GNU General Public License +#    along with this program.  If not, see <http://www.gnu.org/licenses/>. +# +# +#   This is a testcase for the SmartOS datasource. It replicates a serial +#   console and acts like the SmartOS console does in order to validate +#   return responses. +# + +from cloudinit import helpers +from cloudinit.sources import DataSourceSmartOS + +from mocker import MockerTestCase +import uuid + +mock_returns = { +    'hostname': 'test-host', +    'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', +    'disable_iptables_flag': None, +    'enable_motd_sys_info': None, +    'system_uuid': str(uuid.uuid4()), +    'smartdc': 'smartdc', +    'userdata': """ +#!/bin/sh +/bin/true +""", +} + + +class MockSerial(object): +    """Fake a serial terminal for testing the code that +        interfaces with the serial""" + +    port = None + +    def __init__(self): +        self.last = None +        self.last = None +        self.new = True +        self.count = 0 +        self.mocked_out = [] + +    def open(self): +        return True + +    def close(self): +        return True + +    def isOpen(self): +        return True + +    def write(self, line): +        line = line.replace('GET ', '') +        self.last = line.rstrip() + +    def readline(self): +        if self.new: +            self.new = False +            if self.last in mock_returns: +                return 'SUCCESS\n' +            else: +                return 'NOTFOUND %s\n' % self.last + +        if self.last in mock_returns: +            if not self.mocked_out: +                self.mocked_out = [x for x in self._format_out()] +                print self.mocked_out + +        
    if len(self.mocked_out) > self.count: +                self.count += 1 +                return self.mocked_out[self.count - 1] + +    def _format_out(self): +        if self.last in mock_returns: +            try: +                for l in mock_returns[self.last].splitlines(): +                    yield "%s\n" % l +            except: +                yield "%s\n" % mock_returns[self.last] + +            yield '\n' +            yield '.' + + +class TestSmartOSDataSource(MockerTestCase): +    def setUp(self): +        # makeDir comes from MockerTestCase +        self.tmp = self.makeDir() + +        # patch cloud_dir, so our 'seed_dir' is guaranteed empty +        self.paths = helpers.Paths({'cloud_dir': self.tmp}) + +        self.unapply = [] +        super(TestSmartOSDataSource, self).setUp() + +    def tearDown(self): +        apply_patches([i for i in reversed(self.unapply)]) +        super(TestSmartOSDataSource, self).tearDown() + +    def apply_patches(self, patches): +        ret = apply_patches(patches) +        self.unapply += ret + +    def _get_ds(self): + +        def _get_serial(*_): +            return MockSerial() + +        def _dmi_data(): +            return mock_returns['system_uuid'], 'smartdc' + +        data = {'sys_cfg': {}} +        mod = DataSourceSmartOS +        self.apply_patches([(mod, 'get_serial', _get_serial)]) +        self.apply_patches([(mod, 'dmi_data', _dmi_data)]) +        dsrc = mod.DataSourceSmartOS( +            data.get('sys_cfg', {}), distro=None, paths=self.paths) +        return dsrc + +    def test_seed(self): +        dsrc = self._get_ds() +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertEquals('/dev/ttyS1', dsrc.seed) + +    def test_issmartdc(self): +        dsrc = self._get_ds() +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertTrue(dsrc.is_smartdc) + +    def test_uuid(self): +        dsrc = self._get_ds() +        ret = dsrc.get_data() +        
self.assertTrue(ret) +        self.assertEquals(mock_returns['system_uuid'], +                          dsrc.metadata['instance-id']) + +    def test_root_keys(self): +        dsrc = self._get_ds() +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertEquals(mock_returns['root_authorized_keys'], +                          dsrc.metadata['public-keys']) + +    def test_hostname(self): +        dsrc = self._get_ds() +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertEquals(mock_returns['hostname'], +                          dsrc.metadata['local-hostname']) + +    def test_disable_iptables_flag(self): +        dsrc = self._get_ds() +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertEquals(mock_returns['disable_iptables_flag'], +                          dsrc.metadata['iptables_disable']) + +    def test_motd_sys_info(self): +        dsrc = self._get_ds() +        ret = dsrc.get_data() +        self.assertTrue(ret) +        self.assertEquals(mock_returns['enable_motd_sys_info'], +                          dsrc.metadata['motd_sys_info']) + + +def apply_patches(patches): +    ret = [] +    for (ref, name, replace) in patches: +        if replace is None: +            continue +        orig = getattr(ref, name) +        setattr(ref, name, replace) +        ret.append((ref, name, orig)) +    return ret diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py new file mode 100644 index 00000000..72ad00fd --- /dev/null +++ b/tests/unittests/test_handler/test_handler_locale.py @@ -0,0 +1,64 @@ +#    Copyright (C) 2013 Hewlett-Packard Development Company, L.P. 
+# +#    Author: Juerg Haefliger <juerg.haefliger@hp.com> +# +#    Based on test_handler_set_hostname.py +# +#    This program is free software: you can redistribute it and/or modify +#    it under the terms of the GNU General Public License version 3, as +#    published by the Free Software Foundation. +# +#    This program is distributed in the hope that it will be useful, +#    but WITHOUT ANY WARRANTY; without even the implied warranty of +#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +#    GNU General Public License for more details. +# +#    You should have received a copy of the GNU General Public License +#    along with this program.  If not, see <http://www.gnu.org/licenses/>. + +from cloudinit.config import cc_locale + +from cloudinit import cloud +from cloudinit import distros +from cloudinit import helpers +from cloudinit import util + +from cloudinit.sources import DataSourceNoCloud + +from tests.unittests import helpers as t_help + +from configobj import ConfigObj + +from StringIO import StringIO + +import logging + +LOG = logging.getLogger(__name__) + + +class TestLocale(t_help.FilesystemMockingTestCase): +    def setUp(self): +        super(TestLocale, self).setUp() +        self.new_root = self.makeDir(prefix="unittest_") + +    def _get_cloud(self, distro): +        self.patchUtils(self.new_root) +        paths = helpers.Paths({}) + +        cls = distros.fetch(distro) +        d = cls(distro, {}, paths) +        ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths) +        cc = cloud.Cloud(ds, paths, {}, d, None) +        return cc + +    def test_set_locale_sles(self): + +        cfg = { +            'locale': 'My.Locale', +        } +        cc = self._get_cloud('sles') +        cc_locale.handle('cc_locale', cfg, cc, LOG, []) + +        contents = util.load_file('/etc/sysconfig/language') +        n_cfg = ConfigObj(StringIO(contents)) +        self.assertEquals({'RC_LANG': cfg['locale']}, dict(n_cfg)) diff --git 
a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py index b2f01cdb..6344ec0c 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -55,3 +55,16 @@ class TestHostname(t_help.FilesystemMockingTestCase):                                 cfg, cc, LOG, [])          contents = util.load_file("/etc/hostname")          self.assertEquals('blah', contents.strip()) + +    def test_write_hostname_sles(self): +        cfg = { +            'hostname': 'blah.blah.blah.suse.com', +        } +        distro = self._fetch_distro('sles') +        paths = helpers.Paths({}) +        ds = None +        cc = cloud.Cloud(ds, paths, {}, distro, None) +        self.patchUtils(self.tmp) +        cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, []) +        contents = util.load_file("/etc/HOSTNAME") +        self.assertEquals('blah', contents.strip()) diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py new file mode 100644 index 00000000..40b69773 --- /dev/null +++ b/tests/unittests/test_handler/test_handler_timezone.py @@ -0,0 +1,75 @@ +#    Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +#    Author: Juerg Haefliger <juerg.haefliger@hp.com> +# +#    Based on test_handler_set_hostname.py +# +#    This program is free software: you can redistribute it and/or modify +#    it under the terms of the GNU General Public License version 3, as +#    published by the Free Software Foundation. +# +#    This program is distributed in the hope that it will be useful, +#    but WITHOUT ANY WARRANTY; without even the implied warranty of +#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +#    GNU General Public License for more details. +# +#    You should have received a copy of the GNU General Public License +#    along with this program.  
If not, see <http://www.gnu.org/licenses/>. + +from cloudinit.config import cc_timezone + +from cloudinit import cloud +from cloudinit import distros +from cloudinit import helpers +from cloudinit import util + +from cloudinit.sources import DataSourceNoCloud + +from tests.unittests import helpers as t_help + +from configobj import ConfigObj + +from StringIO import StringIO + +import logging + +LOG = logging.getLogger(__name__) + + +class TestTimezone(t_help.FilesystemMockingTestCase): +    def setUp(self): +        super(TestTimezone, self).setUp() +        self.new_root = self.makeDir(prefix="unittest_") + +    def _get_cloud(self, distro): +        self.patchUtils(self.new_root) +        self.patchOS(self.new_root) + +        paths = helpers.Paths({}) + +        cls = distros.fetch(distro) +        d = cls(distro, {}, paths) +        ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths) +        cc = cloud.Cloud(ds, paths, {}, d, None) +        return cc + +    def test_set_timezone_sles(self): + +        cfg = { +            'timezone': 'Tatooine/Bestine', +        } +        cc = self._get_cloud('sles') + +        # Create a dummy timezone file +        dummy_contents = '0123456789abcdefgh' +        util.write_file('/usr/share/zoneinfo/%s' % cfg['timezone'], +                        dummy_contents) + +        cc_timezone.handle('cc_timezone', cfg, cc, LOG, []) + +        contents = util.load_file('/etc/sysconfig/clock') +        n_cfg = ConfigObj(StringIO(contents)) +        self.assertEquals({'TIMEZONE': cfg['timezone']}, dict(n_cfg)) + +        contents = util.load_file('/etc/localtime') +        self.assertEquals(dummy_contents, contents.strip()) diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py index 0ebb0484..b227616c 100644 --- a/tests/unittests/test_userdata.py +++ b/tests/unittests/test_userdata.py @@ -2,10 +2,13 @@  import StringIO +import gzip  import logging  import os  from email.mime.base import MIMEBase +from 
email.mime.multipart import MIMEMultipart +from email.mime.application import MIMEApplication  from cloudinit import handlers  from cloudinit import helpers as c_helpers @@ -50,6 +53,64 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):          self._log.addHandler(self._log_handler)          return log_file +    def test_simple_jsonp(self): +        blob = ''' +#cloud-config-jsonp +[ +     { "op": "add", "path": "/baz", "value": "qux" }, +     { "op": "add", "path": "/bar", "value": "qux2" } +] +''' + +        ci = stages.Init() +        ci.datasource = FakeDataSource(blob) +        new_root = self.makeDir() +        self.patchUtils(new_root) +        self.patchOS(new_root) +        ci.fetch() +        ci.consume_userdata() +        cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) +        cc = util.load_yaml(cc_contents) +        self.assertEquals(2, len(cc)) +        self.assertEquals('qux', cc['baz']) +        self.assertEquals('qux2', cc['bar']) + +    def test_mixed_cloud_config(self): +        blob_cc = ''' +#cloud-config +a: b +c: d +''' +        message_cc = MIMEBase("text", "cloud-config") +        message_cc.set_payload(blob_cc) + +        blob_jp = ''' +#cloud-config-jsonp +[ +     { "op": "replace", "path": "/a", "value": "c" }, +     { "op": "remove", "path": "/c" } +] +''' + +        message_jp = MIMEBase('text', "cloud-config-jsonp") +        message_jp.set_payload(blob_jp) + +        message = MIMEMultipart() +        message.attach(message_cc) +        message.attach(message_jp) + +        ci = stages.Init() +        ci.datasource = FakeDataSource(str(message)) +        new_root = self.makeDir() +        self.patchUtils(new_root) +        self.patchOS(new_root) +        ci.fetch() +        ci.consume_userdata() +        cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) +        cc = util.load_yaml(cc_contents) +        self.assertEquals(1, len(cc)) +        self.assertEquals('c', cc['a']) +      def 
test_merging_cloud_config(self):          blob = '''  #cloud-config @@ -118,7 +179,7 @@ p: 1          ci.datasource = FakeDataSource(data)          mock_write = self.mocker.replace("cloudinit.util.write_file", -                                              passthrough=False) +                                         passthrough=False)          mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)          self.mocker.replay() @@ -129,6 +190,46 @@ p: 1              "Unhandled non-multipart (text/x-not-multipart) userdata:",              log_file.getvalue()) +    def test_mime_gzip_compressed(self): +        """Tests that individual message gzip encoding works.""" + +        def gzip_part(text): +            contents = StringIO.StringIO() +            f = gzip.GzipFile(fileobj=contents, mode='w') +            f.write(str(text)) +            f.flush() +            f.close() +            return MIMEApplication(contents.getvalue(), 'gzip') + +        base_content1 = ''' +#cloud-config +a: 2 +''' + +        base_content2 = ''' +#cloud-config +b: 3 +c: 4 +''' + +        message = MIMEMultipart('test') +        message.attach(gzip_part(base_content1)) +        message.attach(gzip_part(base_content2)) +        ci = stages.Init() +        ci.datasource = FakeDataSource(str(message)) +        new_root = self.makeDir() +        self.patchUtils(new_root) +        self.patchOS(new_root) +        ci.fetch() +        ci.consume_userdata() +        contents = util.load_file(ci.paths.get_ipath("cloud_config")) +        contents = util.load_yaml(contents) +        self.assertTrue(isinstance(contents, dict)) +        self.assertEquals(3, len(contents)) +        self.assertEquals(2, contents['a']) +        self.assertEquals(3, contents['b']) +        self.assertEquals(4, contents['c']) +      def test_mime_text_plain(self):          """Mime message of type text/plain is ignored but shows warning."""          ci = stages.Init() @@ -137,7 +238,7 @@ p: 1          ci.datasource = 
FakeDataSource(message.as_string())          mock_write = self.mocker.replace("cloudinit.util.write_file", -                                              passthrough=False) +                                         passthrough=False)          mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)          self.mocker.replay() @@ -156,7 +257,7 @@ p: 1          outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")          mock_write = self.mocker.replace("cloudinit.util.write_file", -                                              passthrough=False) +                                         passthrough=False)          mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)          mock_write(outpath, script, 0700)          self.mocker.replay() @@ -176,7 +277,7 @@ p: 1          outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")          mock_write = self.mocker.replace("cloudinit.util.write_file", -                                              passthrough=False) +                                         passthrough=False)          mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)          mock_write(outpath, script, 0700)          self.mocker.replay() diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 5853cb0f..87415cb5 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -6,6 +6,7 @@ import yaml  from mocker import MockerTestCase  from unittest import TestCase +from tests.unittests import helpers  from cloudinit import importer  from cloudinit import util @@ -250,50 +251,10 @@ class TestLoadYaml(TestCase):                           myobj) -class TestMountinfoParsing(TestCase): -    precise_ext4_mountinfo = \ -"""15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=16422216k,nr_inodes=4105554,mode=755 -18 17 0:11 / /dev/pts 
rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=6572812k,mode=755 -20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered -21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs cgroup rw,mode=755 -22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw -23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw -25 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw -26 19 0:19 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k -27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw -28 19 0:21 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 -24 21 0:18 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset -29 21 0:22 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu -30 21 0:23 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct -31 21 0:24 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory -32 21 0:25 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices -33 21 0:26 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer -34 21 0:27 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio -35 21 0:28 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event -36 20 9:0 / /boot rw,relatime - ext4 /dev/md0 rw,data=ordered -37 16 0:29 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw -39 28 0:30 / /run/user/foobar/gvfs rw,nosuid,nodev,relatime - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000""" - -    raring_btrfs_mountinfo = \ -"""15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=865556k,nr_inodes=216389,mode=755 -18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -19 20 0:15 
/ /run rw,nosuid,relatime - tmpfs tmpfs rw,size=348196k,mode=755 -20 1 0:16 /@ / rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache -21 15 0:19 / /sys/fs/fuse/connections rw,relatime - fusectl none rw -22 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw -23 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw -24 19 0:20 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k -25 19 0:21 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw -26 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 -27 20 0:16 /@home /home rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache""" - +class TestMountinfoParsing(helpers.ResourceUsingTestCase):      def test_invalid_mountinfo(self): -        line = "20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered" +        line = ("20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root" +                "rw,errors=remount-ro,data=ordered")          elements = line.split()          for i in range(len(elements) + 1):              lines = [' '.join(elements[0:i])] @@ -304,7 +265,8 @@ class TestMountinfoParsing(TestCase):              self.assertEqual(expected, util.parse_mount_info('/', lines))      def test_precise_ext4_root(self): -        lines = TestMountinfoParsing.precise_ext4_mountinfo.splitlines() + +        lines = self.readResource('mountinfo_precise_ext4.txt').splitlines()          expected = ('/dev/mapper/vg0-root', 'ext4', '/')          self.assertEqual(expected, util.parse_mount_info('/', lines)) @@ -326,7 +288,7 @@ class TestMountinfoParsing(TestCase):          self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))      def test_raring_btrfs_root(self): -        lines = TestMountinfoParsing.raring_btrfs_mountinfo.splitlines() +        lines = self.readResource('mountinfo_raring_btrfs.txt').splitlines()          expected = ('/dev/vda1', 'btrfs', '/')          self.assertEqual(expected, 
util.parse_mount_info('/', lines)) | 
