 57 files changed, 737 insertions(+), 315 deletions(-)
@@ -25,6 +25,15 @@
  - docs: fix disk-setup to reference 'table_type' [Rail Aliiev] (LP: #1313114)
  - ssh_authkey_fingerprints: fix bug that prevented disabling the module.
    (LP: #1340903) [Patrick Lucas]
+ - no longer use pylint as a checker, fix pep8 [Jay Faulkner].
+ - Openstack: do not load some urls twice.
+ - FreeBsd: fix initscripts and add working config file [Harm Weites]
+ - Datasource: fix broken logic to provide hostname if datasource does not
+   provide one
+ - Improved and less verbose logging.
+ - resizefs: first check that device is writable.
+ - configdrive: fix reading of vendor data to be like metadata service reader.
+   [Jay Faulkner]
 0.7.5:
  - open 0.7.5
  - Add a debug log message around import failures
@@ -58,5 +58,5 @@ rpm:
 deb:
 	./packages/bddeb
 
-.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb yaml check_version
+.PHONY: test pyflakes 2to3 clean pep8 rpm deb yaml check_version
 .PHONY: pip-test-requirements pip-requirements clean_pyc
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index a5209268..1660832b 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -484,7 +484,7 @@ def get_partition_mbr_layout(size, layout):
 def purge_disk_ptable(device):
     # wipe the first and last megabyte of a disk (or file)
     # gpt stores partition table both at front and at end.
-    null = '\0'  # pylint: disable=W1401
+    null = '\0'
     start_len = 1024 * 1024
     end_len = 1024 * 1024
     with open(device, "rb+") as fp:
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 80590118..ba1303d1 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -18,7 +18,7 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-from string import whitespace  # pylint: disable=W0402 +from string import whitespace  import logging  import os.path diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 638daef8..09d37371 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -119,7 +119,7 @@ def load_power_state(cfg):  def doexit(sysexit): -    os._exit(sysexit)  # pylint: disable=W0212 +    os._exit(sysexit)  def execmd(exe_args, output=None, data_in=None): @@ -127,7 +127,7 @@ def execmd(exe_args, output=None, data_in=None):          proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,                                  stdout=output, stderr=subprocess.STDOUT)          proc.communicate(data_in) -        ret = proc.returncode  # pylint: disable=E1101 +        ret = proc.returncode      except Exception:          doexit(EXIT_FAIL)      doexit(ret) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index a6280e6c..cbc07853 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -28,19 +28,19 @@ from cloudinit import util  frequency = PER_ALWAYS -def _resize_btrfs(mount_point, devpth):  # pylint: disable=W0613 +def _resize_btrfs(mount_point, devpth):      return ('btrfs', 'filesystem', 'resize', 'max', mount_point) -def _resize_ext(mount_point, devpth):  # pylint: disable=W0613 +def _resize_ext(mount_point, devpth):      return ('resize2fs', devpth) -def _resize_xfs(mount_point, devpth):  # pylint: disable=W0613 +def _resize_xfs(mount_point, devpth):      return ('xfs_growfs', devpth) -def _resize_ufs(mount_point, devpth):  # pylint: disable=W0613 +def _resize_ufs(mount_point, devpth):      return ('growfs', devpth)  # Do not use a dictionary as these commands should be able to be used @@ -98,12 +98,12 @@ def handle(name, cfg, _cloud, log, args):      (devpth, fs_type, mount_point) = result -    # Ensure the path is a block device.      info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)      log.debug("resize_info: %s" % info)      container = util.is_container() +    # Ensure the path is a block device.      if (devpth == "/dev/root" and not os.path.exists(devpth) and              not container):          devpth = rootdev_from_cmdline(util.get_cmdline()) @@ -117,14 +117,22 @@ def handle(name, cfg, _cloud, log, args):      except OSError as exc:          if container and exc.errno == errno.ENOENT:              log.debug("Device '%s' did not exist in container. " -                      "cannot resize: %s" % (devpth, info)) +                      "cannot resize: %s", devpth, info)          elif exc.errno == errno.ENOENT: -            log.warn("Device '%s' did not exist. cannot resize: %s" % -                     (devpth, info)) +            log.warn("Device '%s' did not exist. cannot resize: %s", +                     devpth, info)          else:              raise exc          return +    if not os.access(devpth, os.W_OK): +        if container: +            log.debug("'%s' not writable in container. cannot resize: %s", +                      devpth, info) +        else: +            log.warn("'%s' not writable. cannot resize: %s", devpth, info) +        return +      if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):          if container:              log.debug("device '%s' not a block device in container." 
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 4a3b21af..4ca85e21 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -28,7 +28,7 @@ from cloudinit import distros as ds  from cloudinit import ssh_util  from cloudinit import util -from string import letters, digits  # pylint: disable=W0402 +from string import letters, digits  # We are removing certain 'painful' letters/numbers  PW_SET = (letters.translate(None, 'loLOI') + @@ -132,7 +132,7 @@ def handle(_name, cfg, cloud, log, args):                                                       'PasswordAuthentication',                                                       pw_auth)) -        lines = [str(e) for e in new_lines] +        lines = [str(l) for l in new_lines]          util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))          try: diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index 76c1663d..2d480d7e 100644 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -85,7 +85,7 @@ def import_ssh_ids(ids, user, log):          return      try: -        _check = pwd.getpwnam(user) +        pwd.getpwnam(user)      except KeyError as exc:          raise exc diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 4b41220e..2599d9f2 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -847,12 +847,10 @@ def extract_default(users, default_name=None, default_config=None):  def fetch(name): -    locs = importer.find_module(name, -                                ['', __name__], -                                ['Distro']) +    locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro'])      if not locs: -        raise ImportError("No distribution found for distro %s" -                           % (name)) +        raise ImportError("No distribution found for distro %s (searched %s)" +                           % (name, looked_locs))      mod = importer.import_module(locs[0])      cls = getattr(mod, 'Distro')      return cls @@ -863,5 +861,5 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",      util.write_file(tz_conf, str(tz).rstrip() + "\n")      # This ensures that the correct tz will be used for the system      if tz_local and tz_file: -        util.copy(tz_file, self.tz_local_fn) +        util.copy(tz_file, tz_local)      return diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 9f11b89c..005a0dd4 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -159,7 +159,7 @@ class Distro(distros.Distro):          return hostname      def set_timezone(self, tz): -        set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) +        distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))      def package_command(self, command, args=None, pkgs=None):          if pkgs is None: diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 7cf4a9ef..010be67d 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -131,7 +131,7 @@ class Distro(distros.Distro):          return "127.0.1.1"      def set_timezone(self, tz): -        set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) +        distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))      def package_command(self, command, args=None, pkgs=None):          if pkgs is None: diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py 
index d98f9578..cff10387 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -26,6 +26,9 @@ from cloudinit import log as logging  from cloudinit import ssh_util  from cloudinit import util +from cloudinit.distros import net_util +from cloudinit.distros.parsers.resolv_conf import ResolvConf +  LOG = logging.getLogger(__name__) @@ -33,6 +36,8 @@ class Distro(distros.Distro):      rc_conf_fn = "/etc/rc.conf"      login_conf_fn = '/etc/login.conf'      login_conf_fn_bak = '/etc/login.conf.orig' +    resolv_conf_fn = '/etc/resolv.conf' +    ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'      def __init__(self, name, cfg, paths):          distros.Distro.__init__(self, name, cfg, paths) @@ -44,30 +49,53 @@ class Distro(distros.Distro):      # Updates a key in /etc/rc.conf.      def updatercconf(self, key, value): -        LOG.debug("updatercconf: %s => %s", key, value) +        LOG.debug("Checking %s for: %s = %s", self.rc_conf_fn, key, value)          conf = self.loadrcconf()          config_changed = False -        for item in conf: -            if item == key and conf[item] != value: -                conf[item] = value -                LOG.debug("[rc.conf]: Value %s for key %s needs to be changed", -                          value, key) -                config_changed = True +        if key not in conf: +            LOG.debug("Adding key in %s: %s = %s", self.rc_conf_fn, key, +                      value) +            conf[key] = value +            config_changed = True +        else: +            for item in conf.keys(): +                if item == key and conf[item] != value: +                    conf[item] = value +                    LOG.debug("Changing key in %s: %s = %s", self.rc_conf_fn, +                              key, value) +                    config_changed = True          if config_changed: -            LOG.debug("Writing new %s file", self.rc_conf_fn) +            LOG.info("Writing %s", self.rc_conf_fn)              buf = StringIO()              for keyval in conf.items(): -                buf.write("%s=%s\n" % keyval) +                buf.write('%s="%s"\n' % keyval)              util.write_file(self.rc_conf_fn, buf.getvalue()) -    # Load the contents of /etc/rc.conf and store all keys in a dict. +    # Load the contents of /etc/rc.conf and store all keys in a dict. Make sure +    # quotes are ignored: +    #  hostname="bla"      def loadrcconf(self): +        RE_MATCH = re.compile(r'^(\w+)\s*=\s*(.*)\s*')          conf = {}          lines = util.load_file(self.rc_conf_fn).splitlines()          for line in lines: -            tok = line.split('=') -            conf[tok[0]] = tok[1].rstrip() +            m = RE_MATCH.match(line) +            if not m: +                LOG.debug("Skipping line from /etc/rc.conf: %s", line) +                continue +            key = m.group(1).rstrip() +            val = m.group(2).rstrip() +            # Kill them quotes (not completely correct, aka won't handle +            # quoted values, but should be ok ...) 
+            if val[0] in ('"', "'"): +                val = val[1:] +            if val[-1] in ('"', "'"): +                val = val[0:-1] +            if len(val) == 0: +                LOG.debug("Skipping empty value from /etc/rc.conf: %s", line) +                continue +            conf[key] = val          return conf      def readrcconf(self, key): @@ -192,10 +220,6 @@ class Distro(distros.Distro):              util.logexc(LOG, "Failed to lock user %s", name)              raise e -    # TODO: -    def write_sudo_rules(self, name, rules, sudo_file=None): -        LOG.debug("[write_sudo_rules] Name: %s", name) -      def create_user(self, name, **kwargs):          self.add_user(name, **kwargs) @@ -218,7 +242,60 @@ class Distro(distros.Distro):              ssh_util.setup_user_keys(keys, name, options=None)      def _write_network(self, settings): -        return +        entries = net_util.translate_network(settings) +        nameservers = [] +        searchdomains = [] +        dev_names = entries.keys() +        for (dev, info) in entries.iteritems(): +            # Skip the loopback interface. +            if dev.startswith('lo'): +                continue + +            LOG.info('Configuring interface %s', dev) + +            if info.get('bootproto') == 'static': +                LOG.debug('Configuring dev %s with %s / %s', dev, info.get('address'), info.get('netmask')) +                # Configure an ipv4 address. +                ifconfig = info.get('address') + ' netmask ' + info.get('netmask') + +                # Configure the gateway. +                self.updatercconf('defaultrouter', info.get('gateway')) + +                if 'dns-nameservers' in info: +                    nameservers.extend(info['dns-nameservers']) +                if 'dns-search' in info: +                    searchdomains.extend(info['dns-search']) +            else: +                ifconfig = 'DHCP' +      +            self.updatercconf('ifconfig_' + dev, ifconfig) + +        # Try to read the /etc/resolv.conf or just start from scratch if that +        # fails. +        try: +            resolvconf = ResolvConf(util.load_file(self.resolv_conf_fn)) +            resolvconf.parse() +        except IOError: +            util.logexc(LOG, "Failed to parse %s, use new empty file", self.resolv_conf_fn) +            resolvconf = ResolvConf('') +            resolvconf.parse() + +        # Add some nameservers +        for server in nameservers: +            try: +                resolvconf.add_nameserver(server) +            except ValueError: +                util.logexc(LOG, "Failed to add nameserver %s", server) + +        # And add any searchdomains. 
+        for domain in searchdomains: +            try: +                resolvconf.add_search_domain(domain) +            except ValueError: +                util.logexc(LOG, "Failed to add search domain %s", domain) +        util.write_file(self.resolv_conf_fn, str(resolvconf), 0644) + +        return dev_names      def apply_locale(self, locale, out_fn=None):          # Adjust the locals value to the new value diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py index c4b02de1..45c2e658 100644 --- a/cloudinit/distros/gentoo.py +++ b/cloudinit/distros/gentoo.py @@ -138,7 +138,7 @@ class Distro(distros.Distro):          return hostname      def set_timezone(self, tz): -        set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) +        distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))      def package_command(self, command, args=None, pkgs=None):          if pkgs is None: diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index 1be9d46b..5733c25a 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -137,8 +137,8 @@ class ResolvConf(object):          self._contents.append(('option', ['search', s_list, '']))          return flat_sds -    @local_domain.setter  # pl51222 pylint: disable=E1101 -    def local_domain(self, domain):  # pl51222 pylint: disable=E0102 +    @local_domain.setter +    def local_domain(self, domain):          self.parse()          self._remove_option('domain')          self._contents.append(('option', ['domain', str(domain), ''])) diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py index 1848ce2c..3a50cf87 100644 --- a/cloudinit/handlers/boot_hook.py +++ b/cloudinit/handlers/boot_hook.py @@ -53,8 +53,7 @@ class BootHookPartHandler(handlers.Handler):          util.write_file(filepath, contents.lstrip(), 0700)          return filepath -    def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221 -                    payload, frequency):  # pylint: disable=W0613 +    def handle_part(self, data, ctype, filename, payload, frequency):          if ctype in handlers.CONTENT_SIGNALS:              return diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 4232700f..bf994e33 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -138,8 +138,7 @@ class CloudConfigPartHandler(handlers.Handler):          self.file_names = []          self.cloud_buf = None -    def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221 -                    payload, _frequency, headers):  # pylint: disable=W0613 +    def handle_part(self, data, ctype, filename, payload, frequency, headers):          if ctype == handlers.CONTENT_START:              self._reset()              return diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py index 30c1ed89..9755ab05 100644 --- a/cloudinit/handlers/shell_script.py +++ b/cloudinit/handlers/shell_script.py @@ -44,8 +44,7 @@ class ShellScriptPartHandler(handlers.Handler):              handlers.type_from_starts_with(SHELL_PREFIX),          ] -    def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221 -                    payload, frequency):  # pylint: disable=W0613 +    def handle_part(self, data, ctype, filename, payload, frequency):          if ctype in handlers.CONTENT_SIGNALS:              # TODO(harlowja): maybe delete existing things here              
return diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index bac4cad2..50d193c4 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -44,8 +44,7 @@ class UpstartJobPartHandler(handlers.Handler):              handlers.type_from_starts_with(UPSTART_PREFIX),          ] -    def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221 -                    payload, frequency): +    def handle_part(self, data, ctype, filename, payload, frequency):          if ctype in handlers.CONTENT_SIGNALS:              return diff --git a/cloudinit/importer.py b/cloudinit/importer.py index a1929137..fb57253c 100644 --- a/cloudinit/importer.py +++ b/cloudinit/importer.py @@ -22,10 +22,6 @@  import sys -from cloudinit import log as logging - -LOG = logging.getLogger(__name__) -  def import_module(module_name):      __import__(module_name) @@ -33,25 +29,24 @@ def import_module(module_name):  def find_module(base_name, search_paths, required_attrs=None): -    found_places = []      if not required_attrs:          required_attrs = []      # NOTE(harlowja): translate the search paths to include the base name. -    real_paths = [] +    lookup_paths = []      for path in search_paths:          real_path = []          if path:              real_path.extend(path.split("."))          real_path.append(base_name)          full_path = '.'.join(real_path) -        real_paths.append(full_path) -    for full_path in real_paths: +        lookup_paths.append(full_path) +    found_paths = [] +    for full_path in lookup_paths:          mod = None          try:              mod = import_module(full_path) -        except ImportError as e: -            LOG.debug("Failed at attempted import of '%s' due to: %s", -                      full_path, e) +        except ImportError: +            pass          if not mod:              continue          found_attrs = 0 @@ -59,5 +54,5 @@ def find_module(base_name, search_paths, required_attrs=None):              if hasattr(mod, attr):                  found_attrs += 1          if found_attrs == len(required_attrs): -            found_places.append(full_path) -    return found_places +            found_paths.append(full_path) +    return (found_paths, lookup_paths) diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 650b42a9..03aa1ee1 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -143,12 +143,14 @@ def construct(parsed_mergers):      for (m_name, m_ops) in parsed_mergers:          if not m_name.startswith(MERGER_PREFIX):              m_name = MERGER_PREFIX + str(m_name) -        merger_locs = importer.find_module(m_name, -                                           [__name__], -                                           [MERGER_ATTR]) +        merger_locs, looked_locs = importer.find_module(m_name, +                                                        [__name__], +                                                        [MERGER_ATTR])          if not merger_locs:              msg = ("Could not find merger module named '%s' " -                   "with attribute '%s'") % (m_name, MERGER_ATTR) +                   "with attribute '%s' (searched %s)") % (m_name, +                                                           MERGER_ATTR, +                                                           looked_locs)              raise ImportError(msg)          else:              mod = importer.import_module(merger_locs[0]) diff --git a/cloudinit/netinfo.py 
b/cloudinit/netinfo.py index 1bdca9f7..8d4df342 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -21,10 +21,13 @@  #    along with this program.  If not, see <http://www.gnu.org/licenses/>.  import cloudinit.util as util +from cloudinit.log import logging  import re  from prettytable import PrettyTable +LOG = logging.getLogger() +  def netdev_info(empty=""):      fields = ("hwaddr", "addr", "bcast", "mask") @@ -168,8 +171,9 @@ def route_pformat():      lines = []      try:          routes = route_info() -    except Exception: +    except Exception as e:          lines.append(util.center('Route info failed', '!', 80)) +        util.logexc(LOG, "Route info failed: %s" % e)          routes = None      if routes is not None:          fields = ['Route', 'Destination', 'Gateway', diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py index 0f3c034e..f6609d6f 100644 --- a/cloudinit/patcher.py +++ b/cloudinit/patcher.py @@ -41,7 +41,7 @@ def _patch_logging():      fallback_handler = QuietStreamHandler(sys.stderr)      fallback_handler.setFormatter(logging.Formatter(FALL_FORMAT)) -    def handleError(self, record):  # pylint: disable=W0613 +    def handleError(self, record):          try:              fallback_handler.handle(record)              fallback_handler.flush() diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 83cc6b25..4e5d90de 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -126,12 +126,13 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):          self.version = results['version']          self.files.update(results.get('files', {})) -        # If there is no vendordata, set vd to an empty dict instead of None -        vd = results.get('vendordata', {}) -        # if vendordata includes 'cloud-init', then read that explicitly -        # for cloud-init (for namespacing). 
-        if 'cloud-init' in vd: -            self.vendordata_raw = vd['cloud-init'] +        vd = results.get('vendordata') +        self.vendordata_pure = vd +        try: +            self.vendordata_raw = openstack.convert_vendordata_json(vd) +        except ValueError as e: +            LOG.warn("Invalid content in vendor-data: %s", e) +            self.vendordata_raw = None          return True @@ -168,16 +169,12 @@ def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):  def read_config_drive(source_dir): -    excps = [] -    finders = []      reader = openstack.ConfigDriveReader(source_dir) - -    # openstack.OS_VERSIONS is stored in chronological order, so to check the -    # newest first, use reversed() -    for version in reversed(openstack.OS_VERSIONS): -        finders.append((reader.read_v2, [], {'version': version})) -    finders.append((reader.read_v1, [], {})) - +    finders = [ +        (reader.read_v2, [], {}), +        (reader.read_v1, [], {}), +    ] +    excps = []      for (functor, args, kwargs) in finders:          try:              return functor(*args, **kwargs) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 34557f8b..e2469f6e 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -28,7 +28,7 @@ import base64  import os  import pwd  import re -import string  # pylint: disable=W0402 +import string  from cloudinit import log as logging  from cloudinit import sources diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 0970d07b..469c2e2a 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -88,11 +88,9 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):          md_urls = []          url2base = {}          for url in urls: -            for version in openstack.OS_VERSIONS + (openstack.OS_LATEST,): -                md_url = url_helper.combine_url(url, 'openstack', -                                                version, 'meta_data.json') -                md_urls.append(md_url) -                url2base[md_url] = url +            md_url = url_helper.combine_url(url, 'openstack') +            md_urls.append(md_url) +            url2base[md_url] = url          (max_wait, timeout) = self._get_url_settings()          start_time = time.time() @@ -119,8 +117,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):                                      'Crawl of openstack metadata service',                                      read_metadata_service,                                      args=[self.metadata_address], -                                    kwargs={'ssl_details': self.ssl_details, -                                            'version': openstack.OS_HAVANA}) +                                    kwargs={'ssl_details': self.ssl_details})          except openstack.NonReadable:              return False          except (openstack.BrokenMetadata, IOError): @@ -143,20 +140,20 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):          self.version = results['version']          self.files.update(results.get('files', {})) -        # if vendordata includes 'cloud-init', then read that explicitly -        # for cloud-init (for namespacing).          
vd = results.get('vendordata') -        if isinstance(vd, dict) and 'cloud-init' in vd: -            self.vendordata_raw = vd['cloud-init'] -        else: -            self.vendordata_raw = vd +        self.vendordata_pure = vd +        try: +            self.vendordata_raw = openstack.convert_vendordata_json(vd) +        except ValueError as e: +            LOG.warn("Invalid content in vendor-data: %s", e) +            self.vendordata_raw = None          return True -def read_metadata_service(base_url, version=None, ssl_details=None): +def read_metadata_service(base_url, ssl_details=None):      reader = openstack.MetadataReader(base_url, ssl_details=ssl_details) -    return reader.read_v2(version=version) +    return reader.read_v2()  # Used to match classes to dependencies diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 7d52a2e6..7c7ef9ab 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -66,7 +66,7 @@ class DataSource(object):              name = name[0:-3]          self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, -                                          ("datasource", name), {}) +                                           ("datasource", name), {})          if not ud_proc:              self.ud_proc = ud.UserDataProcessor(self.paths)          else: @@ -166,7 +166,7 @@ class DataSource(object):          defhost = "localhost"          domain = defdomain -        if self.metadata or 'local-hostname' not in self.metadata: +        if not self.metadata or 'local-hostname' not in self.metadata:              # this is somewhat questionable really.              # the cloud datasource was asked for a hostname              # and didn't have one. raising error might be more appropriate @@ -272,9 +272,9 @@ def list_sources(cfg_list, depends, pkg_list):      for ds_name in cfg_list:          if not ds_name.startswith(DS_PREFIX):              ds_name = '%s%s' % (DS_PREFIX, ds_name) -        m_locs = importer.find_module(ds_name, -                                      pkg_list, -                                      ['get_datasource_list']) +        m_locs, _looked_locs = importer.find_module(ds_name, +                                                    pkg_list, +                                                    ['get_datasource_list'])          for m_loc in m_locs:              mod = importer.import_module(m_loc)              lister = getattr(mod, "get_datasource_list") diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index ed102c4c..b7e19314 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -21,6 +21,7 @@  import abc  import base64  import copy +import functools  import os  from cloudinit import ec2_utils @@ -48,7 +49,7 @@ OS_LATEST = 'latest'  OS_FOLSOM = '2012-08-10'  OS_GRIZZLY = '2013-04-04'  OS_HAVANA = '2013-10-17' -# keep this in chronological order by time: add new entries to the end +# keep this in chronological order. new supported versions go at the end.  
OS_VERSIONS = (      OS_FOLSOM,      OS_GRIZZLY, @@ -151,17 +152,40 @@ class BaseReader(object):          pass      @abc.abstractmethod -    def _path_exists(self, path): +    def _path_read(self, path):          pass      @abc.abstractmethod -    def _path_read(self, path): +    def _fetch_available_versions(self):          pass      @abc.abstractmethod      def _read_ec2_metadata(self):          pass +    def _find_working_version(self): +        try: +            versions_available = self._fetch_available_versions() +        except Exception as e: +            LOG.debug("Unable to read openstack versions from %s due to: %s", +                      self.base_path, e) +            versions_available = [] + +        # openstack.OS_VERSIONS is stored in chronological order, so +        # reverse it to check newest first. +        supported = [v for v in reversed(list(OS_VERSIONS))] +        selected_version = OS_LATEST + +        for potential_version in supported: +            if potential_version not in versions_available: +                continue +            selected_version = potential_version +            break + +        LOG.debug("Selected version '%s' from %s", selected_version, +                  versions_available) +        return selected_version +      def _read_content_path(self, item):          path = item.get('content_path', '').lstrip("/")          path_pieces = path.split("/") @@ -171,24 +195,7 @@ class BaseReader(object):          path = self._path_join(self.base_path, "openstack", *path_pieces)          return self._path_read(path) -    def _find_working_version(self, version): -        search_versions = [version] + list(OS_VERSIONS) -        for potential_version in search_versions: -            if not potential_version: -                continue -            path = self._path_join(self.base_path, "openstack", -                                   potential_version) -            if self._path_exists(path): -                if potential_version != version: -                    LOG.debug("Version '%s' not available, attempting to use" -                              " version '%s' instead", version, -                              potential_version) -                return potential_version -        LOG.debug("Version '%s' not available, attempting to use '%s'" -                  " instead", version, OS_LATEST) -        return OS_LATEST - -    def read_v2(self, version=None): +    def read_v2(self):          """Reads a version 2 formatted location.          Return a dict with metadata, userdata, ec2-metadata, dsmode, @@ -197,6 +204,9 @@ class BaseReader(object):          If not a valid location, raise a NonReadable exception.          
""" +        load_json_anytype = functools.partial( +            util.load_json, root_types=(dict, basestring, list)) +          def datafiles(version):              files = {}              files['metadata'] = ( @@ -215,29 +225,32 @@ class BaseReader(object):              files['vendordata'] = (                  self._path_join("openstack", version, 'vendor_data.json'),                  False, -                util.load_json, +                load_json_anytype,              )              return files -        version = self._find_working_version(version)          results = {              'userdata': '',              'version': 2,          } -        data = datafiles(version) +        data = datafiles(self._find_working_version())          for (name, (path, required, translator)) in data.iteritems():              path = self._path_join(self.base_path, path)              data = None              found = False -            if self._path_exists(path): -                try: -                    data = self._path_read(path) -                except IOError: -                    raise NonReadable("Failed to read: %s" % path) -                found = True +            try: +                data = self._path_read(path) +            except IOError as e: +                if not required: +                    LOG.debug("Failed reading optional path %s due" +                              " to: %s", path, e) +                else: +                    LOG.debug("Failed reading mandatory path %s due" +                              " to: %s", path, e)              else: -                if required: -                    raise NonReadable("Missing mandatory path: %s" % path) +                found = True +            if required and not found: +                raise NonReadable("Missing mandatory path: %s" % path)              if found and translator:                  try:                      data = translator(data) @@ -305,21 +318,27 @@ class BaseReader(object):  class ConfigDriveReader(BaseReader):      def __init__(self, base_path):          super(ConfigDriveReader, self).__init__(base_path) +        self._versions = None      def _path_join(self, base, *add_ons):          components = [base] + list(add_ons)          return os.path.join(*components) -    def _path_exists(self, path): -        return os.path.exists(path) -      def _path_read(self, path):          return util.load_file(path) +    def _fetch_available_versions(self): +        if self._versions is None: +            path = self._path_join(self.base_path, 'openstack') +            found = [d for d in os.listdir(path) +                     if os.path.isdir(os.path.join(path))] +            self._versions = found +        return self._versions +      def _read_ec2_metadata(self):          path = self._path_join(self.base_path,                                 'ec2', 'latest', 'meta-data.json') -        if not self._path_exists(path): +        if not os.path.exists(path):              return {}          else:              try: @@ -339,7 +358,7 @@ class ConfigDriveReader(BaseReader):          found = {}          for name in FILES_V1.keys():              path = self._path_join(self.base_path, name) -            if self._path_exists(path): +            if os.path.exists(path):                  found[name] = path          if len(found) == 0:              raise NonReadable("%s: no files found" % (self.base_path)) @@ -401,17 +420,26 @@ class MetadataReader(BaseReader):          self.ssl_details = ssl_details          self.timeout = float(timeout)    
      self.retries = int(retries) +        self._versions = None + +    def _fetch_available_versions(self): +        # <baseurl>/openstack/ returns a newline separated list of versions +        if self._versions is not None: +            return self._versions +        found = [] +        version_path = self._path_join(self.base_path, "openstack") +        content = self._path_read(version_path) +        for line in content.splitlines(): +            line = line.strip() +            if not line: +                continue +            found.append(line) +        self._versions = found +        return self._versions      def _path_read(self, path): -        response = url_helper.readurl(path, -                                      retries=self.retries, -                                      ssl_details=self.ssl_details, -                                      timeout=self.timeout) -        return response.contents -    def _path_exists(self, path): - -        def should_retry_cb(request, cause): +        def should_retry_cb(_request_args, cause):              try:                  code = int(cause.code)                  if code >= 400: @@ -421,15 +449,12 @@ class MetadataReader(BaseReader):                  pass              return True -        try: -            response = url_helper.readurl(path, -                                          retries=self.retries, -                                          ssl_details=self.ssl_details, -                                          timeout=self.timeout, -                                          exception_cb=should_retry_cb) -            return response.ok() -        except IOError: -            return False +        response = url_helper.readurl(path, +                                      retries=self.retries, +                                      ssl_details=self.ssl_details, +                                      timeout=self.timeout, +                                      exception_cb=should_retry_cb) +        return response.contents      def _path_join(self, base, *add_ons):          return url_helper.combine_url(base, *add_ons) @@ -438,3 +463,28 @@ class MetadataReader(BaseReader):          return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details,                                                 timeout=self.timeout,                                                 retries=self.retries) + + +def convert_vendordata_json(data, recurse=True): +    """ data: a loaded json *object* (strings, arrays, dicts). +    return something suitable for cloudinit vendordata_raw. 
+ +    if data is: +       None: return None +       string: return string +       list: return data +             the list is then processed in UserDataProcessor +       dict: return convert_vendordata_json(data.get('cloud-init')) +    """ +    if not data: +        return None +    if isinstance(data, (str, unicode, basestring)): +        return data +    if isinstance(data, list): +        return copy.deepcopy(data) +    if isinstance(data, dict): +        if recurse is True: +            return convert_vendordata_json(data.get('cloud-init'), +                                           recurse=False) +        raise ValueError("vendordata['cloud-init'] cannot be dict") +    raise ValueError("Unknown data type for vendordata: %s" % type(data)) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index d29d480a..67f467f7 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -386,12 +386,12 @@ class Init(object):              potential_handlers = util.find_modules(path)              for (fname, mod_name) in potential_handlers.iteritems():                  try: -                    mod_locs = importer.find_module(mod_name, [''], -                                                    ['list_types', -                                                     'handle_part']) +                    mod_locs, looked_locs = importer.find_module( +                        mod_name, [''], ['list_types', 'handle_part'])                      if not mod_locs: -                        LOG.warn(("Could not find a valid user-data handler" -                                  " named %s in file %s"), mod_name, fname) +                        LOG.warn("Could not find a valid user-data handler" +                                 " named %s in file %s (searched %s)", +                                 mod_name, fname, looked_locs)                          continue                      mod = importer.import_module(mod_locs[0])                      mod = handlers.fixup_handler(mod) @@ -621,11 +621,11 @@ class Modules(object):                            " has an unknown frequency %s"), raw_name, freq)                  # Reset it so when ran it will get set to a known value                  freq = None -            mod_locs = importer.find_module(mod_name, -                                            ['', type_utils.obj_name(config)], -                                            ['handle']) +            mod_locs, looked_locs = importer.find_module( +                mod_name, ['', type_utils.obj_name(config)], ['handle'])              if not mod_locs: -                LOG.warn("Could not find module named %s", mod_name) +                LOG.warn("Could not find module named %s (searched %s)", +                         mod_name, looked_locs)                  continue              mod = config.fixup_module(importer.import_module(mod_locs[0]))              mostly_mods.append([mod, raw_name, freq, run_args]) diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py index 2decbfc5..cc3d9495 100644 --- a/cloudinit/type_utils.py +++ b/cloudinit/type_utils.py @@ -19,8 +19,6 @@  #  #    You should have received a copy of the GNU General Public License  #    along with this program.  If not, see <http://www.gnu.org/licenses/>. 
-# -# pylint: disable=C0302  import types diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 73c1fa4e..3074dd08 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -44,7 +44,7 @@ try:      from distutils.version import LooseVersion      import pkg_resources      _REQ = pkg_resources.get_distribution('requests') -    _REQ_VER = LooseVersion(_REQ.version)  # pylint: disable=E1103 +    _REQ_VER = LooseVersion(_REQ.version)      if _REQ_VER >= LooseVersion('0.8.8'):          SSL_ENABLED = True      if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'): @@ -54,7 +54,7 @@ except:  def _cleanurl(url): -    parsed_url = list(urlparse(url, scheme='http'))  # pylint: disable=E1123 +    parsed_url = list(urlparse(url, scheme='http'))      if not parsed_url[1] and parsed_url[2]:          # Swap these since this seems to be a common          # occurrence when given urls like 'www.google.com' @@ -90,7 +90,7 @@ class StringResponse(object):          self.contents = contents          self.url = None -    def ok(self, *args, **kwargs):  # pylint: disable=W0613 +    def ok(self, *args, **kwargs):          if self.code != 200:              return False          return True @@ -150,7 +150,7 @@ class UrlError(IOError):  def _get_ssl_args(url, ssl_details):      ssl_args = {} -    scheme = urlparse(url).scheme  # pylint: disable=E1101 +    scheme = urlparse(url).scheme      if scheme == 'https' and ssl_details:          if not SSL_ENABLED:              LOG.warn("SSL is not supported in requests v%s, " @@ -227,10 +227,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,              r = requests.request(**req_args)              if check_status: -                r.raise_for_status()  # pylint: disable=E1103 +                r.raise_for_status()              LOG.debug("Read from %s (%s, %sb) after %s attempts", url, -                      r.status_code, len(r.content),  # pylint: disable=E1103 -                      (i + 1)) +                      r.status_code, len(r.content), (i + 1))              # Doesn't seem like we can make it use a different              # subclass for responses, so add our own backward-compat              # attrs diff --git a/cloudinit/util.py b/cloudinit/util.py index 9bbb6b3c..946059e9 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -19,8 +19,6 @@  #  #    You should have received a copy of the GNU General Public License  #    along with this program.  If not, see <http://www.gnu.org/licenses/>. 
-# -# pylint: disable=C0302  from StringIO import StringIO @@ -42,7 +40,7 @@ import re  import shutil  import socket  import stat -import string  # pylint: disable=W0402 +import string  import subprocess  import sys  import tempfile @@ -487,7 +485,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):              new_fp = open(arg, owith)          elif mode == "|":              proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) -            new_fp = proc.stdin  # pylint: disable=E1101 +            new_fp = proc.stdin          else:              raise TypeError("Invalid type for output format: %s" % outfmt) @@ -509,7 +507,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):              new_fp = open(arg, owith)          elif mode == "|":              proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) -            new_fp = proc.stdin  # pylint: disable=E1101 +            new_fp = proc.stdin          else:              raise TypeError("Invalid type for error format: %s" % errfmt) @@ -937,7 +935,7 @@ def is_resolvable(name):      should also not exist.  The random entry will be resolved inside      the search list.      """ -    global _DNS_REDIRECT_IP  # pylint: disable=W0603 +    global _DNS_REDIRECT_IP      if _DNS_REDIRECT_IP is None:          badips = set()          badnames = ("does-not-exist.example.com.", "example.invalid.", @@ -1532,7 +1530,7 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,          (out, err) = sp.communicate(data)      except OSError as e:          raise ProcessExecutionError(cmd=args, reason=e) -    rc = sp.returncode  # pylint: disable=E1101 +    rc = sp.returncode      if rc not in rcs:          raise ProcessExecutionError(stdout=out, stderr=err,                                      exit_code=rc, diff --git a/config/cloud.cfg-freebsd b/config/cloud.cfg-freebsd new file mode 100644 index 00000000..bb3a4a51 --- /dev/null +++ b/config/cloud.cfg-freebsd @@ -0,0 +1,88 @@ +# The top level settings are used as module +# and system configuration. + +syslog_fix_perms: root:wheel + +# This should not be required, but leave it in place until the real cause of +# not beeing able to find -any- datasources is resolved. 
+datasource_list: ['OpenStack'] + +# A set of users which may be applied and/or used by various modules +# when a 'default' entry is found it will reference the 'default_user' +# from the distro configuration specified below +users: +   - default + +# If this is set, 'root' will not be able to ssh in and they  +# will get a message to login instead as the above $user (ubuntu) +disable_root: false + +# This will cause the set+update hostname module to not operate (if true) +preserve_hostname: false + +# Example datasource config +# datasource:  +#    Ec2:  +#      metadata_urls: [ 'blah.com' ] +#      timeout: 5 # (defaults to 50 seconds) +#      max_wait: 10 # (defaults to 120 seconds) + +# The modules that run in the 'init' stage +cloud_init_modules: +# - migrator + - seed_random + - bootcmd +# - write-files + - growpart + - resizefs + - set_hostname + - update_hostname +# - update_etc_hosts +# - ca-certs +# - rsyslog + - users-groups + - ssh + +# The modules that run in the 'config' stage +cloud_config_modules: +# - disk_setup +# - mounts + - ssh-import-id + - locale +# - set-passwords +# - package-update-upgrade-install +# - landscape +# - timezone +# - puppet +# - chef +# - salt-minion +# - mcollective + - disable-ec2-metadata + - runcmd +# - byobu + +# The modules that run in the 'final' stage +cloud_final_modules: + - rightscale_userdata + - scripts-vendor + - scripts-per-once + - scripts-per-boot + - scripts-per-instance + - scripts-user + - ssh-authkey-fingerprints + - keys-to-console + - phone-home + - final-message + - power-state-change + +# System and/or distro specific settings +# (not accessible to handlers/transforms) +system_info: +   distro: freebsd +   default_user: +     name: beastie +     lock_passwd: True +     gecos: FreeBSD +     groups: [wheel] +     sudo: ["ALL=(ALL) NOPASSWD:ALL"] +     shell: /bin/sh diff --git a/packages/debian/control.in b/packages/debian/control.in index c892747c..9207e5f4 100644 --- a/packages/debian/control.in +++ b/packages/debian/control.in @@ -9,7 +9,6 @@ Build-Depends: debhelper (>= 9),                 python (>= 2.6.6-3~),                 python-nose,                 pyflakes, -               pylint,                 python-setuptools,                 python-selinux,                 python-cheetah, diff --git a/pylintrc b/pylintrc deleted file mode 100644 index ee886510..00000000 --- a/pylintrc +++ /dev/null @@ -1,19 +0,0 @@ -[General] -init-hook='import sys; sys.path.append("tests/")' - -[MESSAGES CONTROL] -# See: http://pylint-messages.wikidot.com/all-codes -# W0142: *args and **kwargs are fine. -# W0511: TODOs in code comments are fine. 
-# W0702: No exception type(s) specified -# W0703: Catch "Exception" -# C0103: Invalid name -# C0111: Missing docstring -disable=W0142,W0511,W0702,W0703,C0103,C0111 - -[REPORTS] -reports=no -include-ids=yes - -[FORMAT] -max-line-length=79 @@ -46,7 +46,7 @@ def tiny_p(cmd, capture=True):      sp = subprocess.Popen(cmd, stdout=stdout,                      stderr=stderr, stdin=None)      (out, err) = sp.communicate() -    ret = sp.returncode  # pylint: disable=E1101 +    ret = sp.returncode      if ret not in [0]:          raise RuntimeError("Failed running %s [rc=%s] (%s, %s)"                              % (cmd, ret, out, err)) @@ -63,18 +63,28 @@ def systemd_unitdir():  INITSYS_FILES = {      'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)], +    'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)],      'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],      'systemd': [f for f in glob('systemd/*') if is_f(f)],      'upstart': [f for f in glob('upstart/*') if is_f(f)],  }  INITSYS_ROOTS = {      'sysvinit': '/etc/rc.d/init.d', +    'sysvinit_freebsd': '/usr/local/etc/rc.d',      'sysvinit_deb': '/etc/init.d',      'systemd': systemd_unitdir(),      'upstart': '/etc/init/',  }  INITSYS_TYPES = sorted(list(INITSYS_ROOTS.keys())) +# Install everything in the right location and take care of Linux (default) and +# FreeBSD systems. +USR = "/usr" +ETC = "/etc" +if os.uname()[0] == 'FreeBSD': +    USR = "/usr/local" +    ETC = "/usr/local/etc" +  def get_version():      cmd = ['tools/read-version'] @@ -136,18 +146,17 @@ setuptools.setup(name='cloud-init',                 'tools/cloud-init-per',                 ],        license='GPLv3', -      data_files=[('/etc/cloud', glob('config/*.cfg')), -                  ('/etc/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), -                  ('/etc/cloud/templates', glob('templates/*')), -                  ('/usr/share/cloud-init', []), -                  ('/usr/lib/cloud-init', +      data_files=[(ETC + '/cloud', glob('config/*.cfg')), +                  (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), +                  (ETC + '/cloud/templates', glob('templates/*')), +                  (USR + '/lib/cloud-init',                      ['tools/uncloud-init',                       'tools/write-ssh-key-fingerprints']), -                  ('/usr/share/doc/cloud-init', +                  (USR + '/share/doc/cloud-init',                     [f for f in glob('doc/*') if is_f(f)]), -                  ('/usr/share/doc/cloud-init/examples', +                  (USR + '/share/doc/cloud-init/examples',                     [f for f in glob('doc/examples/*') if is_f(f)]), -                  ('/usr/share/doc/cloud-init/examples/seed', +                  (USR + '/share/doc/cloud-init/examples/seed',                     [f for f in glob('doc/examples/seed/*') if is_f(f)]),                   ],        install_requires=read_requires(), diff --git a/sysvinit/freebsd/cloudconfig b/sysvinit/freebsd/cloudconfig index 15d7ab95..44c216b3 100755 --- a/sysvinit/freebsd/cloudconfig +++ b/sysvinit/freebsd/cloudconfig @@ -6,28 +6,28 @@  . 
/etc/rc.subr +export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg +  name="cloudconfig" -command="/usr/bin/cloud-init" +command="/usr/local/bin/cloud-init"  start_cmd="cloudconfig_start"  stop_cmd=":"  rcvar="cloudinit_enable"  start_precmd="cloudinit_override"  start_cmd="cloudconfig_start" -: ${cloudinit_config:="/etc/cloud/cloud.cfg"} -  cloudinit_override()  { -	# If there exist sysconfig/default variable override files use it... -	if [ -f /etc/default/cloud-init ]; then -		. /etc/default/cloud-init +	# If there exist sysconfig/defaults variable override files use it... +	if [ -f /etc/defaults/cloud-init ]; then +		. /etc/defaults/cloud-init  	fi  }  cloudconfig_start()  {  	echo "${command} starting" -	${command} ${cloudinit_config} modules --mode config +	${command} modules --mode config  }  load_rc_config $name diff --git a/sysvinit/freebsd/cloudfinal b/sysvinit/freebsd/cloudfinal index 49945ecd..f668e036 100755 --- a/sysvinit/freebsd/cloudfinal +++ b/sysvinit/freebsd/cloudfinal @@ -6,28 +6,28 @@  . /etc/rc.subr +export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg +  name="cloudfinal" -command="/usr/bin/cloud_init" +command="/usr/local/bin/cloud-init"  start_cmd="cloudfinal_start"  stop_cmd=":"  rcvar="cloudinit_enable"  start_precmd="cloudinit_override"  start_cmd="cloudfinal_start" -: ${cloudinit_config:="/etc/cloud/cloud.cfg"} -  cloudinit_override()  { -	# If there exist sysconfig/default variable override files use it... -	if [ -f /etc/default/cloud-init ]; then -		 . /etc/default/cloud-init +	# If there exist sysconfig/defaults variable override files use it... +	if [ -f /etc/defaults/cloud-init ]; then +		 . /etc/defaults/cloud-init  	fi  }  cloudfinal_start()  {  	echo -n "${command} starting" -	${command} ${cloudinit_config} modules --mode final +	${command} modules --mode final  }  load_rc_config $name diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit index 8d5ff10e..c5478678 100755 --- a/sysvinit/freebsd/cloudinit +++ b/sysvinit/freebsd/cloudinit @@ -6,28 +6,28 @@  . /etc/rc.subr +export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg +  name="cloudinit" -command="/usr/bin/cloud_init" +command="/usr/local/bin/cloud-init"  start_cmd="cloudinit_start"  stop_cmd=":"  rcvar="cloudinit_enable"  start_precmd="cloudinit_override"  start_cmd="cloudinit_start" -: ${cloudinit_config:="/etc/cloud/cloud.cfg"} -  cloudinit_override()  { -	# If there exist sysconfig/default variable override files use it... -	if [ -f /etc/default/cloud-init ]; then -		. /etc/default/cloud-init +	# If there exist sysconfig/defaults variable override files use it... +	if [ -f /etc/defaults/cloud-init ]; then +		. /etc/defaults/cloud-init  	fi  }  cloudinit_start()  {  	echo -n "${command} starting" -	${command} ${cloudinit_config} init +	${command} init  }  load_rc_config $name diff --git a/sysvinit/freebsd/cloudinitlocal b/sysvinit/freebsd/cloudinitlocal index b55705c0..c340d5d0 100755 --- a/sysvinit/freebsd/cloudinitlocal +++ b/sysvinit/freebsd/cloudinitlocal @@ -6,28 +6,28 @@  . /etc/rc.subr +export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg +  name="cloudinitlocal" -command="/usr/bin/cloud-init" +command="/usr/local/bin/cloud-init"  start_cmd="cloudlocal_start"  stop_cmd=":"  rcvar="cloudinit_enable"  start_precmd="cloudinit_override"  start_cmd="cloudlocal_start" -: ${cloudinit_config:="/etc/cloud/cloud.cfg"} -  cloudinit_override()  { -	# If there exist sysconfig/default variable override files use it... -	if [ -f /etc/default/cloud-init ]; then -		. 
/etc/default/cloud-init +	# If there exist sysconfig/defaults variable override files use it... +	if [ -f /etc/defaults/cloud-init ]; then +		. /etc/defaults/cloud-init  	fi  }  cloudlocal_start()  {  	echo -n "${command} starting" -	${command} ${cloudinit_config} init --local +	${command} init --local  }  load_rc_config $name diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 03065c8b..17965488 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -18,8 +18,7 @@ class FakeModule(handlers.Handler):      def list_types(self):          return self.types -    def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221 -                    payload, frequency): +    def handle_part(self, data, ctype, filename, payload, frequency):          pass diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 41d0dc29..fd6bd8a1 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -106,7 +106,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):          initer.read_cfg()          initer.initialize()          initer.fetch() -        _iid = initer.instancify() +        initer.instancify()          initer.update()          initer.cloudify().run('consume_data',                                initer.consume_data, @@ -145,7 +145,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):          initer.read_cfg()          initer.initialize()          initer.fetch() -        _iid = initer.instancify() +        initer.instancify()          initer.update()          initer.cloudify().run('consume_data',                                initer.consume_data, @@ -221,7 +221,7 @@ run:          initer.read_cfg()          initer.initialize()          initer.fetch() -        _iid = initer.instancify() +        initer.instancify()          initer.update()          initer.cloudify().run('consume_data',                                initer.consume_data, @@ -256,7 +256,7 @@ vendor_data:          initer.read_cfg()          initer.initialize()          initer.fetch() -        _iid = initer.instancify() +        initer.instancify()          initer.update()          initer.cloudify().run('consume_data',                                initer.consume_data, @@ -264,7 +264,6 @@ vendor_data:                                freq=PER_INSTANCE)          mods = stages.Modules(initer)          (_which_ran, _failures) = mods.run_section('cloud_init_modules') -        _cfg = mods.cfg          vendor_script = initer.paths.get_ipath_cur('vendor_scripts')          vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script)          self.assertTrue(os.path.exists(vendor_script_fns)) diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 8bcc026c..e9235951 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -57,7 +57,6 @@ class TestNoCloudDataSource(MockerTestCase):              pass          def my_find_devs_with(*args, **kwargs): -            _f = (args, kwargs)              raise PsuedoException          self.apply_patches([(util, 'find_devs_with', my_find_devs_with)]) diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index f43cbec8..7b4e651a 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -19,6 +19,7 @@  import copy  import json  
import re +import unittest  from StringIO import StringIO @@ -67,8 +68,8 @@ OSTACK_META = {  CONTENT_0 = 'This is contents of /etc/foo.cfg\n'  CONTENT_1 = '# this is /etc/bar/bar.cfg\n'  OS_FILES = { -    'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META), -    'openstack/2012-08-10/user_data': USER_DATA, +    'openstack/latest/meta_data.json': json.dumps(OSTACK_META), +    'openstack/latest/user_data': USER_DATA,      'openstack/content/0000': CONTENT_0,      'openstack/content/0001': CONTENT_1,      'openstack/latest/meta_data.json': json.dumps(OSTACK_META), @@ -78,6 +79,9 @@ OS_FILES = {  EC2_FILES = {      'latest/user-data': USER_DATA,  } +EC2_VERSIONS = [ +    'latest', +]  def _register_uris(version, ec2_files, ec2_meta, os_files): @@ -85,6 +89,9 @@ def _register_uris(version, ec2_files, ec2_meta, os_files):      same data returned by the openstack metadata service (and ec2 service)."""      def match_ec2_url(uri, headers): +        path = uri.path.strip("/") +        if len(path) == 0: +            return (200, headers, "\n".join(EC2_VERSIONS))          path = uri.path.lstrip("/")          if path in ec2_files:              return (200, headers, ec2_files.get(path)) @@ -110,11 +117,20 @@ def _register_uris(version, ec2_files, ec2_meta, os_files):                  return (200, headers, str(value))          return (404, headers, '') -    def get_request_callback(method, uri, headers): -        uri = urlparse(uri) +    def match_os_uri(uri, headers): +        path = uri.path.strip("/") +        if path == 'openstack': +            return (200, headers, "\n".join([openstack.OS_LATEST]))          path = uri.path.lstrip("/")          if path in os_files:              return (200, headers, os_files.get(path)) +        return (404, headers, '') + +    def get_request_callback(method, uri, headers): +        uri = urlparse(uri) +        path = uri.path.lstrip("/").split("/") +        if path[0] == 'openstack': +            return match_os_uri(uri, headers)          return match_ec2_url(uri, headers)      hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'), @@ -127,7 +143,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):      @hp.activate      def test_successful(self):          _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) -        f = ds.read_metadata_service(BASE_URL, version=self.VERSION) +        f = ds.read_metadata_service(BASE_URL)          self.assertEquals(VENDOR_DATA, f.get('vendordata'))          self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])          self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg']) @@ -149,7 +165,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):      @hp.activate      def test_no_ec2(self):          _register_uris(self.VERSION, {}, {}, OS_FILES) -        f = ds.read_metadata_service(BASE_URL, version=self.VERSION) +        f = ds.read_metadata_service(BASE_URL)          self.assertEquals(VENDOR_DATA, f.get('vendordata'))          self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])          self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg']) @@ -165,7 +181,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):                  os_files.pop(k, None)          _register_uris(self.VERSION, {}, {}, os_files)          self.assertRaises(openstack.NonReadable, ds.read_metadata_service, -                          BASE_URL, version=self.VERSION) +                          BASE_URL)      @hp.activate      def test_bad_uuid(self): @@ -177,7 +193,7 @@ 
class TestOpenStackDataSource(test_helpers.HttprettyTestCase):                  os_files[k] = json.dumps(os_meta)          _register_uris(self.VERSION, {}, {}, os_files)          self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service, -                          BASE_URL, version=self.VERSION) +                          BASE_URL)      @hp.activate      def test_userdata_empty(self): @@ -186,7 +202,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):              if k.endswith('user_data'):                  os_files.pop(k, None)          _register_uris(self.VERSION, {}, {}, os_files) -        f = ds.read_metadata_service(BASE_URL, version=self.VERSION) +        f = ds.read_metadata_service(BASE_URL)          self.assertEquals(VENDOR_DATA, f.get('vendordata'))          self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])          self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg']) @@ -199,7 +215,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):              if k.endswith('vendor_data.json'):                  os_files.pop(k, None)          _register_uris(self.VERSION, {}, {}, os_files) -        f = ds.read_metadata_service(BASE_URL, version=self.VERSION) +        f = ds.read_metadata_service(BASE_URL)          self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])          self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])          self.assertFalse(f.get('vendordata')) @@ -212,7 +228,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):                  os_files[k] = '{'  # some invalid json          _register_uris(self.VERSION, {}, {}, os_files)          self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service, -                          BASE_URL, version=self.VERSION) +                          BASE_URL)      @hp.activate      def test_metadata_invalid(self): @@ -222,7 +238,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):                  os_files[k] = '{'  # some invalid json          _register_uris(self.VERSION, {}, {}, os_files)          self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service, -                          BASE_URL, version=self.VERSION) +                          BASE_URL)      @hp.activate      def test_datasource(self): @@ -241,7 +257,8 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):          self.assertEquals(EC2_META, ds_os.ec2_metadata)          self.assertEquals(USER_DATA, ds_os.userdata_raw)          self.assertEquals(2, len(ds_os.files)) -        self.assertEquals(VENDOR_DATA, ds_os.vendordata_raw) +        self.assertEquals(VENDOR_DATA, ds_os.vendordata_pure) +        self.assertEquals(ds_os.vendordata_raw, None)      @hp.activate      def test_bad_datasource_meta(self): @@ -299,3 +316,34 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):          found = ds_os.get_data()          self.assertFalse(found)          self.assertIsNone(ds_os.version) + + +class TestVendorDataLoading(unittest.TestCase): +    def cvj(self, data): +        return openstack.convert_vendordata_json(data) + +    def test_vd_load_none(self): +        # non-existant vendor-data should return none +        self.assertIsNone(self.cvj(None)) + +    def test_vd_load_string(self): +        self.assertEqual(self.cvj("foobar"), "foobar") + +    def test_vd_load_list(self): +        data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])] +        self.assertEqual(self.cvj(data), data) + +    def test_vd_load_dict_no_ci(self): +        
self.assertEqual(self.cvj({'foo': 'bar'}), None) + +    def test_vd_load_dict_ci_dict(self): +        self.assertRaises(ValueError, self.cvj, +                          {'foo': 'bar', 'cloud-init': {'x': 1}}) + +    def test_vd_load_dict_ci_string(self): +        data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'} +        self.assertEqual(self.cvj(data), data['cloud-init']) + +    def test_vd_load_dict_ci_list(self): +        data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']} +        self.assertEqual(self.cvj(data), data['cloud-init']) diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py index a972568f..db6aa0e8 100644 --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/test_distros/test_generic.py @@ -26,8 +26,8 @@ package_mirrors = [      unknown_arch_info  ] -gpmi = distros._get_package_mirror_info  # pylint: disable=W0212 -gapmi = distros._get_arch_package_mirror_info  # pylint: disable=W0212 +gpmi = distros._get_package_mirror_info +gapmi = distros._get_arch_package_mirror_info  class TestGenericDistro(helpers.FilesystemMockingTestCase): diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 9763b14b..ed997a1d 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -173,3 +173,60 @@ NETWORKING=yes  '''          self.assertCfgEquals(expected_buf, str(write_buf))          self.assertEquals(write_buf.mode, 0644) + +    def test_simple_write_freebsd(self): +        fbsd_distro = self._get_distro('freebsd') +        util_mock = self.mocker.replace(util.write_file, +                                        spec=False, passthrough=False) +        exists_mock = self.mocker.replace(os.path.isfile, +                                          spec=False, passthrough=False) +        load_mock = self.mocker.replace(util.load_file, +                                        spec=False, passthrough=False) + +        exists_mock(mocker.ARGS) +        self.mocker.count(0, None) +        self.mocker.result(False) + +        write_bufs = {} +        read_bufs = { +            '/etc/rc.conf': '', +        } + +        def replace_write(filename, content, mode=0644, omode="wb"): +            buf = WriteBuffer() +            buf.mode = mode +            buf.omode = omode +            buf.write(content) +            write_bufs[filename] = buf + +        def replace_read(fname, read_cb=None, quiet=False): +            if fname not in read_bufs: +                if fname in write_bufs: +                    return str(write_bufs[fname]) +                raise IOError("%s not found" % fname) +            else: +                if fname in write_bufs: +                    return str(write_bufs[fname]) +                return read_bufs[fname] + +        util_mock(mocker.ARGS) +        self.mocker.call(replace_write) +        self.mocker.count(0, None) + +        load_mock(mocker.ARGS) +        self.mocker.call(replace_read) +        self.mocker.count(0, None) + +        self.mocker.replay() +        fbsd_distro.apply_network(BASE_NET_CFG, False) + +        self.assertIn('/etc/rc.conf', write_bufs) +        write_buf = write_bufs['/etc/rc.conf'] +        expected_buf = ''' +ifconfig_eth0="192.168.1.5 netmask 255.255.255.0" +ifconfig_eth1="DHCP" +defaultrouter="192.168.1.254" +''' +        self.assertCfgEquals(expected_buf, str(write_buf)) +        self.assertEquals(write_buf.mode, 0644) + diff --git 
a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index fa624197..5d0636d1 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -203,8 +203,6 @@ def simple_device_part_info(devpath):  class Bunch(object): -    st_mode = None  # fix pylint complaint -      def __init__(self, **kwds):          self.__dict__.update(kwds) diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 17704f8e..07b610f7 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -11,7 +11,7 @@ import glob  import os  import random  import re -import string  # pylint: disable=W0402 +import string  SOURCE_PAT = "source*.*yaml"  EXPECTED_PAT = "expected%s.yaml" diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index 32b41925..977adb34 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -33,7 +33,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase):          initer.initialize()          initer.fetch()          initer.datasource.userdata_raw = ud -        _iid = initer.instancify() +        initer.instancify()          initer.update()          initer.cloudify().run('consume_data',                                initer.consume_data, diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 0cb41520..35e92445 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -1,5 +1,3 @@ -# pylint: disable=C0301 -# the mountinfo data lines are too long  import os  import stat  import yaml @@ -18,7 +16,7 @@ class FakeSelinux(object):          self.match_what = match_what          self.restored = [] -    def matchpathcon(self, path, mode):  # pylint: disable=W0613 +    def matchpathcon(self, path, mode):          if path == self.match_what:              return          else: @@ -27,7 +25,7 @@ class FakeSelinux(object):      def is_selinux_enabled(self):          return True -    def restorecon(self, path, recursive):  # pylint: disable=W0613 +    def restorecon(self, path, recursive):          self.restored.append(path) diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd new file mode 100755 index 00000000..65d783f7 --- /dev/null +++ b/tools/build-on-freebsd @@ -0,0 +1,58 @@ +#!/bin/sh +# Since there is no official FreeBSD port yet, we need some way of building and +# installing cloud-init. This script takes care of building and installing. It +# will optionally make a first run at the end. + +fail() { echo "FAILED:" "$@" 1>&2; exit 1; } + +# Check dependencies: +depschecked=/tmp/c-i.dependencieschecked +pkgs=" +   dmidecode +   py27-argparse +   py27-boto gpart sudo +   py27-configobj py27-yaml +   py27-Jinja2 +   py27-oauth py27-serial +   py27-prettytable +   py27-requests py27-six +   python py27-cheetah +   py27-jsonpointer py27-jsonpatch +" +[ -f "$depschecked" ] || pkg install ${pkgs} || fail "install packages" +touch $depschecked + +# Required but unavailable port/pkg: py27-jsonpatch py27-jsonpointer +# Luckily, the install step will take care of this by installing it from pypi... 
+ +# Build the code and install in /usr/local/: +python setup.py build +python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd + +# Install the correct config file: +cp config/cloud.cfg-freebsd /usr/local/etc/cloud/cloud.cfg + +# Enable cloud-init in /etc/rc.conf: +sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf +echo 'cloudinit_enable="YES"' >> /etc/rc.conf + +echo "Installation completed." + +if [ "$1" = "run" ]; then +	echo "Ok, now let's see if it works." + +	# Backup SSH keys +	mv /etc/ssh/ssh_host_* /tmp/ + +	# Remove old metadata +	rm -rf /var/lib/cloud + +	# Just log everything, quick&dirty +	rm /usr/local/etc/cloud/cloud.cfg.d/05_logging.cfg  + +	# Start: +	/usr/local/etc/rc.d/cloudinit start + +	# Restore SSH keys +	mv /tmp/ssh_host_* /etc/ssh/ +fi diff --git a/tools/hacking.py b/tools/hacking.py index 14bd0cda..e7797564 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -154,7 +154,7 @@ def add_cloud():          if not inspect.isfunction(function):              continue          if name.startswith("cloud_"): -            exec("pep8.%s = %s" % (name, name))  # pylint: disable=W0122 +            exec("pep8.%s = %s" % (name, name))  if __name__ == "__main__":      # NOVA based 'hacking.py' error codes start with an N @@ -163,7 +163,7 @@ if __name__ == "__main__":      pep8.current_file = current_file      pep8.readlines = readlines      try: -        pep8._main()  # pylint: disable=W0212 +        pep8._main()      finally:          if len(_missingImport) > 0:              print >> sys.stderr, ("%i imports missing in this test environment" diff --git a/tools/mock-meta.py b/tools/mock-meta.py index c79f0598..dfbc2a71 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -23,7 +23,7 @@ import json  import logging  import os  import random -import string  # pylint: disable=W0402 +import string  import sys  import yaml @@ -306,7 +306,7 @@ class UserDataHandler(object):              blob = "\n".join(lines)          return blob.strip() -    def get_data(self, params, who, **kwargs):  # pylint: disable=W0613 +    def get_data(self, params, who, **kwargs):          if not params:              return self._get_user_blob(who=who)          return NOT_IMPL_RESPONSE @@ -427,8 +427,8 @@ def extract_opts():  def setup_fetchers(opts): -    global meta_fetcher  # pylint: disable=W0603 -    global user_fetcher  # pylint: disable=W0603 +    global meta_fetcher +    global user_fetcher      meta_fetcher = MetaDataHandler(opts)      user_fetcher = UserDataHandler(opts) diff --git a/tools/run-pep8 b/tools/run-pep8 index cfce5edd..d0a131f6 100755 --- a/tools/run-pep8 +++ b/tools/run-pep8 @@ -13,7 +13,7 @@ else      base=`pwd`/tools/  fi -IGNORE="E501" # Line too long (these are caught by pylint) +IGNORE=""  # King Arthur: Be quiet! ... Be Quiet! I Order You to Be Quiet.  IGNORE="$IGNORE,E121" # Continuation line indentation is not a multiple of four diff --git a/tools/run-pylint b/tools/run-pylint deleted file mode 100755 index 0fe0c64a..00000000 --- a/tools/run-pylint +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -if [ $# -eq 0 ]; then -   files=( bin/cloud-init $(find * -name "*.py" -type f) ) -else -   files=( "$@" ); -fi  - -RC_FILE="pylintrc" -if [ ! 
-f $RC_FILE ]; then -    RC_FILE="../pylintrc" -fi - -cmd=( -    pylint -    --rcfile=$RC_FILE -    --disable=R -    --disable=I -    --dummy-variables-rgx="_" -    "${files[@]}" -) - -echo -e "\nRunning pylint:" -echo "${cmd[@]}" -"${cmd[@]}" - diff --git a/upstart/cloud-init-blocknet.conf b/upstart/cloud-init-blocknet.conf new file mode 100644 index 00000000..be09e7d8 --- /dev/null +++ b/upstart/cloud-init-blocknet.conf @@ -0,0 +1,83 @@ +# cloud-init-blocknet +# the purpose of this job is +#  * to block networking from coming up until cloud-init-nonet has run +#  * timeout if they all do not come up in a reasonable amount of time +description "block networking until cloud-init-local" +start on (starting network-interface +          or starting network-manager +          or starting networking) +stop on stopped cloud-init-local + +instance $JOB${INTERFACE:+/}${INTERFACE:-} +export INTERFACE +task + +script +   set +e  # you cannot trap TERM reliably with 'set -e' +   SLEEP_CHILD="" + +   static_network_up() { +      local emitted="/run/network/static-network-up-emitted" +      # /run/network/static-network-up-emitted is written by +      # upstart (via /etc/network/if-up.d/upstart). its presense would +      # indicate that static-network-up has already fired. +      [ -e "$emitted" -o -e "/var/$emitted" ] +   } +   msg() { +      local uptime="" idle="" msg="" +      if [ -r /proc/uptime ]; then +         read uptime idle < /proc/uptime +      fi +      msg="${UPSTART_INSTANCE}${uptime:+[${uptime}]}: $*" +      echo "$msg" +   } + +   handle_sigterm() { +      # if we received sigterm and static networking is up then it probably +      # came from upstart as a result of 'stop on static-network-up' +      msg "got sigterm" +      if [ -n "$SLEEP_CHILD" ]; then +          if ! kill $SLEEP_CHILD 2>/dev/null; then +              [ ! -d "/proc/$SLEEP_CHILD" ] || +                  msg "hm.. failed to kill sleep pid $SLEEP_CHILD" +          fi +      fi +      msg "stopped" +      exit 0 +   } + +   dowait() { +      msg "blocking $1 seconds" +      # all this 'exec -a' does is get me a nicely named process in 'ps' +      # ie, 'sleep-block-network-interface.eth1' +      if [ -x /bin/bash ]; then  +         bash -c 'exec -a sleep-block-$1 sleep $2' -- "$UPSTART_INSTANCE" "$1" & +      else +         sleep "$1" & +      fi +      SLEEP_CHILD=$! 
+      msg "sleepchild=$SLEEP_CHILD" +      wait $SLEEP_CHILD +      SLEEP_CHILD="" +   } + +   trap handle_sigterm TERM + +   if [ -n "$INTERFACE" -a "${INTERFACE#lo}" != "${INTERFACE}" ]; then +     msg "ignoring interface ${INTERFACE}"; +     exit 0; +   fi + +   # static_network_up already occurred +   static_network_up && { msg "static_network_up already"; exit 0; } + +   # local-finished cloud-init-local success or failure +   lfin="/run/cloud-init/local-finished" +   disable="/etc/cloud/no-blocknet" +   [ -f "$lfin" ] && { msg "$lfin found"; exit 0; } +   [ -f "$disable" ] && { msg "$disable found"; exit 0; } + +   dowait 120 +   msg "gave up waiting for $lfin" +   exit 1 +end script diff --git a/upstart/cloud-init-local.conf b/upstart/cloud-init-local.conf index 061fe406..5def043d 100644 --- a/upstart/cloud-init-local.conf +++ b/upstart/cloud-init-local.conf @@ -1,9 +1,16 @@  # cloud-init - the initial cloud-init job  #   crawls metadata service, emits cloud-config -start on mounted MOUNTPOINT=/ +start on mounted MOUNTPOINT=/ and mounted MOUNTPOINT=/run  task  console output -exec /usr/bin/cloud-init init --local   +script +  lfin=/run/cloud-init/local-finished +  ret=0 +  cloud-init init --local || ret=$? +  [ -r /proc/uptime ] && read up idle < /proc/uptime || up="N/A" +  echo "$ret up $up" > "$lfin" +  exit $ret +end script diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf index e8ebee96..6abf6573 100644 --- a/upstart/cloud-init-nonet.conf +++ b/upstart/cloud-init-nonet.conf @@ -46,7 +46,7 @@ script     }     dowait() { -      msg "waiting $1 seconds for network device" +      [ $# -eq 2 ] || msg "waiting $1 seconds for network device"        sleep "$1" &        SLEEP_CHILD=$!        wait $SLEEP_CHILD @@ -58,12 +58,9 @@ script     # static_network_up already occurred     static_network_up && exit 0 -   # obj.pkl comes from cloud-init-local (or previous boot and -   # manual_cache_clean) -   [ -f /var/lib/cloud/instance/obj.pkl ] && exit 0 - +   dowait 5 silent     dowait 10 -   dowait 120 +   dowait 115     msg "gave up waiting for a network device."     : > /var/lib/cloud/data/no-net  end script  | 

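The reworked OpenStack fixtures also register index listings: the bare 'openstack' path now answers with openstack.OS_LATEST, the EC2 root answers with EC2_VERSIONS, and read_metadata_service() is called without a version argument, so the reader is expected to discover a usable API version itself. The helper below is only a rough illustration of that kind of negotiation; fetch_versions, pick_version and the SUPPORTED_VERSIONS preference list are names and values assumed for this sketch, not cloud-init's API.

# Hypothetical sketch of version negotiation against the metadata service.
import urllib2

SUPPORTED_VERSIONS = ['2013-10-17', '2012-08-10', 'latest']  # assumed preference order

def fetch_versions(base_url):
    # The service lists available API versions, one per line, at the
    # 'openstack' index (the tests answer this with openstack.OS_LATEST).
    return urllib2.urlopen(base_url + '/openstack').read().splitlines()

def pick_version(base_url):
    available = set(fetch_versions(base_url))
    for candidate in SUPPORTED_VERSIONS:
        if candidate in available:
            return candidate
    return 'latest'  # last-resort fallback when nothing advertised is recognised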