-rw-r--r--  ChangeLog                                          |   6
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py                   |   3
-rw-r--r--  cloudinit/config/cc_resizefs.py                    |  20
-rw-r--r--  cloudinit/config/cc_set_passwords.py               |   2
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py               |   2
-rw-r--r--  cloudinit/distros/__init__.py                      |   2
-rw-r--r--  cloudinit/distros/arch.py                          |   2
-rw-r--r--  cloudinit/distros/debian.py                        |   2
-rw-r--r--  cloudinit/distros/freebsd.py                       |   7
-rw-r--r--  cloudinit/distros/gentoo.py                        |   2
-rw-r--r--  cloudinit/netinfo.py                               |   6
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py         |  14
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py           |  27
-rw-r--r--  cloudinit/sources/helpers/openstack.py             |  68
-rw-r--r--  cloudinit/util.py                                  |   4
-rw-r--r--  tests/unittests/test_data.py                       |   9
-rw-r--r--  tests/unittests/test_datasource/test_nocloud.py    |   1
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py  |  51
-rw-r--r--  tests/unittests/test_runs/test_merge_run.py        |   2
-rwxr-xr-x  tools/build-on-freebsd                             |   1
-rw-r--r--  upstart/cloud-init-blocknet.conf                   |  83
-rw-r--r--  upstart/cloud-init-local.conf                      |  11
-rw-r--r--  upstart/cloud-init-nonet.conf                      |   9
23 files changed, 253 insertions, 81 deletions
@@ -31,6 +31,12 @@
 - Datasource: fix broken logic to provide hostname if datasource does not
   provide one
 - Improved and less verbose logging.
+ - resizefs: first check that device is writable.
+ - configdrive: fix reading of vendor data to be like metadata service reader.
+   [Jay Faulkner]
+ - resizefs: fix broken background resizing [Jay Faulkner] (LP: #1338614)
+ - cc_grub_dpkg: fix EC2 hvm instances to avoid prompt on grub update.
+   (LP: #1336855)
 0.7.5:
 - open 0.7.5
 - Add a debug log message around import failures
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index 85716a91..e3219e81 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -47,7 +47,8 @@ def handle(_name, cfg, _cloud, log, _args):
     idevs_empty = "false"
     if idevs is None:
         idevs = "/dev/sda"
-        for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"):
+        for dev in ("/dev/sda", "/dev/vda", "/dev/xvda",
+                    "/dev/sda1", "/dev/vda1", "/dev/xvda1"):
             if os.path.exists(dev):
                 idevs = dev
                 break
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index b9655749..cbc07853 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -98,12 +98,12 @@ def handle(name, cfg, _cloud, log, args):
     (devpth, fs_type, mount_point) = result
 
-    # Ensure the path is a block device.
     info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
     log.debug("resize_info: %s" % info)
 
     container = util.is_container()
 
+    # Ensure the path is a block device.
     if (devpth == "/dev/root" and not os.path.exists(devpth) and
             not container):
         devpth = rootdev_from_cmdline(util.get_cmdline())
@@ -117,14 +117,22 @@ def handle(name, cfg, _cloud, log, args):
     except OSError as exc:
         if container and exc.errno == errno.ENOENT:
             log.debug("Device '%s' did not exist in container. "
-                      "cannot resize: %s" % (devpth, info))
+                      "cannot resize: %s", devpth, info)
         elif exc.errno == errno.ENOENT:
-            log.warn("Device '%s' did not exist. cannot resize: %s" %
-                     (devpth, info))
+            log.warn("Device '%s' did not exist. cannot resize: %s",
+                     devpth, info)
         else:
             raise exc
         return
 
+    if not os.access(devpth, os.W_OK):
+        if container:
+            log.debug("'%s' not writable in container. cannot resize: %s",
+                      devpth, info)
+        else:
+            log.warn("'%s' not writable. cannot resize: %s", devpth, info)
+        return
+
     if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
         if container:
             log.debug("device '%s' not a block device in container."
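
The new guard in cc_resizefs.py amounts to three pre-flight checks on the device before any resize command runs: it must exist, be writable, and be a block (or character) device. A standalone sketch of the same logic, for illustration only (the helper name is hypothetical, not cloud-init API):

    import os
    import stat

    def device_is_resizable(devpth):
        """Mirror the checks cc_resizefs now performs before resizing."""
        try:
            st = os.stat(devpth)
        except OSError:
            return False              # device missing: nothing to resize
        if not os.access(devpth, os.W_OK):
            return False              # not writable, e.g. in an unprivileged container
        return stat.S_ISBLK(st.st_mode) or stat.S_ISCHR(st.st_mode)
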
@@ -154,8 +162,8 @@ def handle(name, cfg, _cloud, log, args):
         # Fork to a child that will run
         # the resize command
         util.fork_cb(
-            util.log_time(logfunc=log.debug, msg="backgrounded Resizing",
-                          func=do_resize, args=(resize_cmd, log)))
+            util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
+            func=do_resize, args=(resize_cmd, log))
     else:
         util.log_time(logfunc=log.debug, msg="Resizing", func=do_resize,
                       args=(resize_cmd, log))
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 24e33915..4ca85e21 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -132,7 +132,7 @@ def handle(_name, cfg, cloud, log, args):
                        'PasswordAuthentication',
                        pw_auth))
 
-        lines = [str(e) for e in new_lines]
+        lines = [str(l) for l in new_lines]
         util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))
 
         try:
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 76c1663d..2d480d7e 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -85,7 +85,7 @@ def import_ssh_ids(ids, user, log):
         return
 
     try:
-        _check = pwd.getpwnam(user)
+        pwd.getpwnam(user)
     except KeyError as exc:
         raise exc
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 9c9211fe..2599d9f2 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -861,5 +861,5 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
     util.write_file(tz_conf, str(tz).rstrip() + "\n")
     # This ensures that the correct tz will be used for the system
     if tz_local and tz_file:
-        util.copy(tz_file, self.tz_local_fn)
+        util.copy(tz_file, tz_local)
     return
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 9f11b89c..005a0dd4 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -159,7 +159,7 @@ class Distro(distros.Distro):
         return hostname
 
     def set_timezone(self, tz):
-        set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+        distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
 
     def package_command(self, command, args=None, pkgs=None):
         if pkgs is None:
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 7cf4a9ef..010be67d 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -131,7 +131,7 @@ class Distro(distros.Distro):
         return "127.0.1.1"
 
     def set_timezone(self, tz):
-        set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+        distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
 
     def package_command(self, command, args=None, pkgs=None):
         if pkgs is None:
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 42ef2290..cff10387 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -37,6 +37,7 @@ class Distro(distros.Distro):
     login_conf_fn = '/etc/login.conf'
     login_conf_fn_bak = '/etc/login.conf.orig'
     resolv_conf_fn = '/etc/resolv.conf'
+    ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'
 
     def __init__(self, name, cfg, paths):
         distros.Distro.__init__(self, name, cfg, paths)
@@ -219,10 +220,6 @@ class Distro(distros.Distro):
             util.logexc(LOG, "Failed to lock user %s", name)
             raise e
 
-    # TODO:
-    def write_sudo_rules(self, name, rules, sudo_file=None):
-        LOG.debug("[write_sudo_rules] Name: %s", name)
-
     def create_user(self, name, **kwargs):
         self.add_user(name, **kwargs)
 
@@ -267,7 +264,7 @@ class Distro(distros.Distro):
             if 'dns-nameservers' in info:
                 nameservers.extend(info['dns-nameservers'])
             if 'dns-search' in info:
-                searchservers.extend(info['dns-search'])
+                searchdomains.extend(info['dns-search'])
         else:
             ifconfig = 'DHCP'
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index c4b02de1..45c2e658 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -138,7 +138,7 @@ class Distro(distros.Distro):
         return hostname
 
     def set_timezone(self, tz):
-        set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+        distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
 
     def package_command(self, command, args=None, pkgs=None):
         if pkgs is None:
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 1bdca9f7..8d4df342 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -21,10 +21,13 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import cloudinit.util as util
+from cloudinit.log import logging
 import re
 
 from prettytable import PrettyTable
 
+LOG = logging.getLogger()
+
 
 def netdev_info(empty=""):
     fields = ("hwaddr", "addr", "bcast", "mask")
@@ -168,8 +171,9 @@ def route_pformat():
     lines = []
     try:
         routes = route_info()
-    except Exception:
+    except Exception as e:
         lines.append(util.center('Route info failed', '!', 80))
+        util.logexc(LOG, "Route info failed: %s" % e)
         routes = None
     if routes is not None:
         fields = ['Route', 'Destination', 'Gateway',
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 0c35f83a..4e5d90de 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -125,7 +125,15 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
         self.userdata_raw = results.get('userdata')
         self.version = results['version']
         self.files.update(results.get('files', {}))
-        self.vendordata_raw = results.get('vendordata')
+
+        vd = results.get('vendordata')
+        self.vendordata_pure = vd
+        try:
+            self.vendordata_raw = openstack.convert_vendordata_json(vd)
+        except ValueError as e:
+            LOG.warn("Invalid content in vendor-data: %s", e)
+            self.vendordata_raw = None
+
         return True
@@ -160,10 +168,10 @@ def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
     return "net"
 
 
-def read_config_drive(source_dir, version="2012-08-10"):
+def read_config_drive(source_dir):
     reader = openstack.ConfigDriveReader(source_dir)
     finders = [
-        (reader.read_v2, [], {'version': version}),
+        (reader.read_v2, [], {}),
         (reader.read_v1, [], {}),
     ]
     excps = []
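
Both datasources now funnel vendor data through openstack.convert_vendordata_json (added further down, in cloudinit/sources/helpers/openstack.py) instead of ad-hoc handling. Its intended behaviour, as exercised by the new TestVendorDataLoading tests, is roughly:

    from cloudinit.sources.helpers import openstack

    openstack.convert_vendordata_json(None)           # -> None (nothing to process)
    openstack.convert_vendordata_json("#!/bin/sh\n")  # -> the string, unchanged
    openstack.convert_vendordata_json(["part-1"])     # -> the list, for UserDataProcessor
    # dicts are namespaced: only the 'cloud-init' key is consumed
    openstack.convert_vendordata_json({'other': 1, 'cloud-init': ['vd-1', 'vd-2']})
    # -> ['vd-1', 'vd-2']; a dict under 'cloud-init' raises ValueError instead
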
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 0970d07b..469c2e2a 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -88,11 +88,9 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
         md_urls = []
         url2base = {}
         for url in urls:
-            for version in openstack.OS_VERSIONS + (openstack.OS_LATEST,):
-                md_url = url_helper.combine_url(url, 'openstack',
-                                                version, 'meta_data.json')
-                md_urls.append(md_url)
-                url2base[md_url] = url
+            md_url = url_helper.combine_url(url, 'openstack')
+            md_urls.append(md_url)
+            url2base[md_url] = url
 
         (max_wait, timeout) = self._get_url_settings()
         start_time = time.time()
@@ -119,8 +117,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
                 'Crawl of openstack metadata service',
                 read_metadata_service,
                 args=[self.metadata_address],
-                kwargs={'ssl_details': self.ssl_details,
-                        'version': openstack.OS_HAVANA})
+                kwargs={'ssl_details': self.ssl_details})
         except openstack.NonReadable:
             return False
         except (openstack.BrokenMetadata, IOError):
@@ -143,20 +140,20 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
         self.version = results['version']
         self.files.update(results.get('files', {}))
 
-        # if vendordata includes 'cloud-init', then read that explicitly
-        # for cloud-init (for namespacing).
         vd = results.get('vendordata')
-        if isinstance(vd, dict) and 'cloud-init' in vd:
-            self.vendordata_raw = vd['cloud-init']
-        else:
-            self.vendordata_raw = vd
+        self.vendordata_pure = vd
+        try:
+            self.vendordata_raw = openstack.convert_vendordata_json(vd)
+        except ValueError as e:
+            LOG.warn("Invalid content in vendor-data: %s", e)
+            self.vendordata_raw = None
 
         return True
 
 
-def read_metadata_service(base_url, version=None, ssl_details=None):
+def read_metadata_service(base_url, ssl_details=None):
     reader = openstack.MetadataReader(base_url, ssl_details=ssl_details)
-    return reader.read_v2(version=version)
+    return reader.read_v2()
 
 
 # Used to match classes to dependencies
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 3c6bb6aa..b7e19314 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -21,6 +21,7 @@
 import abc
 import base64
 import copy
+import functools
 import os
 
 from cloudinit import ec2_utils
@@ -48,6 +49,7 @@ OS_LATEST = 'latest'
 OS_FOLSOM = '2012-08-10'
 OS_GRIZZLY = '2013-04-04'
 OS_HAVANA = '2013-10-17'
+# keep this in chronological order. new supported versions go at the end.
 OS_VERSIONS = (
     OS_FOLSOM,
     OS_GRIZZLY,
@@ -161,25 +163,27 @@ class BaseReader(object):
     def _read_ec2_metadata(self):
         pass
 
-    def _find_working_version(self, version):
+    def _find_working_version(self):
         try:
-            versions_available = self._fetch_available_versions(self)
+            versions_available = self._fetch_available_versions()
         except Exception as e:
-            LOG.warn("Unable to read openstack versions from %s due to: %s",
-                     self.base_path, e)
+            LOG.debug("Unable to read openstack versions from %s due to: %s",
+                      self.base_path, e)
             versions_available = []
 
-        search_versions = [version] + list(OS_VERSIONS)
+        # openstack.OS_VERSIONS is stored in chronological order, so
+        # reverse it to check newest first.
+        supported = [v for v in reversed(list(OS_VERSIONS))]
         selected_version = OS_LATEST
-        for potential_version in search_versions:
+
+        for potential_version in supported:
             if potential_version not in versions_available:
                 continue
             selected_version = potential_version
             break
 
-        if selected_version != version:
-            LOG.warn("Version '%s' not available, attempting to use"
-                     " version '%s' instead", version, selected_version)
+        LOG.debug("Selected version '%s' from %s", selected_version,
+                  versions_available)
         return selected_version
 
     def _read_content_path(self, item):
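
With the version argument gone, _find_working_version simply asks the service what it advertises and picks the newest entry of OS_VERSIONS that is present, falling back to 'latest'. A minimal sketch of that selection (standalone names, not the cloud-init API):

    OS_VERSIONS = ('2012-08-10', '2013-04-04', '2013-10-17')   # chronological
    OS_LATEST = 'latest'

    def pick_version(available):
        # newest supported version wins; 'latest' if nothing matches
        for candidate in reversed(OS_VERSIONS):
            if candidate in available:
                return candidate
        return OS_LATEST

    pick_version(['2012-08-10', '2013-04-04', 'latest'])   # -> '2013-04-04'
    pick_version([])                                       # -> 'latest'
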
""" + load_json_anytype = functools.partial( + util.load_json, root_types=(dict, basestring, list)) + def datafiles(version): files = {} files['metadata'] = ( @@ -218,16 +225,15 @@ class BaseReader(object): files['vendordata'] = ( self._path_join("openstack", version, 'vendor_data.json'), False, - util.load_json, + load_json_anytype, ) return files - version = self._find_working_version(version) results = { 'userdata': '', 'version': 2, } - data = datafiles(version) + data = datafiles(self._find_working_version()) for (name, (path, required, translator)) in data.iteritems(): path = self._path_join(self.base_path, path) data = None @@ -239,7 +245,8 @@ class BaseReader(object): LOG.debug("Failed reading optional path %s due" " to: %s", path, e) else: - LOG.exception("Failed reading mandatory path %s", path) + LOG.debug("Failed reading mandatory path %s due" + " to: %s", path, e) else: found = True if required and not found: @@ -325,7 +332,7 @@ class ConfigDriveReader(BaseReader): path = self._path_join(self.base_path, 'openstack') found = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path))] - self._versions = tuple(found) + self._versions = found return self._versions def _read_ec2_metadata(self): @@ -418,18 +425,18 @@ class MetadataReader(BaseReader): def _fetch_available_versions(self): # <baseurl>/openstack/ returns a newline separated list of versions if self._versions is not None: - return self.os_versions + return self._versions found = [] + version_path = self._path_join(self.base_path, "openstack") content = self._path_read(version_path) for line in content.splitlines(): line = line.strip() if not line: continue found.append(line) - self._versions = tuple(found) + self._versions = found return self._versions - def _path_read(self, path): def should_retry_cb(_request_args, cause): @@ -456,3 +463,28 @@ class MetadataReader(BaseReader): return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details, timeout=self.timeout, retries=self.retries) + + +def convert_vendordata_json(data, recurse=True): + """ data: a loaded json *object* (strings, arrays, dicts). + return something suitable for cloudinit vendordata_raw. 
@@ -456,3 +463,28 @@ class MetadataReader(BaseReader):
         return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details,
                                                timeout=self.timeout,
                                                retries=self.retries)
+
+
+def convert_vendordata_json(data, recurse=True):
+    """ data: a loaded json *object* (strings, arrays, dicts).
+    return something suitable for cloudinit vendordata_raw.
+
+    if data is:
+       None: return None
+       string: return string
+       list: return data
+             the list is then processed in UserDataProcessor
+       dict: return convert_vendordata_json(data.get('cloud-init'))
+    """
+    if not data:
+        return None
+    if isinstance(data, (str, unicode, basestring)):
+        return data
+    if isinstance(data, list):
+        return copy.deepcopy(data)
+    if isinstance(data, dict):
+        if recurse is True:
+            return convert_vendordata_json(data.get('cloud-init'),
+                                           recurse=False)
+        raise ValueError("vendordata['cloud-init'] cannot be dict")
+    raise ValueError("Unknown data type for vendordata: %s" % type(data))
diff --git a/cloudinit/util.py b/cloudinit/util.py
index bdb0f268..946059e9 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -191,11 +191,11 @@ def ExtendedTemporaryFile(**kwargs):
     return fh
 
 
-def fork_cb(child_cb, *args):
+def fork_cb(child_cb, *args, **kwargs):
     fid = os.fork()
     if fid == 0:
         try:
-            child_cb(*args)
+            child_cb(*args, **kwargs)
             os._exit(0)
         except:
             logexc(LOG, "Failed forking and calling callback %s",
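
This change pairs with the cc_resizefs hunk above. Previously util.fork_cb(util.log_time(...)) evaluated log_time in the parent and forked on its return value, so the "backgrounded" resize never ran in a child; now the callable plus its keyword arguments are forwarded and only invoked after the fork. A simplified, self-contained sketch of the two forms (stand-in helpers, not the real cloudinit.util code):

    import logging
    import os
    import time

    def fork_cb(child_cb, *args, **kwargs):
        """Run child_cb(*args, **kwargs) in a forked child, then exit the child."""
        if os.fork() == 0:
            try:
                child_cb(*args, **kwargs)
            finally:
                os._exit(0)

    def log_time(logfunc, msg, func, args=None):
        """Time func(*args) and report the duration through logfunc."""
        start = time.time()
        try:
            return func(*(args or ()))
        finally:
            logfunc("%s took %.3fs", msg, time.time() - start)

    def do_resize(cmd):
        pass   # placeholder for invoking the real resize command

    # broken: log_time(...) runs here, in the parent; fork_cb only gets its result
    #   fork_cb(log_time(logging.info, "Resizing", do_resize, (["resize2fs"],)))
    # fixed: the callable and its kwargs are deferred into the forked child
    fork_cb(log_time, logfunc=logging.info, msg="backgrounded Resizing",
            func=do_resize, args=(["resize2fs"],))
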
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 41d0dc29..fd6bd8a1 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -106,7 +106,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
         initer.read_cfg()
         initer.initialize()
         initer.fetch()
-        _iid = initer.instancify()
+        initer.instancify()
         initer.update()
         initer.cloudify().run('consume_data',
                               initer.consume_data,
@@ -145,7 +145,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
         initer.read_cfg()
         initer.initialize()
         initer.fetch()
-        _iid = initer.instancify()
+        initer.instancify()
         initer.update()
         initer.cloudify().run('consume_data',
                               initer.consume_data,
@@ -221,7 +221,7 @@ run:
         initer.read_cfg()
         initer.initialize()
         initer.fetch()
-        _iid = initer.instancify()
+        initer.instancify()
         initer.update()
         initer.cloudify().run('consume_data',
                               initer.consume_data,
@@ -256,7 +256,7 @@ vendor_data:
         initer.read_cfg()
         initer.initialize()
         initer.fetch()
-        _iid = initer.instancify()
+        initer.instancify()
         initer.update()
         initer.cloudify().run('consume_data',
                               initer.consume_data,
@@ -264,7 +264,6 @@ vendor_data:
                               freq=PER_INSTANCE)
         mods = stages.Modules(initer)
         (_which_ran, _failures) = mods.run_section('cloud_init_modules')
-        _cfg = mods.cfg
         vendor_script = initer.paths.get_ipath_cur('vendor_scripts')
         vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script)
         self.assertTrue(os.path.exists(vendor_script_fns))
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 8bcc026c..e9235951 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -57,7 +57,6 @@ class TestNoCloudDataSource(MockerTestCase):
             pass
 
         def my_find_devs_with(*args, **kwargs):
-            _f = (args, kwargs)
             raise PsuedoException
 
         self.apply_patches([(util, 'find_devs_with', my_find_devs_with)])
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 530fba20..7b4e651a 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -19,6 +19,7 @@
 import copy
 import json
 import re
+import unittest
 
 from StringIO import StringIO
 
@@ -142,7 +143,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
     @hp.activate
     def test_successful(self):
         _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
-        f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
+        f = ds.read_metadata_service(BASE_URL)
         self.assertEquals(VENDOR_DATA, f.get('vendordata'))
         self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
         self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
@@ -164,7 +165,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
     @hp.activate
     def test_no_ec2(self):
         _register_uris(self.VERSION, {}, {}, OS_FILES)
-        f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
+        f = ds.read_metadata_service(BASE_URL)
         self.assertEquals(VENDOR_DATA, f.get('vendordata'))
         self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
         self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
@@ -180,7 +181,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
             os_files.pop(k, None)
         _register_uris(self.VERSION, {}, {}, os_files)
         self.assertRaises(openstack.NonReadable, ds.read_metadata_service,
-                          BASE_URL, version=self.VERSION)
+                          BASE_URL)
 
     @hp.activate
     def test_bad_uuid(self):
@@ -192,7 +193,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
             os_files[k] = json.dumps(os_meta)
         _register_uris(self.VERSION, {}, {}, os_files)
         self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
-                          BASE_URL, version=self.VERSION)
+                          BASE_URL)
 
     @hp.activate
     def test_userdata_empty(self):
@@ -201,7 +202,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
             if k.endswith('user_data'):
                 os_files.pop(k, None)
         _register_uris(self.VERSION, {}, {}, os_files)
-        f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
+        f = ds.read_metadata_service(BASE_URL)
         self.assertEquals(VENDOR_DATA, f.get('vendordata'))
         self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
         self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
@@ -214,7 +215,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
             if k.endswith('vendor_data.json'):
                 os_files.pop(k, None)
         _register_uris(self.VERSION, {}, {}, os_files)
-        f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
+        f = ds.read_metadata_service(BASE_URL)
         self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
         self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
         self.assertFalse(f.get('vendordata'))
@@ -227,7 +228,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
             os_files[k] = '{'  # some invalid json
         _register_uris(self.VERSION, {}, {}, os_files)
         self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
-                          BASE_URL, version=self.VERSION)
+                          BASE_URL)
 
     @hp.activate
     def test_metadata_invalid(self):
@@ -237,7 +238,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
             os_files[k] = '{'  # some invalid json
         _register_uris(self.VERSION, {}, {}, os_files)
         self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
-                          BASE_URL, version=self.VERSION)
+                          BASE_URL)
 
     @hp.activate
     def test_datasource(self):
@@ -256,7 +257,8 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
         self.assertEquals(EC2_META, ds_os.ec2_metadata)
         self.assertEquals(USER_DATA, ds_os.userdata_raw)
         self.assertEquals(2, len(ds_os.files))
-        self.assertEquals(VENDOR_DATA, ds_os.vendordata_raw)
+        self.assertEquals(VENDOR_DATA, ds_os.vendordata_pure)
+        self.assertEquals(ds_os.vendordata_raw, None)
 
     @hp.activate
     def test_bad_datasource_meta(self):
@@ -314,3 +316,34 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
         found = ds_os.get_data()
         self.assertFalse(found)
         self.assertIsNone(ds_os.version)
+
+
+class TestVendorDataLoading(unittest.TestCase):
+    def cvj(self, data):
+        return openstack.convert_vendordata_json(data)
+
+    def test_vd_load_none(self):
+        # non-existant vendor-data should return none
+        self.assertIsNone(self.cvj(None))
+
+    def test_vd_load_string(self):
+        self.assertEqual(self.cvj("foobar"), "foobar")
+
+    def test_vd_load_list(self):
+        data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])]
+        self.assertEqual(self.cvj(data), data)
+
+    def test_vd_load_dict_no_ci(self):
+        self.assertEqual(self.cvj({'foo': 'bar'}), None)
+
+    def test_vd_load_dict_ci_dict(self):
+        self.assertRaises(ValueError, self.cvj,
+                          {'foo': 'bar', 'cloud-init': {'x': 1}})
+
+    def test_vd_load_dict_ci_string(self):
+        data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'}
+        self.assertEqual(self.cvj(data), data['cloud-init'])
+
+    def test_vd_load_dict_ci_list(self):
+        data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']}
+        self.assertEqual(self.cvj(data), data['cloud-init'])
diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py
index 32b41925..977adb34 100644
--- a/tests/unittests/test_runs/test_merge_run.py
+++ b/tests/unittests/test_runs/test_merge_run.py
@@ -33,7 +33,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase):
         initer.initialize()
         initer.fetch()
         initer.datasource.userdata_raw = ud
-        _iid = initer.instancify()
+        initer.instancify()
         initer.update()
         initer.cloudify().run('consume_data',
                               initer.consume_data,
diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd
index 23bdf487..65d783f7 100755
--- a/tools/build-on-freebsd
+++ b/tools/build-on-freebsd
@@ -17,6 +17,7 @@ pkgs="
    py27-prettytable py27-requests py27-six python
    py27-cheetah
+   py27-jsonpointer
    py27-jsonpatch
 "
 [ -f "$depschecked" ] || pkg install ${pkgs} || fail "install packages"
 touch $depschecked
diff --git a/upstart/cloud-init-blocknet.conf b/upstart/cloud-init-blocknet.conf
new file mode 100644
index 00000000..be09e7d8
--- /dev/null
+++ b/upstart/cloud-init-blocknet.conf
@@ -0,0 +1,83 @@
+# cloud-init-blocknet
+# the purpose of this job is
+#  * to block networking from coming up until cloud-init-nonet has run
+#  * timeout if they all do not come up in a reasonable amount of time
+description "block networking until cloud-init-local"
+start on (starting network-interface
+          or starting network-manager
+          or starting networking)
+stop on stopped cloud-init-local
+
+instance $JOB${INTERFACE:+/}${INTERFACE:-}
+export INTERFACE
+task
+
+script
+    set +e  # you cannot trap TERM reliably with 'set -e'
+    SLEEP_CHILD=""
+
+    static_network_up() {
+        local emitted="/run/network/static-network-up-emitted"
+        # /run/network/static-network-up-emitted is written by
+        # upstart (via /etc/network/if-up.d/upstart). its presense would
+        # indicate that static-network-up has already fired.
+        [ -e "$emitted" -o -e "/var/$emitted" ]
+    }
+    msg() {
+        local uptime="" idle="" msg=""
+        if [ -r /proc/uptime ]; then
+            read uptime idle < /proc/uptime
+        fi
+        msg="${UPSTART_INSTANCE}${uptime:+[${uptime}]}: $*"
+        echo "$msg"
+    }
+
+    handle_sigterm() {
+        # if we received sigterm and static networking is up then it probably
+        # came from upstart as a result of 'stop on static-network-up'
+        msg "got sigterm"
+        if [ -n "$SLEEP_CHILD" ]; then
+            if ! kill $SLEEP_CHILD 2>/dev/null; then
+                [ ! -d "/proc/$SLEEP_CHILD" ] ||
+                    msg "hm.. failed to kill sleep pid $SLEEP_CHILD"
+            fi
+        fi
+        msg "stopped"
+        exit 0
+    }
+
+    dowait() {
+        msg "blocking $1 seconds"
+        # all this 'exec -a' does is get me a nicely named process in 'ps'
+        # ie, 'sleep-block-network-interface.eth1'
+        if [ -x /bin/bash ]; then
+            bash -c 'exec -a sleep-block-$1 sleep $2' -- "$UPSTART_INSTANCE" "$1" &
+        else
+            sleep "$1" &
+        fi
+        SLEEP_CHILD=$!
+        msg "sleepchild=$SLEEP_CHILD"
+        wait $SLEEP_CHILD
+        SLEEP_CHILD=""
+    }
+
+    trap handle_sigterm TERM
+
+    if [ -n "$INTERFACE" -a "${INTERFACE#lo}" != "${INTERFACE}" ]; then
+        msg "ignoring interface ${INTERFACE}";
+        exit 0;
+    fi
+
+    # static_network_up already occurred
+    static_network_up && { msg "static_network_up already"; exit 0; }
+
+    # local-finished cloud-init-local success or failure
+    lfin="/run/cloud-init/local-finished"
+    disable="/etc/cloud/no-blocknet"
+    [ -f "$lfin" ] && { msg "$lfin found"; exit 0; }
+    [ -f "$disable" ] && { msg "$disable found"; exit 0; }
+
+    dowait 120
+    msg "gave up waiting for $lfin"
+    exit 1
+end script
diff --git a/upstart/cloud-init-local.conf b/upstart/cloud-init-local.conf
index 061fe406..5def043d 100644
--- a/upstart/cloud-init-local.conf
+++ b/upstart/cloud-init-local.conf
@@ -1,9 +1,16 @@
 # cloud-init - the initial cloud-init job
 #  crawls metadata service, emits cloud-config
-start on mounted MOUNTPOINT=/
+start on mounted MOUNTPOINT=/ and mounted MOUNTPOINT=/run
 
 task
 
 console output
 
-exec /usr/bin/cloud-init init --local
+script
+    lfin=/run/cloud-init/local-finished
+    ret=0
+    cloud-init init --local || ret=$?
+    [ -r /proc/uptime ] && read up idle < /proc/uptime || up="N/A"
+    echo "$ret up $up" > "$lfin"
+    exit $ret
+end script
diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf
index e8ebee96..6abf6573 100644
--- a/upstart/cloud-init-nonet.conf
+++ b/upstart/cloud-init-nonet.conf
@@ -46,7 +46,7 @@ script
     }
 
     dowait() {
-        msg "waiting $1 seconds for network device"
+        [ $# -eq 2 ] || msg "waiting $1 seconds for network device"
         sleep "$1" &
         SLEEP_CHILD=$!
         wait $SLEEP_CHILD
@@ -58,12 +58,9 @@ script
     # static_network_up already occurred
     static_network_up && exit 0
 
-    # obj.pkl comes from cloud-init-local (or previous boot and
-    # manual_cache_clean)
-    [ -f /var/lib/cloud/instance/obj.pkl ] && exit 0
-
+    dowait 5 silent
     dowait 10
-    dowait 120
+    dowait 115
     msg "gave up waiting for a network device."
     : > /var/lib/cloud/data/no-net
 end script