From 6d40d5b2e3da9577d4a2686444d47125e62817fe Mon Sep 17 00:00:00 2001 From: harlowja Date: Tue, 19 Feb 2013 22:51:49 -0800 Subject: Continue working on integrating requests. --- cloudinit/util.py | 71 ++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 62 insertions(+), 9 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/util.py b/cloudinit/util.py index f5a7ac12..da2cdeda 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -51,7 +51,7 @@ import yaml from cloudinit import importer from cloudinit import log as logging from cloudinit import safeyaml -from cloudinit import url_helper as uhelp +from cloudinit import url_helper from cloudinit.settings import (CFG_BUILTIN) @@ -69,6 +69,18 @@ FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters) CONTAINER_TESTS = ['running-in-container', 'lxc-is-container'] +class FileResponse(object): + def __init__(self, path, contents): + self.code = 200 + self.headers = {} + self.contents = contents + self.ok = True + self.url = path + + def __str__(self): + return self.contents + + class ProcessExecutionError(IOError): MESSAGE_TMPL = ('%(description)s\n' @@ -628,12 +640,53 @@ def read_optional_seed(fill, base="", ext="", timeout=5): raise -def read_file_or_url(url, timeout=5, retries=10, file_retries=0): +def fetch_ssl_details(paths=None): + ssl_details = {} + # Lookup in these locations for ssl key/cert files + ssl_cert_paths = [ + '/var/lib/cloud/data/ssl', + '/var/lib/cloud/instance/data/ssl', + ] + if paths: + ssl_cert_paths.extend([ + os.path.join(paths.get_ipath_cur('data'), 'ssl'), + os.path.join(paths.get_cpath('data'), 'ssl'), + ]) + ssl_cert_paths = uniq_merge(ssl_cert_paths) + ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)] + cert_file = None + for d in ssl_cert_paths: + if os.path.isfile(os.path.join(d, 'cert.pem')): + cert_file = os.path.join(d, 'cert.pem') + break + key_file = None + for d in ssl_cert_paths: + if os.path.isfile(os.path.join(d, 'key.pem')): + key_file = os.path.join(d, 'key.pem') + break + if cert_file and key_file: + ssl_details['cert_file'] = cert_file + ssl_details['key_file'] = key_file + elif cert_file: + ssl_details['cert_file'] = cert_file + return ssl_details + + +def read_file_or_url(url, timeout=5, retries=10, + headers=None, data=None, sec_between=1, paths=None): if url.startswith("/"): url = "file://%s" % url - if url.startswith("file://"): - retries = file_retries - return uhelp.readurl(url, timeout=timeout, retries=retries) + if url.lower().startswith("file://"): + file_path = url[len("file://"):] + return FileResponse(file_path, contents=load_file(file_path)) + else: + return url_helper.readurl(url, + timeout=timeout, + retries=retries, + headers=headers, + data=data, + sec_between=sec_between, + ssl_details=fetch_ssl_details(paths)) def load_yaml(blob, default=None, allowed=(dict,)): @@ -675,13 +728,13 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): md_resp = read_file_or_url(md_url, timeout, retries, file_retries) md = None - if md_resp.ok(): + if md_resp.ok: md_str = str(md_resp) md = load_yaml(md_str, default={}) ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries) ud = None - if ud_resp.ok(): + if ud_resp.ok: ud_str = str(ud_resp) ud = ud_str @@ -850,8 +903,8 @@ def get_cmdline_url(names=('cloud-config-url', 'url'), if not url: return (None, None, None) - resp = uhelp.readurl(url) - if resp.contents.startswith(starts) and resp.ok(): + resp = read_file_or_url(url) + if 
resp.contents.startswith(starts) and resp.ok: return (key, url, str(resp)) return (key, url, None) -- cgit v1.2.3 From 9dfb60d3144860334ab1ad1d72920d962139461f Mon Sep 17 00:00:00 2001 From: harlowja Date: Thu, 21 Feb 2013 22:39:30 -0800 Subject: More work on requests integration. --- cloudinit/config/cc_phone_home.py | 3 ++- cloudinit/ec2_utils.py | 17 +++++++++++------ cloudinit/sources/DataSourceEc2.py | 6 ++++-- cloudinit/sources/DataSourceMAAS.py | 15 +++++++++------ cloudinit/util.py | 4 ++-- 5 files changed, 28 insertions(+), 17 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 90834080..5a4332ef 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -112,7 +112,8 @@ def handle(name, cfg, cloud, log, args): url = templater.render_string(url, url_params) try: util.read_file_or_url(url, data=real_submit_keys, - retries=tries, sec_between=3) + retries=tries, sec_between=3, + ssl_details=util.fetch_ssl_details(cloud.paths)) except: util.logexc(log, ("Failed to post phone home data to" " %s in %s tries"), url, tries) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index c422eea9..026ee178 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -38,11 +38,16 @@ def combine_url(base, add_on): # See: http://bit.ly/TyoUQs +# +# Since boto metadata reader uses the old urllib which does not +# support ssl, we need to ahead and create our own reader which +# works the same as the boto one (for now). class MetadataMaterializer(object): - def __init__(self, blob, base_url): + def __init__(self, blob, base_url, ssl_details): self._blob = blob self._md = None self._base_url = base_url + self._ssl_details = ssl_details def _parse(self, blob): leaves = {} @@ -89,7 +94,7 @@ class MetadataMaterializer(object): return self._md def _fetch_url(self, url): - response = util.read_file_or_url(url) + response = util.read_file_or_url(url, ssl_details=self._ssl_details) return str(response) def _decode_leaf_blob(self, blob): @@ -134,19 +139,19 @@ def get_instance_userdata(url, version='latest', ssl_details=None): ud_url = combine_url(url, version) ud_url = combine_url(ud_url, 'user-data') try: - response = util.read_file_or_url(ud_url) + response = util.read_file_or_url(ud_url, ssl_details=ssl_details) return str(response) except Exception: util.logexc(LOG, "Failed fetching userdata from url %s", ud_url) return None -def get_instance_metadata(url, version='latest'): +def get_instance_metadata(url, version='latest', ssl_details=None): md_url = combine_url(url, version) md_url = combine_url(md_url, 'meta-data') try: - response = util.read_file_or_url(md_url) - materializer = MetadataMaterializer(str(response), md_url) + response = util.read_file_or_url(md_url, ssl_details=ssl_details) + materializer = MetadataMaterializer(str(response), md_url, ssl_details) return materializer.materialize() except Exception: util.logexc(LOG, "Failed fetching metadata from url %s", md_url) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 47f677d4..eee4e6bc 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -64,8 +64,10 @@ class DataSourceEc2(sources.DataSource): if not self.wait_for_metadata_service(): return False start_time = time.time() - self.userdata_raw = ec2_utils.get_instance_userdata(self.metadata_address, self.api_ver) - self.metadata = ec2_utils.get_instance_metadata(self.metadata_address, 
self.api_ver) + self.userdata_raw = ec2_utils.get_instance_userdata(self.metadata_address, self.api_ver, + ssl_details=util.fetch_ssl_details(self.paths)) + self.metadata = ec2_utils.get_instance_metadata(self.metadata_address, self.api_ver, + ssl_details=util.fetch_ssl_details(self.paths)) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) return True diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 2de31305..dc048943 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -81,7 +81,8 @@ class DataSourceMAAS(sources.DataSource): self.base_url = url (userdata, metadata) = read_maas_seed_url(self.base_url, - self.md_headers) + self.md_headers, + paths=self.paths) self.userdata_raw = userdata self.metadata = metadata return True @@ -141,7 +142,7 @@ class DataSourceMAAS(sources.DataSource): LOG.debug("Using metadata source: '%s'", url) else: LOG.critical("Giving up on md from %s after %i seconds", - urls, int(time.time() - starttime)) + urls, int(time.time() - starttime)) return bool(url) @@ -190,7 +191,7 @@ def read_maas_seed_dir(seed_d): def read_maas_seed_url(seed_url, header_cb=None, timeout=None, - version=MD_VERSION): + version=MD_VERSION, paths=None): """ Read the maas datasource at seed_url. - header_cb is a method that should return a headers dictionary for @@ -222,12 +223,13 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, else: headers = {} try: - resp = util.read_file_or_url(url, headers=headers, timeout=timeout) + resp = util.read_file_or_url(url, headers=headers, timeout=timeout, + ssl_details=util.fetch_ssl_details(paths)) if resp.ok: md[name] = str(resp) else: LOG.warn(("Fetching from %s resulted in" - " an invalid http code %s"), url, resp.status_code) + " an invalid http code %s"), url, resp.code) except url_helper.UrlError as e: if e.code != 404: raise @@ -372,7 +374,8 @@ if __name__ == "__main__": if args.subcmd == "check-seed": if args.url.startswith("http"): (userdata, metadata) = read_maas_seed_url(args.url, - header_cb=my_headers, version=args.apiver) + header_cb=my_headers, + version=args.apiver) else: (userdata, metadata) = read_maas_seed_url(args.url) print "=== userdata ===" diff --git a/cloudinit/util.py b/cloudinit/util.py index da2cdeda..307ed613 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -673,7 +673,7 @@ def fetch_ssl_details(paths=None): def read_file_or_url(url, timeout=5, retries=10, - headers=None, data=None, sec_between=1, paths=None): + headers=None, data=None, sec_between=1, ssl_details=None): if url.startswith("/"): url = "file://%s" % url if url.lower().startswith("file://"): @@ -686,7 +686,7 @@ def read_file_or_url(url, timeout=5, retries=10, headers=headers, data=data, sec_between=sec_between, - ssl_details=fetch_ssl_details(paths)) + ssl_details=ssl_details) def load_yaml(blob, default=None, allowed=(dict,)): -- cgit v1.2.3 From eacfc7ffbec3e6a0348ed484da895e2d2fc5ba10 Mon Sep 17 00:00:00 2001 From: harlowja Date: Sat, 23 Feb 2013 21:23:24 -0800 Subject: Get tests working and further adjustments. 
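
A minimal usage sketch of the helpers wired up in the commits above (illustrative only, not part of the patch series): fetch_ssl_details() scans the known /var/lib/cloud ssl directories for cert.pem/key.pem, and read_file_or_url() forwards the result to url_helper.readurl(). The metadata URL and the absence of a Paths object below are assumptions made for the example.

    from cloudinit import util

    # With no Paths object, only the fixed /var/lib/cloud/... ssl dirs are scanned.
    ssl_details = util.fetch_ssl_details(paths=None)

    # Handles file:// paths and http(s) URLs alike; both FileResponse and
    # UrlResponse stringify to their contents.
    resp = util.read_file_or_url("http://169.254.169.254/latest/meta-data/",
                                 timeout=5, retries=10,
                                 ssl_details=ssl_details)
    contents = str(resp)
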
--- cloudinit/sources/DataSourceMAAS.py | 9 +++++--- cloudinit/url_helper.py | 21 +++++++++-------- cloudinit/user_data.py | 8 ++++--- cloudinit/util.py | 34 ++++++++++++++++++++-------- tests/unittests/test__init__.py | 10 ++++---- tests/unittests/test_datasource/test_maas.py | 11 +++++---- 6 files changed, 60 insertions(+), 33 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 6e1133b2..0c526305 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -223,9 +223,12 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, else: headers = {} try: - resp = util.read_file_or_url(url, headers=headers, timeout=timeout, - ssl_details=util.fetch_ssl_details(paths)) - if resp.ok: + ssl_details = util.fetch_ssl_details(paths) + resp = util.read_file_or_url(url, + headers=headers, + timeout=timeout, + ssl_details=ssl_details) + if resp.ok(): md[name] = str(resp) else: LOG.warn(("Fetching from %s resulted in" diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 0839e63b..300e70c2 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -70,9 +70,14 @@ class UrlResponse(object): def url(self): return self._response.url - @property - def ok(self): - return self._response.ok + def ok(self, redirects_ok=False): + upper = 300 + if redirects_ok: + upper = 400 + if self.code >= 200 and self.code < upper: + return True + else: + return False @property def headers(self): @@ -158,11 +163,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, r = requests.request(**req_args) if check_status: r.raise_for_status() - contents = r.content - status = r.status_code - headers = r.headers LOG.debug("Read from %s (%s, %sb) after %s attempts", url, - status, len(contents), (i + 1)) + r.status_code, len(r.content), (i + 1)) # Doesn't seem like we can make it use a different # subclass for responses, so add our own backward-compat # attrs @@ -256,8 +258,9 @@ def wait_for_url(urls, max_wait=None, timeout=None, time_taken = int(time.time() - start_time) status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url, - time_taken, - max_wait, reason) + time_taken, + max_wait, + reason) status_cb(status_msg) if exception_cb: exception_cb(msg=status_msg, exception=e) diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index eaf448a7..df069ff8 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -59,6 +59,7 @@ EXAMINE_FOR_LAUNCH_INDEX = ["text/cloud-config"] class UserDataProcessor(object): def __init__(self, paths): self.paths = paths + self.ssl_details = util.fetch_ssl_details(paths) def process(self, blob): accumulating_msg = MIMEMultipart() @@ -172,10 +173,11 @@ class UserDataProcessor(object): if include_once_on and os.path.isfile(include_once_fn): content = util.load_file(include_once_fn) else: - resp = util.read_file_or_url(include_url) - if include_once_on and resp.ok: + resp = util.read_file_or_url(include_url, + ssl_details=self.ssl_details) + if include_once_on and resp.ok(): util.write_file(include_once_fn, str(resp), mode=0600) - if resp.ok: + if resp.ok(): content = str(resp) else: LOG.warn(("Fetching from %s resulted in" diff --git a/cloudinit/util.py b/cloudinit/util.py index 42b3ab01..dc3c5639 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -70,18 +70,31 @@ FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters) CONTAINER_TESTS = ['running-in-container', 'lxc-is-container'] -class 
FileResponse(object): - def __init__(self, path, contents): - self.code = 200 +# Made to have same accessors as UrlResponse so that the +# read_file_or_url can return this or that object and the +# 'user' of those objects will not need to know the difference. +class StringResponse(object): + def __init__(self, contents, code=200): + self.code = code self.headers = {} self.contents = contents - self.ok = True - self.url = path + self.url = None + + def ok(self, *args, **kwargs): + if self.code != 200: + return False + return True def __str__(self): return self.contents +class FileResponse(StringResponse): + def __init__(self, path, contents, code=200): + StringResponse.__init__(self, contents, code=code) + self.url = path + + class ProcessExecutionError(IOError): MESSAGE_TMPL = ('%(description)s\n' @@ -630,7 +643,7 @@ def read_optional_seed(fill, base="", ext="", timeout=5): fill['user-data'] = ud fill['meta-data'] = md return True - except OSError as e: + except IOError as e: if e.errno == errno.ENOENT: return False raise @@ -670,9 +683,12 @@ def fetch_ssl_details(paths=None): def read_file_or_url(url, timeout=5, retries=10, headers=None, data=None, sec_between=1, ssl_details=None): + url = url.lstrip() if url.startswith("/"): url = "file://%s" % url if url.lower().startswith("file://"): + if data: + LOG.warn("Unable to post data to file resource %s", url) file_path = url[len("file://"):] return FileResponse(file_path, contents=load_file(file_path)) else: @@ -724,13 +740,13 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): md_resp = read_file_or_url(md_url, timeout, retries, file_retries) md = None - if md_resp.ok: + if md_resp.ok(): md_str = str(md_resp) md = load_yaml(md_str, default={}) ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries) ud = None - if ud_resp.ok: + if ud_resp.ok(): ud_str = str(ud_resp) ud = ud_str @@ -900,7 +916,7 @@ def get_cmdline_url(names=('cloud-config-url', 'url'), return (None, None, None) resp = read_file_or_url(url) - if resp.contents.startswith(starts) and resp.ok: + if resp.contents.startswith(starts) and resp.ok(): return (key, url, str(resp)) return (key, url, None) diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index ac082076..d707afa9 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -191,8 +191,8 @@ class TestCmdlineUrl(MockerTestCase): mock_readurl = self.mocker.replace(url_helper.readurl, passthrough=False) - mock_readurl(url) - self.mocker.result(url_helper.UrlResponse(200, payload)) + mock_readurl(url, ARGS, KWARGS) + self.mocker.result(util.StringResponse(payload)) self.mocker.replay() self.assertEqual((key, url, None), @@ -207,8 +207,8 @@ class TestCmdlineUrl(MockerTestCase): mock_readurl = self.mocker.replace(url_helper.readurl, passthrough=False) - mock_readurl(url) - self.mocker.result(url_helper.UrlResponse(200, payload)) + mock_readurl(url, ARGS, KWARGS) + self.mocker.result(util.StringResponse(payload)) self.mocker.replay() self.assertEqual((key, url, payload), @@ -221,7 +221,7 @@ class TestCmdlineUrl(MockerTestCase): cmdline = "ro %s=%s bar=1" % (key, url) self.mocker.replace(url_helper.readurl, passthrough=False) - self.mocker.result(url_helper.UrlResponse(400)) + self.mocker.result(util.StringResponse("")) self.mocker.replay() self.assertEqual((None, None, None), diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index b56fea82..47f8caa4 100644 --- 
a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -3,12 +3,13 @@ import os from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper +from cloudinit import util from tests.unittests.helpers import populate_dir -from mocker import MockerTestCase +import mocker -class TestMAASDataSource(MockerTestCase): +class TestMAASDataSource(mocker.MockerTestCase): def setUp(self): super(TestMAASDataSource, self).setUp() @@ -115,9 +116,11 @@ class TestMAASDataSource(MockerTestCase): for key in valid_order: url = "%s/%s/%s" % (my_seed, my_ver, key) - mock_request(url, headers=my_headers, timeout=None) + mock_request(url, headers=my_headers, timeout=mocker.ANY, + data=mocker.ANY, sec_between=mocker.ANY, + ssl_details=mocker.ANY, retries=mocker.ANY) resp = valid.get(key) - self.mocker.result(url_helper.UrlResponse(200, resp)) + self.mocker.result(util.StringResponse(resp)) self.mocker.replay() (userdata, metadata) = DataSourceMAAS.read_maas_seed_url(my_seed, -- cgit v1.2.3 From 368d2ba20a1ea7a97bf7186493b17be429a031d4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 01:43:06 -0500 Subject: initial stab at growpart module LP: #1136936 --- cloudinit/config/cc_growpart.py | 98 +++++++++++++++++++++++++++++++++++++++++ cloudinit/config/cc_resizefs.py | 85 +---------------------------------- cloudinit/util.py | 83 ++++++++++++++++++++++++++++++++++ config/cloud.cfg | 1 + 4 files changed, 183 insertions(+), 84 deletions(-) create mode 100644 cloudinit/config/cc_growpart.py (limited to 'cloudinit/util.py') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py new file mode 100644 index 00000000..f958cd53 --- /dev/null +++ b/cloudinit/config/cc_growpart.py @@ -0,0 +1,98 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2011 Canonical Ltd. +# +# Author: Scott Moser +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os.path +import stat + +from cloudinit.settings import PER_ALWAYS +from cloudinit import util + +frequency = PER_ALWAYS + + +def device_part_info(devpath, log): + # convert an entry in /dev/ to parent disk and partition number + + # input of /dev/vdb or /dev/disk/by-label/foo + # rpath is hopefully a real-ish path in /dev (vda, sdb..) 
+ rpath = os.path.realpath(devpath) + + bname = os.path.basename(rpath) + syspath = "/sys/class/block/%s" % bname + + if not os.path.exists(syspath): + log.debug("%s had no syspath (%s)" % (devpath, syspath)) + return None + + ptpath = os.path.join(syspath, "partition") + if not os.path.exists(ptpath): + log.debug("%s not a partition" % devpath) + return None + + ptnum = util.load_file(ptpath).rstrip() + + # for a partition, real syspath is something like: + # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1 + rsyspath = os.path.realpath(syspath) + disksyspath = os.path.dirname(rsyspath) + + diskmajmin = util.load_file(os.path.join(disksyspath, "dev")).rstrip() + diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin) + + # diskdevpath has something like 253:0 + # and udev has put links in /dev/block/253:0 to the device name in /dev/ + return (diskdevpath, ptnum) + + +def handle(name, cfg, _cloud, log, args): + if len(args) != 0: + growroot = args[0] + else: + growroot = util.get_cfg_option_bool(cfg, "growroot", True) + + if not growroot: + log.debug("Skipping module named %s, growroot disabled", name) + return + + resize_what = "/" + result = util.get_mount_info(resize_what, log) + if not result: + log.warn("Could not determine filesystem type of %s" % resize_what) + return + + (devpth, _fs_type, mount_point) = result + + # Ensure the path is a block device. + if not stat.S_ISBLK(os.stat(devpth).st_mode): + log.debug("The %s device which was found for mount point %s for %s " + "is not a block device" % (devpth, mount_point, resize_what)) + return + + result = device_part_info(devpth, log) + if not result: + log.debug("%s did not look like a partition" % devpth) + + (disk, ptnum) = result + + try: + (out, _err) = util.subp(["growpart", disk, ptnum], rcs=[0, 1]) + except util.ProcessExecutionError as e: + log.warn("growpart failed: %s/%s" % (e.stdout, e.stderr)) + return + + log.debug("growpart said: %s" % out) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 44b27933..51dead2f 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -51,89 +51,6 @@ RESIZE_FS_PREFIXES_CMDS = [ NOBLOCK = "noblock" -def get_mount_info(path, log): - # Use /proc/$$/mountinfo to find the device where path is mounted. - # This is done because with a btrfs filesystem using os.stat(path) - # does not return the ID of the device. - # - # Here, / has a device of 18 (decimal). - # - # $ stat / - # File: '/' - # Size: 234 Blocks: 0 IO Block: 4096 directory - # Device: 12h/18d Inode: 256 Links: 1 - # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) - # Access: 2013-01-13 07:31:04.358011255 +0000 - # Modify: 2013-01-13 18:48:25.930011255 +0000 - # Change: 2013-01-13 18:48:25.930011255 +0000 - # Birth: - - # - # Find where / is mounted: - # - # $ mount | grep ' / ' - # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) - # - # And the device ID for /dev/vda1 is not 18: - # - # $ ls -l /dev/vda1 - # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 - # - # So use /proc/$$/mountinfo to find the device underlying the - # input path. 
- path_elements = [e for e in path.split('/') if e] - devpth = None - fs_type = None - match_mount_point = None - match_mount_point_elements = None - mountinfo_path = '/proc/%s/mountinfo' % os.getpid() - for line in util.load_file(mountinfo_path).splitlines(): - parts = line.split() - - mount_point = parts[4] - mount_point_elements = [e for e in mount_point.split('/') if e] - - # Ignore mounts deeper than the path in question. - if len(mount_point_elements) > len(path_elements): - continue - - # Ignore mounts where the common path is not the same. - l = min(len(mount_point_elements), len(path_elements)) - if mount_point_elements[0:l] != path_elements[0:l]: - continue - - # Ignore mount points higher than an already seen mount - # point. - if (match_mount_point_elements is not None and - len(match_mount_point_elements) > len(mount_point_elements)): - continue - - # Find the '-' which terminates a list of optional columns to - # find the filesystem type and the path to the device. See - # man 5 proc for the format of this file. - try: - i = parts.index('-') - except ValueError: - log.debug("Did not find column named '-' in %s", - mountinfo_path) - return None - - # Get the path to the device. - try: - fs_type = parts[i + 1] - devpth = parts[i + 2] - except IndexError: - log.debug("Too few columns in %s after '-' column", mountinfo_path) - return None - - match_mount_point = mount_point - match_mount_point_elements = mount_point_elements - - if devpth and fs_type and match_mount_point: - return (devpth, fs_type, match_mount_point) - else: - return None - - def handle(name, cfg, _cloud, log, args): if len(args) != 0: resize_root = args[0] @@ -150,7 +67,7 @@ def handle(name, cfg, _cloud, log, args): # TODO(harlowja): allow what is to be resized to be configurable?? resize_what = "/" - result = get_mount_info(resize_what, log) + result = util.get_mount_info(resize_what, log) if not result: log.warn("Could not determine filesystem type of %s", resize_what) return diff --git a/cloudinit/util.py b/cloudinit/util.py index ffe844b2..1e9ca5d9 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1586,3 +1586,86 @@ def expand_package_list(version_fmt, pkgs): raise RuntimeError("Invalid package type.") return pkglist + + +def get_mount_info(path, log): + # Use /proc/$$/mountinfo to find the device where path is mounted. + # This is done because with a btrfs filesystem using os.stat(path) + # does not return the ID of the device. + # + # Here, / has a device of 18 (decimal). + # + # $ stat / + # File: '/' + # Size: 234 Blocks: 0 IO Block: 4096 directory + # Device: 12h/18d Inode: 256 Links: 1 + # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) + # Access: 2013-01-13 07:31:04.358011255 +0000 + # Modify: 2013-01-13 18:48:25.930011255 +0000 + # Change: 2013-01-13 18:48:25.930011255 +0000 + # Birth: - + # + # Find where / is mounted: + # + # $ mount | grep ' / ' + # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) + # + # And the device ID for /dev/vda1 is not 18: + # + # $ ls -l /dev/vda1 + # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 + # + # So use /proc/$$/mountinfo to find the device underlying the + # input path. 
+ path_elements = [e for e in path.split('/') if e] + devpth = None + fs_type = None + match_mount_point = None + match_mount_point_elements = None + mountinfo_path = '/proc/%s/mountinfo' % os.getpid() + for line in load_file(mountinfo_path).splitlines(): + parts = line.split() + + mount_point = parts[4] + mount_point_elements = [e for e in mount_point.split('/') if e] + + # Ignore mounts deeper than the path in question. + if len(mount_point_elements) > len(path_elements): + continue + + # Ignore mounts where the common path is not the same. + l = min(len(mount_point_elements), len(path_elements)) + if mount_point_elements[0:l] != path_elements[0:l]: + continue + + # Ignore mount points higher than an already seen mount + # point. + if (match_mount_point_elements is not None and + len(match_mount_point_elements) > len(mount_point_elements)): + continue + + # Find the '-' which terminates a list of optional columns to + # find the filesystem type and the path to the device. See + # man 5 proc for the format of this file. + try: + i = parts.index('-') + except ValueError: + log.debug("Did not find column named '-' in %s", + mountinfo_path) + return None + + # Get the path to the device. + try: + fs_type = parts[i + 1] + devpth = parts[i + 2] + except IndexError: + log.debug("Too few columns in %s after '-' column", mountinfo_path) + return None + + match_mount_point = mount_point + match_mount_point_elements = mount_point_elements + + if devpth and fs_type and match_mount_point: + return (devpth, fs_type, match_mount_point) + else: + return None diff --git a/config/cloud.cfg b/config/cloud.cfg index a8c74486..b61b8a7d 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -26,6 +26,7 @@ cloud_init_modules: - migrator - bootcmd - write-files + - growpart - resizefs - set_hostname - update_hostname -- cgit v1.2.3 From 1d015f6ec3284287ad1383d0e2d9a264128f23eb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sun, 3 Mar 2013 20:56:32 -0500 Subject: add default log value to get_mount_info --- cloudinit/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/util.py b/cloudinit/util.py index 1e9ca5d9..d0a6f81c 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1588,7 +1588,7 @@ def expand_package_list(version_fmt, pkgs): return pkglist -def get_mount_info(path, log): +def get_mount_info(path, log=LOG): # Use /proc/$$/mountinfo to find the device where path is mounted. # This is done because with a btrfs filesystem using os.stat(path) # does not return the ID of the device. -- cgit v1.2.3 From fc6aa5aa54ee35ff0a3eff823bae0d3cf9b34bc1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 6 Mar 2013 19:24:05 -0800 Subject: Continue working on merging code. 
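
A short sketch of how the new growpart path consumes util.get_mount_info() (illustrative only, not part of the patch series; the device and filesystem in the comment are hypothetical). cc_growpart.handle() then maps the returned device to its parent disk and partition number via device_part_info() before invoking the growpart tool.

    from cloudinit import util

    # Parses /proc/$$/mountinfo rather than relying on os.stat(), so it also
    # resolves the underlying device for btrfs subvolume mounts.
    result = util.get_mount_info("/")
    if result:
        (devpth, fs_type, mount_point) = result
        # e.g. ('/dev/vda1', 'btrfs', '/') on a typical virtio guest
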
--- cloudinit/config/cc_landscape.py | 3 ++- cloudinit/config/cc_mounts.py | 3 ++- cloudinit/distros/__init__.py | 15 +++++++------ cloudinit/handlers/__init__.py | 3 ++- cloudinit/handlers/cloud_config.py | 15 ++++++------- cloudinit/helpers.py | 3 ++- cloudinit/mergers/__init__.py | 13 +++++++++--- cloudinit/sources/DataSourceAltCloud.py | 5 +++-- cloudinit/sources/DataSourceCloudStack.py | 3 --- cloudinit/sources/DataSourceConfigDrive.py | 4 +++- cloudinit/sources/DataSourceEc2.py | 3 --- cloudinit/sources/DataSourceMAAS.py | 3 ++- cloudinit/sources/DataSourceNoCloud.py | 5 ++--- cloudinit/sources/DataSourceNone.py | 3 --- cloudinit/sources/DataSourceOVF.py | 3 ++- cloudinit/sources/__init__.py | 10 ++++++--- cloudinit/stages.py | 9 ++++---- cloudinit/type_utils.py | 34 ++++++++++++++++++++++++++++++ cloudinit/util.py | 33 ++++++++++------------------- tests/unittests/test_userdata.py | 4 +++- 20 files changed, 104 insertions(+), 70 deletions(-) create mode 100644 cloudinit/type_utils.py (limited to 'cloudinit/util.py') diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 02610dd0..6734efee 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -24,6 +24,7 @@ from StringIO import StringIO from configobj import ConfigObj +from cloudinit import type_utils from cloudinit import util from cloudinit.settings import PER_INSTANCE @@ -58,7 +59,7 @@ def handle(_name, cfg, cloud, log, _args): if not isinstance(ls_cloudcfg, (dict)): raise RuntimeError(("'landscape' key existed in config," " but not a dictionary type," - " is a %s instead"), util.obj_name(ls_cloudcfg)) + " is a %s instead"), type_utils.obj_name(ls_cloudcfg)) if not ls_cloudcfg: return diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index cb772c86..6ebe563d 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -22,6 +22,7 @@ from string import whitespace # pylint: disable=W0402 import re +from cloudinit import type_utils from cloudinit import util # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1 @@ -60,7 +61,7 @@ def handle(_name, cfg, cloud, log, _args): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): log.warn("Mount option %s not a list, got a %s instead", - (i + 1), util.obj_name(cfgmnt[i])) + (i + 1), type_utils.obj_name(cfgmnt[i])) continue startname = str(cfgmnt[i][0]) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 6a684b89..eeea6af1 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -31,6 +31,7 @@ import re from cloudinit import importer from cloudinit import log as logging from cloudinit import ssh_util +from cloudinit import type_utils from cloudinit import util from cloudinit.distros.parsers import hosts @@ -427,7 +428,7 @@ class Distro(object): lines.append("%s %s" % (user, rules)) else: msg = "Can not create sudoers rule addition with type %r" - raise TypeError(msg % (util.obj_name(rules))) + raise TypeError(msg % (type_utils.obj_name(rules))) content = "\n".join(lines) content += "\n" # trailing newline @@ -550,7 +551,7 @@ def _normalize_groups(grp_cfg): c_grp_cfg[k] = [v] else: raise TypeError("Bad group member type %s" % - util.obj_name(v)) + type_utils.obj_name(v)) else: if isinstance(v, (list)): c_grp_cfg[k].extend(v) @@ -558,13 +559,13 @@ def _normalize_groups(grp_cfg): c_grp_cfg[k].append(v) else: raise TypeError("Bad group member type %s" % - util.obj_name(v)) + 
type_utils.obj_name(v)) elif isinstance(i, (str, basestring)): if i not in c_grp_cfg: c_grp_cfg[i] = [] else: raise TypeError("Unknown group name type %s" % - util.obj_name(i)) + type_utils.obj_name(i)) grp_cfg = c_grp_cfg groups = {} if isinstance(grp_cfg, (dict)): @@ -573,7 +574,7 @@ def _normalize_groups(grp_cfg): else: raise TypeError(("Group config must be list, dict " " or string types only and not %s") % - util.obj_name(grp_cfg)) + type_utils.obj_name(grp_cfg)) return groups @@ -604,7 +605,7 @@ def _normalize_users(u_cfg, def_user_cfg=None): ad_ucfg.append(v) else: raise TypeError(("Unmappable user value type %s" - " for key %s") % (util.obj_name(v), k)) + " for key %s") % (type_utils.obj_name(v), k)) u_cfg = ad_ucfg elif isinstance(u_cfg, (str, basestring)): u_cfg = util.uniq_merge_sorted(u_cfg) @@ -629,7 +630,7 @@ def _normalize_users(u_cfg, def_user_cfg=None): else: raise TypeError(("User config must be dictionary/list " " or string types only and not %s") % - util.obj_name(user_config)) + type_utils.obj_name(user_config)) # Ensure user options are in the right python friendly format if users: diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 63fdb948..924463ce 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -27,6 +27,7 @@ from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES) from cloudinit import importer from cloudinit import log as logging +from cloudinit import type_utils from cloudinit import util LOG = logging.getLogger(__name__) @@ -76,7 +77,7 @@ class Handler(object): self.frequency = frequency def __repr__(self): - return "%s: [%s]" % (util.obj_name(self), self.list_types()) + return "%s: [%s]" % (type_utils.obj_name(self), self.list_types()) @abc.abstractmethod def list_types(self): diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index d458dee2..5f519f78 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -29,8 +29,8 @@ from cloudinit.settings import (PER_ALWAYS) LOG = logging.getLogger(__name__) -DEF_MERGE_TYPE = "list(extend)+dict()+str(append)" MERGE_HEADER = 'Merge-Type' +DEF_MERGERS = mergers.default_mergers() class CloudConfigPartHandler(handlers.Handler): @@ -39,9 +39,7 @@ class CloudConfigPartHandler(handlers.Handler): self.cloud_buf = None self.cloud_fn = paths.get_ipath("cloud_config") self.file_names = [] - self.mergers = [ - mergers.string_extract_mergers(DEF_MERGE_TYPE), - ] + self.mergers = [DEF_MERGERS] def list_types(self): return [ @@ -59,6 +57,7 @@ class CloudConfigPartHandler(handlers.Handler): file_lines.append("# %s" % (fn)) file_lines.append("") if self.cloud_buf is not None: + # Something was actually gathered.... lines = [ "#cloud-config", '', @@ -86,7 +85,7 @@ class CloudConfigPartHandler(handlers.Handler): all_mergers.extend(mergers_yaml) all_mergers.extend(mergers_header) if not all_mergers: - all_mergers = mergers.string_extract_mergers(DEF_MERGE_TYPE) + all_mergers = DEF_MERGERS return all_mergers def _merge_part(self, payload, headers): @@ -94,7 +93,7 @@ class CloudConfigPartHandler(handlers.Handler): # Use the merger list from the last call, since it is the one # that will be defining how to merge with the next payload. 
curr_mergers = list(self.mergers[-1]) - LOG.debug("Merging with %s", curr_mergers) + LOG.debug("Merging by applying %s", curr_mergers) self.mergers.append(next_mergers) merger = mergers.construct(curr_mergers) if self.cloud_buf is None: @@ -106,9 +105,7 @@ class CloudConfigPartHandler(handlers.Handler): def _reset(self): self.file_names = [] self.cloud_buf = None - self.mergers = [ - mergers.string_extract_mergers(DEF_MERGE_TYPE), - ] + self.mergers = [DEF_MERGERS] def handle_part(self, _data, ctype, filename, payload, _freq, headers): if ctype == handlers.CONTENT_START: diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 2077401c..a4e6fb03 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -32,6 +32,7 @@ from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, CFG_ENV_NAME) from cloudinit import log as logging +from cloudinit import type_utils from cloudinit import util LOG = logging.getLogger(__name__) @@ -68,7 +69,7 @@ class FileLock(object): self.fn = fn def __str__(self): - return "<%s using file %r>" % (util.obj_name(self), self.fn) + return "<%s using file %r>" % (type_utils.obj_name(self), self.fn) def canon_sem_name(name): diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 4a112165..453426af 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -20,11 +20,12 @@ import re from cloudinit import importer from cloudinit import log as logging -from cloudinit import util +from cloudinit import type_utils NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$") LOG = logging.getLogger(__name__) +DEF_MERGE_TYPE = "list(extend)+dict()+str(append)" class UnknownMerger(object): @@ -42,7 +43,7 @@ class UnknownMerger(object): # If not found the merge will be given to a '_handle_unknown' # function which can decide what to do wit the 2 values. 
def merge(self, source, merge_with): - type_name = util.obj_name(source) + type_name = type_utils.obj_name(source) type_name = type_name.lower() method_name = "_on_%s" % (type_name) meth = None @@ -127,6 +128,10 @@ def string_extract_mergers(merge_how): return parsed_mergers +def default_mergers(): + return tuple(string_extract_mergers(DEF_MERGE_TYPE)) + + def construct(parsed_mergers): mergers_to_be = [] for (m_name, m_ops) in parsed_mergers: @@ -145,4 +150,6 @@ def construct(parsed_mergers): root = LookupMerger(mergers) for (attr, opts) in mergers_to_be: mergers.append(attr(root, opts)) - return root \ No newline at end of file + return root + + diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 9812bdcb..64548d43 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -30,6 +30,7 @@ import os.path from cloudinit import log as logging from cloudinit import sources from cloudinit import util + from cloudinit.util import ProcessExecutionError LOG = logging.getLogger(__name__) @@ -91,8 +92,8 @@ class DataSourceAltCloud(sources.DataSource): self.supported_seed_starts = ("/", "file://") def __str__(self): - mstr = "%s [seed=%s]" % (util.obj_name(self), self.seed) - return mstr + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) def get_cloud_type(self): ''' diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 076dba5a..c0e1a23c 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -59,9 +59,6 @@ class DataSourceCloudStack(sources.DataSource): return gw return None - def __str__(self): - return util.obj_name(self) - def _get_url_settings(self): mcfg = self.ds_cfg if not mcfg: diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index c7826851..46abd772 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -51,7 +51,9 @@ class DataSourceConfigDrive(sources.DataSource): self.ec2_metadata = None def __str__(self): - mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode, + root = sources.DataSource.__str__(self) + mstr = "%s [%s,ver=%s]" % (root, + self.dsmode, self.version) mstr += "[source=%s]" % (self.source) return mstr diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 2db53446..f010e640 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -49,9 +49,6 @@ class DataSourceEc2(sources.DataSource): self.seed_dir = os.path.join(paths.seed_dir, "ec2") self.api_ver = DEF_MD_VERSION - def __str__(self): - return util.obj_name(self) - def get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index b55d8a21..612d8ffa 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -50,7 +50,8 @@ class DataSourceMAAS(sources.DataSource): self.oauth_clockskew = None def __str__(self): - return "%s [%s]" % (util.obj_name(self), self.base_url) + root = sources.DataSource.__str__(self) + return "%s [%s]" % (root, self.base_url) def get_data(self): mcfg = self.ds_cfg diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index bed500a2..9a770d38 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ 
b/cloudinit/sources/DataSourceNoCloud.py @@ -40,9 +40,8 @@ class DataSourceNoCloud(sources.DataSource): self.supported_seed_starts = ("/", "file://") def __str__(self): - mstr = "%s [seed=%s][dsmode=%s]" % (util.obj_name(self), - self.seed, self.dsmode) - return mstr + root = sources.DataSource.__str__(self) + return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) def get_data(self): defaults = { diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index c2125bee..e2175e1f 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -41,9 +41,6 @@ class DataSourceNone(sources.DataSource): def get_instance_id(self): return 'iid-datasource-none' - def __str__(self): - return util.obj_name(self) - @property def is_disconnected(self): return True diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index e90150c6..ae139074 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -43,7 +43,8 @@ class DataSourceOVF(sources.DataSource): self.supported_seed_starts = ("/", "file://") def __str__(self): - return "%s [seed=%s]" % (util.obj_name(self), self.seed) + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) def get_data(self): found = [] diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 96baff90..d8fbacdd 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -25,6 +25,7 @@ import os from cloudinit import importer from cloudinit import log as logging +from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util @@ -52,7 +53,7 @@ class DataSource(object): self.userdata = None self.metadata = None self.userdata_raw = None - name = util.obj_name(self) + name = type_utils.obj_name(self) if name.startswith(DS_PREFIX): name = name[len(DS_PREFIX):] self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, @@ -62,6 +63,9 @@ class DataSource(object): else: self.ud_proc = ud_proc + def __str__(self): + return type_utils.obj_name(self) + def get_userdata(self, apply_filter=False): if self.userdata is None: self.userdata = self.ud_proc.process(self.get_userdata_raw()) @@ -214,7 +218,7 @@ def normalize_pubkey_data(pubkey_data): def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list): ds_list = list_sources(cfg_list, ds_deps, pkg_list) - ds_names = [util.obj_name(f) for f in ds_list] + ds_names = [type_utils.obj_name(f) for f in ds_list] LOG.debug("Searching for data source in: %s", ds_names) for cls in ds_list: @@ -222,7 +226,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list): LOG.debug("Seeing if we can get any data from %s", cls) s = cls(sys_cfg, distro, paths) if s.get_data(): - return (s, util.obj_name(cls)) + return (s, type_utils.obj_name(cls)) except Exception: util.logexc(LOG, "Getting data from %s failed", cls) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 94a267df..531e7997 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -43,6 +43,7 @@ from cloudinit import helpers from cloudinit import importer from cloudinit import log as logging from cloudinit import sources +from cloudinit import type_utils from cloudinit import util LOG = logging.getLogger(__name__) @@ -220,7 +221,7 @@ class Init(object): # Any config provided??? 
pkg_list = self.cfg.get('datasource_pkg_list') or [] # Add the defaults at the end - for n in ['', util.obj_name(sources)]: + for n in ['', type_utils.obj_name(sources)]: if n not in pkg_list: pkg_list.append(n) cfg_list = self.cfg.get('datasource_list') or [] @@ -280,7 +281,7 @@ class Init(object): dp = self.paths.get_cpath('data') # Write what the datasource was and is.. - ds = "%s: %s" % (util.obj_name(self.datasource), self.datasource) + ds = "%s: %s" % (type_utils.obj_name(self.datasource), self.datasource) previous_ds = None ds_fn = os.path.join(idir, 'datasource') try: @@ -497,7 +498,7 @@ class Modules(object): else: raise TypeError(("Failed to read '%s' item in config," " unknown type %s") % - (item, util.obj_name(item))) + (item, type_utils.obj_name(item))) return module_list def _fixup_modules(self, raw_mods): @@ -515,7 +516,7 @@ class Modules(object): # Reset it so when ran it will get set to a known value freq = None mod_locs = importer.find_module(mod_name, - ['', util.obj_name(config)], + ['', type_utils.obj_name(config)], ['handle']) if not mod_locs: LOG.warn("Could not find module named %s", mod_name) diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py new file mode 100644 index 00000000..2decbfc5 --- /dev/null +++ b/cloudinit/type_utils.py @@ -0,0 +1,34 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Scott Moser +# Author: Juerg Haefliger +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +# pylint: disable=C0302 + +import types + + +def obj_name(obj): + if isinstance(obj, (types.TypeType, + types.ModuleType, + types.FunctionType, + types.LambdaType)): + return str(obj.__name__) + return obj_name(obj.__class__) diff --git a/cloudinit/util.py b/cloudinit/util.py index ab918433..73bf6304 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -43,14 +43,15 @@ import subprocess import sys import tempfile import time -import types import urlparse import yaml from cloudinit import importer from cloudinit import log as logging +from cloudinit import mergers from cloudinit import safeyaml +from cloudinit import type_utils from cloudinit import url_helper as uhelp from cloudinit import version @@ -194,11 +195,12 @@ def fork_cb(child_cb, *args): os._exit(0) # pylint: disable=W0212 except: logexc(LOG, ("Failed forking and" - " calling callback %s"), obj_name(child_cb)) + " calling callback %s"), + type_utils.obj_name(child_cb)) os._exit(1) # pylint: disable=W0212 else: LOG.debug("Forked child %s who will run callback %s", - fid, obj_name(child_cb)) + fid, type_utils.obj_name(child_cb)) def is_true(val, addons=None): @@ -513,15 +515,6 @@ def make_url(scheme, host, port=None, return urlparse.urlunparse(pieces) -def obj_name(obj): - if isinstance(obj, (types.TypeType, - types.ModuleType, - types.FunctionType, - types.LambdaType)): - return str(obj.__name__) - return obj_name(obj.__class__) - - def mergemanydict(srcs, reverse=False): if reverse: srcs = reversed(srcs) @@ -538,13 +531,9 @@ def mergedict(src, cand): If C{src} has a key C{cand} will not override. Nested dictionaries are merged recursively. """ - if isinstance(src, dict) and isinstance(cand, dict): - for (k, v) in cand.iteritems(): - if k not in src: - src[k] = v - else: - src[k] = mergedict(src[k], v) - return src + raw_mergers = mergers.default_mergers() + merger = mergers.construct(raw_mergers) + return merger.merge(src, cand) @contextlib.contextmanager @@ -645,7 +634,7 @@ def load_yaml(blob, default=None, allowed=(dict,)): # Yes this will just be caught, but thats ok for now... 
raise TypeError(("Yaml load allows %s root types," " but got %s instead") % - (allowed, obj_name(converted))) + (allowed, type_utils.obj_name(converted))) loaded = converted except (yaml.YAMLError, TypeError, ValueError): if len(blob) == 0: @@ -714,7 +703,7 @@ def read_conf_with_confd(cfgfile): if not isinstance(confd, (str, basestring)): raise TypeError(("Config file %s contains 'conf_d' " "with non-string type %s") % - (cfgfile, obj_name(confd))) + (cfgfile, type_utils.obj_name(confd))) else: confd = str(confd).strip() elif os.path.isdir("%s.d" % cfgfile): @@ -1472,7 +1461,7 @@ def shellify(cmdlist, add_header=True): else: raise RuntimeError(("Unable to shellify type %s" " which is not a list or string") - % (obj_name(args))) + % (type_utils.obj_name(args))) LOG.debug("Shellified %s commands.", cmds_made) return content diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py index 9e1fed7e..ef0dd7b8 100644 --- a/tests/unittests/test_userdata.py +++ b/tests/unittests/test_userdata.py @@ -74,7 +74,7 @@ run: - morestuff ''' message2 = MIMEBase("text", "cloud-config") - message2['Merge-Type'] = 'dict()+list(extend)+str()' + message2['X-Merge-Type'] = 'dict()+list(extend)+str()' message2.set_payload(blob2) blob3 = ''' @@ -83,6 +83,7 @@ e: - 1 - 2 - 3 +p: 1 ''' message3 = MIMEBase("text", "cloud-config") message3['Merge-Type'] = 'dict()+list()+str()' @@ -109,6 +110,7 @@ e: self.assertEquals(contents['run'], ['b', 'c', 'stuff', 'morestuff']) self.assertEquals(contents['a'], 'be') self.assertEquals(contents['e'], 'fg') + self.assertEquals(contents['p'], 1) def test_unhandled_type_warning(self): """Raw text without magic is ignored but shows warning.""" -- cgit v1.2.3 From 1e4f41e900a9c942354428b0f312428af00031ce Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 6 Mar 2013 19:36:31 -0800 Subject: Make conf.d and the default merging use the new merging algos. --- cloudinit/sources/DataSourceConfigDrive.py | 2 +- cloudinit/sources/DataSourceNoCloud.py | 8 ++++---- cloudinit/sources/DataSourceOVF.py | 4 ++-- cloudinit/util.py | 25 ++++++++++++------------- 4 files changed, 19 insertions(+), 20 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 46abd772..0216ed07 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -154,7 +154,7 @@ class DataSourceConfigDrive(sources.DataSource): return False md = results['metadata'] - md = util.mergedict(md, DEFAULT_METADATA) + md = util.mergemanydict([md, DEFAULT_METADATA]) # Perform some metadata 'fixups' # diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 9a770d38..7800812b 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -64,7 +64,7 @@ class DataSourceNoCloud(sources.DataSource): # Check to see if the seed dir has data. 
seedret = {} if util.read_optional_seed(seedret, base=self.seed_dir + "/"): - md = util.mergedict(md, seedret['meta-data']) + md = util.mergemanydict([md, seedret['meta-data']]) ud = seedret['user-data'] found.append(self.seed_dir) LOG.debug("Using seeded cache data from %s", self.seed_dir) @@ -88,7 +88,7 @@ class DataSourceNoCloud(sources.DataSource): LOG.debug("Attempting to use data from %s", dev) (newmd, newud) = util.mount_cb(dev, util.read_seeded) - md = util.mergedict(newmd, md) + md = util.mergemanydict([newmd, md]) ud = newud # For seed from a device, the default mode is 'net'. @@ -139,11 +139,11 @@ class DataSourceNoCloud(sources.DataSource): LOG.debug("Using seeded cache data from %s", seedfrom) # Values in the command line override those from the seed - md = util.mergedict(md, md_seed) + md = util.mergemanydict([md, md_seed]) found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults - md = util.mergedict(md, defaults) + md = util.mergemanydict([md, defaults]) # Update the network-interfaces if metadata had 'network-interfaces' # entry and this is the local datasource, or 'seedfrom' was used diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index ae139074..0530c4b7 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -94,11 +94,11 @@ class DataSourceOVF(sources.DataSource): (md_seed, ud) = util.read_seeded(seedfrom, timeout=None) LOG.debug("Using seeded cache data from %s", seedfrom) - md = util.mergedict(md, md_seed) + md = util.mergemanydict([md, md_seed]) found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults - md = util.mergedict(md, defaults) + md = util.mergemanydict([md, defaults]) self.seed = ",".join(found) self.metadata = md diff --git a/cloudinit/util.py b/cloudinit/util.py index 73bf6304..e5c6f4ea 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -519,23 +519,22 @@ def mergemanydict(srcs, reverse=False): if reverse: srcs = reversed(srcs) m_cfg = {} + merge_how = [mergers.default_mergers()] for a_cfg in srcs: if a_cfg: - m_cfg = mergedict(m_cfg, a_cfg) + # Take the last merger as the one that + # will define how to merge next... + mergers_to_apply = list(merge_how[-1]) + merger = mergers.construct(mergers_to_apply) + m_cfg = merger.merge(m_cfg, a_cfg) + # If the config has now has new merger set, + # extract them to be used next time... + new_mergers = mergers.dict_extract_mergers(m_cfg) + if new_mergers: + merge_how.append(new_mergers) return m_cfg -def mergedict(src, cand): - """ - Merge values from C{cand} into C{src}. - If C{src} has a key C{cand} will not override. - Nested dictionaries are merged recursively. 
- """ - raw_mergers = mergers.default_mergers() - merger = mergers.construct(raw_mergers) - return merger.merge(src, cand) - - @contextlib.contextmanager def chdir(ndir): curr = os.getcwd() @@ -714,7 +713,7 @@ def read_conf_with_confd(cfgfile): # Conf.d settings override input configuration confd_cfg = read_conf_d(confd) - return mergedict(confd_cfg, cfg) + return mergemanydict([confd_cfg, cfg]) def read_cc_from_cmdline(cmdline=None): -- cgit v1.2.3 From dca9b6c94e10f9f42ad0f129ae6fd38ebb44f4b5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 14:54:25 -0500 Subject: pep8 and pylint fixes --- cloudinit/config/cc_power_state_change.py | 2 +- cloudinit/distros/__init__.py | 6 +++--- cloudinit/distros/debian.py | 5 ++++- cloudinit/distros/rhel.py | 5 ++++- cloudinit/ssh_util.py | 10 ++++------ cloudinit/util.py | 2 +- doc/rtd/conf.py | 8 ++++---- tests/unittests/helpers.py | 1 + tests/unittests/test_datasource/test_nocloud.py | 2 +- .../test_handler/test_handler_growpart.py | 22 +++++++++++----------- tests/unittests/test_sshutil.py | 5 +++-- 11 files changed, 37 insertions(+), 31 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index aefa3aff..de0c0bbd 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -75,7 +75,7 @@ def load_power_state(cfg): ','.join(opt_map.keys())) delay = pstate.get("delay", "now") - if delay != "now" and not re.match("\+[0-9]+", delay): + if delay != "now" and not re.match(r"\+[0-9]+", delay): raise TypeError("power_state[delay] must be 'now' or '+m' (minutes).") args = ["shutdown", opt_map[mode], delay] diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 0db4aac7..2a2d8216 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -73,7 +73,7 @@ class Distro(object): self._apply_hostname(hostname) @abc.abstractmethod - def package_command(self, cmd, args=None): + def package_command(self, cmd, args=None, pkgs=None): raise NotImplementedError() @abc.abstractmethod @@ -370,7 +370,7 @@ class Distro(object): # Import SSH keys if 'ssh_authorized_keys' in kwargs: keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, key_prefix=None) + ssh_util.setup_user_keys(keys, name, options=None) return True @@ -776,7 +776,7 @@ def normalize_users_groups(cfg, distro): # Just add it on at the end... 
base_users.append({'name': 'default'}) elif isinstance(base_users, (dict)): - base_users['default'] = base_users.get('default', True) + base_users['default'] = dict(base_users).get('default', True) elif isinstance(base_users, (str, basestring)): # Just append it on to be re-parsed later base_users += ",default" diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 1a8e927b..1f2848d2 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -142,7 +142,10 @@ class Distro(distros.Distro): # This ensures that the correct tz will be used for the system util.copy(tz_file, self.tz_local_fn) - def package_command(self, command, args=None, pkgs=[]): + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + e = os.environ.copy() # See: http://tiny.cc/kg91fw # Or: http://tiny.cc/mh91fw diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 2f91e386..9fee5fd1 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -208,7 +208,10 @@ class Distro(distros.Distro): # This ensures that the correct tz will be used for the system util.copy(tz_file, self.tz_local_fn) - def package_command(self, command, args=None, pkgs=[]): + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + cmd = ['yum'] # If enabled, then yum will be tolerant of errors on the command line # with regard to packages. diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 65fab117..95133236 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -19,9 +19,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from StringIO import StringIO - -import csv import os import pwd @@ -42,6 +39,7 @@ VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", "ecdsa-sha2-nistp384-cert-v01@openssh.com", "ecdsa-sha2-nistp521-cert-v01@openssh.com") + class AuthKeyLine(object): def __init__(self, source, keytype=None, base64=None, comment=None, options=None): @@ -141,14 +139,14 @@ class AuthKeyLineParser(object): ent = line.strip() try: (keytype, base64, comment) = parse_ssh_key(ent) - except TypeError as e: + except TypeError: (keyopts, remain) = self._extract_options(ent) if options is None: options = keyopts - + try: (keytype, base64, comment) = parse_ssh_key(remain) - except TypeError as e: + except TypeError: return AuthKeyLine(src_line) return AuthKeyLine(src_line, keytype=keytype, base64=base64, diff --git a/cloudinit/util.py b/cloudinit/util.py index d0a6f81c..afde2066 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1530,7 +1530,7 @@ def get_proc_env(pid): fn = os.path.join("/proc/", str(pid), "environ") try: contents = load_file(fn) - toks = contents.split("\0") + toks = contents.split("\x00") for tok in toks: if tok == "": continue diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 87fc40ab..c9ae79f4 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -17,13 +17,13 @@ from cloudinit import version # General information about the project. project = 'Cloud-Init' -# -- General configuration ----------------------------------------------------- +# -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
+# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.intersphinx', ] @@ -55,7 +55,7 @@ exclude_patterns = [] # output. They are ignored by default. show_authors = False -# -- Options for HTML output --------------------------------------------------- +# -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 91a50e18..904677f1 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -183,6 +183,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): setattr(mod, f, trap_func) self.patched_funcs.append((mod, f, func)) + def populate_dir(path, files): os.makedirs(path) for (name, content) in files.iteritems(): diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 28e0a472..62fc5358 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -1,7 +1,7 @@ from cloudinit import helpers -from tests.unittests.helpers import populate_dir from cloudinit.sources import DataSourceNoCloud from cloudinit import util +from tests.unittests.helpers import populate_dir from mocker import MockerTestCase import os diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 74c254e0..325244f2 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -1,7 +1,6 @@ from mocker import MockerTestCase from cloudinit import cloud -from cloudinit import helpers from cloudinit import util from cloudinit.config import cc_growpart @@ -9,9 +8,7 @@ from cloudinit.config import cc_growpart import errno import logging import os -import mocker import re -import stat # growpart: # mode: auto # off, on, auto, 'growpart', 'parted' @@ -85,6 +82,7 @@ growpart disk partition Resize partition 1 on /dev/sda """ + class TestDisabled(MockerTestCase): def setUp(self): super(TestDisabled, self).setUp() @@ -106,6 +104,7 @@ class TestDisabled(MockerTestCase): self.handle(self.name, config, self.cloud_init, self.log, self.args) + class TestConfig(MockerTestCase): def setUp(self): super(TestConfig, self).setUp() @@ -125,9 +124,9 @@ class TestConfig(MockerTestCase): def test_no_resizers_auto_is_fine(self): subp = self.mocker.replace(util.subp, passthrough=False) subp(['parted', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_PARTED_NO_RESIZE,"")) + self.mocker.result((HELP_PARTED_NO_RESIZE, "")) subp(['growpart', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_GROWPART_NO_RESIZE,"")) + self.mocker.result((HELP_GROWPART_NO_RESIZE, "")) self.mocker.replay() config = {'growpart': {'mode': 'auto'}} @@ -136,7 +135,7 @@ class TestConfig(MockerTestCase): def test_no_resizers_mode_growpart_is_exception(self): subp = self.mocker.replace(util.subp, passthrough=False) subp(['growpart', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_GROWPART_NO_RESIZE,"")) + self.mocker.result((HELP_GROWPART_NO_RESIZE, "")) self.mocker.replay() config = {'growpart': {'mode': "growpart"}} @@ -146,7 +145,7 @@ class TestConfig(MockerTestCase): def test_mode_auto_prefers_parted(self): subp = self.mocker.replace(util.subp, passthrough=False) 
subp(['parted', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_PARTED_RESIZE,"")) + self.mocker.result((HELP_PARTED_RESIZE, "")) self.mocker.replay() ret = cc_growpart.resizer_factory(mode="auto") @@ -173,7 +172,7 @@ class TestConfig(MockerTestCase): self.handle(self.name, {}, self.cloud_init, self.log, self.args) finally: cc_growpart.RESIZERS = orig_resizers - + class TestResize(MockerTestCase): def setUp(self): @@ -196,7 +195,7 @@ class TestResize(MockerTestCase): real_stat = os.stat resize_calls = [] - class myresizer(): + class myresizer(object): def resize(self, diskdev, partnum, partdev): resize_calls.append((diskdev, partnum, partdev)) if partdev == "/dev/YYda2": @@ -224,7 +223,7 @@ class TestResize(MockerTestCase): if f[0] == name: return f return None - + self.assertEqual(cc_growpart.RESIZE.NOCHANGE, find("/dev/XXda1", resized)[1]) self.assertEqual(cc_growpart.RESIZE.CHANGED, @@ -244,7 +243,8 @@ def simple_device_part_info(devpath): ret = re.search("([^0-9]*)([0-9]*)$", devpath) x = (ret.group(1), ret.group(2)) return x - + + class Bunch: def __init__(self, **kwds): self.__dict__.update(kwds) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 2415d06f..d8662cac 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -1,5 +1,5 @@ -from unittest import TestCase from cloudinit import ssh_util +from unittest import TestCase VALID_CONTENT = { @@ -34,6 +34,7 @@ TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding," 'command="echo \'Please login as the user \"ubuntu\" rather than the' 'user \"root\".\';echo;sleep 10"') + class TestAuthKeyLineParser(TestCase): def test_simple_parse(self): # test key line with common 3 fields (keytype, base64, comment) @@ -61,7 +62,7 @@ class TestAuthKeyLineParser(TestCase): self.assertFalse(key.options) self.assertFalse(key.comment) self.assertEqual(key.keytype, ktype) - + def test_parse_with_keyoptions(self): # test key line with options in it parser = ssh_util.AuthKeyLineParser() -- cgit v1.2.3 From eab08ade4bc56219e98bcc1d5568b75b6f4bb6ea Mon Sep 17 00:00:00 2001 From: Blair Zajac Date: Sun, 10 Mar 2013 19:43:54 -0700 Subject: Refactor util.get_mount_info() to facilitate unit testing. Refactor the parsing portion of util.get_mount_info() into a new util.parse_mount_info() method. Now util.get_mount_info() opens /proc/$$/mountinfo, splits on newlines and passes the lines to util.parse_mount_info(). --- cloudinit/util.py | 77 +++++++++++++++++++++++++++++++------------------------ 1 file changed, 43 insertions(+), 34 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/util.py b/cloudinit/util.py index 709d5cca..0c30f771 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1576,42 +1576,16 @@ def expand_package_list(version_fmt, pkgs): return pkglist -def get_mount_info(path, log=LOG): - # Use /proc/$$/mountinfo to find the device where path is mounted. - # This is done because with a btrfs filesystem using os.stat(path) - # does not return the ID of the device. - # - # Here, / has a device of 18 (decimal). 
- # - # $ stat / - # File: '/' - # Size: 234 Blocks: 0 IO Block: 4096 directory - # Device: 12h/18d Inode: 256 Links: 1 - # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) - # Access: 2013-01-13 07:31:04.358011255 +0000 - # Modify: 2013-01-13 18:48:25.930011255 +0000 - # Change: 2013-01-13 18:48:25.930011255 +0000 - # Birth: - - # - # Find where / is mounted: - # - # $ mount | grep ' / ' - # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) - # - # And the device ID for /dev/vda1 is not 18: - # - # $ ls -l /dev/vda1 - # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 - # - # So use /proc/$$/mountinfo to find the device underlying the - # input path. +def parse_mount_info(path, mountinfo_lines, log=LOG): + """Return the mount information for PATH given the lines from + /proc/$$/mountinfo.""" + path_elements = [e for e in path.split('/') if e] devpth = None fs_type = None match_mount_point = None match_mount_point_elements = None - mountinfo_path = '/proc/%s/mountinfo' % os.getpid() - for line in load_file(mountinfo_path).splitlines(): + for i, line in enumerate(mountinfo_lines): parts = line.split() mount_point = parts[4] @@ -1638,8 +1612,8 @@ def get_mount_info(path, log=LOG): try: i = parts.index('-') except ValueError: - log.debug("Did not find column named '-' in %s", - mountinfo_path) + log.debug("Did not find column named '-' in line %d: %s", + i + 1, line) return None # Get the path to the device. @@ -1647,7 +1621,8 @@ def get_mount_info(path, log=LOG): fs_type = parts[i + 1] devpth = parts[i + 2] except IndexError: - log.debug("Too few columns in %s after '-' column", mountinfo_path) + log.debug("Too few columns after '-' column in line %d: %s", + i + 1, line) return None match_mount_point = mount_point @@ -1657,3 +1632,37 @@ def get_mount_info(path, log=LOG): return (devpth, fs_type, match_mount_point) else: return None + + +def get_mount_info(path, log=LOG): + # Use /proc/$$/mountinfo to find the device where path is mounted. + # This is done because with a btrfs filesystem using os.stat(path) + # does not return the ID of the device. + # + # Here, / has a device of 18 (decimal). + # + # $ stat / + # File: '/' + # Size: 234 Blocks: 0 IO Block: 4096 directory + # Device: 12h/18d Inode: 256 Links: 1 + # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) + # Access: 2013-01-13 07:31:04.358011255 +0000 + # Modify: 2013-01-13 18:48:25.930011255 +0000 + # Change: 2013-01-13 18:48:25.930011255 +0000 + # Birth: - + # + # Find where / is mounted: + # + # $ mount | grep ' / ' + # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) + # + # And the device ID for /dev/vda1 is not 18: + # + # $ ls -l /dev/vda1 + # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 + # + # So use /proc/$$/mountinfo to find the device underlying the + # input path. + mountinfo_path = '/proc/%s/mountinfo' % os.getpid() + lines = load_file(mountinfo_path).splitlines() + return parse_mount_info(path, lines, log) -- cgit v1.2.3 From 335aded5400d6eb019cd0ee68dac2b643398240c Mon Sep 17 00:00:00 2001 From: Blair Zajac Date: Sun, 10 Mar 2013 19:45:42 -0700 Subject: util.parse_mount_info(): handle short lines. 
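A mountinfo line with fewer than ten columns now makes the whole parse give up and return None, since parsing past a malformed line could yield a wrong answer. A hypothetical invocation, using a made-up truncated line purely for illustration:

  $ python -c 'from cloudinit import util; print util.parse_mount_info("/", ["15 20 0:4 / /proc rw"])'
  None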
--- cloudinit/util.py | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'cloudinit/util.py') diff --git a/cloudinit/util.py b/cloudinit/util.py index 0c30f771..a1f6e004 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1588,6 +1588,17 @@ def parse_mount_info(path, mountinfo_lines, log=LOG): for i, line in enumerate(mountinfo_lines): parts = line.split() + # Completely fail if there is anything in any line that is + # unexpected, as continuing to parse past a bad line could + # cause an incorrect result to be returned, so it's better + # return nothing than an incorrect result. + + # The minimum number of elements in a valid line is 10. + if len(parts) < 10: + log.debug("Line %d has two few columns (%d): %s", + i + 1, len(parts), line) + return None + mount_point = parts[4] mount_point_elements = [e for e in mount_point.split('/') if e] -- cgit v1.2.3 From ae0f94c8f39a234d73ab8e2caf24d73439c8b5ee Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 13 Mar 2013 10:43:40 -0400 Subject: fix / workaround potential for socket.getaddrinfo to raise socket.error As reported in bug 1154599, I'm seeing this on my desktop system: $ python -c \ 'from cloudinit import util; print util.is_resolvable("brickies.neiit")' Traceback (most recent call last): File "", line 1, in File "cloudinit/util.py", line 865, in is_resolvable socket.SOCK_STREAM, socket.AI_CANONNAME) LP: #1154599 --- cloudinit/util.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/util.py b/cloudinit/util.py index a1f6e004..10297ca2 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -867,7 +867,7 @@ def is_resolvable(name): for (_fam, _stype, _proto, cname, sockaddr) in result: badresults[iname].append("%s: %s" % (cname, sockaddr[0])) badips.add(sockaddr[0]) - except socket.gaierror: + except (socket.gaierror, socket.error): pass _DNS_REDIRECT_IP = badips if badresults: @@ -880,7 +880,7 @@ def is_resolvable(name): if addr in _DNS_REDIRECT_IP: return False return True - except socket.gaierror: + except (socket.gaierror, socket.error): return False -- cgit v1.2.3 From f8318f8eec9c8f1c1676ce6a5b5c2c77fa2f7cc5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 19 Mar 2013 09:06:31 -0400 Subject: pylint fixes a.) appease pylint on raring, as it doesn't like subprocess pylint: 0.26.0-1ubuntu1 This is mentioned in comments at http://www.logilab.org/ticket/46273 b.) tests/unittests/test_util.py: the mountinfo lines are longer than 80 chars. Just disable long lines complaints for this file. 
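For (a), the override is an inline pragma on the statement pylint misreads. A minimal stand-alone sketch of the Popen/returncode pattern that raring's pylint reports as E1101 ('true' here is just a placeholder command, not taken from the tree):

  import subprocess
  proc = subprocess.Popen(['true'], stdin=subprocess.PIPE)
  proc.communicate(None)
  ret = proc.returncode  # pylint: disable=E1101

For (b), the long mountinfo data lines are silenced file-wide with a '# pylint: disable=C0301' pragma at the top of tests/unittests/test_util.py.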
--- cloudinit/config/cc_power_state_change.py | 2 +- cloudinit/util.py | 6 +++--- setup.py | 5 +++-- tests/unittests/test_util.py | 2 ++ 4 files changed, 9 insertions(+), 6 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index de0c0bbd..188047e5 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -100,7 +100,7 @@ def execmd(exe_args, output=None, data_in=None): proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, stdout=output, stderr=subprocess.STDOUT) proc.communicate(data_in) - ret = proc.returncode + ret = proc.returncode # pylint: disable=E1101 except Exception: doexit(EXIT_FAIL) doexit(ret) diff --git a/cloudinit/util.py b/cloudinit/util.py index 10297ca2..636ed20e 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -462,7 +462,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): new_fp = open(arg, owith) elif mode == "|": proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) - new_fp = proc.stdin + new_fp = proc.stdin # pylint: disable=E1101 else: raise TypeError("Invalid type for output format: %s" % outfmt) @@ -484,7 +484,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): new_fp = open(arg, owith) elif mode == "|": proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) - new_fp = proc.stdin + new_fp = proc.stdin # pylint: disable=E1101 else: raise TypeError("Invalid type for error format: %s" % errfmt) @@ -1409,7 +1409,7 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, (out, err) = sp.communicate(data) except OSError as e: raise ProcessExecutionError(cmd=args, reason=e) - rc = sp.returncode + rc = sp.returncode # pylint: disable=E1101 if rc not in rcs: raise ProcessExecutionError(stdout=out, stderr=err, exit_code=rc, diff --git a/setup.py b/setup.py index b30cd53b..4aa1a47c 100755 --- a/setup.py +++ b/setup.py @@ -61,9 +61,10 @@ def tiny_p(cmd, capture=True): sp = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, stdin=None) (out, err) = sp.communicate() - if sp.returncode not in [0]: + ret = sp.returncode # pylint: disable=E1101 + if ret not in [0]: raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" - % (cmd, sp.returncode, out, err)) + % (cmd, ret, out, err)) return (out, err) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 7ff9a57f..5853cb0f 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -1,3 +1,5 @@ +# pylint: disable=C0301 +# the mountinfo data lines are too long import os import stat import yaml -- cgit v1.2.3 From 8fbe938228909e153afb88f189b269df60501510 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 19 Mar 2013 09:32:04 -0400 Subject: appease pylint and pep8 * cloudinit/distros/parsers/resolv_conf.py added some pylint overrides with 'plXXXXX' syntax. example: # pl51222 pylint: disable=E0102 The pl51222 there means: http://www.logilab.org/ticket/51222 This specific issue is present in 12.04 pylint, but not 13.04. * pylint doesn't like the requests special handling we have. which makes sense as it is only checking versus one specific version. * general pep8 and pylint cleanups. 
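The pl51222 overrides annotate an ordinary property/setter pair, which the 12.04 pylint flags on the decorator line (E1101) and on the redefined method (E0102). A minimal sketch of the pattern, with the surrounding class reduced to a stub:

  class Example(object):
      def __init__(self):
          self._domain = None

      @property
      def local_domain(self):
          return self._domain

      @local_domain.setter  # pl51222 pylint: disable=E1101
      def local_domain(self, domain):  # pl51222 pylint: disable=E0102
          self._domain = domain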
--- cloudinit/distros/parsers/resolv_conf.py | 4 ++-- cloudinit/ec2_utils.py | 1 + cloudinit/url_helper.py | 22 ++++++++++++---------- cloudinit/util.py | 4 ++-- 4 files changed, 17 insertions(+), 14 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index 5733c25a..1be9d46b 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -137,8 +137,8 @@ class ResolvConf(object): self._contents.append(('option', ['search', s_list, ''])) return flat_sds - @local_domain.setter - def local_domain(self, domain): + @local_domain.setter # pl51222 pylint: disable=E1101 + def local_domain(self, domain): # pl51222 pylint: disable=E0102 self.parse() self._remove_option('domain') self._contents.append(('option', ['domain', str(domain), ''])) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 4812eccb..6b2754aa 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -145,6 +145,7 @@ def get_instance_userdata(api_version, metadata_address, ssl_details=None): util.logexc(LOG, "Failed fetching userdata from url %s", ud_url) return None + def get_instance_metadata(api_version, metadata_address, ssl_details=None): md_url = combine_url(metadata_address, api_version) md_url = combine_url(md_url, 'meta-data') diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index bfc5cfdd..de73cc84 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -34,12 +34,12 @@ LOG = logging.getLogger(__name__) # Check if requests has ssl support (added in requests >= 0.8.8) SSL_ENABLED = False -CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) +CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) try: - import pkg_resources from distutils.version import LooseVersion + import pkg_resources _REQ = pkg_resources.get_distribution('requests') - _REQ_VER = LooseVersion(_REQ.version) + _REQ_VER = LooseVersion(_REQ.version) # pylint: disable=E1103 if _REQ_VER >= LooseVersion('0.8.8'): SSL_ENABLED = True if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'): @@ -49,7 +49,7 @@ except: def _cleanurl(url): - parsed_url = list(urlparse(url, scheme='http')) + parsed_url = list(urlparse(url, scheme='http')) # pylint: disable=E1123 if not parsed_url[1] and parsed_url[2]: # Swap these since this seems to be a common # occurrence when given urls like 'www.google.com' @@ -108,7 +108,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, req_args = { 'url': url, } - if urlparse(url).scheme == 'https' and ssl_details: + scheme = urlparse(url).scheme # pylint: disable=E1101 + if scheme == 'https' and ssl_details: if not SSL_ENABLED: LOG.warn("SSL is not enabled, cert. 
verification can not occur!") else: @@ -121,7 +122,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, ssl_details['key_file']] elif 'cert_file' in ssl_details: req_args['cert'] = str(ssl_details['cert_file']) - + req_args['allow_redirects'] = allow_redirects req_args['method'] = 'GET' if timeout is not None: @@ -162,16 +163,17 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, try: r = requests.request(**req_args) if check_status: - r.raise_for_status() + r.raise_for_status() # pylint: disable=E1103 LOG.debug("Read from %s (%s, %sb) after %s attempts", url, - r.status_code, len(r.content), (i + 1)) + r.status_code, len(r.content), # pylint: disable=E1103 + (i + 1)) # Doesn't seem like we can make it use a different # subclass for responses, so add our own backward-compat # attrs return UrlResponse(r) except exceptions.RequestException as e: if (isinstance(e, (exceptions.HTTPError)) - and hasattr(e, 'response') # This appeared in v 0.10.8 + and hasattr(e, 'response') # This appeared in v 0.10.8 and e.response): excps.append(UrlError(e, code=e.response.status_code, headers=e.response.headers)) @@ -183,7 +185,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, time.sleep(sec_between) if excps: raise excps[-1] - return None # Should throw before this... + return None # Should throw before this... def wait_for_url(urls, max_wait=None, timeout=None, diff --git a/cloudinit/util.py b/cloudinit/util.py index 52b528ea..36e9b83b 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -51,8 +51,8 @@ from cloudinit import importer from cloudinit import log as logging from cloudinit import mergers from cloudinit import safeyaml -from cloudinit import url_helper from cloudinit import type_utils +from cloudinit import url_helper from cloudinit import version from cloudinit.settings import (CFG_BUILTIN) @@ -81,7 +81,7 @@ class StringResponse(object): self.contents = contents self.url = None - def ok(self, *args, **kwargs): + def ok(self, *args, **kwargs): # pylint: disable=W0613 if self.code != 200: return False return True -- cgit v1.2.3 From 984c72e522c585c6d3f6b3d3aec39fb21dd84028 Mon Sep 17 00:00:00 2001 From: Greg Padgett Date: Tue, 26 Mar 2013 17:48:40 -0400 Subject: compatibility fixes for Fedora and RHEL This patch fixes issues in Fedora 18 (and upcoming RHEL 7) which are present due to their use of systemd: - store locale configuration in /etc/locale.conf - store hostname in /etc/hostname - use a symlink for /etc/localtime (prior code would set the timezone but corrupt data in /usr/share/zoneinfo due to presence of symlink) It also contains fixes for issues unrelated to systemd adoption: - explicitly scan /dev/sr0 with blkid in order to get the optical drive in the blkid cache. This prevents an issue on systems running 2.6 kernels (such as RHEL 6) in which config disks on some devices won't be detected unless the device has previously been queried. 
(For reference, see https://patchwork.kernel.org/patch/1770241/) - append a newline when rewriting sysconfig files, as this is customary text configuration file formatting and is expected by some parsers (such as the ifcfg-rh plugin for NetworkManager) --- cloudinit/distros/rhel.py | 73 ++++++++++++++++------ cloudinit/sources/DataSourceConfigDrive.py | 3 + cloudinit/sources/DataSourceNoCloud.py | 3 + cloudinit/util.py | 1 + .../unittests/test_datasource/test_configdrive.py | 5 +- 5 files changed, 63 insertions(+), 22 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 9fee5fd1..174da3ab 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -47,8 +47,10 @@ class Distro(distros.Distro): # See: http://tiny.cc/6r99fw clock_conf_fn = "/etc/sysconfig/clock" locale_conf_fn = '/etc/sysconfig/i18n' + systemd_locale_conf_fn = '/etc/locale.conf' network_conf_fn = "/etc/sysconfig/network" hostname_conf_fn = "/etc/sysconfig/network" + systemd_hostname_conf_fn = "/etc/hostname" network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s' resolve_conf_fn = "/etc/resolv.conf" tz_local_fn = "/etc/localtime" @@ -143,21 +145,36 @@ class Distro(distros.Distro): ] if not exists: lines.insert(0, util.make_header()) - util.write_file(fn, "\n".join(lines), 0644) + util.write_file(fn, "\n".join(lines) + "\n", 0644) + + def _dist_uses_systemd(self): + # Fedora 18 and RHEL 7 were the first adopters in their series + (dist, vers) = util.system_info()['dist'][:2] + major = (int)(vers.split('.')[0]) + return ((dist.startswith('Red Hat Enterprise Linux') and major >= 7) + or (dist.startswith('Fedora') and major >= 18)) def apply_locale(self, locale, out_fn=None): - if not out_fn: - out_fn = self.locale_conf_fn + if self._dist_uses_systemd(): + if not out_fn: + out_fn = self.systemd_locale_conf_fn + out_fn = self.systemd_locale_conf_fn + else: + if not out_fn: + out_fn = self.locale_conf_fn locale_cfg = { 'LANG': locale, } self._update_sysconfig_file(out_fn, locale_cfg) def _write_hostname(self, hostname, out_fn): - host_cfg = { - 'HOSTNAME': hostname, - } - self._update_sysconfig_file(out_fn, host_cfg) + if self._dist_uses_systemd(): + util.subp(['hostnamectl', 'set-hostname', str(hostname)]) + else: + host_cfg = { + 'HOSTNAME': hostname, + } + self._update_sysconfig_file(out_fn, host_cfg) def _select_hostname(self, hostname, fqdn): # See: http://bit.ly/TwitgL @@ -167,15 +184,25 @@ class Distro(distros.Distro): return hostname def _read_system_hostname(self): - return (self.network_conf_fn, - self._read_hostname(self.network_conf_fn)) + if self._dist_uses_systemd(): + host_fn = self.systemd_hostname_conf_fn + else: + host_fn = self.hostname_conf_fn + return (host_fn, self._read_hostname(host_fn)) def _read_hostname(self, filename, default=None): - (_exists, contents) = self._read_conf(filename) - if 'HOSTNAME' in contents: - return contents['HOSTNAME'] + if self._dist_uses_systemd(): + (out, _err) = util.subp(['hostname']) + if len(out): + return out + else: + return default else: - return default + (_exists, contents) = self._read_conf(filename) + if 'HOSTNAME' in contents: + return contents['HOSTNAME'] + else: + return default def _read_conf(self, fn): exists = False @@ -200,13 +227,19 @@ class Distro(distros.Distro): if not os.path.isfile(tz_file): raise RuntimeError(("Invalid timezone %s," " no file found at %s") % (tz, tz_file)) - # Adjust the sysconfig clock zone setting - clock_cfg = { - 'ZONE': str(tz), - } - 
self._update_sysconfig_file(self.clock_conf_fn, clock_cfg) - # This ensures that the correct tz will be used for the system - util.copy(tz_file, self.tz_local_fn) + if self._dist_uses_systemd(): + # Currently, timedatectl complains if invoked during startup + # so for compatibility, create the link manually. + util.del_file(self.tz_local_fn) + util.sym_link(tz_file, self.tz_local_fn) + else: + # Adjust the sysconfig clock zone setting + clock_cfg = { + 'ZONE': str(tz), + } + self._update_sysconfig_file(self.clock_conf_fn, clock_cfg) + # This ensures that the correct tz will be used for the system + util.copy(tz_file, self.tz_local_fn) def package_command(self, command, args=None, pkgs=None): if pkgs is None: diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 5f152299..d3443c2b 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -258,6 +258,9 @@ def find_candidate_devs(): * labeled with 'config-2' """ + # Query optical drive to get it in blkid cache for 2.6 kernels + util.find_devs_with(path="/dev/sr0") + by_fstype = (util.find_devs_with("TYPE=vfat") + util.find_devs_with("TYPE=iso9660")) by_label = util.find_devs_with("LABEL=config-2") diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 08a853cc..01c99028 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -87,6 +87,9 @@ class DataSourceNoCloud(sources.DataSource): label = self.ds_cfg.get('fs_label', "cidata") if label is not None: + # Query optical drive to get it in blkid cache for 2.6 kernels + util.find_devs_with(path="/dev/sr0") + fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) diff --git a/cloudinit/util.py b/cloudinit/util.py index 36e9b83b..50de55fe 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -408,6 +408,7 @@ def system_info(): 'release': platform.release(), 'python': platform.python_version(), 'uname': platform.uname(), + 'dist': platform.linux_distribution(), } diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 930086db..d5935294 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -259,8 +259,9 @@ class TestConfigDriveDataSource(MockerTestCase): def test_find_candidates(self): devs_with_answers = {} - def my_devs_with(criteria): - return devs_with_answers[criteria] + def my_devs_with(*args, **kwargs): + criteria = args[0] if len(args) else kwargs.pop('criteria', None) + return devs_with_answers.get(criteria, []) def my_is_partition(dev): return dev[-1] in "0123456789" and not dev.startswith("sr") -- cgit v1.2.3 From 6ad068d1ae175d784481fe8f8e190b2721a221f5 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 17 Apr 2013 10:17:09 -0700 Subject: Altering the order of merging. 
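Each part is now merged in using the mergers it declares itself (extracted with mergers.dict_extract_mergers), falling back to the defaults, rather than inheriting the merger list from the previous part. A rough sketch of the resulting mergemanydict behaviour, with throw-away dicts standing in for real cloud-config parts:

  from cloudinit import util
  merged = util.mergemanydict([{'a': 1}, {'a': 2, 'b': 3}])
  # the second dict is folded in using whatever mergers it declares,
  # or the default mergers if it declares none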
--- cloudinit/handlers/cloud_config.py | 12 +++--------- cloudinit/util.py | 23 +++++++++-------------- 2 files changed, 12 insertions(+), 23 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index d30d6338..7678a5b0 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -39,7 +39,6 @@ class CloudConfigPartHandler(handlers.Handler): self.cloud_buf = None self.cloud_fn = paths.get_ipath("cloud_config") self.file_names = [] - self.mergers = [DEF_MERGERS] def list_types(self): return [ @@ -89,13 +88,9 @@ class CloudConfigPartHandler(handlers.Handler): return all_mergers def _merge_part(self, payload, headers): - next_mergers = self._extract_mergers(payload, headers) - # Use the merger list from the last call, since it is the one - # that will be defining how to merge with the next payload. - curr_mergers = list(self.mergers[-1]) - LOG.debug("Merging by applying %s", curr_mergers) - self.mergers.append(next_mergers) - merger = mergers.construct(curr_mergers) + my_mergers = self._extract_mergers(payload, headers) + LOG.debug("Merging by applying %s", my_mergers) + merger = mergers.construct(my_mergers) if self.cloud_buf is None: # First time through, merge with an empty dict... self.cloud_buf = {} @@ -105,7 +100,6 @@ class CloudConfigPartHandler(handlers.Handler): def _reset(self): self.file_names = [] self.cloud_buf = None - self.mergers = [DEF_MERGERS] def handle_part(self, _data, ctype, filename, # pylint: disable=W0221 payload, _frequency, headers): # pylint: disable=W0613 diff --git a/cloudinit/util.py b/cloudinit/util.py index 50de55fe..f7ff28cc 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -543,21 +543,16 @@ def make_url(scheme, host, port=None, def mergemanydict(srcs, reverse=False): if reverse: srcs = reversed(srcs) - m_cfg = {} - merge_how = [mergers.default_mergers()] - for a_cfg in srcs: - if a_cfg: - # Take the last merger as the one that - # will define how to merge next... - mergers_to_apply = list(merge_how[-1]) + merged_cfg = {} + for cfg in srcs: + if cfg: + # Figure out which mergers to apply... + mergers_to_apply = mergers.dict_extract_mergers(cfg) + if not mergers_to_apply: + mergers_to_apply = mergers.default_mergers() merger = mergers.construct(mergers_to_apply) - m_cfg = merger.merge(m_cfg, a_cfg) - # If the config has now has new merger set, - # extract them to be used next time... 
- new_mergers = mergers.dict_extract_mergers(m_cfg) - if new_mergers: - merge_how.append(new_mergers) - return m_cfg + merged_cfg = merger.merge(merged_cfg, cfg) + return merged_cfg @contextlib.contextmanager -- cgit v1.2.3 From 1b7e36a966ce1a0964e93eefa98c9efcbc4c323d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 25 Apr 2013 11:58:38 -0400 Subject: re-work maas datasource and headers call backs Couple of things here: * do not re-try on user-data (404 means 'not here') * re-generate headers on retry requests LP: #1172742 --- cloudinit/sources/DataSourceMAAS.py | 16 +++++++++++----- cloudinit/url_helper.py | 18 +++++++++++++----- cloudinit/util.py | 4 +++- 3 files changed, 27 insertions(+), 11 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index de822924..dfe90bc6 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -218,14 +218,20 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, md = {} for name in file_order: url = files.get(name) - if header_cb: - headers = header_cb(url) + if not header_cb: + def _cb(url): + return {} + header_cb = _cb + + if name == 'user-data': + retries = 0 else: - headers = {} + retries = None + try: ssl_details = util.fetch_ssl_details(paths) - resp = util.read_file_or_url(url, - headers=headers, + resp = util.read_file_or_url(url, retries=retries, + headers_cb=header_cb, timeout=timeout, ssl_details=ssl_details) if resp.ok(): diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 6b4516e0..24ce6871 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -102,8 +102,8 @@ class UrlError(IOError): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, - headers=None, ssl_details=None, check_status=True, - allow_redirects=True): + headers=None, headers_cb=None, ssl_details=None, + check_status=True, allow_redirects=True): url = _cleanurl(url) req_args = { 'url': url, @@ -149,8 +149,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers = { 'User-Agent': 'Cloud-Init/%s' % (version.version_string()), } - req_args['headers'] = headers - LOG.debug("Attempting to open '%s' with %s configuration", url, req_args) + if not headers_cb: + def _cb(url): + return headers + headers_cb = _cb + if data: # Do this after the log (it might be large) req_args['data'] = data @@ -161,6 +164,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # doesn't handle sleeping between tries... 
for i in range(0, manual_tries): try: + req_args['headers'] = headers_cb(url) + LOG.debug("[%s/%s] open '%s' with %s configuration", i, + manual_tries, url, + {k: req_args[k] for k in req_args if k != 'data'}) + r = requests.request(**req_args) if check_status: r.raise_for_status() # pylint: disable=E1103 @@ -174,7 +182,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, except exceptions.RequestException as e: if (isinstance(e, (exceptions.HTTPError)) and hasattr(e, 'response') # This appeared in v 0.10.8 - and e.response): + and hasattr(e.response, 'status_code')): excps.append(UrlError(e, code=e.response.status_code, headers=e.response.headers)) else: diff --git a/cloudinit/util.py b/cloudinit/util.py index 50de55fe..053fa95d 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -671,7 +671,8 @@ def fetch_ssl_details(paths=None): def read_file_or_url(url, timeout=5, retries=10, - headers=None, data=None, sec_between=1, ssl_details=None): + headers=None, data=None, sec_between=1, ssl_details=None, + headers_cb=None): url = url.lstrip() if url.startswith("/"): url = "file://%s" % url @@ -685,6 +686,7 @@ def read_file_or_url(url, timeout=5, retries=10, timeout=timeout, retries=retries, headers=headers, + headers_cb=headers_cb, data=data, sec_between=sec_between, ssl_details=ssl_details) -- cgit v1.2.3 From 944623f4ad3e4c7319758c64053d06a3b05555a2 Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Wed, 19 Jun 2013 08:44:00 +0200 Subject: fix and cleanup usage of util.logexc --- cloudinit/config/cc_bootcmd.py | 5 ++--- cloudinit/config/cc_growpart.py | 8 +++++--- cloudinit/config/cc_phone_home.py | 14 +++++++------- cloudinit/config/cc_rightscale_userdata.py | 10 +++++----- cloudinit/config/cc_set_hostname.py | 6 +++--- cloudinit/config/cc_set_passwords.py | 6 +++--- cloudinit/config/cc_ssh.py | 10 +++++----- cloudinit/config/cc_ssh_import_id.py | 6 +++--- cloudinit/config/cc_update_hostname.py | 6 +++--- cloudinit/distros/__init__.py | 26 ++++++++++++------------- cloudinit/distros/rhel.py | 7 +++---- cloudinit/handlers/__init__.py | 13 ++++++------- cloudinit/handlers/boot_hook.py | 4 ++-- cloudinit/helpers.py | 18 ++++++++--------- cloudinit/sources/DataSourceAltCloud.py | 31 +++++++++++++++--------------- cloudinit/sources/DataSourceCloudStack.py | 6 ++++-- cloudinit/sources/DataSourceNoCloud.py | 6 +++--- cloudinit/ssh_util.py | 8 +++----- cloudinit/stages.py | 7 +++---- cloudinit/util.py | 5 ++--- 20 files changed, 100 insertions(+), 102 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 896cb4d0..3ac22967 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -50,6 +50,5 @@ def handle(name, cfg, cloud, log, _args): cmd = ['/bin/sh', tmpf.name] util.subp(cmd, env=env, capture=False) except: - util.logexc(log, - ("Failed to run bootcmd module %s"), name) + util.logexc(log, "Failed to run bootcmd module %s", name) raise diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index b6e1fd37..4f8c8f80 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -1,8 +1,10 @@ # vi: ts=4 expandtab # # Copyright (C) 2011 Canonical Ltd. 
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser +# Author: Juerg Haefliger # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -121,15 +123,15 @@ class ResizeGrowPart(object): util.subp(["growpart", '--dry-run', diskdev, partnum]) except util.ProcessExecutionError as e: if e.exit_code != 1: - util.logexc(LOG, ("Failed growpart --dry-run for (%s, %s)" % - (diskdev, partnum))) + util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", + diskdev, partnum) raise ResizeFailedException(e) return (before, before) try: util.subp(["growpart", diskdev, partnum]) except util.ProcessExecutionError as e: - util.logexc(LOG, "Failed: growpart %s %s" % (diskdev, partnum)) + util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum) raise ResizeFailedException(e) return (before, get_size(partdev)) diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index c873c8a8..2e058ccd 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -65,8 +65,8 @@ def handle(name, cfg, cloud, log, args): tries = int(tries) except: tries = 10 - util.logexc(log, ("Configuration entry 'tries'" - " is not an integer, using %s instead"), tries) + util.logexc(log, "Configuration entry 'tries' is not an integer, " + "using %s instead", tries) if post_list == "all": post_list = POST_LIST_ALL @@ -85,8 +85,8 @@ def handle(name, cfg, cloud, log, args): try: all_keys[n] = util.load_file(path) except: - util.logexc(log, ("%s: failed to open, can not" - " phone home that data!"), path) + util.logexc(log, "%s: failed to open, can not phone home that " + "data!", path) submit_keys = {} for k in post_list: @@ -115,5 +115,5 @@ def handle(name, cfg, cloud, log, args): retries=tries, sec_between=3, ssl_details=util.fetch_ssl_details(cloud.paths)) except: - util.logexc(log, ("Failed to post phone home data to" - " %s in %s tries"), url, tries) + util.logexc(log, "Failed to post phone home data to %s in %s tries", + url, tries) diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py index 4bf18516..c771728d 100644 --- a/cloudinit/config/cc_rightscale_userdata.py +++ b/cloudinit/config/cc_rightscale_userdata.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. 
# # Author: Scott Moser # Author: Juerg Haefliger @@ -64,8 +64,8 @@ def handle(name, _cfg, cloud, log, _args): " raw userdata"), name, MY_HOOKNAME) return except: - util.logexc(log, ("Failed to parse query string %s" - " into a dictionary"), ud) + util.logexc(log, "Failed to parse query string %s into a dictionary", + ud) raise wrote_fns = [] @@ -86,8 +86,8 @@ def handle(name, _cfg, cloud, log, _args): wrote_fns.append(fname) except Exception as e: captured_excps.append(e) - util.logexc(log, "%s failed to read %s and write %s", - MY_NAME, url, fname) + util.logexc(log, "%s failed to read %s and write %s", MY_NAME, url, + fname) if wrote_fns: log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns)) diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index 2b32fc94..5d7f4331 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -32,6 +32,6 @@ def handle(name, cfg, cloud, log, _args): log.debug("Setting the hostname to %s (%s)", fqdn, hostname) cloud.distro.set_hostname(hostname, fqdn) except Exception: - util.logexc(log, "Failed to set the hostname to %s (%s)", - fqdn, hostname) + util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn, + hostname) raise diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index c6bf62fd..e93c8c6f 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -81,8 +81,8 @@ def handle(_name, cfg, cloud, log, args): util.subp(['chpasswd'], ch_in) except Exception as e: errors.append(e) - util.logexc(log, - "Failed to set passwords with chpasswd for %s", users) + util.logexc(log, "Failed to set passwords with chpasswd for %s", + users) if len(randlist): blurb = ("Set the following 'random' passwords\n", diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 7ef20d9f..64a5e3cb 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. 
# # Author: Scott Moser # Author: Juerg Haefliger @@ -85,8 +85,8 @@ def handle(_name, cfg, cloud, log, _args): util.subp(cmd, capture=False) log.debug("Generated a key for %s from %s", pair[0], pair[1]) except: - util.logexc(log, ("Failed generated a key" - " for %s from %s"), pair[0], pair[1]) + util.logexc(log, "Failed generated a key for %s from %s", + pair[0], pair[1]) else: # if not, generate them genkeys = util.get_cfg_option_list(cfg, @@ -102,8 +102,8 @@ def handle(_name, cfg, cloud, log, _args): with util.SeLinuxGuard("/etc/ssh", recursive=True): util.subp(cmd, capture=False) except: - util.logexc(log, ("Failed generating key type" - " %s to file %s"), keytype, keyfile) + util.logexc(log, "Failed generating key type %s to " + "file %s", keytype, keyfile) try: (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index 83af36e9..50d96e15 100644 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -71,8 +71,8 @@ def handle(_name, cfg, cloud, log, args): try: import_ssh_ids(import_ids, user, log) except Exception as exc: - util.logexc(log, "ssh-import-id failed for: %s %s" % - (user, import_ids), exc) + util.logexc(log, "ssh-import-id failed for: %s %s", user, + import_ids) elist.append(exc) if len(elist): diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index 52225cd8..e396ba13 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -38,6 +38,6 @@ def handle(name, cfg, cloud, log, _args): log.debug("Updating hostname to %s (%s)", fqdn, hostname) cloud.distro.update_hostname(hostname, fqdn, prev_fn) except Exception: - util.logexc(log, "Failed to update the hostname to %s (%s)", - fqdn, hostname) + util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn, + hostname) raise diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 50d52594..e99cb16f 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. 
# # Author: Scott Moser @@ -142,8 +142,8 @@ class Distro(object): try: util.subp(['hostname', hostname]) except util.ProcessExecutionError: - util.logexc(LOG, ("Failed to non-persistently adjust" - " the system hostname to %s"), hostname) + util.logexc(LOG, "Failed to non-persistently adjust the system " + "hostname to %s", hostname) @abc.abstractmethod def _select_hostname(self, hostname, fqdn): @@ -200,8 +200,8 @@ class Distro(object): try: self._write_hostname(hostname, fn) except IOError: - util.logexc(LOG, "Failed to write hostname %s to %s", - hostname, fn) + util.logexc(LOG, "Failed to write hostname %s to %s", hostname, + fn) if (sys_hostname and prev_hostname and sys_hostname != prev_hostname): @@ -347,7 +347,7 @@ class Distro(object): try: util.subp(adduser_cmd, logstring=x_adduser_cmd) except Exception as e: - util.logexc(LOG, "Failed to create user %s due to error.", e) + util.logexc(LOG, "Failed to create user %s", name) raise e # Set password if plain-text password provided @@ -360,8 +360,8 @@ class Distro(object): try: util.subp(['passwd', '--lock', name]) except Exception as e: - util.logexc(LOG, ("Failed to disable password logins for" - "user %s" % name), e) + util.logexc(LOG, "Failed to disable password logins for " + "user %s", name) raise e # Configure sudo access @@ -385,7 +385,7 @@ class Distro(object): try: util.subp(cmd, pass_string, logstring="chpasswd for %s" % user) except Exception as e: - util.logexc(LOG, "Failed to set password for %s" % user) + util.logexc(LOG, "Failed to set password for %s", user) raise e return True @@ -427,7 +427,7 @@ class Distro(object): util.append_file(sudo_base, sudoers_contents) LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base)) except IOError as e: - util.logexc(LOG, "Failed to write %s" % sudo_base, e) + util.logexc(LOG, "Failed to write %s", sudo_base) raise e util.ensure_dir(path, 0750) @@ -478,15 +478,15 @@ class Distro(object): try: util.subp(group_add_cmd) LOG.info("Created new group %s" % name) - except Exception as e: - util.logexc("Failed to create group %s" % name, e) + except Exception: + util.logexc("Failed to create group %s", name) # Add members to the group, if so defined if len(members) > 0: for member in members: if not util.is_user(member): LOG.warn("Unable to add group member '%s' to group '%s'" - "; user does not exist." % (member, name)) + "; user does not exist.", member, name) continue util.subp(['usermod', '-a', '-G', name, member]) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 174da3ab..0727ecd1 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Scott Moser @@ -72,9 +72,8 @@ class Distro(distros.Distro): r_conf = ResolvConf(util.load_file(self.resolve_conf_fn)) r_conf.parse() except IOError: - util.logexc(LOG, - "Failed at parsing %s reverting to an empty instance", - self.resolve_conf_fn) + util.logexc(LOG, "Failed at parsing %s reverting to an empty " + "instance", self.resolve_conf_fn) r_conf = ResolvConf('') r_conf.parse() if dns_servers: diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 924463ce..497d68c5 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. 
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Scott Moser @@ -117,10 +117,9 @@ def run_part(mod, data, filename, payload, frequency, headers): else: raise ValueError("Unknown module version %s" % (mod_ver)) except: - util.logexc(LOG, ("Failed calling handler %s (%s, %s, %s)" - " with frequency %s"), - mod, content_type, filename, - mod_ver, frequency) + util.logexc(LOG, "Failed calling handler %s (%s, %s, %s) with " + "frequency %s", mod, content_type, filename, mod_ver, + frequency) def call_begin(mod, data, frequency): @@ -158,8 +157,8 @@ def walker_handle_handler(pdata, _ctype, _filename, payload): handlers.register(mod) pdata['handlercount'] = curcount + 1 except: - util.logexc(LOG, ("Failed at registering python file: %s" - " (part handler %s)"), modfname, curcount) + util.logexc(LOG, "Failed at registering python file: %s (part " + "handler %s)", modfname, curcount) def _extract_first_or_bytes(blob, size): diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py index bf2899ab..11ac4fe5 100644 --- a/cloudinit/handlers/boot_hook.py +++ b/cloudinit/handlers/boot_hook.py @@ -70,5 +70,5 @@ class BootHookPartHandler(handlers.Handler): except util.ProcessExecutionError: util.logexc(LOG, "Boothooks script %s execution error", filepath) except Exception: - util.logexc(LOG, ("Boothooks unknown " - "error when running %s"), filepath) + util.logexc(LOG, "Boothooks unknown error when running %s", + filepath) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index a4e6fb03..b91c1290 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Scott Moser @@ -216,8 +216,8 @@ class ConfigMerger(object): if ds_cfg and isinstance(ds_cfg, (dict)): d_cfgs.append(ds_cfg) except: - util.logexc(LOG, ("Failed loading of datasource" - " config object from %s"), self._ds) + util.logexc(LOG, "Failed loading of datasource config object " + "from %s", self._ds) return d_cfgs def _get_env_configs(self): @@ -227,8 +227,8 @@ class ConfigMerger(object): try: e_cfgs.append(util.read_conf(e_fn)) except: - util.logexc(LOG, ('Failed loading of env. config' - ' from %s'), e_fn) + util.logexc(LOG, 'Failed loading of env. config from %s', + e_fn) return e_cfgs def _get_instance_configs(self): @@ -242,8 +242,8 @@ class ConfigMerger(object): try: i_cfgs.append(util.read_conf(cc_fn)) except: - util.logexc(LOG, ('Failed loading of cloud-config' - ' from %s'), cc_fn) + util.logexc(LOG, 'Failed loading of cloud-config from %s', + cc_fn) return i_cfgs def _read_cfg(self): @@ -259,8 +259,8 @@ class ConfigMerger(object): try: cfgs.append(util.read_conf(c_fn)) except: - util.logexc(LOG, ("Failed loading of configuration" - " from %s"), c_fn) + util.logexc(LOG, "Failed loading of configuration from %s", + c_fn) cfgs.extend(self._get_env_configs()) cfgs.extend(self._get_instance_configs()) diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 64548d43..a834f8eb 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -1,10 +1,11 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2010 Canonical Ltd. 
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Joe VLcek +# Author: Juerg Haefliger # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -79,7 +80,7 @@ def read_user_data_callback(mount_dir): try: user_data = util.load_file(user_data_file).strip() except IOError: - util.logexc(LOG, ('Failed accessing user data file.')) + util.logexc(LOG, 'Failed accessing user data file.') return None return user_data @@ -178,7 +179,7 @@ class DataSourceAltCloud(sources.DataSource): return False # No user data found - util.logexc(LOG, ('Failed accessing user data.')) + util.logexc(LOG, 'Failed accessing user data.') return False def user_data_rhevm(self): @@ -205,12 +206,12 @@ class DataSourceAltCloud(sources.DataSource): (cmd_out, _err) = util.subp(cmd) LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out)) except ProcessExecutionError, _err: - util.logexc(LOG, (('Failed command: %s\n%s') % \ - (' '.join(cmd), _err.message))) + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), + _err.message) return False except OSError, _err: - util.logexc(LOG, (('Failed command: %s\n%s') % \ - (' '.join(cmd), _err.message))) + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), + _err.message) return False floppy_dev = '/dev/fd0' @@ -222,12 +223,12 @@ class DataSourceAltCloud(sources.DataSource): (cmd_out, _err) = util.subp(cmd) LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out)) except ProcessExecutionError, _err: - util.logexc(LOG, (('Failed command: %s\n%s') % \ - (' '.join(cmd), _err.message))) + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), + _err.message) return False except OSError, _err: - util.logexc(LOG, (('Failed command: %s\n%s') % \ - (' '.join(cmd), _err.message))) + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), + _err.message) return False try: @@ -236,8 +237,8 @@ class DataSourceAltCloud(sources.DataSource): if err.errno != errno.ENOENT: raise except util.MountFailedError: - util.logexc(LOG, ("Failed to mount %s" - " when looking for user data"), floppy_dev) + util.logexc(LOG, "Failed to mount %s when looking for user data", + floppy_dev) self.userdata_raw = return_str self.metadata = META_DATA_NOT_SUPPORTED @@ -272,8 +273,8 @@ class DataSourceAltCloud(sources.DataSource): if err.errno != errno.ENOENT: raise except util.MountFailedError: - util.logexc(LOG, ("Failed to mount %s" - " when looking for user data"), cdrom_dev) + util.logexc(LOG, "Failed to mount %s when looking for user " + "data", cdrom_dev) self.userdata_raw = return_str self.metadata = META_DATA_NOT_SUPPORTED diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 81c8cda9..08f661e4 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -4,11 +4,13 @@ # Copyright (C) 2012 Cosmin Luta # Copyright (C) 2012 Yahoo! Inc. # Copyright (C) 2012 Gerard Dethier +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. 
# # Author: Cosmin Luta # Author: Scott Moser # Author: Joshua Harlow # Author: Gerard Dethier +# Author: Juerg Haefliger # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -109,8 +111,8 @@ class DataSourceCloudStack(sources.DataSource): int(time.time() - start_time)) return True except Exception: - util.logexc(LOG, ('Failed fetching from metadata ' - 'service %s'), self.metadata_address) + util.logexc(LOG, 'Failed fetching from metadata service %s', + self.metadata_address) return False def get_instance_id(self): diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 084abca7..4ef92a56 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Scott Moser @@ -119,8 +119,8 @@ class DataSourceNoCloud(sources.DataSource): if e.errno != errno.ENOENT: raise except util.MountFailedError: - util.logexc(LOG, ("Failed to mount %s" - " when looking for data"), dev) + util.logexc(LOG, "Failed to mount %s when looking for " + "data", dev) # There was no indication on kernel cmdline or data # in the seeddir suggesting this handler should be used. diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 95133236..70a577bc 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -229,11 +229,9 @@ def extract_authorized_keys(username): except (IOError, OSError): # Give up and use a default key filename auth_key_fn = os.path.join(ssh_dir, 'authorized_keys') - util.logexc(LOG, ("Failed extracting 'AuthorizedKeysFile'" - " in ssh config" - " from %r, using 'AuthorizedKeysFile' file" - " %r instead"), - DEF_SSHD_CFG, auth_key_fn) + util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh " + "config from %r, using 'AuthorizedKeysFile' file " + "%r instead", DEF_SSHD_CFG, auth_key_fn) return (auth_key_fn, parse_authorized_keys(auth_key_fn)) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 543d247f..df49cabb 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Scott Moser @@ -154,9 +154,8 @@ class Init(object): try: util.chownbyname(log_file, u, g) except OSError: - util.logexc(LOG, ("Unable to change the ownership" - " of %s to user %s, group %s"), - log_file, u, g) + util.logexc(LOG, "Unable to change the ownership of %s to " + "user %s, group %s", log_file, u, g) def read_cfg(self, extra_fns=None): # None check so that we don't keep on re-loading if empty diff --git a/cloudinit/util.py b/cloudinit/util.py index b27b3567..c45aae06 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. 
# # Author: Scott Moser @@ -219,8 +219,7 @@ def fork_cb(child_cb, *args): child_cb(*args) os._exit(0) # pylint: disable=W0212 except: - logexc(LOG, ("Failed forking and" - " calling callback %s"), + logexc(LOG, "Failed forking and calling callback %s", type_utils.obj_name(child_cb)) os._exit(1) # pylint: disable=W0212 else: -- cgit v1.2.3 From 6b7e65e4f57902c25363c78a7e47aa2caa579b7b Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Thu, 18 Jul 2013 15:37:18 -0600 Subject: Added SmartOS datasource and unit tests. --- cloudinit/settings.py | 1 + cloudinit/sources/DataSourceSmartOS.py | 172 +++++++++++++++++++++ cloudinit/util.py | 18 +++ tests/unittests/test_datasource/test_smartos.py | 191 ++++++++++++++++++++++++ 4 files changed, 382 insertions(+) create mode 100644 cloudinit/sources/DataSourceSmartOS.py create mode 100644 tests/unittests/test_datasource/test_smartos.py (limited to 'cloudinit/util.py') diff --git a/cloudinit/settings.py b/cloudinit/settings.py index dc371cd2..9f6badae 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -37,6 +37,7 @@ CFG_BUILTIN = { 'MAAS', 'Ec2', 'CloudStack', + 'SmartOS', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py new file mode 100644 index 00000000..f9b724eb --- /dev/null +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -0,0 +1,172 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2013 Canonical Ltd. +# +# Author: Ben Howard +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# +# Datasource for provisioning on SmartOS. This works on Joyent +# and public/private Clouds using SmartOS. +# +# SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests. +# The meta-data is transmitted via key/value pairs made by +# requests on the console. For example, to get the hostname, you +# would send "GET hostname" on /dev/ttyS1. 
+# + + +import os +import os.path +import serial +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util + + +TTY_LOC = '/dev/ttyS1' +LOG = logging.getLogger(__name__) + + +class DataSourceSmartOS(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.seed_dir = os.path.join(paths.seed_dir, 'sdc') + self.seed = None + self.is_smartdc = None + + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) + + def get_data(self): + md = {} + ud = "" + + if not os.path.exists(TTY_LOC): + LOG.debug("Host does not appear to be on SmartOS") + return False + self.seed = TTY_LOC + + system_uuid, system_type = dmi_data() + if 'smartdc' not in system_type.lower(): + LOG.debug("Host is not on SmartOS") + return False + self.is_smartdc = True + + hostname = query_data("hostname", strip=True) + if not hostname: + hostname = system_uuid + + md['local-hostname'] = hostname + md['instance-id'] = system_uuid + md['public-keys'] = query_data("root_authorized_keys", strip=True) + ud = query_data("user-script") + md['iptables_disable'] = query_data("disable_iptables_flag", + strip=True) + md['motd_sys_info'] = query_data("enable_motd_sys_info", strip=True) + + self.metadata = md + self.userdata_raw = ud + return True + + def get_instance_id(self): + return self.metadata['instance-id'] + + +def get_serial(): + """This is replaced in unit testing, allowing us to replace + serial.Serial with a mocked class""" + return serial.Serial() + + +def query_data(noun, strip=False): + """Makes a request to via the serial console via "GET " + + In the response, the first line is the status, while subsequent lines + are is the value. A blank line with a "." is used to indicate end of + response. + + The timeout value of 60 seconds should never be hit. The value + is taken from SmartOS own provisioning tools. Since we are reading + each line individually up until the single ".", the transfer is + usually very fast (i.e. microseconds) to get the response. 
+ """ + if not noun: + return False + + ser = get_serial() + ser.port = '/dev/ttyS1' + ser.open() + if not ser.isOpen(): + LOG.debug("Serial console is not open") + return False + + ser.write("GET %s\n" % noun.rstrip()) + status = str(ser.readline()).rstrip() + response = [] + eom_found = False + + if 'SUCCESS' not in status: + ser.close() + return None + + while not eom_found: + m = ser.readline() + if m.rstrip() == ".": + eom_found = True + else: + response.append(m) + + ser.close() + if not strip: + return "".join(response) + else: + return "".join(response).rstrip() + + return None + + +def dmi_data(): + sys_uuid, sys_type = None, None + dmidecode_path = util.which('dmidecode') + if not dmidecode_path: + return False + + sys_uuid_cmd = [dmidecode_path, "-s", "system-uuid"] + try: + LOG.debug("Getting hostname from dmidecode") + (sys_uuid, _err) = util.subp(sys_uuid_cmd) + except Exception as e: + util.logexc(LOG, "Failed to get system UUID", e) + + sys_type_cmd = [dmidecode_path, "-s", "system-product-name"] + try: + LOG.debug("Determining hypervisor product name via dmidecode") + (sys_type, _err) = util.subp(sys_type_cmd) + except Exception as e: + util.logexc(LOG, "Failed to get system UUID", e) + + return sys_uuid.lower(), sys_type + + +# Used to match classes to dependencies +datasources = [ + (DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/util.py b/cloudinit/util.py index c45aae06..7163225f 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1743,3 +1743,21 @@ def get_mount_info(path, log=LOG): mountinfo_path = '/proc/%s/mountinfo' % os.getpid() lines = load_file(mountinfo_path).splitlines() return parse_mount_info(path, lines, log) + +def which(program): + # Return path of program for execution if found in path + def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + fpath, fname = os.path.split(program) + if fpath: + if is_exe(program): + return program + else: + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + exe_file = os.path.join(path, program) + if is_exe(exe_file): + return exe_file + + return None diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py new file mode 100644 index 00000000..494f9828 --- /dev/null +++ b/tests/unittests/test_datasource/test_smartos.py @@ -0,0 +1,191 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2013 Canonical Ltd. +# +# Author: Ben Howard +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# +# This is a testcase for the SmartOS datasource. It replicates a serial +# console and acts like the SmartOS console does in order to validate +# return responses. 
+# + +from cloudinit import helpers +from cloudinit.sources import DataSourceSmartOS + +from mocker import MockerTestCase +import uuid + +mock_returns = { + 'hostname': 'test-host', + 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', + 'disable_iptables_flag': False, + 'enable_motd_sys_info': False, + 'system_uuid': str(uuid.uuid4()), + 'smartdc': 'smartdc', + 'userdata': """ +#!/bin/sh +/bin/true +""", +} + + +class MockSerial(object): + """Fake a serial terminal for testing the code that + interfaces with the serial""" + + port = None + + def __init__(self): + self.last = None + self.last = None + self.new = True + self.count = 0 + self.mocked_out = [] + + def open(self): + return True + + def close(self): + return True + + def isOpen(self): + return True + + def write(self, line): + line = line.replace('GET ', '') + self.last = line.rstrip() + + def readline(self): + if self.new: + self.new = False + if self.last in mock_returns: + return 'SUCCESS\n' + else: + return 'NOTFOUND %s\n' % self.last + + if self.last in mock_returns: + if not self.mocked_out: + self.mocked_out = [x for x in self._format_out()] + print self.mocked_out + + if len(self.mocked_out) > self.count: + self.count += 1 + return self.mocked_out[self.count - 1] + + def _format_out(self): + if self.last in mock_returns: + try: + for l in mock_returns[self.last].splitlines(): + yield "%s\n" % l + except: + yield "%s\n" % mock_returns[self.last] + + yield '\n' + yield '.' + + +class TestSmartOSDataSource(MockerTestCase): + def setUp(self): + # makeDir comes from MockerTestCase + self.tmp = self.makeDir() + + # patch cloud_dir, so our 'seed_dir' is guaranteed empty + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + + self.unapply = [] + super(TestSmartOSDataSource, self).setUp() + + def tearDown(self): + apply_patches([i for i in reversed(self.unapply)]) + super(TestSmartOSDataSource, self).tearDown() + + def apply_patches(self, patches): + ret = apply_patches(patches) + self.unapply += ret + + def _get_ds(self): + + def _get_serial(): + return MockSerial() + + def _dmi_data(): + return mock_returns['system_uuid'], 'smartdc' + + data = {'sys_cfg': {}} + mod = DataSourceSmartOS + self.apply_patches([(mod, 'get_serial', _get_serial)]) + self.apply_patches([(mod, 'dmi_data', _dmi_data)]) + dsrc = mod.DataSourceSmartOS( + data.get('sys_cfg', {}), distro=None, paths=self.paths) + return dsrc + + def test_seed(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals('/dev/ttyS1', dsrc.seed) + + def test_issmartdc(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue(dsrc.is_smartdc) + + def test_uuid(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(mock_returns['system_uuid'], + dsrc.metadata['instance-id']) + + def test_root_keys(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(mock_returns['root_authorized_keys'], + dsrc.metadata['public-keys']) + + def test_hostname(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(mock_returns['hostname'], + dsrc.metadata['local-hostname']) + + def test_disable_iptables_flag(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(str(mock_returns['disable_iptables_flag']), + dsrc.metadata['iptables_disable']) + + def test_motd_sys_info(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + 
self.assertTrue(ret) + self.assertEquals(str(mock_returns['enable_motd_sys_info']), + dsrc.metadata['motd_sys_info']) + + +def apply_patches(patches): + ret = [] + for (ref, name, replace) in patches: + if replace is None: + continue + orig = getattr(ref, name) + setattr(ref, name, replace) + ret.append((ref, name, orig)) + return ret -- cgit v1.2.3 From 27f096a1ab2e60222f85d87c961e388fdefaf92c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 21 Jul 2013 09:34:26 -0700 Subject: Use a util helper to do prefix/suffix removal. --- cloudinit/handlers/boot_hook.py | 8 +++----- cloudinit/util.py | 8 ++++++++ 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py index 5e7b6204..1848ce2c 100644 --- a/cloudinit/handlers/boot_hook.py +++ b/cloudinit/handlers/boot_hook.py @@ -48,11 +48,9 @@ class BootHookPartHandler(handlers.Handler): def _write_part(self, payload, filename): filename = util.clean_filename(filename) filepath = os.path.join(self.boothook_dir, filename) - contents = util.dos2unix(payload) - if contents.startswith(BOOTHOOK_PREFIX): - real_start = len(BOOTHOOK_PREFIX) + 1 - contents = contents[real_start:] - util.write_file(filepath, contents, 0700) + contents = util.strip_prefix_suffix(util.dos2unix(payload), + prefix=BOOTHOOK_PREFIX) + util.write_file(filepath, contents.lstrip(), 0700) return filepath def handle_part(self, _data, ctype, filename, # pylint: disable=W0221 diff --git a/cloudinit/util.py b/cloudinit/util.py index c45aae06..47d71ef4 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1530,6 +1530,14 @@ def shellify(cmdlist, add_header=True): return content +def strip_prefix_suffix(line, prefix=None, suffix=None): + if prefix and line.startswith(prefix): + line = line[len(prefix):] + if suffix and line.endswith(suffix): + line = line[:-len(suffix)] + return line + + def is_container(): """ Checks to see if this code running in a container of some sort -- cgit v1.2.3 From 4b41f7dc3d37d5bf7397bbc34d8a5e0c56798ac7 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 23 Jul 2013 16:33:46 -0600 Subject: Changed get_serial to be fully parameterized and return the serial initialized. Added a mapping of attributes between cloud-init and smartos. 
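
As a rough illustration of how the reworked interface is meant to be consumed (a sketch only, not part of this commit: the map entries mirror the SMARTOS_ATTRIB_MAP introduced below, while collect_metadata and the query_data argument are hypothetical stand-ins for the helper this change parameterizes):

    # Sketch: a key map plus a parameterized query function keeps the
    # datasource's get_data() loop free of per-key special cases.
    ATTRIB_MAP = {
        # cloud-init key : (SmartOS key, strip line endings)
        'local-hostname': ('hostname', True),
        'public-keys': ('root_authorized_keys', True),
        'user-data': ('user-data', False),
    }

    def collect_metadata(seed_device, seed_timeout, query_data):
        # query_data(noun, seed_device, seed_timeout, strip=...) is assumed
        # to behave like the helper reworked in this commit.
        md = {}
        for ci_key, (smartos_key, strip) in ATTRIB_MAP.items():
            md[ci_key] = query_data(smartos_key, seed_device, seed_timeout,
                                    strip=strip)
        return md
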
--- cloudinit/sources/DataSourceSmartOS.py | 64 ++++++++++++++----------- cloudinit/util.py | 5 +- tests/unittests/test_datasource/test_smartos.py | 10 ++-- 3 files changed, 43 insertions(+), 36 deletions(-) (limited to 'cloudinit/util.py') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 129020ec..d6589f57 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -27,25 +27,37 @@ # -import os -import os.path -import serial from cloudinit import log as logging from cloudinit import sources from cloudinit import util +import os +import os.path +import serial DEF_TTY_LOC = '/dev/ttyS1' -TTY_LOC = None +DEF_TTY_TIMEOUT = 60 LOG = logging.getLogger(__name__) +SMARTOS_ATTRIB_MAP = { + #Cloud-init Key : (SmartOS Key, Strip line endings) + 'local-hostname': ('hostname', True), + 'public-keys': ('root_authorized_keys', True), + 'user-script': ('user-script', False), + 'user-data': ('user-data', False), + 'iptables_disable': ('iptables_disable', True), + 'motd_sys_info': ('motd_sys_info', True), +} + class DataSourceSmartOS(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'sdc') - self.seed = None self.is_smartdc = None + self.seed = self.sys_cfg.get("serial_device", DEF_TTY_LOC) + self.seed_timeout = self.sys_cfg.get("serial_timeout", + DEF_TTY_TIMEOUT) def __str__(self): root = sources.DataSource.__str__(self) @@ -55,30 +67,25 @@ class DataSourceSmartOS(sources.DataSource): md = {} ud = "" - TTY_LOC = self.sys_cfg.get("serial_device", DEF_TTY_LOC) - if not os.path.exists(TTY_LOC): + if not os.path.exists(self.seed): LOG.debug("Host does not appear to be on SmartOS") return False - self.seed = TTY_LOC + self.seed = self.seed system_uuid, system_type = dmi_data() if 'smartdc' not in system_type.lower(): LOG.debug("Host is not on SmartOS") return False self.is_smartdc = True + md['instance-id'] = system_uuid - hostname = query_data("hostname", strip=True) - if not hostname: - hostname = system_uuid + for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems(): + smartos_noun, strip = attribute + md[ci_noun] = query_data(smartos_noun, self.seed, + self.seed_timeout, strip=strip) - md['local-hostname'] = hostname - md['instance-id'] = system_uuid - md['public-keys'] = query_data("root_authorized_keys", strip=True) - md['user-script'] = query_data("user-script") - md['user-data'] = query_data("user-script") - md['iptables_disable'] = query_data("disable_iptables_flag", - strip=True) - md['motd_sys_info'] = query_data("enable_motd_sys_info", strip=True) + if not md['local-hostname']: + md['local-hostname'] = system_uuid if md['user-data']: ud = md['user-data'] @@ -93,7 +100,7 @@ class DataSourceSmartOS(sources.DataSource): return self.metadata['instance-id'] -def get_serial(): +def get_serial(seed_device, seed_timeout): """This is replaced in unit testing, allowing us to replace serial.Serial with a mocked class @@ -102,18 +109,17 @@ def get_serial(): each line individually up until the single ".", the transfer is usually very fast (i.e. microseconds) to get the response. 
""" - if not TTY_LOC: - raise AttributeError("TTY_LOC value is not set") - - _ret = serial.Serial(TTY_LOC, timeout=60) - if not _ret.isOpen(): - raise SystemError("Unable to open %s" % TTY_LOC) + if not seed_device: + raise AttributeError("seed_device value is not set") - return _ret + ser = serial.Serial(seed_device, timeout=seed_timeout) + if not ser.isOpen(): + raise SystemError("Unable to open %s" % seed_device) + return ser -def query_data(noun, strip=False): +def query_data(noun, seed_device, seed_timeout, strip=False): """Makes a request to via the serial console via "GET " In the response, the first line is the status, while subsequent lines @@ -124,7 +130,7 @@ def query_data(noun, strip=False): if not noun: return False - ser = get_serial() + ser = get_serial(seed_device, seed_timeout) ser.write("GET %s\n" % noun.rstrip()) status = str(ser.readline()).rstrip() response = [] diff --git a/cloudinit/util.py b/cloudinit/util.py index 7163225f..a2fbc004 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1744,13 +1744,14 @@ def get_mount_info(path, log=LOG): lines = load_file(mountinfo_path).splitlines() return parse_mount_info(path, lines, log) + def which(program): # Return path of program for execution if found in path def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) - fpath, fname = os.path.split(program) - if fpath: + _fpath, _ = os.path.split(program) + if _fpath: if is_exe(program): return program else: diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 494f9828..6c12f1e2 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -31,8 +31,8 @@ import uuid mock_returns = { 'hostname': 'test-host', 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', - 'disable_iptables_flag': False, - 'enable_motd_sys_info': False, + 'disable_iptables_flag': None, + 'enable_motd_sys_info': None, 'system_uuid': str(uuid.uuid4()), 'smartdc': 'smartdc', 'userdata': """ @@ -118,7 +118,7 @@ class TestSmartOSDataSource(MockerTestCase): def _get_ds(self): - def _get_serial(): + def _get_serial(*_): return MockSerial() def _dmi_data(): @@ -169,14 +169,14 @@ class TestSmartOSDataSource(MockerTestCase): dsrc = self._get_ds() ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(str(mock_returns['disable_iptables_flag']), + self.assertEquals(mock_returns['disable_iptables_flag'], dsrc.metadata['iptables_disable']) def test_motd_sys_info(self): dsrc = self._get_ds() ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(str(mock_returns['enable_motd_sys_info']), + self.assertEquals(mock_returns['enable_motd_sys_info'], dsrc.metadata['motd_sys_info']) -- cgit v1.2.3 From 66490ebb92af59d148f79aae42a2eddc1ecedb7e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 30 Jul 2013 14:23:10 -0400 Subject: add util.log_time helper The reason for this is that more and more things I was wanting to be able to see how long they took. This puts that time logic into a single place. It also supports (by default) reading from /proc/uptime as the timing mechanism. While that is almost certainly slower than time.time(), it does give millisecond granularity and is not affected by 'ntpdate' having run in between the two events. 
--- ChangeLog | 3 +++ bin/cloud-init | 5 ++++- cloudinit/config/cc_growpart.py | 3 ++- cloudinit/config/cc_resizefs.py | 11 +++++------ cloudinit/sources/DataSourceAzure.py | 14 ++++++-------- cloudinit/util.py | 35 +++++++++++++++++++++++++++++++++++ 6 files changed, 55 insertions(+), 16 deletions(-) (limited to 'cloudinit/util.py') diff --git a/ChangeLog b/ChangeLog index a255d24a..68d03376 100644 --- a/ChangeLog +++ b/ChangeLog @@ -11,6 +11,9 @@ - always finalize handlers even if processing failed (LP: #1203368) - support merging into cloud-config via jsonp. (LP: #1200476) - add datasource 'SmartOS' for Joyent Cloud. Adds a dependency on serial. + - add 'log_time' helper to util for timing how long things take + which also reads from uptime. uptime is useful as clock may change during + boot due to ntp. 0.7.2: - add a debian watch file - add 'sudo' entry to ubuntu's default user (LP: #1080717) diff --git a/bin/cloud-init b/bin/cloud-init index c5a5b949..bd9ddc04 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -502,7 +502,10 @@ def main(): signal_handler.attach_handlers() (name, functor) = args.action - return functor(name, args) + + return util.log_time(logfunc=LOG.debug, + msg="cloud-init mode '%s'" % name, uptime=True, + func=functor, args=(name, args)) if __name__ == '__main__': diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 4f8c8f80..ba6c58af 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -264,7 +264,8 @@ def handle(_name, cfg, _cloud, log, _args): raise e return - resized = resize_devices(resizer, devices) + resized = util.log_time(logfunc=log.debug, msg="resize_devices", + func=resize_devices, args=(resizer, devices)) for (entry, action, msg) in resized: if action == RESIZE.CHANGED: log.info("'%s' resized: %s" % (entry, msg)) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index b4ee16b2..56040fdd 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -21,7 +21,6 @@ import errno import os import stat -import time from cloudinit.settings import PER_ALWAYS from cloudinit import util @@ -120,9 +119,12 @@ def handle(name, cfg, _cloud, log, args): if resize_root == NOBLOCK: # Fork to a child that will run # the resize command - util.fork_cb(do_resize, resize_cmd, log) + util.fork_cb( + util.log_time(logfunc=log.debug, msg="backgrounded Resizing", + func=do_resize, args=(resize_cmd, log))) else: - do_resize(resize_cmd, log) + util.log_time(logfunc=log.debug, msg="Resizing", + func=do_resize, args=(resize_cmd, log)) action = 'Resized' if resize_root == NOBLOCK: @@ -132,13 +134,10 @@ def handle(name, cfg, _cloud, log, args): def do_resize(resize_cmd, log): - start = time.time() try: util.subp(resize_cmd) except util.ProcessExecutionError: util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd) raise - tot_time = time.time() - start - log.debug("Resizing took %.3f seconds", tot_time) # TODO(harlowja): Should we add a fsck check after this to make # sure we didn't corrupt anything? 
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 2f28702e..f906b8fa 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -138,13 +138,11 @@ class DataSourceAzureNet(sources.DataSource): bname = pk['fingerprint'] + ".crt" fp_files += [os.path.join(mycfg['data_dir'], bname)] - start = time.time() - missing = wait_for_files(wait_for + fp_files) + missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", + func=wait_for_files, + args=(wait_for + fp_files,)) if len(missing): LOG.warn("Did not find files, but going on: %s", missing) - else: - LOG.debug("waited %.3f seconds for %d files to appear", - time.time() - start, len(wait_for)) if shcfgxml in missing: LOG.warn("SharedConfig.xml missing, using static instance-id") @@ -206,11 +204,11 @@ def apply_hostname_bounce(hostname, policy, interface, command, command = BOUNCE_COMMAND LOG.debug("pubhname: publishing hostname [%s]", msg) - start = time.time() shell = not isinstance(command, (list, tuple)) # capture=False, see comments in bug 1202758 and bug 1206164. - (output, err) = util.subp(command, shell=shell, capture=False, env=env) - LOG.debug("publishing hostname took %.3f seconds", time.time() - start) + util.log_time(logfunc=LOG.debug, msg="publishing hostname", + func=util.subp, kwargs={'command': command, 'shell': shell, + 'capture': False, 'env': env}) def crtfile_to_pubkey(fname): diff --git a/cloudinit/util.py b/cloudinit/util.py index 8542fe27..b0eb56e6 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1770,3 +1770,38 @@ def which(program): return exe_file return None + + +def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=True): + if args is None: + args = [] + if kwargs is None: + kwargs = {} + + start = time.time() + + ustart = None + if get_uptime: + try: + ustart = float(uptime()) + except ValueError: + pass + + try: + ret = func(*args, **kwargs) + finally: + delta = time.time() - start + if ustart is not None: + try: + udelta = float(uptime()) - ustart + except ValueError: + udelta = "N/A" + + tmsg = " took %0.3f seconds" % delta + if get_uptime: + tmsg += "(%0.2f)" % udelta + try: + logfunc(msg + tmsg) + except: + pass + return ret -- cgit v1.2.3 From 0ca150b08433fbc57e10d599a46e300142c955c5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 30 Jul 2013 14:28:09 -0400 Subject: set reading /proc/uptime to false by default. reading /proc/uptime is going to be slower, and no reason to do it on most things. Better to only do it when you suspect maybe a need for it. 
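
A minimal usage sketch of the helper as it stands after this change (illustration only, assuming both log_time patches are applied; reread_config is a hypothetical callable, and the keyword names match the log_time() added to cloudinit/util.py):

    import logging

    from cloudinit import util

    LOG = logging.getLogger(__name__)

    def reread_config():
        return util.load_file("/etc/cloud/cloud.cfg")

    # Default after this change: wall-clock (time.time()) timing only.
    # log_time() runs the callable, logs "<msg> took N.NNN seconds", and
    # returns whatever the callable returned.
    cfg = util.log_time(logfunc=LOG.debug, msg="re-reading cloud.cfg",
                        func=reread_config)

    # Opt in to the slower /proc/uptime reading only where a clock step
    # during boot (e.g. ntpdate) could skew the measurement.
    cfg = util.log_time(logfunc=LOG.debug, msg="re-reading cloud.cfg",
                        get_uptime=True, func=reread_config)
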
--- bin/cloud-init | 5 ++--- cloudinit/sources/DataSourceAzure.py | 5 +++-- cloudinit/util.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'cloudinit/util.py') diff --git a/bin/cloud-init b/bin/cloud-init index bd9ddc04..b4f9fd07 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -503,9 +503,8 @@ def main(): (name, functor) = args.action - return util.log_time(logfunc=LOG.debug, - msg="cloud-init mode '%s'" % name, uptime=True, - func=functor, args=(name, args)) + return util.log_time(logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, + get_uptime=True, func=functor, args=(name, args)) if __name__ == '__main__': diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index f906b8fa..1a74de21 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -207,8 +207,9 @@ def apply_hostname_bounce(hostname, policy, interface, command, shell = not isinstance(command, (list, tuple)) # capture=False, see comments in bug 1202758 and bug 1206164. util.log_time(logfunc=LOG.debug, msg="publishing hostname", - func=util.subp, kwargs={'command': command, 'shell': shell, - 'capture': False, 'env': env}) + get_uptime=True, func=util.subp, + kwargs={'command': command, 'shell': shell, 'capture': False, + 'env': env}) def crtfile_to_pubkey(fname): diff --git a/cloudinit/util.py b/cloudinit/util.py index b0eb56e6..4a74ba57 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1772,7 +1772,7 @@ def which(program): return None -def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=True): +def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False): if args is None: args = [] if kwargs is None: -- cgit v1.2.3
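
For reference, the serial metadata exchange performed by the SmartOS datasource added earlier in this series can be summarized with a short sketch (not part of any commit; query() is a hypothetical standalone version of the query_data() helper in cloudinit/sources/DataSourceSmartOS.py and assumes pyserial is available):

    import serial

    def query(noun, device='/dev/ttyS1', timeout=60):
        # One request/response cycle: send "GET <noun>", read a status line,
        # then collect value lines until a bare "." marks end of message.
        ser = serial.Serial(device, timeout=timeout)
        ser.write("GET %s\n" % noun.rstrip())
        status = str(ser.readline()).rstrip()
        if 'SUCCESS' not in status:
            ser.close()
            return None
        response = []
        while True:
            line = ser.readline()
            if line.rstrip() == ".":
                break
            response.append(line)
        ser.close()
        return "".join(response)
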