From 914c6e86f1689ae186a0db836e7f0304d72c38b4 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 18 Oct 2012 10:34:38 -0700
Subject: Start fixing how boto and our url fetching cannot use SSL correctly
 (they cannot do validation due to the underlying usage of urllib/httplib).
 Adjust cloud-init's url fetching to use urllib3, which can. For now also
 take the metadata/userdata extraction code from boto (a small amount) and
 fix it in a new local file called 'ec2_utils'.

---
 Requires                |   3 ++
 cloudinit/ec2_utils.py  | 135 ++++++++++++++++++++++++++++++++++++++++++++++++
 cloudinit/url_helper.py |  97 ++++++++++++----------------------
 3 files changed, 171 insertions(+), 64 deletions(-)
 create mode 100644 cloudinit/ec2_utils.py

diff --git a/Requires b/Requires
index 4f9311d5..b23dd4e9 100644
--- a/Requires
+++ b/Requires
@@ -26,3 +26,6 @@ pyyaml
 
 # The new main entrypoint uses argparse instead of optparse
 argparse
+
+# Urllib3 handles ssl correctly!
+urllib3
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
new file mode 100644
index 00000000..ef7fac7d
--- /dev/null
+++ b/cloudinit/ec2_utils.py
@@ -0,0 +1,135 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import urllib
+
+from cloudinit import log as logging
+from cloudinit import url_helper as uh
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+# For now take this and fix it...
+class LazyLoadMetadata(dict):
+    def __init__(self, url, fetch_timeout, num_retries, ssl_details):
+        self._url = url
+        self._num_retries = num_retries
+        self._ssl_details = ssl_details
+        self._fetch_timeout = fetch_timeout
+        self._leaves = {}
+        self._dicts = []
+        response = uh.readurl(url, timeout=fetch_timeout,
+                              retries=num_retries, ssl_details=ssl_details)
+        data = str(response)
+        if data:
+            fields = data.split('\n')
+            for field in fields:
+                if field.endswith('/'):
+                    key = field[0:-1]
+                    self._dicts.append(key)
+                else:
+                    p = field.find('=')
+                    if p > 0:
+                        key = field[p + 1:]
+                        resource = field[0:p] + '/openssh-key'
+                    else:
+                        key = resource = field
+                    self._leaves[key] = resource
+                self[key] = None
+
+    def _materialize(self):
+        for key in self:
+            self[key]
+
+    def __getitem__(self, key):
+        if key not in self:
+            # Allow dict to throw the KeyError
+            return super(LazyLoadMetadata, self).__getitem__(key)
+
+        # Already loaded
+        val = super(LazyLoadMetadata, self).__getitem__(key)
+        if val is not None:
+            return val
+
+        if key in self._leaves:
+            resource = self._leaves[key]
+            new_url = self._url + urllib.quote(resource, safe="/:")
+            response = uh.readurl(new_url, retries=self._num_retries,
+                                  timeout=self._fetch_timeout,
+                                  ssl_details=self._ssl_details)
+            val = str(response)
+            if val and val[0] == '{':
+                val = json.loads(val)
+            else:
+                p = val.find('\n')
+                if p > 0:
+                    val = val.split('\n')
+            self[key] = val
+        elif key in self._dicts:
+            new_url = self._url + key + '/'
+            self[key] = LazyLoadMetadata(new_url,
+                                         num_retries=self._num_retries,
+                                         fetch_timeout=self._fetch_timeout,
+                                         ssl_details=self._ssl_details)
+
+        return super(LazyLoadMetadata, self).__getitem__(key)
+
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def values(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).values()
+
+    def items(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).items()
+
+    def __str__(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).__str__()
+
+    def __repr__(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).__repr__()
+
+
+def get_instance_userdata(url, version='latest', ssl_details=None):
+    ud_url = '%s/%s/user-data' % (url, version)
+    try:
+        response = uh.readurl(ud_url, timeout=5,
+                              retries=10, ssl_details=ssl_details)
+        return str(response)
+    except Exception as e:
+        util.logexc(LOG, "Failed fetching url %s", ud_url)
+        return None
+
+
+def get_instance_metadata(url, version='latest', ssl_details=None):
+    md_url = '%s/%s/meta-data' % (url, version)
+    try:
+        return LazyLoadMetadata(md_url, timeout=5,
+                                retries=10, ssl_details=ssl_details)
+    except Exception as e:
+        util.logexc(LOG, "Failed fetching url %s", md_url)
+        return None
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index f3e3fd7e..e3f63021 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -26,7 +26,9 @@ import errno
 import socket
 import time
 import urllib
-import urllib2
+
+from urllib3 import connectionpool
+from urllib3 import util
 
 from cloudinit import log as logging
 from cloudinit import version
@@ -68,71 +70,38 @@ class UrlResponse(object):
             return False
 
 
-def readurl(url, data=None, timeout=None,
-            retries=0, sec_between=1, headers=None):
-
+def readurl(url, data=None, timeout=None, retries=0,
+            headers=None, ssl_details=None):
     req_args = {}
-    req_args['url'] = url
-    if data is not None:
-        req_args['data'] = urllib.urlencode(data)
-
-    if not headers:
-        headers = {
-            'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
+    p_url = util.parse_url(url)
+    if p_url.scheme == 'https' and ssl_details:
+        for k in ['key_file', 'cert_file', 'cert_reqs', 'ca_certs']:
+            if k in ssl_details:
+                req_args[k] = ssl_details[k]
+    with closing(connectionpool.connection_from_url(url, **req_args)) as req_p:
+        retries = max(int(retries), 0)
+        attempts = retries + 1
+        LOG.debug(("Attempting to open '%s' with %s attempts"
+                   " (%s retries, timeout=%s) to be performed"),
+                  url, attempts, retries, timeout)
+        open_args = {
+            'method': 'GET',
+            'retries': retries,
+            'redirect': False,
+            'url': p_url.request_uri,
         }
-
-    req_args['headers'] = headers
-    req = urllib2.Request(**req_args)
-
-    retries = max(retries, 0)
-    attempts = retries + 1
-
-    excepts = []
-    LOG.debug(("Attempting to open '%s' with %s attempts"
-               " (%s retries, timeout=%s) to be performed"),
-              url, attempts, retries, timeout)
-    open_args = {}
-    if timeout is not None:
-        open_args['timeout'] = int(timeout)
-    for i in range(0, attempts):
-        try:
-            with closing(urllib2.urlopen(req, **open_args)) as rh:
-                content = rh.read()
-                status = rh.getcode()
-                if status is None:
-                    # This seems to happen when files are read...
-                    status = 200
-                headers = {}
-                if rh.headers:
-                    headers = dict(rh.headers)
-                LOG.debug("Read from %s (%s, %sb) after %s attempts",
-                          url, status, len(content), (i + 1))
-                return UrlResponse(status, content, headers)
-        except urllib2.HTTPError as e:
-            excepts.append(e)
-        except urllib2.URLError as e:
-            # This can be a message string or
-            # another exception instance
-            # (socket.error for remote URLs, OSError for local URLs).
-            if (isinstance(e.reason, (OSError)) and
-                e.reason.errno == errno.ENOENT):
-                excepts.append(e.reason)
-            else:
-                excepts.append(e)
-        except Exception as e:
-            excepts.append(e)
-        if i + 1 < attempts:
-            LOG.debug("Please wait %s seconds while we wait to try again",
-                      sec_between)
-            time.sleep(sec_between)
-
-    # Didn't work out
-    LOG.debug("Failed reading from %s after %s attempts", url, attempts)
-
-    # It must of errored at least once for code
-    # to get here so re-raise the last error
-    LOG.debug("%s errors occured, re-raising the last one", len(excepts))
-    raise excepts[-1]
+        if data is not None:
+            open_args['body'] = urllib.urlencode(data)
+            open_args['method'] = 'POST'
+        if not headers:
+            headers = {
+                'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
+            }
+        open_args['headers'] = headers
+        if timeout is not None:
+            open_args['timeout'] = max(int(timeout), 0)
+        r = req_p.urlopen(**open_args)
+        return UrlResponse(r.status, r.data, r.headers)
 
 
 def wait_for_url(urls, max_wait=None, timeout=None,
-- 
cgit v1.2.3
From 7c9bbbc9b49425e3ba8e0517908477c58ea51d4b Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 19 Oct 2012 14:06:21 -0700
Subject: Remove the need for boto just for fetching the userdata and
 metadata. Add this crawling functionality to the ec2_utils module; it fully
 crawls (not lazily) the ec2 metadata and parses it in the same manner as
 boto.

1. Make the ec2 and cloudstack datasources call into these.
2. Fix phone_home due to urllib3 change (TBD)

---
 Requires                                  |   5 -
 cloudinit/config/cc_phone_home.py         |   2 +-
 cloudinit/ec2_utils.py                    | 210 +++++++++++++++++-------------
 cloudinit/sources/DataSourceCloudStack.py |   9 +-
 cloudinit/sources/DataSourceEc2.py        |   9 +-
 cloudinit/url_helper.py                   |  16 +--
 6 files changed, 131 insertions(+), 120 deletions(-)

diff --git a/Requires b/Requires
index b23dd4e9..13a5d997 100644
--- a/Requires
+++ b/Requires
@@ -10,11 +10,6 @@ PrettyTable
 # datasource is removed, this is no longer needed
 oauth
 
-# This is used to fetch the ec2 metadata into a easily
-# parseable format, instead of having to have cloud-init perform
-# those same fetchs and decodes and signing (...) that ec2 requires.
-boto
-
 # This is only needed for places where we need to support configs in a manner
 # that the built-in config parser is not sufficent (ie
 # when we need to preserve comments, or do not have a top-level
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index ae1349eb..a268d66f 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -112,7 +112,7 @@ def handle(name, cfg, cloud, log, args):
     }
     url = templater.render_string(url, url_params)
     try:
-        uhelp.readurl(url, data=real_submit_keys, retries=tries, sec_between=3)
+        uhelp.readurl(url, data=real_submit_keys, retries=tries)
     except:
         util.logexc(log, ("Failed to post phone home data to"
                           " %s in %s tries"), url, tries)
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index ef7fac7d..b9d7a2f7 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -16,6 +16,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+from urlparse import (urlparse, urlunparse)
+
 import json
 import urllib
 
@@ -26,110 +28,132 @@ from cloudinit import util
 LOG = logging.getLogger(__name__)
 
 
-# For now take this and fix it...
-class LazyLoadMetadata(dict):
-    def __init__(self, url, fetch_timeout, num_retries, ssl_details):
-        self._url = url
-        self._num_retries = num_retries
-        self._ssl_details = ssl_details
-        self._fetch_timeout = fetch_timeout
-        self._leaves = {}
-        self._dicts = []
-        response = uh.readurl(url, timeout=fetch_timeout,
-                              retries=num_retries, ssl_details=ssl_details)
-        data = str(response)
-        if data:
-            fields = data.split('\n')
-            for field in fields:
-                if field.endswith('/'):
-                    key = field[0:-1]
-                    self._dicts.append(key)
-                else:
-                    p = field.find('=')
-                    if p > 0:
-                        key = field[p + 1:]
-                        resource = field[0:p] + '/openssh-key'
-                    else:
-                        key = resource = field
-                    self._leaves[key] = resource
-                self[key] = None
-
-    def _materialize(self):
-        for key in self:
-            self[key]
-
-    def __getitem__(self, key):
-        if key not in self:
-            # Allow dict to throw the KeyError
-            return super(LazyLoadMetadata, self).__getitem__(key)
-
-        # Already loaded
-        val = super(LazyLoadMetadata, self).__getitem__(key)
-        if val is not None:
-            return val
-
-        if key in self._leaves:
-            resource = self._leaves[key]
-            new_url = self._url + urllib.quote(resource, safe="/:")
-            response = uh.readurl(new_url, retries=self._num_retries,
-                                  timeout=self._fetch_timeout,
-                                  ssl_details=self._ssl_details)
-            val = str(response)
-            if val and val[0] == '{':
-                val = json.loads(val)
+def combine_url(base, add_on):
+    base_parsed = list(urlparse(base))
+    path = base_parsed[2]
+    if path and not path.endswith("/"):
+        path += "/"
+    path += urllib.quote(str(add_on), safe="/:")
+    base_parsed[2] = path
+    return urlunparse(base_parsed)
+
+
+# See: http://bit.ly/TyoUQs
+class MetadataMaterializer(object):
+    def __init__(self, blob, base_url, **fetch_settings):
+        self._blob = blob
+        self._md = None
+        self._base_url = base_url
+        self._fetch_settings = fetch_settings
+
+    def _parse(self, blob):
+        leaves = {}
+        children = []
+        if not blob:
+            return (leaves, children)
+
+        def has_children(item):
+            if item.endswith("/"):
+                return True
+            else:
+                return False
+
+        def get_name(item):
+            if item.endswith("/"):
+                return item.rstrip("/")
+            return item
+
+        for field in blob.splitlines():
+            field = field.strip()
+            field_name = get_name(field)
+            if not field or not field_name:
+                continue
+            if has_children(field):
+                if field_name not in children:
+                    children.append(field_name)
+            else:
+                contents = field.split("=", 1)
+                resource = field_name
+                if len(contents) > 1:
+                    # What a PITA...
+                    (ident, sub_contents) = contents
+                    checked_ident = util.safe_int(ident)
+                    if checked_ident is not None:
+                        resource = "%s/openssh-key" % (checked_ident)
+                        field_name = sub_contents
+                leaves[field_name] = resource
+        return (leaves, children)
+
+    def materialize(self):
+        if self._md is not None:
+            return self._md
+        self._md = self._materialize(self._blob, self._base_url)
+        return self._md
+
+    def _fetch_url(self, url, **opts):
+        response = uh.readurl(url, **opts)
+        return str(response)
+
+    def _decode_leaf_blob(self, blob):
+        if not blob:
+            return blob
+        stripped_blob = blob.strip()
+        if stripped_blob.startswith("{") and stripped_blob.endswith("}"):
+            # Assume and try with json
+            try:
+                return json.loads(blob)
+            except (ValueError, TypeError):
+                pass
+        if blob.find("\n") != -1:
+            return blob.splitlines()
+        return blob
+
+    def _materialize(self, blob, base_url):
+        (leaves, children) = self._parse(blob)
+        child_contents = {}
+        for c in children:
+            child_url = combine_url(base_url, c)
+            if not child_url.endswith("/"):
+                child_url += "/"
+            child_blob = self._fetch_url(child_url, **self._fetch_settings)
+            child_contents[c] = self._materialize(child_blob, child_url)
+        leaf_contents = {}
+        for (field, resource) in leaves.items():
+            leaf_url = combine_url(base_url, resource)
+            leaf_blob = self._fetch_url(leaf_url, **self._fetch_settings)
+            leaf_contents[field] = self._decode_leaf_blob(leaf_blob)
+        joined = {}
+        joined.update(child_contents)
+        for field in leaf_contents.keys():
+            if field in joined:
+                LOG.warn("Duplicate key found in results from %s", base_url)
             else:
-                p = val.find('\n')
-                if p > 0:
-                    val = val.split('\n')
-            self[key] = val
-        elif key in self._dicts:
-            new_url = self._url + key + '/'
-            self[key] = LazyLoadMetadata(new_url,
-                                         num_retries=self._num_retries,
-                                         fetch_timeout=self._fetch_timeout,
-                                         ssl_details=self._ssl_details)
-
-        return super(LazyLoadMetadata, self).__getitem__(key)
-
-    def get(self, key, default=None):
-        try:
-            return self[key]
-        except KeyError:
-            return default
-
-    def values(self):
-        self._materialize()
-        return super(LazyLoadMetadata, self).values()
-
-    def items(self):
-        self._materialize()
-        return super(LazyLoadMetadata, self).items()
-
-    def __str__(self):
-        self._materialize()
-        return super(LazyLoadMetadata, self).__str__()
-
-    def __repr__(self):
-        self._materialize()
-        return super(LazyLoadMetadata, self).__repr__()
+                joined[field] = leaf_contents[field]
+        return joined
 
 
 def get_instance_userdata(url, version='latest', ssl_details=None):
-    ud_url = '%s/%s/user-data' % (url, version)
+    ud_url = combine_url(url, version)
+    ud_url = combine_url(ud_url, 'user-data')
     try:
         response = uh.readurl(ud_url, timeout=5,
                               retries=10, ssl_details=ssl_details)
         return str(response)
-    except Exception as e:
-        util.logexc(LOG, "Failed fetching url %s", ud_url)
+    except Exception:
+        util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
         return None
 
 
 def get_instance_metadata(url, version='latest', ssl_details=None):
-    md_url = '%s/%s/meta-data' % (url, version)
+    md_url = combine_url(url, version)
+    md_url = combine_url(md_url, 'meta-data')
     try:
-        return LazyLoadMetadata(md_url, timeout=5,
-                                retries=10, ssl_details=ssl_details)
-    except Exception as e:
-        util.logexc(LOG, "Failed fetching url %s", md_url)
+        response = uh.readurl(md_url, timeout=5,
+                              retries=10, ssl_details=ssl_details)
+        materializer = MetadataMaterializer(str(response), md_url,
+                                            timeout=5, retries=10,
+                                            ssl_details=ssl_details)
+        return materializer.materialize()
+    except Exception:
+        util.logexc(LOG, "Failed fetching metadata from url %s", md_url)
         return None
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index f7ffa7cb..2654df53 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -26,8 +26,7 @@ from struct import pack
 import os
 import time
 
-import boto.utils as boto_utils
-
+from cloudinit import ec2_utils
 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import url_helper as uhelp
@@ -116,10 +115,8 @@ class DataSourceCloudStack(sources.DataSource):
         if not self.wait_for_metadata_service():
             return False
         start_time = time.time()
-        self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
-            None, self.metadata_address)
-        self.metadata = boto_utils.get_instance_metadata(self.api_ver,
-            self.metadata_address)
+        self.userdata_raw = ec2_utils.get_instance_userdata(self.metadata_address, self.api_ver)
+        self.metadata = ec2_utils.get_instance_metadata(self.metadata_address, self.api_ver)
         LOG.debug("Crawl of metadata service took %s seconds",
                   int(time.time() - start_time))
         return True
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 3686fa10..204963e7 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -23,8 +23,7 @@
 import os
 import time
 
-import boto.utils as boto_utils
-
+from cloudinit import ec2_utils
 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import url_helper as uhelp
@@ -65,10 +64,8 @@ class DataSourceEc2(sources.DataSource):
         if not self.wait_for_metadata_service():
             return False
         start_time = time.time()
-        self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
-            None, self.metadata_address)
-        self.metadata = boto_utils.get_instance_metadata(self.api_ver,
-            self.metadata_address)
+        self.userdata_raw = ec2_utils.get_instance_userdata(self.metadata_address, self.api_ver)
+        self.metadata = ec2_utils.get_instance_metadata(self.metadata_address, self.api_ver)
         LOG.debug("Crawl of metadata service took %s seconds",
                   int(time.time() - start_time))
         return True
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index e3f63021..2c9d5eef 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -22,11 +22,10 @@
 
 from contextlib import closing
 
-import errno
-import socket
 import time
 import urllib
 
+from urllib3 import exceptions
 from urllib3 import connectionpool
 from urllib3 import util
 
@@ -91,7 +90,10 @@ def readurl(url, data=None, timeout=None, retries=0,
             'url': p_url.request_uri,
         }
         if data is not None:
-            open_args['body'] = urllib.urlencode(data)
+            if isinstance(data, (str, basestring)):
+                open_args['body'] = data
+            else:
+                open_args['body'] = urllib.urlencode(data)
             open_args['method'] = 'POST'
         if not headers:
             headers = {
@@ -112,7 +114,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
     max_wait: roughly the maximum time to wait before giving up
         The max time is *actually* len(urls)*timeout as each url will
         be tried once and given the timeout provided.
-    timeout: the timeout provided to urllib2.urlopen
+    timeout: the timeout provided to urlopen
     status_cb: call method with string message when a url is not available
     headers_cb: call method with single argument of url to get headers
                 for request.
@@ -174,12 +176,8 @@ def wait_for_url(urls, max_wait=None, timeout=None,
                 e = ValueError(reason)
             else:
                 return url
-        except urllib2.HTTPError as e:
+        except exceptions.HTTPError as e:
             reason = "http error [%s]" % e.code
-        except urllib2.URLError as e:
-            reason = "url error [%s]" % e.reason
-        except socket.timeout as e:
-            reason = "socket timeout [%s]" % e
         except Exception as e:
             reason = "unexpected error [%s]" % e
-- 
cgit v1.2.3
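A usage sketch (not part of the original patch): the boto-free helpers can now be called directly with the metadata service base url, and both return None on failure. The address and API version below are the usual EC2 defaults, shown here as an assumption.

    from cloudinit import ec2_utils

    base_url = 'http://169.254.169.254'
    userdata = ec2_utils.get_instance_userdata(base_url, version='2009-04-04')
    metadata = ec2_utils.get_instance_metadata(base_url, version='2009-04-04')
    # metadata is a plain, fully-crawled dict, e.g. metadata['instance-id']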
verification can not occur!") + else: + if 'ca_certs' in ssl_details and ssl_details['ca_certs']: + req_args['verify'] = ssl_details['ca_certs'] else: - open_args['body'] = urllib.urlencode(data) - open_args['method'] = 'POST' - if not headers: - headers = { - 'User-Agent': 'Cloud-Init/%s' % (version.version_string()), - } - open_args['headers'] = headers - if timeout is not None: - open_args['timeout'] = max(int(timeout), 0) - r = req_p.urlopen(**open_args) - return UrlResponse(r.status, r.data, r.headers) + req_args['verify'] = True + if 'cert_file' in ssl_details and 'key_file' in ssl_details: + req_args['cert'] = [ssl_details['cert_file'], + ssl_details['key_file']] + req_args['allow_redirects'] = False + req_args['method'] = 'GET' + if timeout is not None: + req_args['timeout'] = max(float(timeout), 0) + if data: + req_args['method'] = 'POST' + # It doesn't seem like config + # was added in older library versions, thus we + # need to manually do the retries if it wasn't + manual_tries = 1 + if CONFIG_ENABLED: + req_config = {} + req_config['store_cookies'] = False + if retries: + req_config['max_retries'] = max(int(retries), 0) + req_args['config'] = req_config + else: + if retries: + manual_tries = max(int(retries) + 1, 1) + if not headers: + headers = { + 'User-Agent': 'Cloud-Init/%s' % (version.version_string()), + } + req_args['headers'] = headers + LOG.debug("Attempting to open '%s' with %s configuration", url, req_args) + if data: + # Do this after the log (it might be large) + req_args['data'] = data + last_excp = [] + for _i in range(0, manual_tries): + try: + r = requests.request(**req_args) + except exceptions.RequestException as e: + last_excp = [e] + if last_excp: + raise last_excp[-1] + if check_status: + r.raise_for_status() + return UrlResponse(r.status_code, r.content, r.headers) def wait_for_url(urls, max_wait=None, timeout=None, @@ -167,7 +200,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, else: headers = {} - resp = readurl(url, headers=headers, timeout=timeout) + resp = readurl(url, headers=headers, timeout=timeout, + check_status=False) if not resp.contents: reason = "empty response [%s]" % (resp.code) e = ValueError(reason) @@ -176,8 +210,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, e = ValueError(reason) else: return url - except exceptions.HTTPError as e: - reason = "http error [%s]" % e.code + except exceptions.RequestException as e: + reason = "request error [%s]" % e except Exception as e: reason = "unexpected error [%s]" % e -- cgit v1.2.3 From eec668c2256fc32a8d0879e1d91340d307c0971f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 21 Oct 2012 19:38:29 -0700 Subject: Fix the requires after using requests. --- Requires | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Requires b/Requires index 13a5d997..0313d569 100644 --- a/Requires +++ b/Requires @@ -22,5 +22,5 @@ pyyaml # The new main entrypoint uses argparse instead of optparse argparse -# Urllib3 handles ssl correctly! -urllib3 +# Requests handles ssl correctly! +requests -- cgit v1.2.3 From 8036ff686ae7e1a884e6a78022dd86a8eab514ec Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 21 Oct 2012 20:44:23 -0700 Subject: Fix the checking and capturing of exceptions. 
From eec668c2256fc32a8d0879e1d91340d307c0971f Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Sun, 21 Oct 2012 19:38:29 -0700
Subject: Fix the requires after using requests.

---
 Requires | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Requires b/Requires
index 13a5d997..0313d569 100644
--- a/Requires
+++ b/Requires
@@ -22,5 +22,5 @@ pyyaml
 # The new main entrypoint uses argparse instead of optparse
 argparse
 
-# Urllib3 handles ssl correctly!
-urllib3
+# Requests handles ssl correctly!
+requests
-- 
cgit v1.2.3

From 8036ff686ae7e1a884e6a78022dd86a8eab514ec Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Sun, 21 Oct 2012 20:44:23 -0700
Subject: Fix the checking and capturing of exceptions.

---
 cloudinit/url_helper.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index c5921147..93df2510 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -130,12 +130,12 @@ def readurl(url, data=None, timeout=None, retries=0,
     for _i in range(0, manual_tries):
         try:
             r = requests.request(**req_args)
+            if check_status:
+                r.raise_for_status()
         except exceptions.RequestException as e:
             last_excp = [e]
     if last_excp:
         raise last_excp[-1]
-    if check_status:
-        r.raise_for_status()
     return UrlResponse(r.status_code, r.content, r.headers)
-- 
cgit v1.2.3
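The reason for the move, sketched for clarity (not part of the original patch): raise_for_status() throws requests.exceptions.HTTPError, which is a RequestException subclass, so it has to run inside the try block for bad statuses to be captured; placed after the loop, a 4xx/5xx response would escape the capture entirely.

    try:
        r = requests.request(**req_args)
        if check_status:
            r.raise_for_status()   # HTTPError is a RequestException
    except exceptions.RequestException as e:
        last_excp = [e]            # captured for re-raise after the loop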
From 6a6a2f7c337e18d84d7031ba60d5adf6a93256fc Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Mon, 22 Oct 2012 13:24:25 -0700
Subject: More cleanups for using the requests module.

1. Handle our own retries (so that we can sleep in between)
2. Clean up the url so that partially invalid (no scheme) urls continue
   working.
3. The allow-redirects option is now a parameter.

---
 cloudinit/config/cc_phone_home.py |   2 +-
 cloudinit/url_helper.py           | 103 ++++++++++++++++++++------------------
 2 files changed, 54 insertions(+), 51 deletions(-)

diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index a268d66f..ae1349eb 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -112,7 +112,7 @@ def handle(name, cfg, cloud, log, args):
     }
     url = templater.render_string(url, url_params)
     try:
-        uhelp.readurl(url, data=real_submit_keys, retries=tries)
+        uhelp.readurl(url, data=real_submit_keys, retries=tries, sec_between=3)
     except:
         util.logexc(log, ("Failed to post phone home data to"
                           " %s in %s tries"), url, tries)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 93df2510..e8330e24 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -25,7 +25,7 @@ import time
 import requests
 from requests import exceptions
 
-from urlparse import urlparse
+from urlparse import (urlparse, urlunparse)
 
 from cloudinit import log as logging
 from cloudinit import version
@@ -48,42 +48,20 @@ except:
     pass
 
 
-class UrlResponse(object):
-    def __init__(self, status_code, contents=None, headers=None):
-        self._status_code = status_code
-        self._contents = contents
-        self._headers = headers
+def _cleanurl(url):
+    parsed_url = list(urlparse(url, scheme='http'))
+    if not parsed_url[1] and parsed_url[2]:
+        # Swap these since this seems to be a common
+        # occurrence when given urls like 'www.google.com'
+        parsed_url[1] = parsed_url[2]
+        parsed_url[2] = ''
+    return urlunparse(parsed_url)
 
-    @property
-    def code(self):
-        return self._status_code
 
-    @property
-    def contents(self):
-        return self._contents
-
-    @property
-    def headers(self):
-        return self._headers
-
-    def __str__(self):
-        if not self.contents:
-            return ''
-        else:
-            return str(self.contents)
-
-    def ok(self, redirects_ok=False):
-        upper = 300
-        if redirects_ok:
-            upper = 400
-        if self.code >= 200 and self.code < upper:
-            return True
-        else:
-            return False
-
-
-def readurl(url, data=None, timeout=None, retries=0,
-            headers=None, ssl_details=None, check_status=True):
+def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
+            headers=None, ssl_details=None, check_status=True,
+            allow_redirects=False):
+    url = _cleanurl(url)
     req_args = {
         'url': url,
     }
@@ -98,7 +76,8 @@ def readurl(url, data=None, timeout=None, retries=0,
         if 'cert_file' in ssl_details and 'key_file' in ssl_details:
             req_args['cert'] = [ssl_details['cert_file'],
                                 ssl_details['key_file']]
-    req_args['allow_redirects'] = False
+
+    req_args['allow_redirects'] = allow_redirects
     req_args['method'] = 'GET'
     if timeout is not None:
         req_args['timeout'] = max(float(timeout), 0)
@@ -107,16 +86,19 @@ def readurl(url, data=None, timeout=None, retries=0,
     # It doesn't seem like config
     # was added in older library versions, thus we
     # need to manually do the retries if it wasn't
-    manual_tries = 1
     if CONFIG_ENABLED:
-        req_config = {}
-        req_config['store_cookies'] = False
-        if retries:
-            req_config['max_retries'] = max(int(retries), 0)
+        req_config = {
+            'store_cookies': False,
+        }
+        # Don't use the retry support built-in
+        # since it doesn't allow for 'sleep_times'
+        # in between tries....
+        # if retries:
+        #     req_config['max_retries'] = max(int(retries), 0)
         req_args['config'] = req_config
-    else:
-        if retries:
-            manual_tries = max(int(retries) + 1, 1)
+    manual_tries = 1
+    if retries:
+        manual_tries = max(int(retries) + 1, 1)
     if not headers:
         headers = {
             'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
@@ -126,17 +108,38 @@ def readurl(url, data=None, timeout=None, retries=0,
     if data:
         # Do this after the log (it might be large)
         req_args['data'] = data
-    last_excp = []
-    for _i in range(0, manual_tries):
+    if sec_between is None:
+        sec_between = -1
+    excps = []
+    # Handle retrying ourselves since the built-in support
+    # doesn't handle sleeping between tries...
+    for i in range(0, manual_tries):
         try:
             r = requests.request(**req_args)
             if check_status:
                 r.raise_for_status()
+            contents = r.content
+            status = r.status_code
+            headers = r.headers
+            LOG.debug("Read from %s (%s, %sb) after %s attempts", url,
+                      status, len(contents), (i + 1))
+            # Doesn't seem like we can make it use a different
+            # subclass for responses, so add our own backward-compat
+            # attrs
+            if not hasattr(r, 'code'):
+                setattr(r, 'code', status)
+            if not hasattr(r, 'contents'):
+                setattr(r, 'contents', contents)
+            return r
         except exceptions.RequestException as e:
-            last_excp = [e]
-    if last_excp:
-        raise last_excp[-1]
-    return UrlResponse(r.status_code, r.content, r.headers)
+            excps.append(e)
+            if i + 1 < manual_tries and sec_between > 0:
+                LOG.debug("Please wait %s seconds while we wait to try again",
+                          sec_between)
+                time.sleep(sec_between)
+    if excps:
+        raise excps[-1]
+    return None  # Should throw before this...
-- 
cgit v1.2.3
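A behavior sketch for the new _cleanurl() (not part of the original patch): a scheme-less url parses with its host in the path slot, so the helper swaps the two and defaults the scheme to http.

    _cleanurl('www.google.com')         # -> 'http://www.google.com'
    _cleanurl('http://www.google.com')  # -> 'http://www.google.com' (unchanged)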
From 8bc85abd97e06d964bbd26208eb732e80eb87c10 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Tue, 20 Nov 2012 20:02:48 -0800
Subject: Start allowing different merging types to be applied

After user data handling splits all the different content types apart into
their various mime messages, it is nice to be able to have each message
specify how it should be merged (mainly for cloud-config or cloud-archive)
into the single cloud config that is eventually used. This starts to add a
pluggable merging framework and the needed components to activate said
headers and merging.

---
 cloudinit/handlers/__init__.py     |  49 +++++++++--------
 cloudinit/handlers/boot_hook.py    |   2 +-
 cloudinit/handlers/cloud_config.py |  22 ++++----
 cloudinit/handlers/shell_script.py |   2 +-
 cloudinit/handlers/upstart_job.py  |   2 +-
 cloudinit/mergers/__init__.py      | 104 +++++++++++++++++++++++++++++++++++++
 cloudinit/mergers/dict.py          |  33 ++++++++++++
 cloudinit/mergers/list.py          |  41 +++++++++++++++
 cloudinit/mergers/str.py           |  28 ++++++++++
 9 files changed, 246 insertions(+), 37 deletions(-)
 create mode 100644 cloudinit/mergers/__init__.py
 create mode 100644 cloudinit/mergers/dict.py
 create mode 100644 cloudinit/mergers/list.py
 create mode 100644 cloudinit/mergers/str.py

diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 8d6dcd4d..bfccfd89 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -69,7 +69,6 @@ INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()),
 
 
 class Handler(object):
-
     __metaclass__ = abc.ABCMeta
 
     def __init__(self, frequency, version=2):
@@ -83,15 +82,12 @@ class Handler(object):
     def list_types(self):
         raise NotImplementedError()
 
-    def handle_part(self, data, ctype, filename, payload, frequency):
-        return self._handle_part(data, ctype, filename, payload, frequency)
-
     @abc.abstractmethod
-    def _handle_part(self, data, ctype, filename, payload, frequency):
+    def handle_part(self, *args, **kwargs):
        raise NotImplementedError()
 
 
-def run_part(mod, data, ctype, filename, payload, frequency):
+def run_part(mod, data, filename, payload, headers, frequency):
     mod_freq = mod.frequency
     if not (mod_freq == PER_ALWAYS or
             (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)):
@@ -102,19 +98,25 @@ def run_part(mod, data, ctype, filename, payload, frequency):
         mod_ver = int(mod_ver)
     except:
         mod_ver = 1
+    content_type = headers['Content-Type']
     try:
         LOG.debug("Calling handler %s (%s, %s, %s) with frequency %s",
-                  mod, ctype, filename, mod_ver, frequency)
-        if mod_ver >= 2:
+                  mod, content_type, filename, mod_ver, frequency)
+        if mod_ver == 3:
+            # Treat as v. 3 which does get a frequency + headers
+            mod.handle_part(data, content_type, filename,
+                            payload, frequency, headers)
+        elif mod_ver == 2:
             # Treat as v. 2 which does get a frequency
-            mod.handle_part(data, ctype, filename, payload, frequency)
+            mod.handle_part(data, content_type, filename,
+                            payload, frequency)
         else:
             # Treat as v. 1 which gets no frequency
-            mod.handle_part(data, ctype, filename, payload)
+            mod.handle_part(data, content_type, filename, payload)
     except:
         util.logexc(LOG, ("Failed calling handler %s (%s, %s, %s)"
                           " with frequency %s"),
-                    mod, ctype, filename,
+                    mod, content_type, filename,
                     mod_ver, frequency)
@@ -173,26 +175,27 @@ def _escape_string(text):
     return text
 
 
-def walker_callback(pdata, ctype, filename, payload):
-    if ctype in PART_CONTENT_TYPES:
-        walker_handle_handler(pdata, ctype, filename, payload)
+def walker_callback(data, filename, payload, headers):
+    content_type = headers['Content-Type']
+    if content_type in PART_CONTENT_TYPES:
+        walker_handle_handler(data, content_type, filename, payload)
         return
-    handlers = pdata['handlers']
-    if ctype in pdata['handlers']:
-        run_part(handlers[ctype], pdata['data'], ctype, filename,
-                 payload, pdata['frequency'])
+    handlers = data['handlers']
+    if content_type in handlers:
+        run_part(handlers[content_type], data['data'], filename,
+                 payload, headers, data['frequency'])
     elif payload:
         # Extract the first line or 24 bytes for displaying in the log
         start = _extract_first_or_bytes(payload, 24)
         details = "'%s...'" % (_escape_string(start))
         if ctype == NOT_MULTIPART_TYPE:
             LOG.warning("Unhandled non-multipart (%s) userdata: %s",
-                        ctype, details)
+                        content_type, details)
         else:
             LOG.warning("Unhandled unknown content-type (%s) userdata: %s",
-                        ctype, details)
+                        content_type, details)
     else:
-        LOG.debug("empty payload of type %s" % ctype)
+        LOG.debug("Empty payload of type %s", content_type)
 
 
 # Callback is a function that will be called with
@@ -212,7 +215,9 @@ def walk(msg, callback, data):
         if not filename:
             filename = PART_FN_TPL % (partnum)
 
-        callback(data, ctype, filename, part.get_payload(decode=True))
+        callback(data, ctype, filename,
+                 part.get_payload(decode=True),
+                 dict(part))
         partnum = partnum + 1
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index 456b8020..bf313f10 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -56,7 +56,7 @@ class BootHookPartHandler(handlers.Handler):
         util.write_file(filepath, contents, 0700)
         return filepath
 
-    def _handle_part(self, _data, ctype, filename, payload, _frequency):
+    def handle_part(self, _data, ctype, filename, payload, _frequency):
         if ctype in handlers.CONTENT_SIGNALS:
             return
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index f6d95244..86027187 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -22,6 +22,7 @@
 
 from cloudinit import handlers
 from cloudinit import log as logging
+from cloudinit import mergers
 from cloudinit import util
 
 from cloudinit.settings import (PER_ALWAYS)
@@ -31,8 +32,8 @@ LOG = logging.getLogger(__name__)
 
 class CloudConfigPartHandler(handlers.Handler):
     def __init__(self, paths, **_kwargs):
-        handlers.Handler.__init__(self, PER_ALWAYS)
-        self.cloud_buf = []
+        handlers.Handler.__init__(self, PER_ALWAYS, version=3)
+        self.cloud_buf = {}
         self.cloud_fn = paths.get_ipath("cloud_config")
 
     def list_types(self):
@@ -43,20 +44,17 @@ class CloudConfigPartHandler(handlers.Handler):
     def _write_cloud_config(self, buf):
         if not self.cloud_fn:
             return
-        lines = [str(b) for b in buf]
-        payload = "\n".join(lines)
+        payload = util.yaml_dumps(self.cloud_buf)
         util.write_file(self.cloud_fn, payload, 0600)
 
-    def _handle_part(self, _data, ctype, filename, payload, _frequency):
+    def handle_part(self, _data, ctype, filename, payload, _frequency, headers):
         if ctype == handlers.CONTENT_START:
-            self.cloud_buf = []
+            self.cloud_buf = {}
             return
         if ctype == handlers.CONTENT_END:
             self._write_cloud_config(self.cloud_buf)
-            self.cloud_buf = []
+            self.cloud_buf = {}
             return
-
-        filename = util.clean_filename(filename)
-        if not filename:
-            filename = '??'
-        self.cloud_buf.extend(["#%s" % (filename), str(payload)])
+        merge_how = headers.get("Merge-Type", 'list+dict+str')
+        merger = mergers.construct(merge_how)
+        self.cloud_buf = merger.merge(self.cloud_buf, util.load_yaml(payload))
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index 6c5c11ca..2a87e8dd 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -41,7 +41,7 @@ class ShellScriptPartHandler(handlers.Handler):
             handlers.type_from_starts_with("#!"),
         ]
 
-    def _handle_part(self, _data, ctype, filename, payload, _frequency):
+    def handle_part(self, _data, ctype, filename, payload, _frequency):
         if ctype in handlers.CONTENT_SIGNALS:
             # TODO(harlowja): maybe delete existing things here
             return
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 99e0afde..a5cb9b0c 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -42,7 +42,7 @@ class UpstartJobPartHandler(handlers.Handler):
             handlers.type_from_starts_with("#upstart-job"),
         ]
 
-    def _handle_part(self, _data, ctype, filename, payload, frequency):
+    def handle_part(self, _data, ctype, filename, payload, frequency):
         if ctype in handlers.CONTENT_SIGNALS:
             return
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
new file mode 100644
index 00000000..b3e728b0
--- /dev/null
+++ b/cloudinit/mergers/__init__.py
@@ -0,0 +1,104 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+from cloudinit import importer
+from cloudinit import log as logging
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class UnknownMerger(object):
+    # Named differently so auto-method finding
+    # doesn't pick this up if there is ever a type
+    # named "unknown"
+    def _handle_unknown(self, meth_wanted, value, merge_with):
+        return value
+
+    def merge(self, source, merge_with):
+        type_name = util.obj_name(source)
+        type_name = type_name.lower()
+        method_name = "_on_%s" % (type_name)
+        meth = None
+        args = [source, merge_with]
+        if hasattr(self, method_name):
+            meth = getattr(self, method_name)
+        if not meth:
+            meth = self._handle_unknown
+            args.insert(0, method_name)
+        return meth(*args)
+
+
+class LookupMerger(UnknownMerger):
+    def __init__(self, lookups=None):
+        UnknownMerger.__init__(self)
+        if lookups is None:
+            self._lookups = []
+        else:
+            self._lookups = lookups
+
+    def _handle_unknown(self, meth_wanted, value, merge_with):
+        meth = None
+        for merger in self._lookups:
+            if hasattr(merger, meth_wanted):
+                # First one that has that method/attr gets to be
+                # the one that will be called
+                meth = getattr(merger, meth_wanted)
+                break
+        if not meth:
+            return UnknownMerger._handle_unknown(self, meth_wanted,
+                                                 value, merge_with)
+        return meth(value, merge_with)
+
+
+def _extract_merger_names(merge_how):
+    names = []
+    for m_name in merge_how.split("+"):
+        # Canonicalize the name (so that it can be found
+        # even when users alter it in various ways...
+        m_name = m_name.lower().strip()
+        m_name = m_name.replace(" ", "_")
+        m_name = m_name.replace("\t", "_")
+        m_name = m_name.replace("-", "_")
+        if not m_name:
+            continue
+        names.append(m_name)
+    return names
+
+
+def construct(merge_how, default_classes=None):
+    mergers = []
+    merger_classes = []
+    root = LookupMerger(mergers)
+    for m_name in _extract_merger_names(merge_how):
+        merger_locs = importer.find_module(m_name,
+                                           [__name__],
+                                           ['Merger'])
+        if not merger_locs:
+            msg = "Could not find merger named %s" % (m_name)
+            raise ImportError(msg)
+        else:
+            mod = importer.import_module(merger_locs[0])
+            cls = getattr(mod, 'Merger')
+            merger_classes.append(cls)
+    if not merger_classes and default_classes:
+        merger_classes = default_classes
+    for m_class in merger_classes:
+        mergers.append(m_class(root))
+    return root
\ No newline at end of file
diff --git a/cloudinit/mergers/dict.py b/cloudinit/mergers/dict.py
new file mode 100644
index 00000000..a0ffaa33
--- /dev/null
+++ b/cloudinit/mergers/dict.py
@@ -0,0 +1,33 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+class Merger(object):
+    def __init__(self, merger):
+        self._merger = merger
+
+    def _on_dict(self, value, merge_with):
+        if not isinstance(merge_with, (dict)):
+            return value
+        merged = dict(value)
+        for (k, v) in merge_with.items():
+            if k in merged:
+                merged[k] = self._merger.merge(merged[k], v)
+            else:
+                merged[k] = v
+        return merged
diff --git a/cloudinit/mergers/list.py b/cloudinit/mergers/list.py
new file mode 100644
index 00000000..ad1b9793
--- /dev/null
+++ b/cloudinit/mergers/list.py
@@ -0,0 +1,41 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+class Merger(object):
+    def __init__(self, merger):
+        self._merger = merger
+
+    def _on_tuple(self, value, merge_with):
+        return self._on_list(list(value), merge_with)
+
+    def _on_list(self, value, merge_with):
+        if isinstance(merge_with, (tuple, list)):
+            new_value = list(value)
+            for m_v in merge_with:
+                m_am = 0
+                for (i, o_v) in enumerate(new_value):
+                    if m_v == o_v:
+                        new_value[i] = self._merger.merge(o_v, m_v)
+                        m_am += 1
+                if m_am == 0:
+                    new_value.append(m_v)
+        else:
+            new_value = list(value)
+            new_value.append(merge_with)
+        return new_value
diff --git a/cloudinit/mergers/str.py b/cloudinit/mergers/str.py
new file mode 100644
index 00000000..7c3fa585
--- /dev/null
+++ b/cloudinit/mergers/str.py
@@ -0,0 +1,28 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+class Merger(object):
+    def __init__(self, merger):
+        pass
+
+    def _on_unicode(self, value, merge_with):
+        return self._on_str(value, merge_with)
+
+    def _on_str(self, value, merge_with):
+        return value
-- 
cgit v1.2.3
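A usage sketch of the new framework (not part of the original patch): mergers.construct() builds a chain from a '+'-separated merge string, and the LookupMerger dispatches by value type to the first contributed Merger that has a matching _on_<type> method.

    from cloudinit import mergers

    merger = mergers.construct('list+dict+str')
    merged = merger.merge({'a': [1], 'b': 'x'}, {'a': [2], 'b': 'y'})
    # -> {'a': [1, 2], 'b': 'x'}: lists are merged element-wise (new items
    #    appended), while the str merger keeps the first value it saw.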
From eded09c1e260330107a19bd0b5a351686fe49e80 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 22 Nov 2012 08:21:37 -0800
Subject: Continue working on merging prototype.

---
 cloudinit/handlers/__init__.py     | 26 +++++++++++++++++---------
 cloudinit/handlers/cloud_config.py | 27 +++++++++++++++++++++------
 2 files changed, 38 insertions(+), 15 deletions(-)

diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index bfccfd89..566b61a7 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -92,14 +92,14 @@ def run_part(mod, data, filename, payload, headers, frequency):
     if not (mod_freq == PER_ALWAYS or
             (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)):
         return
-    mod_ver = mod.handler_version
     # Sanity checks on version (should be an int convertable)
     try:
+        mod_ver = mod.handler_version
         mod_ver = int(mod_ver)
-    except:
+    except (TypeError, ValueError, AttributeError):
         mod_ver = 1
-    content_type = headers['Content-Type']
     try:
+        content_type = headers['Content-Type']
         LOG.debug("Calling handler %s (%s, %s, %s) with frequency %s",
                   mod, content_type, filename, mod_ver, frequency)
         if mod_ver == 3:
@@ -110,9 +110,11 @@ def run_part(mod, data, filename, payload, headers, frequency):
             # Treat as v. 2 which does get a frequency
             mod.handle_part(data, content_type, filename,
                             payload, frequency)
-        else:
+        elif mod_ver == 1:
             # Treat as v. 1 which gets no frequency
             mod.handle_part(data, content_type, filename, payload)
+        else:
+            raise ValueError("Unknown module version %s" % (mod_ver))
     except:
         util.logexc(LOG, ("Failed calling handler %s (%s, %s, %s)"
                           " with frequency %s"),
@@ -121,11 +123,17 @@ def run_part(mod, data, filename, payload, headers, frequency):
 
 
 def call_begin(mod, data, frequency):
-    run_part(mod, data, CONTENT_START, None, None, frequency)
+    headers = {
+        'Content-Type': CONTENT_START,
+    }
+    run_part(mod, data, None, None, headers, frequency)
 
 
 def call_end(mod, data, frequency):
-    run_part(mod, data, CONTENT_END, None, None, frequency)
+    headers = {
+        'Content-Type': CONTENT_END,
+    }
+    run_part(mod, data, None, None, headers, frequency)
 
 
 def walker_handle_handler(pdata, _ctype, _filename, payload):
@@ -215,9 +223,9 @@ def walk(msg, callback, data):
         if not filename:
             filename = PART_FN_TPL % (partnum)
 
-        callback(data, ctype, filename,
-                 part.get_payload(decode=True),
-                 dict(part))
+        headers = dict(part)
+        headers['Content-Type'] = ctype
+        callback(data, filename, part.get_payload(decode=True), headers)
         partnum = partnum + 1
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 86027187..22ced20d 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -29,6 +29,8 @@ from cloudinit.settings import (PER_ALWAYS)
 
 LOG = logging.getLogger(__name__)
 
+DEF_MERGE_TYPE = "list+dict+str"
+
 
 class CloudConfigPartHandler(handlers.Handler):
     def __init__(self, paths, **_kwargs):
@@ -44,10 +46,25 @@ class CloudConfigPartHandler(handlers.Handler):
     def _write_cloud_config(self, buf):
         if not self.cloud_fn:
             return
-        payload = util.yaml_dumps(self.cloud_buf)
-        util.write_file(self.cloud_fn, payload, 0600)
+        lines = ["#cloud-config", util.yaml_dumps(self.cloud_buf)]
+        util.write_file(self.cloud_fn, "\n".join(lines), 0600)
+
+    def _merge_part(self, payload, headers, filename):
+        merge_how = headers.get("Merge-Type")
+        try:
+            payload_y = util.load_yaml(payload)
+            if not merge_how:
+                merge_how = payload_y.pop("Merge-Type", '')
+            merge_how = merge_how.strip().lower()
+            if not merge_how:
+                merge_how = DEF_MERGE_TYPE
+            merger = mergers.construct(merge_how)
+            self.cloud_buf = merger.merge(self.cloud_buf, payload_y)
+        except:
+            util.logexc(LOG, "Failed at merging in cloud config part from %s",
+                        filename)
 
-    def handle_part(self, _data, ctype, filename, payload, _frequency, headers):
+    def handle_part(self, _data, ctype, filename, payload, _freq, headers):
         if ctype == handlers.CONTENT_START:
             self.cloud_buf = {}
             return
@@ -55,6 +72,4 @@ class CloudConfigPartHandler(handlers.Handler):
             self._write_cloud_config(self.cloud_buf)
             self.cloud_buf = {}
             return
-        merge_how = headers.get("Merge-Type", 'list+dict+str')
-        merger = mergers.construct(merge_how)
-        self.cloud_buf = merger.merge(self.cloud_buf, util.load_yaml(payload))
+        self._merge_part(payload, headers, filename)
-- 
cgit v1.2.3
From 0596e8db112a031095e8f5cbdae770e8f3ca4bbb Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 22 Nov 2012 08:24:25 -0800
Subject: Select merge-type from either header or content after loading as
 yaml.

---
 cloudinit/handlers/cloud_config.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 22ced20d..ba07b2ef 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -50,11 +50,13 @@ class CloudConfigPartHandler(handlers.Handler):
         util.write_file(self.cloud_fn, "\n".join(lines), 0600)
 
     def _merge_part(self, payload, headers, filename):
-        merge_how = headers.get("Merge-Type")
+        merge_headers_how = headers.get("Merge-Type")
         try:
             payload_y = util.load_yaml(payload)
-            if not merge_how:
-                merge_how = payload_y.pop("Merge-Type", '')
+            merge_how = ''
+            for merge_i in [payload_y.pop("Merge-Type", ''), merge_headers_how]:
+                if merge_i:
+                    merge_how = merge_i
             merge_how = merge_how.strip().lower()
             if not merge_how:
                 merge_how = DEF_MERGE_TYPE
-- 
cgit v1.2.3

From cc765725c6493082e8e2f72f78de9786b6e2cc2a Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 22 Nov 2012 08:26:20 -0800
Subject: Continue work.

---
 cloudinit/handlers/cloud_config.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index ba07b2ef..50fbb445 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -50,13 +50,14 @@ class CloudConfigPartHandler(handlers.Handler):
     def _merge_part(self, payload, headers, filename):
-        merge_headers_how = headers.get("Merge-Type")
+        merge_headers = headers.get("Merge-Type")
         try:
             payload_y = util.load_yaml(payload)
             merge_how = ''
-            for merge_i in [payload_y.pop("Merge-Type", ''), merge_headers_how]:
+            for merge_i in [payload_y.pop("Merge-Type", ''), merge_headers]:
                 if merge_i:
                     merge_how = merge_i
+                    break
             merge_how = merge_how.strip().lower()
             if not merge_how:
                 merge_how = DEF_MERGE_TYPE
-- 
cgit v1.2.3

From f6f6d4b961adca0a5f19ea4bbdd35bcc45955b24 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 22 Nov 2012 08:49:48 -0800
Subject: Change the yaml merge header extraction to be in a separate function
 that can look in more places.

---
 cloudinit/handlers/cloud_config.py | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 50fbb445..de9c5252 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -30,6 +30,7 @@ from cloudinit.settings import (PER_ALWAYS)
 LOG = logging.getLogger(__name__)
 
 DEF_MERGE_TYPE = "list+dict+str"
+MERGE_HEADER = 'Merge-Type'
 
 
 class CloudConfigPartHandler(handlers.Handler):
@@ -46,19 +47,33 @@ class CloudConfigPartHandler(handlers.Handler):
     def _write_cloud_config(self, buf):
         if not self.cloud_fn:
             return
+        # Write the combined & merged dictionary/yaml out
         lines = ["#cloud-config", util.yaml_dumps(self.cloud_buf)]
         util.write_file(self.cloud_fn, "\n".join(lines), 0600)
 
+    def _merge_header_extract(self, payload_yaml):
+        merge_header_yaml = ''
+        for k in [MERGE_HEADER, MERGE_HEADER.lower(),
+                  MERGE_HEADER.lower().replace("-", "_")]:
+            if k in payload_yaml:
+                merge_header_yaml = str(payload_yaml[k])
+                break
+        return merge_header_yaml
+
     def _merge_part(self, payload, headers, filename):
-        merge_headers = headers.get("Merge-Type")
+        merge_header_headers = headers.get(MERGE_HEADER, '')
         try:
             payload_y = util.load_yaml(payload)
             merge_how = ''
-            for merge_i in [payload_y.pop("Merge-Type", ''), merge_headers]:
+            # Select either the merge-type from the content
+            # or the merge type from the headers or default to our own set
+            # if neither exists (or is empty) from the later
+            merge_header_yaml = self._merge_header_extract(payload_y)
+            for merge_i in [merge_header_yaml, merge_header_headers]:
+                merge_i = merge_i.strip().lower()
                 if merge_i:
                     merge_how = merge_i
                     break
-            merge_how = merge_how.strip().lower()
             if not merge_how:
                 merge_how = DEF_MERGE_TYPE
             merger = mergers.construct(merge_how)
-- 
cgit v1.2.3
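With this change a part can declare its merge type either as a MIME header on the part or as a top-level key inside the cloud-config payload, and the lookup now tries three spellings ('Merge-Type', 'merge-type', 'merge_type'). For example, a part whose payload begins (illustration, not part of the original patch):

    #cloud-config
    merge_type: 'list+dict+str'
    packages:
     - htop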
From 67e318c71c05b3c5dcc7e4ef9775951bbc42534c Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 22 Nov 2012 08:52:35 -0800
Subject: Adjust naming and exception catching.

---
 cloudinit/handlers/cloud_config.py | 42 +++++++++++++++++++-------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index de9c5252..1cfbc210 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -60,27 +60,23 @@ class CloudConfigPartHandler(handlers.Handler):
                 break
         return merge_header_yaml
 
-    def _merge_part(self, payload, headers, filename):
+    def _merge_part(self, payload, headers):
         merge_header_headers = headers.get(MERGE_HEADER, '')
-        try:
-            payload_y = util.load_yaml(payload)
-            merge_how = ''
-            # Select either the merge-type from the content
-            # or the merge type from the headers or default to our own set
-            # if neither exists (or is empty) from the later
-            merge_header_yaml = self._merge_header_extract(payload_y)
-            for merge_i in [merge_header_yaml, merge_header_headers]:
-                merge_i = merge_i.strip().lower()
-                if merge_i:
-                    merge_how = merge_i
-                    break
-            if not merge_how:
-                merge_how = DEF_MERGE_TYPE
-            merger = mergers.construct(merge_how)
-            self.cloud_buf = merger.merge(self.cloud_buf, payload_y)
-        except:
-            util.logexc(LOG, "Failed at merging in cloud config part from %s",
-                        filename)
+        payload_yaml = util.load_yaml(payload)
+        merge_how = ''
+        # Select either the merge-type from the content
+        # or the merge type from the headers or default to our own set
+        # if neither exists (or is empty) from the later
+        merge_header_yaml = self._merge_header_extract(payload_yaml)
+        for merge_i in [merge_header_yaml, merge_header_headers]:
+            merge_i = merge_i.strip().lower()
+            if merge_i:
+                merge_how = merge_i
+                break
+        if not merge_how:
+            merge_how = DEF_MERGE_TYPE
+        merger = mergers.construct(merge_how)
+        self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml)
 
     def handle_part(self, _data, ctype, filename, payload, _freq, headers):
         if ctype == handlers.CONTENT_START:
@@ -90,4 +86,8 @@ class CloudConfigPartHandler(handlers.Handler):
             self._write_cloud_config(self.cloud_buf)
             self.cloud_buf = {}
             return
-        self._merge_part(payload, headers, filename)
+        try:
+            self._merge_part(payload, headers)
+        except:
+            util.logexc(LOG, "Failed at merging in cloud config part from %s",
+                        filename)
-- 
cgit v1.2.3

From 180e83b20aa02dc9df903fa7e31121dd49a49b3a Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 22 Nov 2012 09:00:14 -0800
Subject: Add which files the yaml blob came from.

---
 cloudinit/handlers/cloud_config.py | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 1cfbc210..9a8782bb 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -38,6 +38,7 @@ class CloudConfigPartHandler(handlers.Handler):
         handlers.Handler.__init__(self, PER_ALWAYS, version=3)
         self.cloud_buf = {}
         self.cloud_fn = paths.get_ipath("cloud_config")
+        self.file_names = []
 
     def list_types(self):
         return [
@@ -48,7 +49,17 @@ class CloudConfigPartHandler(handlers.Handler):
         if not self.cloud_fn:
             return
         # Write the combined & merged dictionary/yaml out
-        lines = ["#cloud-config", util.yaml_dumps(self.cloud_buf)]
+        lines = [
+            "#cloud-config",
+            '',
+        ]
+        # Write which files we merged from
+        if self.file_names:
+            lines.append("# from %s files" % (len(self.file_names)))
+            for fn in self.file_names:
+                lines.append("# %s" % (fn))
+            lines.append("")
+        lines.append(util.yaml_dumps(self.cloud_buf))
         util.write_file(self.cloud_fn, "\n".join(lines), 0600)
 
     def _merge_header_extract(self, payload_yaml):
@@ -78,16 +89,21 @@ class CloudConfigPartHandler(handlers.Handler):
             merger = mergers.construct(merge_how)
         self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml)
 
+    def _reset(self):
+        self.file_names = []
+        self.cloud_buf = {}
+
     def handle_part(self, _data, ctype, filename, payload, _freq, headers):
         if ctype == handlers.CONTENT_START:
-            self.cloud_buf = {}
+            self._reset()
             return
         if ctype == handlers.CONTENT_END:
             self._write_cloud_config(self.cloud_buf)
-            self.cloud_buf = {}
+            self._reset()
             return
         try:
             self._merge_part(payload, headers)
+            self.file_names.append(filename)
         except:
             util.logexc(LOG, "Failed at merging in cloud config part from %s",
                         filename)
-- 
cgit v1.2.3

From 3941466b3e065c9ce7bb7500e41f464993861672 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 22 Nov 2012 19:45:43 -0800
Subject: Allow mergers to take options.
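A usage sketch of the option syntax this patch introduces (not part of the original patch, and assuming the str merger gains the same two-argument constructor in the truncated hunk below): each merger name now carries a parenthesized, comma-separated option list, parsed by the NAME_MTCH expression.

    from cloudinit import mergers

    # 'append' makes the list merger extend wholesale instead of merging
    # element-wise; dict() and str() with no options keep prior behavior.
    merger = mergers.construct("list(append)+dict()+str()")
    merged = merger.merge({'a': [1]}, {'a': [1, 2]})
    # -> {'a': [1, 1, 2]} since the incoming list is appended as-is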
--- cloudinit/handlers/cloud_config.py | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 1cfbc210..9a8782bb 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -38,6 +38,7 @@ class CloudConfigPartHandler(handlers.Handler): handlers.Handler.__init__(self, PER_ALWAYS, version=3) self.cloud_buf = {} self.cloud_fn = paths.get_ipath("cloud_config") + self.file_names = [] def list_types(self): return [ @@ -48,7 +49,17 @@ class CloudConfigPartHandler(handlers.Handler): if not self.cloud_fn: return # Write the combined & merged dictionary/yaml out - lines = ["#cloud-config", util.yaml_dumps(self.cloud_buf)] + lines = [ + "#cloud-config", + '', + ] + # Write which files we merged from + if self.file_names: + lines.append("# from %s files" % (len(self.file_names))) + for fn in self.file_names: + lines.append("# %s" % (fn)) + lines.append("") + lines.append(util.yaml_dumps(self.cloud_buf)) util.write_file(self.cloud_fn, "\n".join(lines), 0600) def _merge_header_extract(self, payload_yaml): @@ -78,16 +89,21 @@ class CloudConfigPartHandler(handlers.Handler): merger = mergers.construct(merge_how) self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml) + def _reset(self): + self.file_names = [] + self.cloud_buf = {} + def handle_part(self, _data, ctype, filename, payload, _freq, headers): if ctype == handlers.CONTENT_START: - self.cloud_buf = {} + self._reset() return if ctype == handlers.CONTENT_END: self._write_cloud_config(self.cloud_buf) - self.cloud_buf = {} + self._reset() return try: self._merge_part(payload, headers) + self.file_names.append(filename) except: util.logexc(LOG, "Failed at merging in cloud config part from %s", filename) -- cgit v1.2.3 From 3941466b3e065c9ce7bb7500e41f464993861672 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 22 Nov 2012 19:45:43 -0800 Subject: Allow mergers to take options. --- cloudinit/handlers/cloud_config.py | 2 +- cloudinit/mergers/__init__.py | 37 ++++++++++++++++++++++--------------- cloudinit/mergers/dict.py | 11 +++++++++-- cloudinit/mergers/list.py | 29 +++++++++++++++++++---------- cloudinit/mergers/str.py | 12 +++++++++--- 5 files changed, 60 insertions(+), 31 deletions(-) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 9a8782bb..02a7ad9d 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -29,7 +29,7 @@ from cloudinit.settings import (PER_ALWAYS) LOG = logging.getLogger(__name__) -DEF_MERGE_TYPE = "list+dict+str" +DEF_MERGE_TYPE = "list()+dict()+str()" MERGE_HEADER = 'Merge-Type' diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index b3e728b0..20658edc 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -16,11 +16,14 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +import re from cloudinit import importer from cloudinit import log as logging from cloudinit import util +NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$") + LOG = logging.getLogger(__name__) @@ -71,10 +74,8 @@ def _extract_merger_names(merge_how): names = [] for m_name in merge_how.split("+"): # Canonicalize the name (so that it can be found - # even when users alter it in various ways... 
+ # even when users alter it in various ways) m_name = m_name.lower().strip() - m_name = m_name.replace(" ", "_") - m_name = m_name.replace("\t", "_") m_name = m_name.replace("-", "_") if not m_name: continue @@ -82,23 +83,29 @@ return names -def construct(merge_how, default_classes=None): - mergers = [] - merger_classes = [] - root = LookupMerger(mergers) - for m_name in _extract_merger_names(merge_how): +def construct(merge_how): + mergers_to_be = [] + for name in _extract_merger_names(merge_how): + match = NAME_MTCH.match(name) + if not match: + msg = "Matcher identifier '%s' is not in the right format" % (name) + raise ValueError(msg) + (m_name, m_ops) = match.groups() + m_ops = m_ops.strip().split(",") + m_ops = [m.strip().lower() for m in m_ops if m.strip()] merger_locs = importer.find_module(m_name, [__name__], ['Merger']) if not merger_locs: - msg = "Could not find merger named %s" % (m_name) + msg = "Could not find merger named '%s'" % (m_name) raise ImportError(msg) else: mod = importer.import_module(merger_locs[0]) - cls = getattr(mod, 'Merger') - merger_classes.append(cls) - if not merger_classes and default_classes: - merger_classes = default_classes - for m_class in merger_classes: - mergers.append(m_class(root)) + mod_attr = getattr(mod, 'Merger') + mergers_to_be.append((mod_attr, m_ops)) + # Now form them... + mergers = [] + root = LookupMerger(mergers) + for (attr, opts) in mergers_to_be: + mergers.append(attr(root, opts)) return root \ No newline at end of file diff --git a/cloudinit/mergers/dict.py b/cloudinit/mergers/dict.py index a0ffaa33..e7073bd9 100644 --- a/cloudinit/mergers/dict.py +++ b/cloudinit/mergers/dict.py @@ -18,8 +18,12 @@ class Merger(object): - def __init__(self, merger): + def __init__(self, merger, opts): self._merger = merger + self._overwrite = 'overwrite' in opts + + if opts and opts.lower().find("overwrite") != -1: + self._overwrite = True def _on_dict(self, value, merge_with): if not isinstance(merge_with, (dict)): @@ -27,7 +31,10 @@ class Merger(object): merged = dict(value) for (k, v) in merge_with.items(): if k in merged: - merged[k] = self._merger.merge(merged[k], v) + if not self._overwrite: + merged[k] = self._merger.merge(merged[k], v) + else: + merged[k] = v else: merged[k] = v return merged diff --git a/cloudinit/mergers/list.py b/cloudinit/mergers/list.py index ad1b9793..0c65d053 100644 --- a/cloudinit/mergers/list.py +++ b/cloudinit/mergers/list.py @@ -18,8 +18,10 @@ class Merger(object): - def __init__(self, merger): + def __init__(self, merger, opts): self._merger = merger + self._discard_non = 'discard_non_list' in opts + self._append = 'append' in opts def _on_tuple(self, value, merge_with): return self._on_list(list(value), merge_with) @@ -27,15 +29,22 @@ def _on_list(self, value, merge_with): if isinstance(merge_with, (tuple, list)): new_value = list(value) - for m_v in merge_with: - m_am = 0 - for (i, o_v) in enumerate(new_value): - if m_v == o_v: - new_value[i] = self._merger.merge(o_v, m_v) - m_am += 1 - if m_am == 0: - new_value.append(m_v) + if self._append: + new_value.extend(merge_with) + else: + # Merge instead + for m_v in merge_with: + m_am = 0 + for (i, o_v) in enumerate(new_value): + if m_v == o_v: + new_value[i] = self._merger.merge(o_v, m_v) + m_am += 1 + if m_am == 0: + new_value.append(m_v) else: new_value = list(value) + if self._discard_non: + pass + else: + new_value.append(merge_with) return new_value diff --git
a/cloudinit/mergers/str.py b/cloudinit/mergers/str.py index 7c3fa585..14bc46ec 100644 --- a/cloudinit/mergers/str.py +++ b/cloudinit/mergers/str.py @@ -18,11 +18,17 @@ class Merger(object): - def __init__(self, merger): - pass + def __init__(self, merger, opts): + self._append = 'append' in opts def _on_unicode(self, value, merge_with): return self._on_str(value, merge_with) def _on_str(self, value, merge_with): - return value + if not self._append: + return value + else: + if isinstance(value, (unicode)): + return value + unicode(merge_with) + else: + return value + str(merge_with) -- cgit v1.2.3 From 9d91b156e4e81d07eb2f01946cea17c7565b7fc4 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 22 Nov 2012 19:51:33 -0800 Subject: More cleanups. --- cloudinit/mergers/dict.py | 3 --- cloudinit/mergers/list.py | 11 ++++------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/cloudinit/mergers/dict.py b/cloudinit/mergers/dict.py index e7073bd9..bc392afa 100644 --- a/cloudinit/mergers/dict.py +++ b/cloudinit/mergers/dict.py @@ -21,9 +21,6 @@ class Merger(object): def __init__(self, merger, opts): self._merger = merger self._overwrite = 'overwrite' in opts - - if opts and opts.lower().find("overwrite") != -1: - self._overwrite = True def _on_dict(self, value, merge_with): if not isinstance(merge_with, (dict)): diff --git a/cloudinit/mergers/list.py b/cloudinit/mergers/list.py index 0c65d053..a848b8d6 100644 --- a/cloudinit/mergers/list.py +++ b/cloudinit/mergers/list.py @@ -21,15 +21,15 @@ class Merger(object): def __init__(self, merger, opts): self._merger = merger self._discard_non = 'discard_non_list' in opts - self._append = 'append' in opts + self._extend = 'extend' in opts def _on_tuple(self, value, merge_with): return self._on_list(list(value), merge_with) def _on_list(self, value, merge_with): + new_value = list(value) if isinstance(merge_with, (tuple, list)): - new_value = list(value) - if self._append: + if self._extend: new_value.extend(merge_with) else: # Merge instead @@ -42,9 +42,6 @@ class Merger(object): if m_am == 0: new_value.append(m_v) else: - new_value = list(value) - if self._discard_non: - pass - else: + if not self._discard_non: new_value.append(merge_with) return new_value -- cgit v1.2.3 From 0612a35c4190a485a95ebade8f5c0598ae8b0e14 Mon Sep 17 00:00:00 2001 From: Blair Zajac Date: Mon, 28 Jan 2013 14:10:56 -0800 Subject: Support resizing btrfs filesystems. The existing code has two issues with btrfs: 1) The command to resize a btrfs filesystem uses a path to the mount point, not the underlying device: $ btrfs filesystem resize max /dev/vda1 ERROR: unable to resize '/dev/vda1' - Inappropriate ioctl for device Resize '/dev/vda1' of 'max' $ btrfs filesystem resize max / Resize '/' of 'max' 2) The code that is given a path and finds the ID of the device where the path is mounted doesn't work for btrfs: Use /proc/$$/mountinfo to find the device where path is mounted. This is done because with a btrfs filesystem using os.stat(path) does not return the ID of the device. Here, / has a device of 18 (decimal). 
$ stat /
  File: '/'
  Size: 234 Blocks: 0 IO Block: 4096 directory
Device: 12h/18d Inode: 256 Links: 1
Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
Access: 2013-01-13 07:31:04.358011255 +0000
Modify: 2013-01-13 18:48:25.930011255 +0000
Change: 2013-01-13 18:48:25.930011255 +0000
 Birth: -

Find where / is mounted:

$ mount | grep ' / '
/dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)

And the device ID for /dev/vda1 is not 18:

$ ls -l /dev/vda1
brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1

So use /proc/$$/mountinfo to find the device underlying the input path.
--- cloudinit/config/cc_resizefs.py | 199 ++++++++++++++++++++++++++-------------- 1 file changed, 130 insertions(+), 69 deletions(-) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 70294eda..0bbbf81e 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -27,42 +27,108 @@ from cloudinit import util frequency = PER_ALWAYS +def _resize_btrfs(mount_point, devpth): + return ('btrfs', 'filesystem', 'resize', 'max', mount_point) + +def _resize_ext(mount_point, devpth): + return ('resize2fs', devpth) + +def _resize_xfs(mount_point, devpth): + return ('xfs_growfs', devpth) + +# Do not use a dictionary as these commands should be able to be used +# for multiple filesystem types if possible, e.g. one command for +# ext2, ext3 and ext4. RESIZE_FS_PREFIXES_CMDS = [ - ('ext', 'resize2fs'), - ('xfs', 'xfs_growfs'), + ('btrfs', _resize_btrfs), + ('ext', _resize_ext), + ('xfs', _resize_xfs), ] NOBLOCK = "noblock" -def nodeify_path(devpth, where, log): - try: - st_dev = os.stat(where).st_dev - dev = os.makedev(os.major(st_dev), os.minor(st_dev)) - os.mknod(devpth, 0400 | stat.S_IFBLK, dev) - return st_dev - except: - if util.is_container(): - log.debug("Inside container, ignoring mknod failure in resizefs") - return - log.warn("Failed to make device node to resize %s at %s", - where, devpth) - raise - +def get_mount_info(path, log): + # Use /proc/$$/mountinfo to find the device where path is mounted. + # This is done because with a btrfs filesystem using os.stat(path) + # does not return the ID of the device. + # + # Here, / has a device of 18 (decimal). + # + # $ stat / + # File: '/' + # Size: 234 Blocks: 0 IO Block: 4096 directory + # Device: 12h/18d Inode: 256 Links: 1 + # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) + # Access: 2013-01-13 07:31:04.358011255 +0000 + # Modify: 2013-01-13 18:48:25.930011255 +0000 + # Change: 2013-01-13 18:48:25.930011255 +0000 + # Birth: - + # + # Find where / is mounted: + # + # $ mount | grep ' / ' + # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) + # + # And the device ID for /dev/vda1 is not 18: + # + # $ ls -l /dev/vda1 + # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 + # + # So use /proc/$$/mountinfo to find the device underlying the + # input path. + path_elements = [e for e in path.split('/') if e] + devpth = None + fs_type = None + match_mount_point = None + match_mount_point_elements = None + mountinfo_path = '/proc/%s/mountinfo' % os.getpid() + for line in util.load_file(mountinfo_path).splitlines(): + parts = line.split() + + mount_point = parts[4] + mount_point_elements = [e for e in mount_point.split('/') if e] + + # Ignore mounts deeper than the path in question. + if len(mount_point_elements) > len(path_elements): + continue + + # Ignore mounts where the common path is not the same.
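+ # (Editor's aside, not part of the original patch: for the path
+ # '/opt/data', path_elements is ['opt', 'data']; a mount point '/opt'
+ # yields ['opt'], shares the common prefix and so stays a candidate,
+ # while '/srv' yields ['srv'] and is skipped here.)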
+ l = min(len(mount_point_elements), len(path_elements)) + if mount_point_elements[0:l] != path_elements[0:l]: + continue + + # Ignore mount points higher than an already seen mount + # point. + if (match_mount_point_elements is not None and + len(match_mount_point_elements) > len(mount_point_elements)): + continue + + # Find the '-' which terminates a list of optional columns to + # find the filesystem type and the path to the device. See + # man 5 proc for the format of this file. + try: + i = parts.index('-') + except ValueError: + log.debug("Did not find column named '-' in %s", + mountinfo_path) + return None -def get_fs_type(st_dev, path, log): - try: - dev_entries = util.find_devs_with(tag='TYPE', oformat='value', - no_cache=True, path=path) - if not dev_entries: + # Get the path to the device. + try: + fs_type = parts[i+1] + devpth = parts[i+2] + except IndexError: + log.debug("Too few columns in %s after '-' column", mountinfo_path) return None - return dev_entries[0].strip() - except util.ProcessExecutionError: - util.logexc(log, ("Failed to get filesystem type" - " of maj=%s, min=%s for path %s"), - os.major(st_dev), os.minor(st_dev), path) - raise + match_mount_point = mount_point + match_mount_point_elements = mount_point_elements + + if devpth and fs_type and match_mount_point: + return (devpth, fs_type, match_mount_point) + else: + return None def handle(name, cfg, _cloud, log, args): if len(args) != 0: @@ -80,52 +146,47 @@ def handle(name, cfg, _cloud, log, args): # TODO(harlowja): allow what is to be resized to be configurable?? resize_what = "/" - with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.", - dir=resize_root_d, delete=True) as tfh: - devpth = tfh.name - - # Delete the file so that mknod will work - # but don't change the file handle to know that its - # removed so that when a later call that recreates - # occurs this temporary file will still benefit from - # auto deletion - tfh.unlink_now() - - st_dev = nodeify_path(devpth, resize_what, log) - fs_type = get_fs_type(st_dev, devpth, log) - if not fs_type: - log.warn("Could not determine filesystem type of %s", resize_what) - return - - resizer = None - fstype_lc = fs_type.lower() - for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS: - if fstype_lc.startswith(pfix): - resizer = root_cmd - break - - if not resizer: - log.warn("Not resizing unknown filesystem type %s for %s", - fs_type, resize_what) - return - - log.debug("Resizing %s (%s) using %s", resize_what, fs_type, resizer) - resize_cmd = [resizer, devpth] - - if resize_root == NOBLOCK: - # Fork to a child that will run - # the resize command - util.fork_cb(do_resize, resize_cmd, log) - # Don't delete the file now in the parent - tfh.delete = False - else: - do_resize(resize_cmd, log) + result = get_mount_info(resize_what, log) + if not result: + log.warn("Could not determine filesystem type of %s", resize_what) + return + + (devpth, fs_type, mount_point) = result + + # Ensure the path is a block device. 
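+ # (Editor's aside, not part of the original patch: mountinfo's device
+ # column can name a non-block source such as 'tmpfs' or 'overlay',
+ # e.g. inside containers, so bail out rather than hand a bogus device
+ # to the resize tool.)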
+ if not stat.S_ISBLK(os.stat(devpth).st_mode): + log.debug("The %s device which was found for mount point %s for %s " + "is not a block device" % (devpth, mount_point, resize_what)) + return + + resizer = None + fstype_lc = fs_type.lower() + for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS: + if fstype_lc.startswith(pfix): + resizer = root_cmd + break + + if not resizer: + log.warn("Not resizing unknown filesystem type %s for %s", + fs_type, resize_what) + return + + resize_cmd = resizer(resize_what, devpth) + log.debug("Resizing %s (%s) using %s", resize_what, fs_type, + ' '.join(resize_cmd)) + + if resize_root == NOBLOCK: + # Fork to a child that will run + # the resize command + util.fork_cb(do_resize, resize_cmd, log) + else: + do_resize(resize_cmd, log) action = 'Resized' if resize_root == NOBLOCK: action = 'Resizing (via forking)' - log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)", - action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root) + log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type, + resize_root) def do_resize(resize_cmd, log): -- cgit v1.2.3 From 6d40d5b2e3da9577d4a2686444d47125e62817fe Mon Sep 17 00:00:00 2001 From: harlowja Date: Tue, 19 Feb 2013 22:51:49 -0800 Subject: Continue working on integrating requests. --- cloudinit/config/cc_phone_home.py | 4 +-- cloudinit/ec2_utils.py | 24 +++++-------- cloudinit/sources/DataSourceEc2.py | 2 +- cloudinit/sources/DataSourceMAAS.py | 16 +++++---- cloudinit/url_helper.py | 50 ++++++++++++++++++++++---- cloudinit/user_data.py | 7 ++-- cloudinit/util.py | 71 ++++++++++++++++++++++++++++++++----- 7 files changed, 129 insertions(+), 45 deletions(-) diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index ae1349eb..90834080 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -19,7 +19,6 @@ # along with this program. If not, see . 
from cloudinit import templater -from cloudinit import url_helper as uhelp from cloudinit import util from cloudinit.settings import PER_INSTANCE @@ -112,7 +111,8 @@ def handle(name, cfg, cloud, log, args): } url = templater.render_string(url, url_params) try: - uhelp.readurl(url, data=real_submit_keys, retries=tries, sec_between=3) + util.read_file_or_url(url, data=real_submit_keys, + retries=tries, sec_between=3) except: util.logexc(log, ("Failed to post phone home data to" " %s in %s tries"), url, tries) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index b9d7a2f7..c422eea9 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -22,7 +22,6 @@ import json import urllib from cloudinit import log as logging -from cloudinit import url_helper as uh from cloudinit import util LOG = logging.getLogger(__name__) @@ -40,11 +39,10 @@ def combine_url(base, add_on): # See: http://bit.ly/TyoUQs class MetadataMaterializer(object): - def __init__(self, blob, base_url, **fetch_settings): + def __init__(self, blob, base_url): self._blob = blob self._md = None self._base_url = base_url - self._fetch_settings = fetch_settings def _parse(self, blob): leaves = {} @@ -90,8 +88,8 @@ class MetadataMaterializer(object): self._md = self._materialize(self._blob, self._base_url) return self._md - def _fetch_url(self, url, **opts): - response = uh.readurl(url, **opts) + def _fetch_url(self, url): + response = util.read_file_or_url(url) return str(response) def _decode_leaf_blob(self, blob): @@ -115,12 +113,12 @@ class MetadataMaterializer(object): child_url = combine_url(base_url, c) if not child_url.endswith("/"): child_url += "/" - child_blob = self._fetch_url(child_url, **self._fetch_settings) + child_blob = self._fetch_url(child_url) child_contents[c] = self._materialize(child_blob, child_url) leaf_contents = {} for (field, resource) in leaves.items(): leaf_url = combine_url(base_url, resource) - leaf_blob = self._fetch_url(leaf_url, **self._fetch_settings) + leaf_blob = self._fetch_url(leaf_url) leaf_contents[field] = self._decode_leaf_blob(leaf_blob) joined = {} joined.update(child_contents) @@ -136,23 +134,19 @@ def get_instance_userdata(url, version='latest', ssl_details=None): ud_url = combine_url(url, version) ud_url = combine_url(ud_url, 'user-data') try: - response = uh.readurl(ud_url, timeout=5, - retries=10, ssl_details=ssl_details) + response = util.read_file_or_url(ud_url) return str(response) except Exception: util.logexc(LOG, "Failed fetching userdata from url %s", ud_url) return None -def get_instance_metadata(url, version='latest', ssl_details=None): +def get_instance_metadata(url, version='latest'): md_url = combine_url(url, version) md_url = combine_url(md_url, 'meta-data') try: - response = uh.readurl(md_url, timeout=5, - retries=10, ssl_details=ssl_details) - materializer = MetadataMaterializer(str(response), md_url, - timeout=5, retries=10, - ssl_details=ssl_details) + response = util.read_file_or_url(md_url) + materializer = MetadataMaterializer(str(response), md_url) return materializer.materialize() except Exception: util.logexc(LOG, "Failed fetching metadata from url %s", md_url) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 204963e7..47f677d4 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -137,7 +137,7 @@ class DataSourceEc2(sources.DataSource): start_time = time.time() url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, - timeout=timeout, status_cb=LOG.warn) + 
timeout=timeout, status_cb=LOG.warn) if url: LOG.debug("Using metadata source: '%s'", url2base[url]) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index e187aec9..2de31305 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -25,9 +25,11 @@ import os import time import urllib2 +import requests + from cloudinit import log as logging from cloudinit import sources -from cloudinit import url_helper as uhelp +from cloudinit import url_helper from cloudinit import util LOG = logging.getLogger(__name__) @@ -191,8 +193,8 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, version=MD_VERSION): """ Read the maas datasource at seed_url. - header_cb is a method that should return a headers dictionary that will - be given to urllib2.Request() + - header_cb is a method that should return a headers dictionary for + a given url Expected format of seed_url is are the following files: * //meta-data/instance-id @@ -220,13 +222,13 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, else: headers = {} try: - resp = uhelp.readurl(url, headers=headers, timeout=timeout) - if resp.ok(): + resp = util.read_file_or_url(url, headers=headers, timeout=timeout) + if resp.ok: md[name] = str(resp) else: LOG.warn(("Fetching from %s resulted in" - " an invalid http code %s"), url, resp.code) - except urllib2.HTTPError as e: + " an invalid http code %s"), url, resp.status_code) + except url_helper.UrlError as e: if e.code != 404: raise return check_seed_contents(md, seed_url) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index e8330e24..0839e63b 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -58,6 +58,44 @@ def _cleanurl(url): return urlunparse(parsed_url) +class UrlResponse(object): + def __init__(self, response): + self._response = response + + @property + def contents(self): + return self._response.content + + @property + def url(self): + return self._response.url + + @property + def ok(self): + return self._response.ok + + @property + def headers(self): + return self._response.headers + + @property + def code(self): + return self._response.status_code + + def __str__(self): + return self.contents + + +class UrlError(IOError): + def __init__(self, cause): + IOError.__init__(self, str(cause)) + self.cause = cause + if isinstance(cause, exceptions.HTTPError) and cause.response: + self.code = cause.response.status_code + else: + self.code = None + + def readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers=None, ssl_details=None, check_status=True, allow_redirects=False): @@ -76,6 +114,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, if 'cert_file' in ssl_details and 'key_file' in ssl_details: req_args['cert'] = [ssl_details['cert_file'], ssl_details['key_file']] + elif 'cert_file' in ssl_details: + req_args['cert'] = str(ssl_details['cert_file']) req_args['allow_redirects'] = allow_redirects req_args['method'] = 'GET' @@ -126,13 +166,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # Doesn't seem like we can make it use a different # subclass for responses, so add our own backward-compat # attrs - if not hasattr(r, 'code'): - setattr(r, 'code', status) - if not hasattr(r, 'contents'): - setattr(r, 'contents', contents) - return r + return UrlResponse(r) except exceptions.RequestException as e: - excps.append(e) + excps.append(UrlError(e)) if i + 1 < manual_tries and sec_between > 0: LOG.debug("Please wait %s 
seconds while we wait to try again", sec_between) @@ -213,7 +249,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, e = ValueError(reason) else: return url - except exceptions.RequestException as e: + except UrlError as e: reason = "request error [%s]" % e except Exception as e: reason = "unexpected error [%s]" % e diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index 803ffc3a..4a640f1e 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -29,7 +29,6 @@ from email.mime.text import MIMEText from cloudinit import handlers from cloudinit import log as logging -from cloudinit import url_helper from cloudinit import util LOG = logging.getLogger(__name__) @@ -173,10 +172,10 @@ class UserDataProcessor(object): if include_once_on and os.path.isfile(include_once_fn): content = util.load_file(include_once_fn) else: - resp = url_helper.readurl(include_url) - if include_once_on and resp.ok(): + resp = util.read_file_or_url(include_url) + if include_once_on and resp.ok: util.write_file(include_once_fn, str(resp), mode=0600) - if resp.ok(): + if resp.ok: content = str(resp) else: LOG.warn(("Fetching from %s resulted in" diff --git a/cloudinit/util.py b/cloudinit/util.py index f5a7ac12..da2cdeda 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -51,7 +51,7 @@ import yaml from cloudinit import importer from cloudinit import log as logging from cloudinit import safeyaml -from cloudinit import url_helper as uhelp +from cloudinit import url_helper from cloudinit.settings import (CFG_BUILTIN) @@ -69,6 +69,18 @@ FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters) CONTAINER_TESTS = ['running-in-container', 'lxc-is-container'] +class FileResponse(object): + def __init__(self, path, contents): + self.code = 200 + self.headers = {} + self.contents = contents + self.ok = True + self.url = path + + def __str__(self): + return self.contents + + class ProcessExecutionError(IOError): MESSAGE_TMPL = ('%(description)s\n' @@ -628,12 +640,53 @@ def read_optional_seed(fill, base="", ext="", timeout=5): raise -def read_file_or_url(url, timeout=5, retries=10, file_retries=0): +def fetch_ssl_details(paths=None): + ssl_details = {} + # Lookup in these locations for ssl key/cert files + ssl_cert_paths = [ + '/var/lib/cloud/data/ssl', + '/var/lib/cloud/instance/data/ssl', + ] + if paths: + ssl_cert_paths.extend([ + os.path.join(paths.get_ipath_cur('data'), 'ssl'), + os.path.join(paths.get_cpath('data'), 'ssl'), + ]) + ssl_cert_paths = uniq_merge(ssl_cert_paths) + ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)] + cert_file = None + for d in ssl_cert_paths: + if os.path.isfile(os.path.join(d, 'cert.pem')): + cert_file = os.path.join(d, 'cert.pem') + break + key_file = None + for d in ssl_cert_paths: + if os.path.isfile(os.path.join(d, 'key.pem')): + key_file = os.path.join(d, 'key.pem') + break + if cert_file and key_file: + ssl_details['cert_file'] = cert_file + ssl_details['key_file'] = key_file + elif cert_file: + ssl_details['cert_file'] = cert_file + return ssl_details + + +def read_file_or_url(url, timeout=5, retries=10, + headers=None, data=None, sec_between=1, paths=None): if url.startswith("/"): url = "file://%s" % url - if url.startswith("file://"): - retries = file_retries - return uhelp.readurl(url, timeout=timeout, retries=retries) + if url.lower().startswith("file://"): + file_path = url[len("file://"):] + return FileResponse(file_path, contents=load_file(file_path)) + else: + return url_helper.readurl(url, + timeout=timeout, + retries=retries, 
+ headers=headers, + data=data, + sec_between=sec_between, + ssl_details=fetch_ssl_details(paths)) def load_yaml(blob, default=None, allowed=(dict,)): @@ -675,13 +728,13 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): md_resp = read_file_or_url(md_url, timeout, retries, file_retries) md = None - if md_resp.ok(): + if md_resp.ok: md_str = str(md_resp) md = load_yaml(md_str, default={}) ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries) ud = None - if ud_resp.ok(): + if ud_resp.ok: ud_str = str(ud_resp) ud = ud_str @@ -850,8 +903,8 @@ def get_cmdline_url(names=('cloud-config-url', 'url'), if not url: return (None, None, None) - resp = uhelp.readurl(url) - if resp.contents.startswith(starts) and resp.ok(): + resp = read_file_or_url(url) + if resp.contents.startswith(starts) and resp.ok: return (key, url, str(resp)) return (key, url, None) -- cgit v1.2.3 From 48abaf6a72dca4485f0bc64af075febac6ef590d Mon Sep 17 00:00:00 2001 From: Vlastimil Holer Date: Wed, 20 Feb 2013 17:13:38 +0100 Subject: Added arguments to packages/bddeb: -d pass through '-d' to debuild --no-cloud-utils don't depend on cloud-utils package (default: False) These are essential for building on Debian 6, because there are no python-mocker (build dependency) and cloud-utils (install dependency) in squeeze and squeeze-backports. --- packages/bddeb | 17 +++++++++++++---- packages/debian/control.in | 3 +-- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/packages/bddeb b/packages/bddeb index bda3170d..61399739 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -36,10 +36,10 @@ PKG_MP = { 'prettytable': 'python-prettytable', 'pyyaml': 'python-yaml', } -DEBUILD_ARGS = ["-us", "-S", "-uc"] +DEBUILD_ARGS = ["-us", "-S", "-uc", "-d"] -def write_debian_folder(root, version, revno): +def write_debian_folder(root, version, revno, append_requires=[]): deb_dir = util.abs_join(root, 'debian') os.makedirs(deb_dir) @@ -58,7 +58,7 @@ def write_debian_folder(root, version, revno): pkgs = [p.lower().strip() for p in stdout.splitlines()] # Map to known packages - requires = [] + requires = append_requires for p in pkgs: tgt_pkg = PKG_MP.get(p) if not tgt_pkg: @@ -87,6 +87,11 @@ def main(): " (default: %(default)s)"), default=False, action='store_true') + parser.add_argument("--no-cloud-utils", dest="no_cloud_utils", + help=("don't depend on cloud-utils package" + " (default: %(default)s)"), + default=False, + action='store_true') for ent in DEBUILD_ARGS: parser.add_argument(ent, dest="debuild_args", action='append_const', @@ -128,7 +133,11 @@ def main(): shutil.move(extracted_name, xdir) print("Creating a debian/ folder in %r" % (xdir)) - write_debian_folder(xdir, version, revno) + if not args.no_cloud_utils: + append_requires=['cloud-utils'] + else: + append_requires=[] + write_debian_folder(xdir, version, revno, append_requires) # The naming here seems to follow some debian standard # so it will whine if it is changed... diff --git a/packages/debian/control.in b/packages/debian/control.in index edb5aff5..b9352f5b 100644 --- a/packages/debian/control.in +++ b/packages/debian/control.in @@ -18,8 +18,7 @@ Standards-Version: 3.9.3 Package: cloud-init Architecture: all -Depends: cloud-utils, - procps, +Depends: procps, python, #for $r in $requires ${r}, -- cgit v1.2.3 From 27ce4ea4f2bfe1e33454b8cec63276b044e29d12 Mon Sep 17 00:00:00 2001 From: Vlastimil Holer Date: Thu, 21 Feb 2013 14:23:53 +0100 Subject: Modify init-scripts to be able to run both on RHEL and Debian. 
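The scripts previously assumed a RHEL-style layout: they sourced the Red
Hat-specific /etc/init.d/functions and declared only runlevels 3 and 5. The
LSB headers now use the standard Debian runlevels, and the override file is
looked up in both conventional locations (excerpted from the diffs below):

    # Default-Start: 2 3 4 5
    # Default-Stop: 0 1 6

    [ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
    [ -f /etc/default/cloud-init ] && . /etc/default/cloud-init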
--- setup.py | 2 +- sysvinit/cloud-config | 11 ++++------- sysvinit/cloud-final | 11 ++++------- sysvinit/cloud-init | 11 ++++------- sysvinit/cloud-init-local | 11 ++++------- 5 files changed, 17 insertions(+), 29 deletions(-) diff --git a/setup.py b/setup.py index 24476681..38c8cd93 100755 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ INITSYS_FILES = { 'upstart': [f for f in glob('upstart/*') if is_f(f)], } INITSYS_ROOTS = { - 'sysvinit': '/etc/rc.d/init.d', + 'sysvinit': '/etc/init.d', 'systemd': '/etc/systemd/system/', 'upstart': '/etc/init/', } diff --git a/sysvinit/cloud-config b/sysvinit/cloud-config index e587446d..ad8ed831 100755 --- a/sysvinit/cloud-config +++ b/sysvinit/cloud-config @@ -29,15 +29,13 @@ # Should-Start: $time # Required-Stop: # Should-Stop: -# Default-Start: 3 5 -# Default-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 # Short-Description: The config cloud-init job # Description: Start cloud-init and runs the config phase # and any associated config modules as desired. ### END INIT INFO -. /etc/init.d/functions - # Return values acc. to LSB for all commands but status: # 0 - success # 1 - generic or unspecified error @@ -60,8 +58,9 @@ prog="cloud-init" cloud_init="/usr/bin/cloud-init" conf="/etc/cloud/cloud.cfg" -# If there exists a sysconfig variable override file use it... +# If there exist sysconfig/default variable override files use it... [ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init start() { [ -x $cloud_init ] || return 5 @@ -80,8 +79,6 @@ stop() { return $RETVAL } -. /etc/init.d/functions - case "$1" in start) start diff --git a/sysvinit/cloud-final b/sysvinit/cloud-final index 5deb8457..aeae8903 100755 --- a/sysvinit/cloud-final +++ b/sysvinit/cloud-final @@ -29,15 +29,13 @@ # Should-Start: $time # Required-Stop: # Should-Stop: -# Default-Start: 3 5 -# Default-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 # Short-Description: The final cloud-init job # Description: Start cloud-init and runs the final phase # and any associated final modules as desired. ### END INIT INFO -. /etc/init.d/functions - # Return values acc. to LSB for all commands but status: # 0 - success # 1 - generic or unspecified error @@ -60,8 +58,9 @@ prog="cloud-init" cloud_init="/usr/bin/cloud-init" conf="/etc/cloud/cloud.cfg" -# If there exists a sysconfig variable override file use it... +# If there exist sysconfig/default variable override files use it... [ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init start() { [ -x $cloud_init ] || return 5 @@ -80,8 +79,6 @@ stop() { return $RETVAL } -. /etc/init.d/functions - case "$1" in start) start diff --git a/sysvinit/cloud-init b/sysvinit/cloud-init index f8ab5d5f..c1c92ad0 100755 --- a/sysvinit/cloud-init +++ b/sysvinit/cloud-init @@ -29,15 +29,13 @@ # Should-Start: $time # Required-Stop: # Should-Stop: -# Default-Start: 3 5 -# Default-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 # Short-Description: The initial cloud-init job (net and fs contingent) # Description: Start cloud-init and runs the initialization phase # and any associated initial modules as desired. ### END INIT INFO -. /etc/init.d/functions - # Return values acc. 
to LSB for all commands but status: # 0 - success # 1 - generic or unspecified error @@ -60,8 +58,9 @@ prog="cloud-init" cloud_init="/usr/bin/cloud-init" conf="/etc/cloud/cloud.cfg" -# If there exists a sysconfig variable override file use it... +# If there exist sysconfig/default variable override files use it... [ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init start() { [ -x $cloud_init ] || return 5 @@ -80,8 +79,6 @@ stop() { return $RETVAL } -. /etc/init.d/functions - case "$1" in start) start diff --git a/sysvinit/cloud-init-local b/sysvinit/cloud-init-local index 0c63b9b0..b53e0db2 100755 --- a/sysvinit/cloud-init-local +++ b/sysvinit/cloud-init-local @@ -29,15 +29,13 @@ # Should-Start: $time # Required-Stop: # Should-Stop: -# Default-Start: 3 5 -# Default-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 # Short-Description: The initial cloud-init job (local fs contingent) # Description: Start cloud-init and runs the initialization phases # and any associated initial modules as desired. ### END INIT INFO -. /etc/init.d/functions - # Return values acc. to LSB for all commands but status: # 0 - success # 1 - generic or unspecified error @@ -60,8 +58,9 @@ prog="cloud-init" cloud_init="/usr/bin/cloud-init" conf="/etc/cloud/cloud.cfg" -# If there exists a sysconfig variable override file use it... +# If there exist sysconfig/default variable override files use it... [ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init start() { [ -x $cloud_init ] || return 5 @@ -80,8 +79,6 @@ stop() { return $RETVAL } -. /etc/init.d/functions - case "$1" in start) start -- cgit v1.2.3 From 3f61f2ee54ca1e550c733bb5d9b27f6250c1e51e Mon Sep 17 00:00:00 2001 From: Vlastimil Holer Date: Thu, 21 Feb 2013 15:07:54 +0100 Subject: New option INIT_SYSTEM=sysvinit_deb for Debian /etc/ directories --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 38c8cd93..b30cd53b 100755 --- a/setup.py +++ b/setup.py @@ -38,11 +38,13 @@ def is_f(p): INITSYS_FILES = { 'sysvinit': [f for f in glob('sysvinit/*') if is_f(f)], + 'sysvinit_deb': [f for f in glob('sysvinit/*') if is_f(f)], 'systemd': [f for f in glob('systemd/*') if is_f(f)], 'upstart': [f for f in glob('upstart/*') if is_f(f)], } INITSYS_ROOTS = { - 'sysvinit': '/etc/init.d', + 'sysvinit': '/etc/rc.d/init.d', + 'sysvinit_deb': '/etc/init.d', 'systemd': '/etc/systemd/system/', 'upstart': '/etc/init/', } -- cgit v1.2.3 From 10ea1c0bb933b21d32012d89b218a3bbbd15a75a Mon Sep 17 00:00:00 2001 From: Vlastimil Holer Date: Thu, 21 Feb 2013 16:29:06 +0100 Subject: Split Debian and Ubuntu APT sources --- cloudinit/config/cc_apt_configure.py | 9 ++++-- templates/sources.list.debian.tmpl | 28 +++++++++++++++++ templates/sources.list.tmpl | 60 ------------------------------------ templates/sources.list.ubuntu.tmpl | 60 ++++++++++++++++++++++++++++++++++++ 4 files changed, 94 insertions(+), 63 deletions(-) create mode 100644 templates/sources.list.debian.tmpl delete mode 100644 templates/sources.list.tmpl create mode 100644 templates/sources.list.ubuntu.tmpl diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index f8664160..3ce3b351 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -140,10 +140,13 @@ def get_release(): def generate_sources_list(codename, mirrors, cloud, log): - template_fn = 
cloud.get_template_filename('sources.list') + template_fn = cloud.get_template_filename('sources.list.%s' % + (cloud.distro.name)) if not template_fn: - log.warn("No template found, not rendering /etc/apt/sources.list") - return + template_fn = cloud.get_template_filename('sources.list') + if not template_fn: + log.warn("No template found, not rendering /etc/apt/sources.list") + return params = {'codename': codename} for k in mirrors: diff --git a/templates/sources.list.debian.tmpl b/templates/sources.list.debian.tmpl new file mode 100644 index 00000000..609bc6bd --- /dev/null +++ b/templates/sources.list.debian.tmpl @@ -0,0 +1,28 @@ +\## Note, this file is written by cloud-init on first boot of an instance +\## modifications made here will not survive a re-bundle. +\## if you wish to make changes you can: +\## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg +\## or do the same in user-data +\## b.) add sources in /etc/apt/sources.list.d +\## c.) make changes to template file /etc/cloud/templates/sources.list.debian.tmpl +\### + +# See http://www.debian.org/releases/stable/i386/release-notes/ch-upgrading.html +# for how to upgrade to newer versions of the distribution. +deb $mirror $codename main contrib non-free +deb-src $mirror $codename main contrib non-free + +\## Major bug fix updates produced after the final release of the +\## distribution. +deb $security $codename/updates main contrib non-free +deb-src $security $codename/updates main contrib non-free +deb $mirror $codename-updates main contrib non-free +deb-src $mirror $codename-updates main contrib non-free + +\## Uncomment the following two lines to add software from the 'backports' +\## repository. +\## N.B. software from this repository may not have been tested as +\## extensively as that contained in the main release, although it includes +\## newer versions of some applications which may provide useful features. +# deb http://backports.debian.org/debian-backports $codename-backports main contrib non-free +# deb-src http://backports.debian.org/debian-backports $codename-backports main contrib non-free diff --git a/templates/sources.list.tmpl b/templates/sources.list.tmpl deleted file mode 100644 index ce395b3d..00000000 --- a/templates/sources.list.tmpl +++ /dev/null @@ -1,60 +0,0 @@ -\## Note, this file is written by cloud-init on first boot of an instance -\## modifications made here will not survive a re-bundle. -\## if you wish to make changes you can: -\## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg -\## or do the same in user-data -\## b.) add sources in /etc/apt/sources.list.d -\## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl -\### - -# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to -# newer versions of the distribution. -deb $mirror $codename main -deb-src $mirror $codename main - -\## Major bug fix updates produced after the final release of the -\## distribution. -deb $mirror $codename-updates main -deb-src $mirror $codename-updates main - -\## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu -\## team. Also, please note that software in universe WILL NOT receive any -\## review or updates from the Ubuntu security team. -deb $mirror $codename universe -deb-src $mirror $codename universe -deb $mirror $codename-updates universe -deb-src $mirror $codename-updates universe - -\## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu -\## team, and may not be under a free licence. 
Please satisfy yourself as to -\## your rights to use the software. Also, please note that software in -\## multiverse WILL NOT receive any review or updates from the Ubuntu -\## security team. -# deb $mirror $codename multiverse -# deb-src $mirror $codename multiverse -# deb $mirror $codename-updates multiverse -# deb-src $mirror $codename-updates multiverse - -\## Uncomment the following two lines to add software from the 'backports' -\## repository. -\## N.B. software from this repository may not have been tested as -\## extensively as that contained in the main release, although it includes -\## newer versions of some applications which may provide useful features. -\## Also, please note that software in backports WILL NOT receive any review -\## or updates from the Ubuntu security team. -# deb $mirror $codename-backports main restricted universe multiverse -# deb-src $mirror $codename-backports main restricted universe multiverse - -\## Uncomment the following two lines to add software from Canonical's -\## 'partner' repository. -\## This software is not part of Ubuntu, but is offered by Canonical and the -\## respective vendors as a service to Ubuntu users. -# deb http://archive.canonical.com/ubuntu $codename partner -# deb-src http://archive.canonical.com/ubuntu $codename partner - -deb $security $codename-security main -deb-src $security $codename-security main -deb $security $codename-security universe -deb-src $security $codename-security universe -# deb $security $codename-security multiverse -# deb-src $security $codename-security multiverse diff --git a/templates/sources.list.ubuntu.tmpl b/templates/sources.list.ubuntu.tmpl new file mode 100644 index 00000000..ce395b3d --- /dev/null +++ b/templates/sources.list.ubuntu.tmpl @@ -0,0 +1,60 @@ +\## Note, this file is written by cloud-init on first boot of an instance +\## modifications made here will not survive a re-bundle. +\## if you wish to make changes you can: +\## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg +\## or do the same in user-data +\## b.) add sources in /etc/apt/sources.list.d +\## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl +\### + +# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to +# newer versions of the distribution. +deb $mirror $codename main +deb-src $mirror $codename main + +\## Major bug fix updates produced after the final release of the +\## distribution. +deb $mirror $codename-updates main +deb-src $mirror $codename-updates main + +\## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu +\## team. Also, please note that software in universe WILL NOT receive any +\## review or updates from the Ubuntu security team. +deb $mirror $codename universe +deb-src $mirror $codename universe +deb $mirror $codename-updates universe +deb-src $mirror $codename-updates universe + +\## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu +\## team, and may not be under a free licence. Please satisfy yourself as to +\## your rights to use the software. Also, please note that software in +\## multiverse WILL NOT receive any review or updates from the Ubuntu +\## security team. +# deb $mirror $codename multiverse +# deb-src $mirror $codename multiverse +# deb $mirror $codename-updates multiverse +# deb-src $mirror $codename-updates multiverse + +\## Uncomment the following two lines to add software from the 'backports' +\## repository. +\## N.B. 
software from this repository may not have been tested as +\## extensively as that contained in the main release, although it includes +\## newer versions of some applications which may provide useful features. +\## Also, please note that software in backports WILL NOT receive any review +\## or updates from the Ubuntu security team. +# deb $mirror $codename-backports main restricted universe multiverse +# deb-src $mirror $codename-backports main restricted universe multiverse + +\## Uncomment the following two lines to add software from Canonical's +\## 'partner' repository. +\## This software is not part of Ubuntu, but is offered by Canonical and the +\## respective vendors as a service to Ubuntu users. +# deb http://archive.canonical.com/ubuntu $codename partner +# deb-src http://archive.canonical.com/ubuntu $codename partner + +deb $security $codename-security main +deb-src $security $codename-security main +deb $security $codename-security universe +deb-src $security $codename-security universe +# deb $security $codename-security multiverse +# deb-src $security $codename-security multiverse -- cgit v1.2.3 From 9dfb60d3144860334ab1ad1d72920d962139461f Mon Sep 17 00:00:00 2001 From: harlowja Date: Thu, 21 Feb 2013 22:39:30 -0800 Subject: More work on requests integration. --- cloudinit/config/cc_phone_home.py | 3 ++- cloudinit/ec2_utils.py | 17 +++++++++++------ cloudinit/sources/DataSourceEc2.py | 6 ++++-- cloudinit/sources/DataSourceMAAS.py | 15 +++++++++------ cloudinit/util.py | 4 ++-- 5 files changed, 28 insertions(+), 17 deletions(-) diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 90834080..5a4332ef 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -112,7 +112,8 @@ def handle(name, cfg, cloud, log, args): url = templater.render_string(url, url_params) try: util.read_file_or_url(url, data=real_submit_keys, - retries=tries, sec_between=3) + retries=tries, sec_between=3, + ssl_details=util.fetch_ssl_details(cloud.paths)) except: util.logexc(log, ("Failed to post phone home data to" " %s in %s tries"), url, tries) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index c422eea9..026ee178 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -38,11 +38,16 @@ def combine_url(base, add_on): # See: http://bit.ly/TyoUQs +# +# Since the boto metadata reader uses the old urllib which does not +# support ssl, we need to go ahead and create our own reader which +# works the same as the boto one (for now).
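+# (Editor's aside, not part of the original patch: the materializer walks
+# the index listing the metadata service returns, e.g. lines such as
+# 'ami-id' and 'public-keys/'; entries ending in '/' are recursed into as
+# children and the rest are fetched as leaf values, producing one nested
+# dict.)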
class MetadataMaterializer(object): - def __init__(self, blob, base_url): + def __init__(self, blob, base_url, ssl_details): self._blob = blob self._md = None self._base_url = base_url + self._ssl_details = ssl_details def _parse(self, blob): leaves = {} @@ -89,7 +94,7 @@ class MetadataMaterializer(object): return self._md def _fetch_url(self, url): - response = util.read_file_or_url(url) + response = util.read_file_or_url(url, ssl_details=self._ssl_details) return str(response) def _decode_leaf_blob(self, blob): @@ -134,19 +139,19 @@ def get_instance_userdata(url, version='latest', ssl_details=None): ud_url = combine_url(url, version) ud_url = combine_url(ud_url, 'user-data') try: - response = util.read_file_or_url(ud_url) + response = util.read_file_or_url(ud_url, ssl_details=ssl_details) return str(response) except Exception: util.logexc(LOG, "Failed fetching userdata from url %s", ud_url) return None -def get_instance_metadata(url, version='latest'): +def get_instance_metadata(url, version='latest', ssl_details=None): md_url = combine_url(url, version) md_url = combine_url(md_url, 'meta-data') try: - response = util.read_file_or_url(md_url) - materializer = MetadataMaterializer(str(response), md_url) + response = util.read_file_or_url(md_url, ssl_details=ssl_details) + materializer = MetadataMaterializer(str(response), md_url, ssl_details) return materializer.materialize() except Exception: util.logexc(LOG, "Failed fetching metadata from url %s", md_url) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 47f677d4..eee4e6bc 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -64,8 +64,10 @@ class DataSourceEc2(sources.DataSource): if not self.wait_for_metadata_service(): return False start_time = time.time() - self.userdata_raw = ec2_utils.get_instance_userdata(self.metadata_address, self.api_ver) - self.metadata = ec2_utils.get_instance_metadata(self.metadata_address, self.api_ver) + self.userdata_raw = ec2_utils.get_instance_userdata(self.metadata_address, self.api_ver, + ssl_details=util.fetch_ssl_details(self.paths)) + self.metadata = ec2_utils.get_instance_metadata(self.metadata_address, self.api_ver, + ssl_details=util.fetch_ssl_details(self.paths)) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) return True diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 2de31305..dc048943 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -81,7 +81,8 @@ class DataSourceMAAS(sources.DataSource): self.base_url = url (userdata, metadata) = read_maas_seed_url(self.base_url, - self.md_headers) + self.md_headers, + paths=self.paths) self.userdata_raw = userdata self.metadata = metadata return True @@ -141,7 +142,7 @@ class DataSourceMAAS(sources.DataSource): LOG.debug("Using metadata source: '%s'", url) else: LOG.critical("Giving up on md from %s after %i seconds", - urls, int(time.time() - starttime)) + urls, int(time.time() - starttime)) return bool(url) @@ -190,7 +191,7 @@ def read_maas_seed_dir(seed_d): def read_maas_seed_url(seed_url, header_cb=None, timeout=None, - version=MD_VERSION): + version=MD_VERSION, paths=None): """ Read the maas datasource at seed_url. 
- header_cb is a method that should return a headers dictionary for @@ -222,12 +223,13 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, else: headers = {} try: - resp = util.read_file_or_url(url, headers=headers, timeout=timeout) + resp = util.read_file_or_url(url, headers=headers, timeout=timeout, + ssl_details=util.fetch_ssl_details(paths)) if resp.ok: md[name] = str(resp) else: LOG.warn(("Fetching from %s resulted in" - " an invalid http code %s"), url, resp.status_code) + " an invalid http code %s"), url, resp.code) except url_helper.UrlError as e: if e.code != 404: raise @@ -372,7 +374,8 @@ if __name__ == "__main__": if args.subcmd == "check-seed": if args.url.startswith("http"): (userdata, metadata) = read_maas_seed_url(args.url, - header_cb=my_headers, version=args.apiver) + header_cb=my_headers, + version=args.apiver) else: (userdata, metadata) = read_maas_seed_url(args.url) print "=== userdata ===" diff --git a/cloudinit/util.py b/cloudinit/util.py index da2cdeda..307ed613 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -673,7 +673,7 @@ def fetch_ssl_details(paths=None): def read_file_or_url(url, timeout=5, retries=10, - headers=None, data=None, sec_between=1, paths=None): + headers=None, data=None, sec_between=1, ssl_details=None): if url.startswith("/"): url = "file://%s" % url if url.lower().startswith("file://"): @@ -686,7 +686,7 @@ def read_file_or_url(url, timeout=5, retries=10, headers=headers, data=data, sec_between=sec_between, - ssl_details=fetch_ssl_details(paths)) + ssl_details=ssl_details) def load_yaml(blob, default=None, allowed=(dict,)): -- cgit v1.2.3 From 46a7a39775ed8f745ec8b63a9563f3ae6337d845 Mon Sep 17 00:00:00 2001 From: harlowja Date: Thu, 21 Feb 2013 22:56:17 -0800 Subject: Why did this file showup. --- cloudinit/ec2_utils.py.moved | 157 ------------------------------------------- 1 file changed, 157 deletions(-) delete mode 100644 cloudinit/ec2_utils.py.moved diff --git a/cloudinit/ec2_utils.py.moved b/cloudinit/ec2_utils.py.moved deleted file mode 100644 index 4812eccb..00000000 --- a/cloudinit/ec2_utils.py.moved +++ /dev/null @@ -1,157 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joshua Harlow -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from urlparse import (urlparse, urlunparse) - -import json -import urllib - -from cloudinit import log as logging -from cloudinit import util - -LOG = logging.getLogger(__name__) - - -def combine_url(base, add_on): - base_parsed = list(urlparse(base)) - path = base_parsed[2] - if path and not path.endswith("/"): - path += "/" - path += urllib.quote(str(add_on), safe="/:") - base_parsed[2] = path - return urlunparse(base_parsed) - - -# See: http://bit.ly/TyoUQs -# -# Since boto metadata reader uses the old urllib which does not -# support ssl, we need to ahead and create our own reader which -# works the same as the boto one (for now). 
-class MetadataMaterializer(object): - def __init__(self, blob, base_url, ssl_details): - self._blob = blob - self._md = None - self._base_url = base_url - self._ssl_details = ssl_details - - def _parse(self, blob): - leaves = {} - children = [] - if not blob: - return (leaves, children) - - def has_children(item): - if item.endswith("/"): - return True - else: - return False - - def get_name(item): - if item.endswith("/"): - return item.rstrip("/") - return item - - for field in blob.splitlines(): - field = field.strip() - field_name = get_name(field) - if not field or not field_name: - continue - if has_children(field): - if field_name not in children: - children.append(field_name) - else: - contents = field.split("=", 1) - resource = field_name - if len(contents) > 1: - # What a PITA... - (ident, sub_contents) = contents - checked_ident = util.safe_int(ident) - if checked_ident is not None: - resource = "%s/openssh-key" % (checked_ident) - field_name = sub_contents - leaves[field_name] = resource - return (leaves, children) - - def materialize(self): - if self._md is not None: - return self._md - self._md = self._materialize(self._blob, self._base_url) - return self._md - - def _fetch_url(self, url): - response = util.read_file_or_url(url, ssl_details=self._ssl_details) - return str(response) - - def _decode_leaf_blob(self, blob): - if not blob: - return blob - stripped_blob = blob.strip() - if stripped_blob.startswith("{") and stripped_blob.endswith("}"): - # Assume and try with json - try: - return json.loads(blob) - except (ValueError, TypeError): - pass - if blob.find("\n") != -1: - return blob.splitlines() - return blob - - def _materialize(self, blob, base_url): - (leaves, children) = self._parse(blob) - child_contents = {} - for c in children: - child_url = combine_url(base_url, c) - if not child_url.endswith("/"): - child_url += "/" - child_blob = self._fetch_url(child_url) - child_contents[c] = self._materialize(child_blob, child_url) - leaf_contents = {} - for (field, resource) in leaves.items(): - leaf_url = combine_url(base_url, resource) - leaf_blob = self._fetch_url(leaf_url) - leaf_contents[field] = self._decode_leaf_blob(leaf_blob) - joined = {} - joined.update(child_contents) - for field in leaf_contents.keys(): - if field in joined: - LOG.warn("Duplicate key found in results from %s", base_url) - else: - joined[field] = leaf_contents[field] - return joined - - -def get_instance_userdata(api_version, metadata_address, ssl_details=None): - ud_url = combine_url(metadata_address, api_version) - ud_url = combine_url(ud_url, 'user-data') - try: - response = util.read_file_or_url(ud_url, ssl_details=ssl_details) - return str(response) - except Exception: - util.logexc(LOG, "Failed fetching userdata from url %s", ud_url) - return None - -def get_instance_metadata(api_version, metadata_address, ssl_details=None): - md_url = combine_url(metadata_address, api_version) - md_url = combine_url(md_url, 'meta-data') - try: - response = util.read_file_or_url(md_url, ssl_details=ssl_details) - materializer = MetadataMaterializer(str(response), md_url, ssl_details) - return materializer.materialize() - except Exception: - util.logexc(LOG, "Failed fetching metadata from url %s", md_url) - return None -- cgit v1.2.3 From eacfc7ffbec3e6a0348ed484da895e2d2fc5ba10 Mon Sep 17 00:00:00 2001 From: harlowja Date: Sat, 23 Feb 2013 21:23:24 -0800 Subject: Get tests working and further adjustments. 
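The notable interface change settled here: UrlResponse.ok goes back to
being a method and gains a redirects_ok flag, while util grows a
StringResponse that FileResponse subclasses so file:// and http(s)://
reads look alike to callers. The ok() check reduces to this (an
illustrative standalone sketch, not the class itself):

    def is_ok(code, redirects_ok=False):
        # 2xx always passes; 3xx passes only when redirects are allowed.
        upper = 400 if redirects_ok else 300
        return 200 <= code < upper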
--- cloudinit/sources/DataSourceMAAS.py | 9 +++++--- cloudinit/url_helper.py | 21 +++++++++-------- cloudinit/user_data.py | 8 ++++--- cloudinit/util.py | 34 ++++++++++++++++++++-------- tests/unittests/test__init__.py | 10 ++++---- tests/unittests/test_datasource/test_maas.py | 11 +++++---- 6 files changed, 60 insertions(+), 33 deletions(-) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 6e1133b2..0c526305 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -223,9 +223,12 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, else: headers = {} try: - resp = util.read_file_or_url(url, headers=headers, timeout=timeout, - ssl_details=util.fetch_ssl_details(paths)) - if resp.ok: + ssl_details = util.fetch_ssl_details(paths) + resp = util.read_file_or_url(url, + headers=headers, + timeout=timeout, + ssl_details=ssl_details) + if resp.ok(): md[name] = str(resp) else: LOG.warn(("Fetching from %s resulted in" diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 0839e63b..300e70c2 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -70,9 +70,14 @@ class UrlResponse(object): def url(self): return self._response.url - @property - def ok(self): - return self._response.ok + def ok(self, redirects_ok=False): + upper = 300 + if redirects_ok: + upper = 400 + if self.code >= 200 and self.code < upper: + return True + else: + return False @property def headers(self): @@ -158,11 +163,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, r = requests.request(**req_args) if check_status: r.raise_for_status() - contents = r.content - status = r.status_code - headers = r.headers LOG.debug("Read from %s (%s, %sb) after %s attempts", url, - status, len(contents), (i + 1)) + r.status_code, len(r.content), (i + 1)) # Doesn't seem like we can make it use a different # subclass for responses, so add our own backward-compat # attrs @@ -256,8 +258,9 @@ def wait_for_url(urls, max_wait=None, timeout=None, time_taken = int(time.time() - start_time) status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url, - time_taken, - max_wait, reason) + time_taken, + max_wait, + reason) status_cb(status_msg) if exception_cb: exception_cb(msg=status_msg, exception=e) diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index eaf448a7..df069ff8 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -59,6 +59,7 @@ EXAMINE_FOR_LAUNCH_INDEX = ["text/cloud-config"] class UserDataProcessor(object): def __init__(self, paths): self.paths = paths + self.ssl_details = util.fetch_ssl_details(paths) def process(self, blob): accumulating_msg = MIMEMultipart() @@ -172,10 +173,11 @@ class UserDataProcessor(object): if include_once_on and os.path.isfile(include_once_fn): content = util.load_file(include_once_fn) else: - resp = util.read_file_or_url(include_url) - if include_once_on and resp.ok: + resp = util.read_file_or_url(include_url, + ssl_details=self.ssl_details) + if include_once_on and resp.ok(): util.write_file(include_once_fn, str(resp), mode=0600) - if resp.ok: + if resp.ok(): content = str(resp) else: LOG.warn(("Fetching from %s resulted in" diff --git a/cloudinit/util.py b/cloudinit/util.py index 42b3ab01..dc3c5639 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -70,18 +70,31 @@ FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters) CONTAINER_TESTS = ['running-in-container', 'lxc-is-container'] -class FileResponse(object): - def __init__(self, path, 
contents): - self.code = 200 +# Made to have same accessors as UrlResponse so that the +# read_file_or_url can return this or that object and the +# 'user' of those objects will not need to know the difference. +class StringResponse(object): + def __init__(self, contents, code=200): + self.code = code self.headers = {} self.contents = contents - self.ok = True - self.url = path + self.url = None + + def ok(self, *args, **kwargs): + if self.code != 200: + return False + return True def __str__(self): return self.contents +class FileResponse(StringResponse): + def __init__(self, path, contents, code=200): + StringResponse.__init__(self, contents, code=code) + self.url = path + + class ProcessExecutionError(IOError): MESSAGE_TMPL = ('%(description)s\n' @@ -630,7 +643,7 @@ def read_optional_seed(fill, base="", ext="", timeout=5): fill['user-data'] = ud fill['meta-data'] = md return True - except OSError as e: + except IOError as e: if e.errno == errno.ENOENT: return False raise @@ -670,9 +683,12 @@ def fetch_ssl_details(paths=None): def read_file_or_url(url, timeout=5, retries=10, headers=None, data=None, sec_between=1, ssl_details=None): + url = url.lstrip() if url.startswith("/"): url = "file://%s" % url if url.lower().startswith("file://"): + if data: + LOG.warn("Unable to post data to file resource %s", url) file_path = url[len("file://"):] return FileResponse(file_path, contents=load_file(file_path)) else: @@ -724,13 +740,13 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): md_resp = read_file_or_url(md_url, timeout, retries, file_retries) md = None - if md_resp.ok: + if md_resp.ok(): md_str = str(md_resp) md = load_yaml(md_str, default={}) ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries) ud = None - if ud_resp.ok: + if ud_resp.ok(): ud_str = str(ud_resp) ud = ud_str @@ -900,7 +916,7 @@ def get_cmdline_url(names=('cloud-config-url', 'url'), return (None, None, None) resp = read_file_or_url(url) - if resp.contents.startswith(starts) and resp.ok: + if resp.contents.startswith(starts) and resp.ok(): return (key, url, str(resp)) return (key, url, None) diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index ac082076..d707afa9 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -191,8 +191,8 @@ class TestCmdlineUrl(MockerTestCase): mock_readurl = self.mocker.replace(url_helper.readurl, passthrough=False) - mock_readurl(url) - self.mocker.result(url_helper.UrlResponse(200, payload)) + mock_readurl(url, ARGS, KWARGS) + self.mocker.result(util.StringResponse(payload)) self.mocker.replay() self.assertEqual((key, url, None), @@ -207,8 +207,8 @@ class TestCmdlineUrl(MockerTestCase): mock_readurl = self.mocker.replace(url_helper.readurl, passthrough=False) - mock_readurl(url) - self.mocker.result(url_helper.UrlResponse(200, payload)) + mock_readurl(url, ARGS, KWARGS) + self.mocker.result(util.StringResponse(payload)) self.mocker.replay() self.assertEqual((key, url, payload), @@ -221,7 +221,7 @@ class TestCmdlineUrl(MockerTestCase): cmdline = "ro %s=%s bar=1" % (key, url) self.mocker.replace(url_helper.readurl, passthrough=False) - self.mocker.result(url_helper.UrlResponse(400)) + self.mocker.result(util.StringResponse("")) self.mocker.replay() self.assertEqual((None, None, None), diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index b56fea82..47f8caa4 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ 
b/tests/unittests/test_datasource/test_maas.py @@ -3,12 +3,13 @@ import os from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper +from cloudinit import util from tests.unittests.helpers import populate_dir -from mocker import MockerTestCase +import mocker -class TestMAASDataSource(MockerTestCase): +class TestMAASDataSource(mocker.MockerTestCase): def setUp(self): super(TestMAASDataSource, self).setUp() @@ -115,9 +116,11 @@ class TestMAASDataSource(MockerTestCase): for key in valid_order: url = "%s/%s/%s" % (my_seed, my_ver, key) - mock_request(url, headers=my_headers, timeout=None) + mock_request(url, headers=my_headers, timeout=mocker.ANY, + data=mocker.ANY, sec_between=mocker.ANY, + ssl_details=mocker.ANY, retries=mocker.ANY) resp = valid.get(key) - self.mocker.result(url_helper.UrlResponse(200, resp)) + self.mocker.result(util.StringResponse(resp)) self.mocker.replay() (userdata, metadata) = DataSourceMAAS.read_maas_seed_url(my_seed, -- cgit v1.2.3 From f22388a04076247490c02f605b176c3c45ecd425 Mon Sep 17 00:00:00 2001 From: harlowja Date: Sat, 23 Feb 2013 23:22:57 -0800 Subject: Fix the maas callback mechanism now that requests is used. --- cloudinit/sources/DataSourceMAAS.py | 25 ++++++++++++------------ cloudinit/url_helper.py | 38 +++++++++++++++++++++++-------------- 2 files changed, 36 insertions(+), 27 deletions(-) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 0c526305..f3759b4b 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -25,8 +25,6 @@ import os import time import urllib2 -import requests - from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper @@ -81,7 +79,7 @@ class DataSourceMAAS(sources.DataSource): self.base_url = url (userdata, metadata) = read_maas_seed_url(self.base_url, - self.md_headers, + self._md_headers, paths=self.paths) self.userdata_raw = userdata self.metadata = metadata @@ -90,7 +88,7 @@ class DataSourceMAAS(sources.DataSource): util.logexc(LOG, "Failed fetching metadata from url %s", url) return False - def md_headers(self, url): + def _md_headers(self, url): mcfg = self.ds_cfg # If we are missing token_key, token_secret or consumer_key @@ -134,9 +132,10 @@ class DataSourceMAAS(sources.DataSource): starttime = time.time() check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) urls = [check_url] - url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, - timeout=timeout, exception_cb=self._except_cb, - headers_cb=self.md_headers) + url = url_helper.wait_for_url(urls=urls, max_wait=max_wait, + timeout=timeout, + exception_cb=self._except_cb, + headers_cb=self._md_headers) if url: LOG.debug("Using metadata source: '%s'", url) @@ -147,23 +146,23 @@ class DataSourceMAAS(sources.DataSource): return bool(url) def _except_cb(self, msg, exception): - if not (isinstance(exception, urllib2.HTTPError) and + if not (isinstance(exception, url_helper.UrlError) and (exception.code == 403 or exception.code == 401)): return + if 'date' not in exception.headers: - LOG.warn("date field not in %d headers" % exception.code) + LOG.warn("Missing header 'date' in %s response", exception.code) return date = exception.headers['date'] - try: ret_time = time.mktime(parsedate(date)) - except: - LOG.warn("failed to convert datetime '%s'") + except Exception as e: + LOG.warn("Failed to convert datetime '%s': %s", date, e) return self.oauth_clockskew = int(ret_time - time.time()) - LOG.warn("set oauth clockskew to %d" % 
self.oauth_clockskew) + LOG.warn("Setting oauth clockskew to %d", self.oauth_clockskew) return diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 300e70c2..6f06761a 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -92,13 +92,13 @@ class UrlResponse(object): class UrlError(IOError): - def __init__(self, cause): + def __init__(self, cause, code=None, headers=None): IOError.__init__(self, str(cause)) self.cause = cause - if isinstance(cause, exceptions.HTTPError) and cause.response: - self.code = cause.response.status_code - else: - self.code = None + self.code = code + self.headers = headers + if self.headers is None: + self.headers = {} def readurl(url, data=None, timeout=None, retries=0, sec_between=1, @@ -170,7 +170,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # attrs return UrlResponse(r) except exceptions.RequestException as e: - excps.append(UrlError(e)) + if isinstance(e, (exceptions.HTTPError)) and e.response: + excps.append(UrlError(e, code=e.response.status_code, + headers=e.response.headers)) + else: + excps.append(UrlError(e)) if i + 1 < manual_tries and sec_between > 0: LOG.debug("Please wait %s seconds while we wait to try again", sec_between) @@ -235,20 +239,23 @@ def wait_for_url(urls, max_wait=None, timeout=None, timeout = int((start_time + max_wait) - now) reason = "" + e = None try: if headers_cb is not None: headers = headers_cb(url) else: headers = {} - resp = readurl(url, headers=headers, timeout=timeout, - check_status=False) - if not resp.contents: - reason = "empty response [%s]" % (resp.code) - e = ValueError(reason) - elif not resp.ok(): - reason = "bad status code [%s]" % (resp.code) - e = ValueError(reason) + response = readurl(url, headers=headers, timeout=timeout, + check_status=False) + if not response.contents: + reason = "empty response [%s]" % (response.code) + e = UrlError(ValueError(reason), + code=response.code, headers=response.headers) + elif not response.ok(): + reason = "bad status code [%s]" % (response.code) + e = UrlError(ValueError(reason), + code=response.code, headers=response.headers) else: return url except UrlError as e: @@ -263,6 +270,9 @@ def wait_for_url(urls, max_wait=None, timeout=None, reason) status_cb(status_msg) if exception_cb: + # This can be used to alter the headers that will be sent + # in the future, for example this is what the MAAS datasource + # does. exception_cb(msg=status_msg, exception=e) if timeup(max_wait, start_time): -- cgit v1.2.3 From 6a72a8677ad2e4e66669d2be93880643b0525b51 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 27 Feb 2013 13:02:06 -0500 Subject: do not reload upstart configuration on upstart jobs For now, we disable reloading upstart jobs due to bug 1124384. At some point in the future, we could enable it again when that bug is fixed. The change here allows for a boothook in a multipart input to write the file '/run/cloud-init-upstart-reload' and then have configuration reloaded. 
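A minimal sketch of such a boothook part (illustrative user data, not
shipped by this change; the marker path is the one named above):

    #cloud-boothook
    #!/bin/sh
    # create the marker file checked by upstart_job.py below
    touch /run/cloud-init-upstart-reload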
---
 cloudinit/handlers/upstart_job.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 4684f7f2..0aa7446e 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -65,6 +65,14 @@ class UpstartJobPartHandler(handlers.Handler):
         path = os.path.join(self.upstart_dir, filename)
         util.write_file(path, payload, 0644)
 
-        # if inotify support is not present in the root filesystem
-        # (overlayroot) then we need to tell upstart to re-read /etc
-        util.subp(["initctl", "reload-configuration"], capture=False)
+        # FIXME LATER (LP: #1124384)
+        # a bug in upstart means that invoking reload-configuration
+        # at this stage in boot causes havoc. So, until that is fixed
+        # we will not do that. However, I'd like to be able to easily
+        # test to see if this bug is still present in an image with
+        # a newer upstart. So, a boot hook could easily write this file.
+        if os.path.exists("/run/cloud-init-upstart-reload"):
+            # if inotify support is not present in the root filesystem
+            # (overlayroot) then we need to tell upstart to re-read /etc
+
+            util.subp(["initctl", "reload-configuration"], capture=False)
-- 
cgit v1.2.3 


From e71071a9bea85235c708380473d8cf3912f7aa61 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 1 Mar 2013 01:41:23 -0500
Subject: skip unit test due to LP: #1124384

---
 tests/unittests/test_builtin_handlers.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index 5f41cb3d..da52f15b 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -1,6 +1,7 @@
 """Tests of the built-in user data handlers."""
 
 import os
+import unittest
 
 from mocker import MockerTestCase
 
@@ -34,6 +35,7 @@ class TestBuiltins(MockerTestCase):
                        None, None, None)
         self.assertEquals(0, len(os.listdir(up_root)))
 
+    @unittest.skip("until LP: #1124384 fixed")
     def test_upstart_frequency_single(self):
         # files should be written out when frequency is ! per-instance
         c_root = self.makeDir()
-- 
cgit v1.2.3 


From 368d2ba20a1ea7a97bf7186493b17be429a031d4 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 1 Mar 2013 01:43:06 -0500
Subject: initial stab at growpart module

LP: #1136936
---
 cloudinit/config/cc_growpart.py | 98 +++++++++++++++++++++++++++++++++++++++++
 cloudinit/config/cc_resizefs.py | 85 +----------------------------------
 cloudinit/util.py               | 83 ++++++++++++++++++++++++++++++++++
 config/cloud.cfg                |  1 +
 4 files changed, 183 insertions(+), 84 deletions(-)
 create mode 100644 cloudinit/config/cc_growpart.py

diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
new file mode 100644
index 00000000..f958cd53
--- /dev/null
+++ b/cloudinit/config/cc_growpart.py
@@ -0,0 +1,98 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+#
+# Author: Scott Moser
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
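+#
+# A hedged usage sketch (illustrative, not part of the original commit):
+# this first cut reads a single top-level boolean from cloud-config, so
+# user data like
+#
+#   #cloud-config
+#   growroot: false
+#
+# would disable the resize; the default in handle() below is True.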
+ +import os.path +import stat + +from cloudinit.settings import PER_ALWAYS +from cloudinit import util + +frequency = PER_ALWAYS + + +def device_part_info(devpath, log): + # convert an entry in /dev/ to parent disk and partition number + + # input of /dev/vdb or /dev/disk/by-label/foo + # rpath is hopefully a real-ish path in /dev (vda, sdb..) + rpath = os.path.realpath(devpath) + + bname = os.path.basename(rpath) + syspath = "/sys/class/block/%s" % bname + + if not os.path.exists(syspath): + log.debug("%s had no syspath (%s)" % (devpath, syspath)) + return None + + ptpath = os.path.join(syspath, "partition") + if not os.path.exists(ptpath): + log.debug("%s not a partition" % devpath) + return None + + ptnum = util.load_file(ptpath).rstrip() + + # for a partition, real syspath is something like: + # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1 + rsyspath = os.path.realpath(syspath) + disksyspath = os.path.dirname(rsyspath) + + diskmajmin = util.load_file(os.path.join(disksyspath, "dev")).rstrip() + diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin) + + # diskdevpath has something like 253:0 + # and udev has put links in /dev/block/253:0 to the device name in /dev/ + return (diskdevpath, ptnum) + + +def handle(name, cfg, _cloud, log, args): + if len(args) != 0: + growroot = args[0] + else: + growroot = util.get_cfg_option_bool(cfg, "growroot", True) + + if not growroot: + log.debug("Skipping module named %s, growroot disabled", name) + return + + resize_what = "/" + result = util.get_mount_info(resize_what, log) + if not result: + log.warn("Could not determine filesystem type of %s" % resize_what) + return + + (devpth, _fs_type, mount_point) = result + + # Ensure the path is a block device. + if not stat.S_ISBLK(os.stat(devpth).st_mode): + log.debug("The %s device which was found for mount point %s for %s " + "is not a block device" % (devpth, mount_point, resize_what)) + return + + result = device_part_info(devpth, log) + if not result: + log.debug("%s did not look like a partition" % devpth) + + (disk, ptnum) = result + + try: + (out, _err) = util.subp(["growpart", disk, ptnum], rcs=[0, 1]) + except util.ProcessExecutionError as e: + log.warn("growpart failed: %s/%s" % (e.stdout, e.stderr)) + return + + log.debug("growpart said: %s" % out) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 44b27933..51dead2f 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -51,89 +51,6 @@ RESIZE_FS_PREFIXES_CMDS = [ NOBLOCK = "noblock" -def get_mount_info(path, log): - # Use /proc/$$/mountinfo to find the device where path is mounted. - # This is done because with a btrfs filesystem using os.stat(path) - # does not return the ID of the device. - # - # Here, / has a device of 18 (decimal). - # - # $ stat / - # File: '/' - # Size: 234 Blocks: 0 IO Block: 4096 directory - # Device: 12h/18d Inode: 256 Links: 1 - # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) - # Access: 2013-01-13 07:31:04.358011255 +0000 - # Modify: 2013-01-13 18:48:25.930011255 +0000 - # Change: 2013-01-13 18:48:25.930011255 +0000 - # Birth: - - # - # Find where / is mounted: - # - # $ mount | grep ' / ' - # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) - # - # And the device ID for /dev/vda1 is not 18: - # - # $ ls -l /dev/vda1 - # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 - # - # So use /proc/$$/mountinfo to find the device underlying the - # input path. 
- path_elements = [e for e in path.split('/') if e] - devpth = None - fs_type = None - match_mount_point = None - match_mount_point_elements = None - mountinfo_path = '/proc/%s/mountinfo' % os.getpid() - for line in util.load_file(mountinfo_path).splitlines(): - parts = line.split() - - mount_point = parts[4] - mount_point_elements = [e for e in mount_point.split('/') if e] - - # Ignore mounts deeper than the path in question. - if len(mount_point_elements) > len(path_elements): - continue - - # Ignore mounts where the common path is not the same. - l = min(len(mount_point_elements), len(path_elements)) - if mount_point_elements[0:l] != path_elements[0:l]: - continue - - # Ignore mount points higher than an already seen mount - # point. - if (match_mount_point_elements is not None and - len(match_mount_point_elements) > len(mount_point_elements)): - continue - - # Find the '-' which terminates a list of optional columns to - # find the filesystem type and the path to the device. See - # man 5 proc for the format of this file. - try: - i = parts.index('-') - except ValueError: - log.debug("Did not find column named '-' in %s", - mountinfo_path) - return None - - # Get the path to the device. - try: - fs_type = parts[i + 1] - devpth = parts[i + 2] - except IndexError: - log.debug("Too few columns in %s after '-' column", mountinfo_path) - return None - - match_mount_point = mount_point - match_mount_point_elements = mount_point_elements - - if devpth and fs_type and match_mount_point: - return (devpth, fs_type, match_mount_point) - else: - return None - - def handle(name, cfg, _cloud, log, args): if len(args) != 0: resize_root = args[0] @@ -150,7 +67,7 @@ def handle(name, cfg, _cloud, log, args): # TODO(harlowja): allow what is to be resized to be configurable?? resize_what = "/" - result = get_mount_info(resize_what, log) + result = util.get_mount_info(resize_what, log) if not result: log.warn("Could not determine filesystem type of %s", resize_what) return diff --git a/cloudinit/util.py b/cloudinit/util.py index ffe844b2..1e9ca5d9 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1586,3 +1586,86 @@ def expand_package_list(version_fmt, pkgs): raise RuntimeError("Invalid package type.") return pkglist + + +def get_mount_info(path, log): + # Use /proc/$$/mountinfo to find the device where path is mounted. + # This is done because with a btrfs filesystem using os.stat(path) + # does not return the ID of the device. + # + # Here, / has a device of 18 (decimal). + # + # $ stat / + # File: '/' + # Size: 234 Blocks: 0 IO Block: 4096 directory + # Device: 12h/18d Inode: 256 Links: 1 + # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) + # Access: 2013-01-13 07:31:04.358011255 +0000 + # Modify: 2013-01-13 18:48:25.930011255 +0000 + # Change: 2013-01-13 18:48:25.930011255 +0000 + # Birth: - + # + # Find where / is mounted: + # + # $ mount | grep ' / ' + # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) + # + # And the device ID for /dev/vda1 is not 18: + # + # $ ls -l /dev/vda1 + # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 + # + # So use /proc/$$/mountinfo to find the device underlying the + # input path. 
+ path_elements = [e for e in path.split('/') if e] + devpth = None + fs_type = None + match_mount_point = None + match_mount_point_elements = None + mountinfo_path = '/proc/%s/mountinfo' % os.getpid() + for line in load_file(mountinfo_path).splitlines(): + parts = line.split() + + mount_point = parts[4] + mount_point_elements = [e for e in mount_point.split('/') if e] + + # Ignore mounts deeper than the path in question. + if len(mount_point_elements) > len(path_elements): + continue + + # Ignore mounts where the common path is not the same. + l = min(len(mount_point_elements), len(path_elements)) + if mount_point_elements[0:l] != path_elements[0:l]: + continue + + # Ignore mount points higher than an already seen mount + # point. + if (match_mount_point_elements is not None and + len(match_mount_point_elements) > len(mount_point_elements)): + continue + + # Find the '-' which terminates a list of optional columns to + # find the filesystem type and the path to the device. See + # man 5 proc for the format of this file. + try: + i = parts.index('-') + except ValueError: + log.debug("Did not find column named '-' in %s", + mountinfo_path) + return None + + # Get the path to the device. + try: + fs_type = parts[i + 1] + devpth = parts[i + 2] + except IndexError: + log.debug("Too few columns in %s after '-' column", mountinfo_path) + return None + + match_mount_point = mount_point + match_mount_point_elements = mount_point_elements + + if devpth and fs_type and match_mount_point: + return (devpth, fs_type, match_mount_point) + else: + return None diff --git a/config/cloud.cfg b/config/cloud.cfg index a8c74486..b61b8a7d 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -26,6 +26,7 @@ cloud_init_modules: - migrator - bootcmd - write-files + - growpart - resizefs - set_hostname - update_hostname -- cgit v1.2.3 From 86fe289ceb9b292ea91dbca056e0159e74091e47 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 14:19:54 -0500 Subject: add some unit tests, fix an issue or two * drop the parsing of options into csv, as we were only exploding them back. That can only result in error. Just do minimal parsing. * change the parsing of key lines to: if entry is valid: * use it else try taking off options: if good, use it else fail --- cloudinit/ssh_util.py | 97 +++++++++++++++++++---------------------- tests/unittests/test_sshutil.py | 94 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 140 insertions(+), 51 deletions(-) create mode 100644 tests/unittests/test_sshutil.py diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index dd6b742f..863a63e7 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -107,62 +107,57 @@ class AuthKeyLineParser(object): i = i + 1 options = ent[0:i] - options_lst = [] - - # Now use a csv parser to pull the options - # out of the above string that we just found an endpoint for. - # - # No quoting so we don't mess up any of the quoting that - # is already there. - reader = csv.reader(StringIO(options), quoting=csv.QUOTE_NONE) - for row in reader: - for e in row: - # Only keep non-empty csv options - e = e.strip() - if e: - options_lst.append(e) - - # Now take the rest of the items before the string - # as long as there is room to do this... 
-        toks = []
-        if i + 1 < len(ent):
-            rest = ent[i + 1:]
-            toks = rest.split(None, 2)
-        return (options_lst, toks)
-
-    def _form_components(self, src_line, toks, options=None):
-        components = {}
-        if len(toks) == 1:
-            components['base64'] = toks[0]
-        elif len(toks) == 2:
-            components['base64'] = toks[0]
-            components['comment'] = toks[1]
-        elif len(toks) == 3:
-            components['keytype'] = toks[0]
-            components['base64'] = toks[1]
-            components['comment'] = toks[2]
-        components['options'] = options
-        if not components:
-            return AuthKeyLine(src_line)
-        else:
-            return AuthKeyLine(src_line, **components)
+
+        # Return the rest of the string in 'remain'
+        remain = ent[i:].lstrip()
+        return (options, remain)
 
     def parse(self, src_line, def_opt=None):
+        # modeled after openssh's auth2-pubkey.c:user_key_allowed2
         line = src_line.rstrip("\r\n")
         if line.startswith("#") or line.strip() == '':
             return AuthKeyLine(src_line)
-        else:
-            ent = line.strip()
-            toks = ent.split(None, 3)
-            if len(toks) < 4:
-                return self._form_components(src_line, toks, def_opt)
-            else:
-                (options, toks) = self._extract_options(ent)
-                if options:
-                    options = ",".join(options)
-                else:
-                    options = def_opt
-                return self._form_components(src_line, toks, options)
+
+        def parse_ssh_key(ent):
+            # return keytype, key, [comment]
+            toks = ent.split(None, 2)
+            if len(toks) < 2:
+                raise TypeError("Too few fields: %s" % len(toks))
+            if not _is_valid_ssh_keytype(toks[0]):
+                raise TypeError("Invalid keytype %s" % toks[0])
+
+            # valid key type and 2 or 3 fields:
+            if len(toks) == 2:
+                # no comment in line
+                toks.append("")
+
+            return toks
+
+        ent = line.strip()
+        options = None
+        try:
+            (keytype, base64, comment) = parse_ssh_key(ent)
+            options = def_opt
+        except TypeError as e:
+            (options, remain) = self._extract_options(ent)
+            try:
+                (keytype, base64, comment) = parse_ssh_key(remain)
+            except TypeError as e:
+                return AuthKeyLine(src_line)
+
+        return AuthKeyLine(src_line, keytype=keytype, base64=base64,
+                           comment=comment, options=options)
+
+
+def _is_valid_ssh_keytype(key):
+    valid = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
+             "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
+             "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com",
+             "ecdsa-sha2-nistp256-cert-v01@openssh.com",
+             "ecdsa-sha2-nistp384-cert-v01@openssh.com",
+             "ecdsa-sha2-nistp521-cert-v01@openssh.com")
+
+    return key in valid
 
 
 def parse_authorized_keys(fname):
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
new file mode 100644
index 00000000..4564d9be
--- /dev/null
+++ b/tests/unittests/test_sshutil.py
@@ -0,0 +1,94 @@
+from unittest import TestCase
+from cloudinit import ssh_util
+
+
+VALID_CONTENT = {
+    'dsa': (
+        "AAAAB3NzaC1kc3MAAACBAIrjOQSlSea19bExXBMBKBvcLhBoVvNBjCppNzllipF"
+        "W4jgIOMcNanULRrZGjkOKat6MWJNetSbV1E6IOFDQ16rQgsh/OvYU9XhzM8seLa"
+        "A21VszZuhIV7/2DE3vxu7B54zVzueG1O1Deq6goQCRGWBUnqO2yluJiG4HzrnDa"
+        "jzRAAAAFQDMPO96qXd4F5A+5b2f2MO7SpVomQAAAIBpC3K2zIbDLqBBs1fn7rsv"
+        "KcJvwihdlVjG7UXsDB76P2GNqVG+IlYPpJZ8TO/B/fzTMtrdXp9pSm9OY1+BgN4"
+        "REsZ2WNcvfgY33aWaEM+ieCcQigvxrNAF2FTVcbUIIxAn6SmHuQSWrLSfdHc8H7"
+        "hsrgeUPPdzjBD/cv2ZmqwZ1AAAAIAplIsScrJut5wJMgyK1JG0Kbw9JYQpLe95P"
+        "obB069g8+mYR8U0fysmTEdR44mMu0VNU5E5OhTYoTGfXrVrkR134LqFM2zpVVbE"
+        "JNDnIqDHxTkc6LY2vu8Y2pQ3/bVnllZZOda2oD5HQ7ovygQa6CH+fbaZHbdDUX/"
+        "5z7u2rVAlDw=="
+    ),
+    'ecdsa': (
+        "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBITrGBB3cgJ"
+        "J7fPxvtMW9H3oRisNpJ3OAslxZeyP7I0A9BPAW0RQIwHVtVnM7zrp4nI+JLZov/"
+        "Ql7lc2leWL7CY="
+    ),
+    'rsa': (
+        
"AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5oz" + "emNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbD" + "c1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q" + "7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhT" + "YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07" + "/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw==" + ), +} + +TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding," + 'command="echo \'Please login as the user \"ubuntu\" rather than the' + 'user \"root\".\';echo;sleep 10"') + +class TestAuthKeyLineParser(TestCase): + def test_simple_parse(self): + # test key line with common 3 fields (keytype, base64, comment) + parser = ssh_util.AuthKeyLineParser() + for ktype in ['rsa', 'ecdsa', 'dsa']: + content = VALID_CONTENT[ktype] + comment = 'user-%s@host' % ktype + line = ' '.join((ktype, content, comment,)) + key = parser.parse(line) + + self.assertEqual(key.base64, content) + self.assertFalse(key.options) + self.assertEqual(key.comment, comment) + self.assertEqual(key.keytype, ktype) + + def test_parse_no_comment(self): + # test key line with key type and base64 only + parser = ssh_util.AuthKeyLineParser() + for ktype in ['rsa', 'ecdsa', 'dsa']: + content = VALID_CONTENT[ktype] + line = ' '.join((ktype, content,)) + key = parser.parse(line) + + self.assertEqual(key.base64, content) + self.assertFalse(key.options) + self.assertFalse(key.comment) + self.assertEqual(key.keytype, ktype) + + def test_parse_with_options(self): + # test key line with options in it + parser = ssh_util.AuthKeyLineParser() + options = TEST_OPTIONS + for ktype in ['rsa', 'ecdsa', 'dsa']: + content = VALID_CONTENT[ktype] + comment = 'user-%s@host' % ktype + line = ' '.join((options, ktype, content, comment,)) + key = parser.parse(line) + + self.assertEqual(key.base64, content) + self.assertEqual(key.options, options) + self.assertEqual(key.comment, comment) + self.assertEqual(key.keytype, ktype) + + def test_parse_with_defopt(self): + # test key line with key type and base64 only + parser = ssh_util.AuthKeyLineParser() + for ktype in ['rsa', 'ecdsa', 'dsa']: + content = VALID_CONTENT[ktype] + line = ' '.join((ktype, content,)) + myopts = "no-port-forwarding,no-agent-forwarding" + key = parser.parse(line, myopts) + + self.assertEqual(key.base64, content) + self.assertEqual(key.options, myopts) + self.assertFalse(key.comment) + self.assertEqual(key.keytype, ktype) + +# vi: ts=4 expandtab -- cgit v1.2.3 From ceec6724143e950d6ceb9ea0758dbfd1ad33921a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 14:22:00 -0500 Subject: move function to a static list, comment where it came from --- cloudinit/ssh_util.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 863a63e7..082c5bbd 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -33,6 +33,14 @@ LOG = logging.getLogger(__name__) # See: man sshd_config DEF_SSHD_CFG = "/etc/ssh/sshd_config" +# taken from openssh source key.c/key_type_from_name +VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", + "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", + "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", + "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com", + "ecdsa-sha2-nistp256-cert-v01@openssh.com", + "ecdsa-sha2-nistp384-cert-v01@openssh.com", + "ecdsa-sha2-nistp521-cert-v01@openssh.com") class 
AuthKeyLine(object):
     def __init__(self, source, keytype=None, base64=None,
@@ -123,7 +131,7 @@
             toks = ent.split(None, 2)
             if len(toks) < 2:
                 raise TypeError("Too few fields: %s" % len(toks))
-            if not _is_valid_ssh_keytype(toks[0]):
+            if toks[0] not in VALID_KEY_TYPES:
 
             # valid key type and 2 or 3 fields:
@@ -149,17 +157,6 @@ class AuthKeyLineParser(object):
                            comment=comment, options=options)
 
 
-def _is_valid_ssh_keytype(key):
-    valid = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
-             "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
-             "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com",
-             "ecdsa-sha2-nistp256-cert-v01@openssh.com",
-             "ecdsa-sha2-nistp384-cert-v01@openssh.com",
-             "ecdsa-sha2-nistp521-cert-v01@openssh.com")
-
-    return key in valid
-
-
 def parse_authorized_keys(fname):
     lines = []
     try:
-- 
cgit v1.2.3 


From ff0a34876dc0ce29b762ffd7fcdbfa80308e5aae Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 1 Mar 2013 14:56:55 -0500
Subject: change parser.parse 'default_opts' to 'options'

Now, parser.parse specifies options that override any options found,
rather than just being default options. There could still potentially be
a use for default_options, but since we're not using them anywhere, I've
dropped it.

The difference is that in setting up the root user, we're now insisting
that all keys that go in there have the key_prefix, even if the key
content had other options.

I think this is actually the commit that fixes LP: #1136343.
---
 cloudinit/config/cc_ssh.py      |  4 ++--
 cloudinit/ssh_util.py           | 27 ++++++++++++++-------------
 tests/unittests/test_sshutil.py | 28 +++++++++++++++++-----------
 3 files changed, 33 insertions(+), 26 deletions(-)

diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index b623d476..7ef20d9f 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -126,7 +126,7 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
     keys = set(keys)
     if user:
-        ssh_util.setup_user_keys(keys, user, '')
+        ssh_util.setup_user_keys(keys, user)
 
     if disable_root:
         if not user:
@@ -135,4 +135,4 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
     else:
         key_prefix = ''
 
-    ssh_util.setup_user_keys(keys, 'root', key_prefix)
+    ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 082c5bbd..44c7c15b 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -51,11 +51,8 @@ class AuthKeyLine(object):
         self.keytype = keytype
         self.source = source
 
-    def empty(self):
-        if (not self.base64 and
-                not self.comment and not self.keytype and not self.options):
-            return True
-        return False
+    def valid(self):
+        return (self.base64 and self.keytype)
 
     def __str__(self):
         toks = []
@@ -120,7 +117,7 @@ class AuthKeyLineParser(object):
         remain = ent[i:].lstrip()
         return (options, remain)
 
-    def parse(self, src_line, def_opt=None):
+    def parse(self, src_line, options=None):
         # modeled after openssh's auth2-pubkey.c:user_key_allowed2
         line = src_line.rstrip("\r\n")
         if line.startswith("#") or line.strip() == '':
@@ -141,13 +138,17 @@
             return toks
 
+        if "badopt" in src_line:
+            import ipdb; ipdb.set_trace()
+
         ent = line.strip()
-        options = None
         try:
             (keytype, base64, comment) = parse_ssh_key(ent)
-            options = def_opt
         except TypeError as e:
-            (options, remain) = self._extract_options(ent)
+            (keyopts, remain) = self._extract_options(ent)
+            if options is 
None: + options = keyopts + try: (keytype, base64, comment) = parse_ssh_key(remain) except TypeError as e: @@ -178,11 +179,11 @@ def update_authorized_keys(old_entries, keys): for i in range(0, len(old_entries)): ent = old_entries[i] - if ent.empty() or not ent.base64: + if ent.valid(): continue # Replace those with the same base64 for k in keys: - if k.empty() or not k.base64: + if ent.valid(): continue if k.base64 == ent.base64: # Replace it with our better one @@ -241,7 +242,7 @@ def extract_authorized_keys(username): return (auth_key_fn, parse_authorized_keys(auth_key_fn)) -def setup_user_keys(keys, username, key_prefix): +def setup_user_keys(keys, username, options=None): # Make sure the users .ssh dir is setup accordingly (ssh_dir, pwent) = users_ssh_info(username) if not os.path.isdir(ssh_dir): @@ -252,7 +253,7 @@ def setup_user_keys(keys, username, key_prefix): parser = AuthKeyLineParser() key_entries = [] for k in keys: - key_entries.append(parser.parse(str(k), def_opt=key_prefix)) + key_entries.append(parser.parse(str(k), options=options)) # Extract the old and make the new (auth_key_fn, auth_key_entries) = extract_authorized_keys(username) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 4564d9be..2415d06f 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -62,7 +62,7 @@ class TestAuthKeyLineParser(TestCase): self.assertFalse(key.comment) self.assertEqual(key.keytype, ktype) - def test_parse_with_options(self): + def test_parse_with_keyoptions(self): # test key line with options in it parser = ssh_util.AuthKeyLineParser() options = TEST_OPTIONS @@ -77,18 +77,24 @@ class TestAuthKeyLineParser(TestCase): self.assertEqual(key.comment, comment) self.assertEqual(key.keytype, ktype) - def test_parse_with_defopt(self): + def test_parse_with_options_passed_in(self): # test key line with key type and base64 only parser = ssh_util.AuthKeyLineParser() - for ktype in ['rsa', 'ecdsa', 'dsa']: - content = VALID_CONTENT[ktype] - line = ' '.join((ktype, content,)) - myopts = "no-port-forwarding,no-agent-forwarding" - key = parser.parse(line, myopts) - self.assertEqual(key.base64, content) - self.assertEqual(key.options, myopts) - self.assertFalse(key.comment) - self.assertEqual(key.keytype, ktype) + baseline = ' '.join(("rsa", VALID_CONTENT['rsa'], "user@host")) + myopts = "no-port-forwarding,no-agent-forwarding" + + key = parser.parse("allowedopt" + " " + baseline) + self.assertEqual(key.options, "allowedopt") + + key = parser.parse("overridden_opt " + baseline, options=myopts) + self.assertEqual(key.options, myopts) + + def test_parse_invalid_keytype(self): + parser = ssh_util.AuthKeyLineParser() + key = parser.parse(' '.join(["badkeytype", VALID_CONTENT['rsa']])) + + self.assertFalse(key.valid()) + # vi: ts=4 expandtab -- cgit v1.2.3 From ac83536339d2622f4a896f50681497a388e7e26f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 16:03:02 -0500 Subject: fix regression on expected label of filesystem for DataSourceNone Last addition to DataSourceNoCloud left it looking for a filesystem named 'None'. 
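The shape of the bug and of the fix, as a minimal sketch (a plain dict
stands in for the real ds_cfg; see the diff below):

    ds_cfg = {}  # no explicit fs_label configured
    # before: the truthiness test used the "cidata" default ...
    if ds_cfg.get('fs_label', "cidata"):
        label = ds_cfg.get('fs_label')  # ... but this lookup did not: None
        # util.find_devs_with("LABEL=%s" % label) then searched "LABEL=None"
    # after: fetch once with the default, then test explicitly
    label = ds_cfg.get('fs_label', "cidata")
    if label is not None:
        assert label == "cidata"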
--- cloudinit/sources/DataSourceNoCloud.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 097bbc52..603f0155 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -86,11 +86,11 @@ class DataSourceNoCloud(sources.DataSource): if 'ds_config' not in found: found.append("ds_config") - if self.ds_cfg.get('fs_label', "cidata"): + label = self.ds_cfg.get('fs_label', "cidata") + if label is not None: fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) - label = self.ds_cfg.get('fs_label') label_list = util.find_devs_with("LABEL=%s" % label) devlist = list(set(fslist) & set(label_list)) devlist.sort(reverse=True) -- cgit v1.2.3 From d55c9ae845544871d6bf105b44f701b7076c8e35 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 16:07:54 -0500 Subject: remove debug code --- cloudinit/ssh_util.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 44c7c15b..4b29661f 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -138,9 +138,6 @@ class AuthKeyLineParser(object): return toks - if "badopt" in src_line: - import ipdb; ipdb.set_trace() - ent = line.strip() try: (keytype, base64, comment) = parse_ssh_key(ent) -- cgit v1.2.3 From a6ef326b46a7f99b7ec585df595ef41151705ceb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 16:10:53 -0500 Subject: fix reversed logic --- cloudinit/ssh_util.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 4b29661f..65fab117 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -176,11 +176,11 @@ def update_authorized_keys(old_entries, keys): for i in range(0, len(old_entries)): ent = old_entries[i] - if ent.valid(): + if not ent.valid(): continue # Replace those with the same base64 for k in keys: - if ent.valid(): + if not ent.valid(): continue if k.base64 == ent.base64: # Replace it with our better one -- cgit v1.2.3 From 1d015f6ec3284287ad1383d0e2d9a264128f23eb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sun, 3 Mar 2013 20:56:32 -0500 Subject: add default log value to get_mount_info --- cloudinit/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index 1e9ca5d9..d0a6f81c 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1588,7 +1588,7 @@ def expand_package_list(version_fmt, pkgs): return pkglist -def get_mount_info(path, log): +def get_mount_info(path, log=LOG): # Use /proc/$$/mountinfo to find the device where path is mounted. # This is done because with a btrfs filesystem using os.stat(path) # does not return the ID of the device. -- cgit v1.2.3 From 7bb938c0a677637796ee62fc4242d8c0118987bd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sun, 3 Mar 2013 21:03:28 -0500 Subject: more work --- cloudinit/config/cc_growpart.py | 169 +++++++++++++++++++++++++++++++++------- 1 file changed, 139 insertions(+), 30 deletions(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index f958cd53..206cfc94 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -17,6 +17,7 @@ # along with this program. If not, see . 
import os.path
+import re
 import stat
 
 from cloudinit.settings import PER_ALWAYS
@@ -24,8 +25,82 @@ from cloudinit import util
 
 frequency = PER_ALWAYS
 
+def resizer_factory(mode):
+    resize_class = None
+    if mode == "auto":
+        for (_name, resizer) in RESIZERS:
+            cur = resizer()
+            if cur.available():
+                resize_class = cur
-def device_part_info(devpath, log):
+        if not resize_class:
+            raise ValueError("No resizers available")
+
+    else:
+        mmap = {}
+        for (k, v) in RESIZERS:
+            mmap[k] = v
+
+        if mode not in mmap:
+            raise TypeError("unknown resize mode %s" % mode)
+
+        mclass = mmap[mode]()
+        if mclass.available():
+            resize_class = mclass
+
+        if not resize_class:
+            raise ValueError("mode %s not available" % mode)
+
+    return resize_class
+
+
+class ResizeFailedException(Exception):
+    pass
+
+
+class ResizeParted(object):
+    def available(self):
+        myenv = os.environ.copy()
+        myenv['LANG'] = 'C'
+
+        try:
+            (out, _err) = util.subp(["parted", "--help"], env=myenv)
+            if re.search("COMMAND.*resize\s+", out, re.DOTALL):
+                return True
+
+        except util.ProcessExecutionError:
+            pass
+        return False
+
+    def resize(self, blockdev, part):
+        try:
+            util.subp(["parted", "resizepart", blockdev, part])
+        except util.ProcessExecutionError as e:
+            raise ResizeFailedException(e)
+
+
+class ResizeGrowPart(object):
+    def available(self):
+        myenv = os.environ.copy()
+        myenv['LANG'] = 'C'
+
+        try:
+            (out, _err) = util.subp(["growpart", "--help"], env=myenv)
+            if re.search("--update\s+", out, re.DOTALL):
+                return True
+
+        except util.ProcessExecutionError:
+            pass
+        return False
+
+    def resize(self, blockdev, part):
+        try:
+            util.subp(["growpart", blockdev, part])
+        except util.ProcessExecutionError as e:
+            raise ResizeFailedException(e)
+
+
+def device_part_info(devpath):
     # convert an entry in /dev/ to parent disk and partition number
 
     # input of /dev/vdb or /dev/disk/by-label/foo
@@ -36,13 +111,11 @@ def device_part_info(devpath, log):
     syspath = "/sys/class/block/%s" % bname
 
     if not os.path.exists(syspath):
-        log.debug("%s had no syspath (%s)" % (devpath, syspath))
-        return None
+        raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
 
     ptpath = os.path.join(syspath, "partition")
     if not os.path.exists(ptpath):
-        log.debug("%s not a partition" % devpath)
-        return None
+        raise TypeError("%s not a partition" % devpath)
 
     ptnum = util.load_file(ptpath).rstrip()
 
@@ -59,40 +132,76 @@ def device_part_info(devpath, log):
     return (diskdevpath, ptnum)
 
 
-def handle(name, cfg, _cloud, log, args):
-    if len(args) != 0:
-        growroot = args[0]
+def devent2dev(devent):
+    if devent.startswith("/dev/"):
+        return devent
     else:
-        growroot = util.get_cfg_option_bool(cfg, "growroot", True)
+        result = util.get_mount_info(devent)
+        if not result:
+            raise ValueError("Could not determine device of '%s'" % devent)
+        return result[0]
+
+
+def resize(resizer, devices, log):
+    resized = []
+    for devent in devices:
+        try:
+            blockdev = devent2dev(devent)
+        except ValueError as e:
+            log.debug("unable to turn %s into device: %s" % (devent, e))
+            continue
+
+        if not stat.S_ISBLK(os.stat(blockdev).st_mode):
+            log.debug("device '%s' for '%s' is not a block device" %
+                      (devent, blockdev))
+            continue
+
+        try:
+            (disk, ptnum) = device_part_info(blockdev)
+        except (TypeError, ValueError) as e:
+            log.debug("failed to get part_info for (%s, %s): %s" %
+                      (devent, blockdev, e))
+            continue
+
+        try:
+            resizer.resize(disk, ptnum)
+        except ResizeFailedException as e:
+            log.warn("failed to resize: devent=%s, disk=%s, ptnum=%s: %s",
+                     devent, disk, ptnum, e)
+
+        
resized.append(devent)
+
+    return resized
+
+
+def handle(name, cfg, _cloud, log, _args):
+    if 'growpart' not in cfg:
+        log.debug("Skipping module named %s, no growpart entry", name)
+        return
 
-    if not growroot:
-        log.debug("Skipping module named %s, growroot disabled", name)
+    mycfg = cfg.get('growpart')
+    if not isinstance(mycfg, dict):
+        log.warn("'growpart' in config was not a dict")
         return
 
-    resize_what = "/"
-    result = util.get_mount_info(resize_what, log)
-    if not result:
-        log.warn("Could not determine filesystem type of %s" % resize_what)
+    mode = mycfg.get('mode')
+    if util.is_false(mode):
+        log.debug("growpart disabled: mode=%s" % mode)
         return
 
-    (devpth, _fs_type, mount_point) = result
+    try:
+        resizer = resizer_factory(mode)
+    except (ValueError, TypeError) as e:
+        log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
 
-    # Ensure the path is a block device.
-    if not stat.S_ISBLK(os.stat(devpth).st_mode):
-        log.debug("The %s device which was found for mount point %s for %s "
-                  "is not a block device" % (devpth, mount_point, resize_what))
+    devices = util.get_cfg_option_list(cfg, "devices", ["/"])
+    if not len(devices):
+        log.debug("growpart: empty device list")
         return
 
-    result = device_part_info(devpth, log)
-    if not result:
-        log.debug("%s did not look like a partition" % devpth)
+    resized = resize(resizer, devices, log)
+    log.debug("resized: %s" % resized)
 
-    (disk, ptnum) = result
+RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart))
 
-    try:
-        (out, _err) = util.subp(["growpart", disk, ptnum], rcs=[0, 1])
-    except util.ProcessExecutionError as e:
-        log.warn("growpart failed: %s/%s" % (e.stdout, e.stderr))
-        return
 
-    log.debug("growpart said: %s" % out)
-- 
cgit v1.2.3 


From 1a3e0fa19b6c3224ea986141b37f74d5464b7c82 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Mon, 4 Mar 2013 09:56:24 -0500
Subject: if sigterm received, and networking is up, exit 0

LP: #1015223
---
 upstart/cloud-init-nonet.conf | 53 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 42 insertions(+), 11 deletions(-)

diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf
index 118ffc1c..bdeb5a2e 100644
--- a/upstart/cloud-init-nonet.conf
+++ b/upstart/cloud-init-nonet.conf
@@ -10,19 +10,50 @@ task
 console output
 
 script
-   # /run/network/static-network-up-emitted is written by
-   # upstart (via /etc/network/if-up.d/upstart). its presense would
-   # indicate that static-network-up has already fired.
-   EMITTED="/run/network/static-network-up-emitted"
-   [ -e "$EMITTED" -o -e "/var/$EMITTED" ] && exit 0
+   set +e  # you cannot trap TERM reliably with 'set -e'
+   SLEEP_CHILD=""
+
+   static_network_up() {
+      local emitted="/run/network/static-network-up-emitted"
+      # /run/network/static-network-up-emitted is written by
+      # upstart (via /etc/network/if-up.d/upstart). its presence would
+      # indicate that static-network-up has already fired.
+      [ -e "$emitted" -o -e "/var/$emitted" ]
+   }
+   msg() {
+      echo "$UPSTART_JOB:" "$1"
+   }
+
+   handle_sigterm() {
+      # if we received sigterm and static networking is up then it probably
+      # came from upstart as a result of 'stop on static-network-up'
+      [ -z "$SLEEP_CHILD" ] || kill $SLEEP_CHILD
+      if static_network_up; then
+         msg "static networking is now up"
+         exit 0
+      fi
+      exit 2
+   }
+
+   dowait() {
+      msg "waiting $1 seconds for network device"
+      sleep "$1" &
+      SLEEP_CHILD=$!
+      wait $SLEEP_CHILD
+      SLEEP_CHILD=""
+   }
+
+   trap handle_sigterm TERM
+
+   # static_network_up already occurred
+   static_network_up && exit 0
+
+   # obj.pkl comes from cloud-init-local (or previous boot and
+   # manual_cache_clean)
    [ -f /var/lib/cloud/instance/obj.pkl ] && exit 0
 
-   short=10; long=120;
-   sleep ${short}
-   echo $UPSTART_JOB "waiting ${long} seconds for a network device."
-   sleep ${long}
-   echo $UPSTART_JOB "gave up waiting for a network device."
+   dowait 10
+   dowait 120
+
+   msg "gave up waiting for a network device."
    : > /var/lib/cloud/data/no-net
 end script
-# EOF
-- 
cgit v1.2.3 


From bcf04395be323c60b013e531ef3bf32b722a780a Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Mon, 4 Mar 2013 14:28:34 -0500
Subject: add uptime to msg output

---
 upstart/cloud-init-nonet.conf | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf
index bdeb5a2e..36b99fb5 100644
--- a/upstart/cloud-init-nonet.conf
+++ b/upstart/cloud-init-nonet.conf
@@ -21,7 +21,11 @@ script
       [ -e "$emitted" -o -e "/var/$emitted" ]
    }
    msg() {
-      echo "$UPSTART_JOB:" "$1"
+      local uptime="" idle=""
+      if [ -r /proc/uptime ]; then
+         read uptime idle < /proc/uptime
+      fi
+      echo "$UPSTART_JOB${uptime:+[${uptime}]}:" "$1"
    }
 
    handle_sigterm() {
@@ -32,6 +36,7 @@ script
          msg "static networking is now up"
          exit 0
       fi
+      msg "received SIGTERM, networking not up"
       exit 2
    }
-- 
cgit v1.2.3 


From ab73a5a7befb9583a9b2cee35fa99e363793c116 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Mon, 4 Mar 2013 16:59:57 -0500
Subject: add the unit test, fix a few issues

---
 cloudinit/config/cc_growpart.py                    |   6 +-
 .../test_handler/test_handler_growpart.py          | 156 +++++++++++++++++++++
 2 files changed, 161 insertions(+), 1 deletion(-)
 create mode 100644 tests/unittests/test_handler/test_handler_growpart.py

diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 206cfc94..d49159ed 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -32,6 +32,7 @@ def resizer_factory(mode):
             cur = resizer()
             if cur.available():
                 resize_class = cur
+                break
 
@@ -65,7 +66,7 @@ class ResizeParted(object):
 
         try:
             (out, _err) = util.subp(["parted", "--help"], env=myenv)
-            if re.search("COMMAND.*resize\s+", out, re.DOTALL):
+            if re.search("COMMAND.*resizepart\s+", out, re.DOTALL):
                 return True
 
         except util.ProcessExecutionError:
@@ -193,6 +194,9 @@ def handle(name, cfg, _cloud, log, _args):
         resizer = resizer_factory(mode)
     except (ValueError, TypeError) as e:
         log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
+        if mode != "auto":
+            raise e
+        return
 
     devices = util.get_cfg_option_list(cfg, "devices", ["/"])
     if not len(devices):
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
new file mode 100644
index 00000000..9d2a8dae
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -0,0 +1,156 @@
+from mocker import MockerTestCase
+
+from cloudinit import cloud
+from cloudinit import helpers
+from cloudinit import util
+
+from cloudinit.config import cc_growpart
+
+import logging
+import os
+import mocker
+
+# growpart:
+#   mode: auto  # off, on, auto, 'growpart', 'parted'
+#   devices: ['root']
+
+HELP_PARTED_NO_RESIZE = """
+Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...]
+Apply COMMANDs with PARAMETERS to DEVICE.  If no COMMAND(s) are given, run in
+interactive mode.
+ +OPTIONs: + + +COMMANDs: + + quit exit program + rescue START END rescue a lost partition near START + and END + resize NUMBER START END resize partition NUMBER and its file + system + rm NUMBER delete partition NUMBER + +Report bugs to bug-parted@gnu.org +""" + +HELP_PARTED_RESIZE = """ +Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...] +Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in +interactive mode. + +OPTIONs: + + +COMMANDs: + + quit exit program + rescue START END rescue a lost partition near START + and END + resize NUMBER START END resize partition NUMBER and its file + system + resizepart NUMBER END resize partition NUMBER + rm NUMBER delete partition NUMBER + +Report bugs to bug-parted@gnu.org +""" + +HELP_GROWPART_RESIZE = """ +growpart disk partition + rewrite partition table so that partition takes up all the space it can + options: + -h | --help print Usage and exit + + -u | --update R update the the kernel partition table info after growing + this requires kernel support and 'partx --update' + R is one of: + - 'auto' : [default] update partition if possible + + Example: + - growpart /dev/sda 1 + Resize partition 1 on /dev/sda +""" + +HELP_GROWPART_NO_RESIZE = """ +growpart disk partition + rewrite partition table so that partition takes up all the space it can + options: + -h | --help print Usage and exit + + Example: + - growpart /dev/sda 1 + Resize partition 1 on /dev/sda +""" + +class TestDisabled(MockerTestCase): + def setUp(self): + super(TestDisabled, self).setUp() + self.name = "growpart" + self.cloud_init = None + self.log = logging.getLogger("TestDisabled") + self.args = [] + + self.handle = cc_growpart.handle + + def test_mode_off(self): + #Test that nothing is done if mode is off. 
+ config = {'growpart': {'mode': 'off'}} + self.mocker.replay() + + self.handle(self.name, config, self.cloud_init, self.log, self.args) + + def test_no_config(self): + #Test that nothing is done if no 'growpart' config + config = { } + self.mocker.replay() + + self.handle(self.name, config, self.cloud_init, self.log, self.args) + + +class TestConfig(MockerTestCase): + def setUp(self): + super(TestConfig, self).setUp() + self.name = "growpart" + self.paths = None + self.cloud = cloud.Cloud(None, self.paths, None, None, None) + self.log = logging.getLogger("TestConfig") + self.args = [] + os.environ = {} + + self.cloud_init = None + self.handle = cc_growpart.handle + + # Order must be correct + self.mocker.order() + + def test_no_resizers_auto_is_fine(self): + subp = self.mocker.replace(util.subp, passthrough=False) + subp(['parted', '--help'], env={'LANG': 'C'}) + self.mocker.result((HELP_PARTED_NO_RESIZE,"")) + subp(['growpart', '--help'], env={'LANG': 'C'}) + self.mocker.result((HELP_GROWPART_NO_RESIZE,"")) + self.mocker.replay() + + config = {'growpart': {'mode': 'auto'}} + self.handle(self.name, config, self.cloud_init, self.log, self.args) + + def test_no_resizers_mode_growpart_is_exception(self): + subp = self.mocker.replace(util.subp, passthrough=False) + subp(['growpart', '--help'], env={'LANG': 'C'}) + self.mocker.result((HELP_GROWPART_NO_RESIZE,"")) + self.mocker.replay() + + config = {'growpart': {'mode': "growpart"}} + self.assertRaises(ValueError, self.handle, self.name, config, + self.cloud_init, self.log, self.args) + + def test_mode_auto_prefers_parted(self): + subp = self.mocker.replace(util.subp, passthrough=False) + subp(['parted', '--help'], env={'LANG': 'C'}) + self.mocker.result((HELP_PARTED_RESIZE,"")) + self.mocker.replay() + + ret = cc_growpart.resizer_factory(mode="auto") + self.assertTrue(isinstance(ret, cc_growpart.ResizeParted)) + +# vi: ts=4 expandtab -- cgit v1.2.3 From b25c943ac22d457891cd6cfa240ca83aa03e4542 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 4 Mar 2013 23:18:37 -0500 Subject: test of resize, a couple small fixes --- cloudinit/config/cc_growpart.py | 12 +++- .../test_handler/test_handler_growpart.py | 71 ++++++++++++++++++++++ 2 files changed, 81 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index d49159ed..96e72350 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -17,6 +17,7 @@ # along with this program. If not, see . 
import os.path +import os import re import stat @@ -152,9 +153,16 @@ def resize(resizer, devices, log): log.debug("unable to turn %s into device: %s" % (devent, e)) continue - if not stat.S_ISBLK(os.stat(blockdev).st_mode): + try: + statret = os.stat(blockdev) + except OSError as e: + log.debug("device '%s' for '%s' failed stat" % + (blockdev, devent)) + continue + + if not stat.S_ISBLK(statret.st_mode): log.debug("device '%s' for '%s' is not a block device" % - (devent, blockdev)) + (blockdev, devent)) continue try: diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 9d2a8dae..7fb58a06 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -6,9 +6,12 @@ from cloudinit import util from cloudinit.config import cc_growpart +import errno import logging import os import mocker +import re +import stat # growpart: # mode: auto # off, on, auto, 'growpart', 'parted' @@ -94,7 +97,11 @@ class TestDisabled(MockerTestCase): def test_mode_off(self): #Test that nothing is done if mode is off. + + # this really only verifies that resizer_factory isn't called config = {'growpart': {'mode': 'off'}} + self.mocker.replace(cc_growpart.resizer_factory, + passthrough=False) self.mocker.replay() self.handle(self.name, config, self.cloud_init, self.log, self.args) @@ -102,6 +109,8 @@ class TestDisabled(MockerTestCase): def test_no_config(self): #Test that nothing is done if no 'growpart' config config = { } + self.mocker.replace(cc_growpart.resizer_factory, + passthrough=False) self.mocker.replay() self.handle(self.name, config, self.cloud_init, self.log, self.args) @@ -153,4 +162,66 @@ class TestConfig(MockerTestCase): ret = cc_growpart.resizer_factory(mode="auto") self.assertTrue(isinstance(ret, cc_growpart.ResizeParted)) + +class TestResize(MockerTestCase): + def setUp(self): + super(TestResize, self).setUp() + self.name = "growpart" + self.log = logging.getLogger("TestResize") + + # Order must be correct + self.mocker.order() + + def test_simple_devices(self): + #test simple device list + # this patches out devent2dev, os.stat, and device_part_info + # so in the end, doesn't test a lot + devs = ["/dev/XXda1", "/dev/YYda2"] + devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5L, + st_nlink=1, st_uid=0, st_gid=6, st_size=0, + st_atime=0, st_mtime=0, st_ctime=0) + enoent = ["/dev/NOENT"] + real_stat = os.stat + resize_calls = [] + + class myresizer(): + def resize(self, dev, part): + resize_calls.append((dev, part,)) + return + + def mystat(path): + if path in devs: + return devstat_ret + if path in enoent: + e = OSError("%s: does not exist" % path) + e.errno = errno.ENOENT + raise e + return real_stat(path) + + try: + opinfo = cc_growpart.device_part_info + cc_growpart.device_part_info = simple_device_part_info + os.stat = mystat + + resized = cc_growpart.resize(myresizer(), devs + enoent, self.log) + + self.assertEqual(devs, resized) + self.assertEqual(resize_calls, + [("/dev/XXda", "1",), ("/dev/YYda", "2",)]) + finally: + cc_growpart.device_part_info = opinfo + os.stat = real_stat + + +def simple_device_part_info(devpath): + # simple stupid return (/dev/vda, 1) for /dev/vda + ret = re.search("([^0-9]*)([0-9]*)$", devpath) + x = (ret.group(1), ret.group(2)) + return x + +class Bunch: + def __init__(self, **kwds): + self.__dict__.update(kwds) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 34a76e2be8a3eb4f8490183f12a67a01276575ff Mon Sep 17 00:00:00 2001 
From: Scott Moser Date: Tue, 5 Mar 2013 16:50:34 +0000 Subject: change default (no 'growpart' in config) to use 'auto' and '/' --- cloudinit/config/cc_growpart.py | 26 +++++++++------- .../test_handler/test_handler_growpart.py | 35 +++++++++++++++------- 2 files changed, 40 insertions(+), 21 deletions(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 96e72350..6d647be1 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -26,6 +26,12 @@ from cloudinit import util frequency = PER_ALWAYS +DEFAULT_CONFIG = { + 'mode': 'auto', + 'devices': ['/'], +} + + def resizer_factory(mode): resize_class = None if mode == "auto": @@ -144,7 +150,7 @@ def devent2dev(devent): return result[0] -def resize(resizer, devices, log): +def resize_devices(resizer, devices, log): resized = [] for devent in devices: try: @@ -185,8 +191,9 @@ def resize(resizer, devices, log): def handle(name, cfg, _cloud, log, _args): if 'growpart' not in cfg: - log.debug("Skipping module named %s, no growpart entry", name) - return + log.debug("No 'growpart' entry in cfg. Using default: %s" % + DEFAULT_CONFIG) + cfg['growpart'] = DEFAULT_CONFIG mycfg = cfg.get('growpart') if not isinstance(mycfg, dict): @@ -198,6 +205,11 @@ def handle(name, cfg, _cloud, log, _args): log.debug("growpart disabled: mode=%s" % mode) return + devices = util.get_cfg_option_list(cfg, "devices", ["/"]) + if not len(devices): + log.debug("growpart: empty device list") + return + try: resizer = resizer_factory(mode) except (ValueError, TypeError) as e: @@ -206,14 +218,8 @@ def handle(name, cfg, _cloud, log, _args): raise e return - devices = util.get_cfg_option_list(cfg, "devices", ["/"]) - if not len(devices): - log.debug("growpart: empty device list") - return - - resized = resize(resizer, devices, log) + resized = resize_devices(resizer, devices, log) log.debug("resized: %s" % resized) RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart)) - diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 7fb58a06..9a033d6b 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -106,16 +106,6 @@ class TestDisabled(MockerTestCase): self.handle(self.name, config, self.cloud_init, self.log, self.args) - def test_no_config(self): - #Test that nothing is done if no 'growpart' config - config = { } - self.mocker.replace(cc_growpart.resizer_factory, - passthrough=False) - self.mocker.replay() - - self.handle(self.name, config, self.cloud_init, self.log, self.args) - - class TestConfig(MockerTestCase): def setUp(self): super(TestConfig, self).setUp() @@ -162,6 +152,28 @@ class TestConfig(MockerTestCase): ret = cc_growpart.resizer_factory(mode="auto") self.assertTrue(isinstance(ret, cc_growpart.ResizeParted)) + def test_handle_with_no_growpart_entry(self): + #if no 'growpart' entry in config, then mode=auto should be used + + myresizer = object() + + factory = self.mocker.replace(cc_growpart.resizer_factory, + passthrough=False) + rsdevs = self.mocker.replace(cc_growpart.resize_devices, + passthrough=False) + factory("auto") + self.mocker.result(myresizer) + rsdevs(myresizer, ["/"], self.log) + self.mocker.result(["/"]) + self.mocker.replay() + + try: + orig_resizers = cc_growpart.RESIZERS + cc_growpart.RESIZERS = (('mysizer', object),) + self.handle(self.name, {}, self.cloud_init, self.log, self.args) + finally: + cc_growpart.RESIZERS = 
orig_resizers + class TestResize(MockerTestCase): def setUp(self): @@ -203,7 +215,8 @@ class TestResize(MockerTestCase): cc_growpart.device_part_info = simple_device_part_info os.stat = mystat - resized = cc_growpart.resize(myresizer(), devs + enoent, self.log) + resized = cc_growpart.resize_devices(myresizer(), devs + enoent, + self.log) self.assertEqual(devs, resized) self.assertEqual(resize_calls, -- cgit v1.2.3 From 00416dd5cd8f0fb507f64a7f0035fbe706c3f279 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Mar 2013 18:32:24 +0000 Subject: remove 'log' passing. call growpart with --dry-run first. growrun --dry-run will exit 1 if it wouldn't do anything. so call it, check for '1' and if no change, then just return. --- cloudinit/config/cc_growpart.py | 27 ++++++++++++++++------ .../test_handler/test_handler_growpart.py | 5 ++-- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 6d647be1..b88e6f6a 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -22,6 +22,7 @@ import re import stat from cloudinit.settings import PER_ALWAYS +from cloudinit import log as logging from cloudinit import util frequency = PER_ALWAYS @@ -31,6 +32,7 @@ DEFAULT_CONFIG = { 'devices': ['/'], } +LOG = logging.getLogger(__name__) def resizer_factory(mode): resize_class = None @@ -102,9 +104,20 @@ class ResizeGrowPart(object): return False def resize(self, blockdev, part): + try: + util.subp(["growpart", '--dry-run', blockdev, part]) + except util.ProcessExecutionError as e: + if e.exit_code != 1: + logexc(LOG, ("Failed growpart --dry-run for (%s, %s)" % + (blockdev, part))) + raise ResizeFailedException(e) + LOG.debug("no change necessary on (%s,%s)" % (blockdev, part)) + return + try: util.subp(["growpart", blockdev, part]) except util.ProcessExecutionError as e: + logexc(LOG, "Failed: growpart %s %s" % (blockdev, part)) raise ResizeFailedException(e) @@ -150,38 +163,38 @@ def devent2dev(devent): return result[0] -def resize_devices(resizer, devices, log): +def resize_devices(resizer, devices): resized = [] for devent in devices: try: blockdev = devent2dev(devent) except ValueError as e: - log.debug("unable to turn %s into device: %s" % (devent, e)) + LOG.debug("unable to turn %s into device: %s" % (devent, e)) continue try: statret = os.stat(blockdev) except OSError as e: - log.debug("device '%s' for '%s' failed stat" % + LOG.debug("device '%s' for '%s' failed stat" % (blockdev, devent)) continue if not stat.S_ISBLK(statret.st_mode): - log.debug("device '%s' for '%s' is not a block device" % + LOG.debug("device '%s' for '%s' is not a block device" % (blockdev, devent)) continue try: (disk, ptnum) = device_part_info(blockdev) except (TypeError, ValueError) as e: - log.debug("failed to get part_info for (%s, %s): %s" % + LOG.debug("failed to get part_info for (%s, %s): %s" % (devent, blockdev, e)) continue try: resizer.resize(disk, ptnum) except ResizeFailedException as e: - log.warn("failed to resize: devent=%s, disk=%s, ptnum=%s: %s", + LOG.warn("failed to resize: devent=%s, disk=%s, ptnum=%s: %s", devent, disk, ptnum, e) resized.append(devent) @@ -218,7 +231,7 @@ def handle(name, cfg, _cloud, log, _args): raise e return - resized = resize_devices(resizer, devices, log) + resized = resize_devices(resizer, devices) log.debug("resized: %s" % resized) RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart)) diff --git a/tests/unittests/test_handler/test_handler_growpart.py 
b/tests/unittests/test_handler/test_handler_growpart.py index 9a033d6b..3cf3efb7 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -163,7 +163,7 @@ class TestConfig(MockerTestCase): passthrough=False) factory("auto") self.mocker.result(myresizer) - rsdevs(myresizer, ["/"], self.log) + rsdevs(myresizer, ["/"]) self.mocker.result(["/"]) self.mocker.replay() @@ -215,8 +215,7 @@ class TestResize(MockerTestCase): cc_growpart.device_part_info = simple_device_part_info os.stat = mystat - resized = cc_growpart.resize_devices(myresizer(), devs + enoent, - self.log) + resized = cc_growpart.resize_devices(myresizer(), devs + enoent) self.assertEqual(devs, resized) self.assertEqual(resize_calls, -- cgit v1.2.3 From 6f31822b60f6878a65a4adb15c1d2b4cc4edc81d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Mar 2013 18:48:54 +0000 Subject: change default mode to 'auto' --- cloudinit/config/cc_growpart.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index b88e6f6a..65679242 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -213,7 +213,7 @@ def handle(name, cfg, _cloud, log, _args): log.warn("'growpart' in config was not a dict") return - mode = mycfg.get('mode') + mode = mycfg.get('mode', "auto") if util.is_false(mode): log.debug("growpart disabled: mode=%s" % mode) return -- cgit v1.2.3 From 626f49e427cdbe91285dc337d134bfe2011fd268 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Mar 2013 18:49:03 +0000 Subject: add doc --- doc/examples/cloud-config-growpart.txt | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 doc/examples/cloud-config-growpart.txt diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt new file mode 100644 index 00000000..705f02c2 --- /dev/null +++ b/doc/examples/cloud-config-growpart.txt @@ -0,0 +1,24 @@ +#cloud-config +# +# growpart entry is a dict, if it is not present at all +# in config, then the default is used ({'mode': 'auto', 'devices': ['/']}) +# +# mode: +# values: +# * auto: use any option possible (growpart or parted) +# if none are available, do not warn, but debug. +# * growpart: use growpart to grow partitions +# if growpart is not available, this is an error. +# * parted: use parted (parted resizepart) to resize partitions +# if parted is not available, this is an error. +# * off, false +# +# devices: +# a list of things to resize. +# items can be filesystem paths or devices (in /dev) +# examples: +# devices: [/, /dev/vdb1] +# +growpart: + mode: auto + devices: ['/'] -- cgit v1.2.3 From f9fe61cb0fff4391212c33ff5fc8af7402ad112c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Mar 2013 16:26:01 -0500 Subject: pep8, pylint, make resize_devices return more useful resize_devices now contains what action occurred for each entry. --- cloudinit/config/cc_growpart.py | 102 ++++++++++++++------- .../test_handler/test_handler_growpart.py | 28 ++++-- 2 files changed, 89 insertions(+), 41 deletions(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 65679242..b6e1fd37 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -16,24 +16,33 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
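# From this commit on, each resizer's resize() returns a
# (size-before, size-after) pair and resize_devices() reports one
# (device-entry, action, message) tuple per device, using the RESIZE
# enum introduced below.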
-import os.path import os +import os.path import re import stat -from cloudinit.settings import PER_ALWAYS from cloudinit import log as logging +from cloudinit.settings import PER_ALWAYS from cloudinit import util frequency = PER_ALWAYS DEFAULT_CONFIG = { - 'mode': 'auto', - 'devices': ['/'], + 'mode': 'auto', + 'devices': ['/'], } + +def enum(**enums): + return type('Enum', (), enums) + + +RESIZE = enum(SKIPPED="SKIPPED", CHANGED="CHANGED", NOCHANGE="NOCHANGE", + FAILED="FAILED") + LOG = logging.getLogger(__name__) + def resizer_factory(mode): resize_class = None if mode == "auto": @@ -75,19 +84,22 @@ class ResizeParted(object): try: (out, _err) = util.subp(["parted", "--help"], env=myenv) - if re.search("COMMAND.*resizepart\s+", out, re.DOTALL): + if re.search(r"COMMAND.*resizepart\s+", out, re.DOTALL): return True except util.ProcessExecutionError: pass return False - def resize(self, blockdev, part): + def resize(self, diskdev, partnum, partdev): + before = get_size(partdev) try: - util.subp(["parted", "resizepart", blockdev, part]) + util.subp(["parted", "resizepart", diskdev, partnum]) except util.ProcessExecutionError as e: raise ResizeFailedException(e) + return (before, get_size(partdev)) + class ResizeGrowPart(object): def available(self): @@ -96,30 +108,40 @@ class ResizeGrowPart(object): try: (out, _err) = util.subp(["growpart", "--help"], env=myenv) - if re.search("--update\s+", out, re.DOTALL): + if re.search(r"--update\s+", out, re.DOTALL): return True except util.ProcessExecutionError: pass return False - def resize(self, blockdev, part): + def resize(self, diskdev, partnum, partdev): + before = get_size(partdev) try: - util.subp(["growpart", '--dry-run', blockdev, part]) + util.subp(["growpart", '--dry-run', diskdev, partnum]) except util.ProcessExecutionError as e: if e.exit_code != 1: - logexc(LOG, ("Failed growpart --dry-run for (%s, %s)" % - (blockdev, part))) + util.logexc(LOG, ("Failed growpart --dry-run for (%s, %s)" % + (diskdev, partnum))) raise ResizeFailedException(e) - LOG.debug("no change necessary on (%s,%s)" % (blockdev, part)) - return + return (before, before) try: - util.subp(["growpart", blockdev, part]) + util.subp(["growpart", diskdev, partnum]) except util.ProcessExecutionError as e: - logexc(LOG, "Failed: growpart %s %s" % (blockdev, part)) + util.logexc(LOG, "Failed: growpart %s %s" % (diskdev, partnum)) raise ResizeFailedException(e) + return (before, get_size(partdev)) + + +def get_size(filename): + fd = os.open(filename, os.O_RDONLY) + try: + return os.lseek(fd, 0, os.SEEK_END) + finally: + os.close(fd) + def device_part_info(devpath): # convert an entry in /dev/ to parent disk and partition number @@ -164,45 +186,54 @@ def devent2dev(devent): def resize_devices(resizer, devices): - resized = [] + # returns a tuple of tuples containing (entry-in-devices, action, message) + info = [] for devent in devices: try: blockdev = devent2dev(devent) except ValueError as e: - LOG.debug("unable to turn %s into device: %s" % (devent, e)) + info.append((devent, RESIZE.SKIPPED, + "unable to convert to device: %s" % e,)) continue try: statret = os.stat(blockdev) except OSError as e: - LOG.debug("device '%s' for '%s' failed stat" % - (blockdev, devent)) + info.append((devent, RESIZE.SKIPPED, + "stat of '%s' failed: %s" % (blockdev, e),)) continue - + if not stat.S_ISBLK(statret.st_mode): - LOG.debug("device '%s' for '%s' is not a block device" % - (blockdev, devent)) + info.append((devent, RESIZE.SKIPPED, + "device '%s' not a block device" % blockdev,)) continue 
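        # From here each outcome is recorded as a tuple, e.g. (device
        # names and byte sizes hypothetical):
        #
        #   ("/", RESIZE.CHANGED,
        #    "changed (/dev/vda, 1) from 10737418240 to 21474836480")
        #   ("/dev/vdb1", RESIZE.NOCHANGE,
        #    "no change necessary (/dev/vdb, 1)")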
try: (disk, ptnum) = device_part_info(blockdev) except (TypeError, ValueError) as e: - LOG.debug("failed to get part_info for (%s, %s): %s" % - (devent, blockdev, e)) + info.append((devent, RESIZE.SKIPPED, + "device_part_info(%s) failed: %s" % (blockdev, e),)) continue try: - resizer.resize(disk, ptnum) - except ResizeFailedException as e: - LOG.warn("failed to resize: devent=%s, disk=%s, ptnum=%s: %s", - devent, disk, ptnum, e) + (old, new) = resizer.resize(disk, ptnum, blockdev) + if old == new: + info.append((devent, RESIZE.NOCHANGE, + "no change necessary (%s, %s)" % (disk, ptnum),)) + else: + info.append((devent, RESIZE.CHANGED, + "changed (%s, %s) from %s to %s" % + (disk, ptnum, old, new),)) - resized.append(devent) + except ResizeFailedException as e: + info.append((devent, RESIZE.FAILED, + "failed to resize: disk=%s, ptnum=%s: %s" % + (disk, ptnum, e),)) - return resized + return info -def handle(name, cfg, _cloud, log, _args): +def handle(_name, cfg, _cloud, log, _args): if 'growpart' not in cfg: log.debug("No 'growpart' entry in cfg. Using default: %s" % DEFAULT_CONFIG) @@ -232,7 +263,10 @@ def handle(name, cfg, _cloud, log, _args): return resized = resize_devices(resizer, devices) - log.debug("resized: %s" % resized) + for (entry, action, msg) in resized: + if action == RESIZE.CHANGED: + log.info("'%s' resized: %s" % (entry, msg)) + else: + log.debug("'%s' %s: %s" % (entry, action, msg)) RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart)) - diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 3cf3efb7..74c254e0 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -164,7 +164,7 @@ class TestConfig(MockerTestCase): factory("auto") self.mocker.result(myresizer) rsdevs(myresizer, ["/"]) - self.mocker.result(["/"]) + self.mocker.result((("/", cc_growpart.RESIZE.CHANGED, "my-message",),)) self.mocker.replay() try: @@ -197,9 +197,11 @@ class TestResize(MockerTestCase): resize_calls = [] class myresizer(): - def resize(self, dev, part): - resize_calls.append((dev, part,)) - return + def resize(self, diskdev, partnum, partdev): + resize_calls.append((diskdev, partnum, partdev)) + if partdev == "/dev/YYda2": + return (1024, 2048) + return (1024, 1024) # old size, new size def mystat(path): if path in devs: @@ -217,9 +219,21 @@ class TestResize(MockerTestCase): resized = cc_growpart.resize_devices(myresizer(), devs + enoent) - self.assertEqual(devs, resized) - self.assertEqual(resize_calls, - [("/dev/XXda", "1",), ("/dev/YYda", "2",)]) + def find(name, res): + for f in res: + if f[0] == name: + return f + return None + + self.assertEqual(cc_growpart.RESIZE.NOCHANGE, + find("/dev/XXda1", resized)[1]) + self.assertEqual(cc_growpart.RESIZE.CHANGED, + find("/dev/YYda2", resized)[1]) + self.assertEqual(cc_growpart.RESIZE.SKIPPED, + find(enoent[0], resized)[1]) + #self.assertEqual(resize_calls, + #[("/dev/XXda", "1", "/dev/XXda1"), + #("/dev/YYda", "2", "/dev/YYda2")]) finally: cc_growpart.device_part_info = opinfo os.stat = real_stat -- cgit v1.2.3 From b4fa42f0cb841b1f096bd8d654eda7230053935c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Mar 2013 16:38:35 -0500 Subject: fix ChangeLog entries incorrectly added as 0.6.0 --- ChangeLog | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 110cdec2..ed2373b9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -43,6 +43,10 @@ - improve 
debian support in sysvinit scripts, package build scripts, and split sources.list template to be distro specific. - support for resizing btrfs root filesystems [Blair Zajac] + - fix issue when writing ssh keys to .ssh/authorized_keys (LP: #1136343) + - upstart: cloud-init-nonet.conf trap the TERM signal, so that dmesg or other + output does not get a 'killed by TERM signal' message. + 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) @@ -501,7 +505,4 @@ - make the message on 'disable_root' more clear (LP: #672417) - do not require public key if private is given in ssh cloud-config (LP: #648905) - - fix issue when writing ssh keys to .ssh/authorized_keys (LP: #1136343) - - upstart: cloud-init-nonet.conf trap the TERM signal, so that dmesg or other - output does not get a 'killed by TERM signal' message. # vi: syntax=text textwidth=79 -- cgit v1.2.3 From 2653a9172e375484b4d0a88c3de56334136fa134 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 5 Mar 2013 19:16:01 -0800 Subject: Add in a bunch of changes and tests. --- cloudinit/handlers/__init__.py | 15 +-- cloudinit/handlers/cloud_config.py | 89 +++++++++------- cloudinit/mergers/__init__.py | 59 +++++++++-- cloudinit/mergers/dict.py | 11 ++ cloudinit/mergers/list.py | 21 ++-- cloudinit/mergers/str.py | 5 + tests/unittests/test__init__.py | 27 +++-- tests/unittests/test_merging.py | 205 ++++++++++++++++++++++++++----------- tests/unittests/test_userdata.py | 80 +++++++++++++-- 9 files changed, 368 insertions(+), 144 deletions(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 566b61a7..63fdb948 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -87,7 +87,7 @@ class Handler(object): raise NotImplementedError() -def run_part(mod, data, filename, payload, headers, frequency): +def run_part(mod, data, filename, payload, frequency, headers): mod_freq = mod.frequency if not (mod_freq == PER_ALWAYS or (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)): @@ -98,8 +98,8 @@ def run_part(mod, data, filename, payload, headers, frequency): mod_ver = int(mod_ver) except (TypeError, ValueError, AttributeError): mod_ver = 1 + content_type = headers['Content-Type'] try: - content_type = headers['Content-Type'] LOG.debug("Calling handler %s (%s, %s, %s) with frequency %s", mod, content_type, filename, mod_ver, frequency) if mod_ver == 3: @@ -123,17 +123,19 @@ def run_part(mod, data, filename, payload, headers, frequency): def call_begin(mod, data, frequency): + # Create a fake header set headers = { 'Content-Type': CONTENT_START, } - run_part(mod, data, None, None, headers, frequency) + run_part(mod, data, None, None, frequency, headers) def call_end(mod, data, frequency): + # Create a fake header set headers = { 'Content-Type': CONTENT_END, } - run_part(mod, data, None, None, headers, frequency) + run_part(mod, data, None, None, frequency, headers) def walker_handle_handler(pdata, _ctype, _filename, payload): @@ -191,12 +193,12 @@ def walker_callback(data, filename, payload, headers): handlers = data['handlers'] if content_type in handlers: run_part(handlers[content_type], data['data'], filename, - payload, headers, data['frequency']) + payload, data['frequency'], headers) elif payload: # Extract the first line or 24 bytes for displaying in the log start = _extract_first_or_bytes(payload, 24) details = "'%s...'" % (_escape_string(start)) - if ctype == NOT_MULTIPART_TYPE: + if content_type == 
NOT_MULTIPART_TYPE: LOG.warning("Unhandled non-multipart (%s) userdata: %s", content_type, details) else: @@ -224,6 +226,7 @@ def walk(msg, callback, data): filename = PART_FN_TPL % (partnum) headers = dict(part) + LOG.debug(headers) headers['Content-Type'] = ctype callback(data, filename, part.get_payload(decode=True), headers) partnum = partnum + 1 diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 02a7ad9d..d458dee2 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -29,16 +29,19 @@ from cloudinit.settings import (PER_ALWAYS) LOG = logging.getLogger(__name__) -DEF_MERGE_TYPE = "list()+dict()+str()" +DEF_MERGE_TYPE = "list(extend)+dict()+str(append)" MERGE_HEADER = 'Merge-Type' class CloudConfigPartHandler(handlers.Handler): def __init__(self, paths, **_kwargs): handlers.Handler.__init__(self, PER_ALWAYS, version=3) - self.cloud_buf = {} + self.cloud_buf = None self.cloud_fn = paths.get_ipath("cloud_config") self.file_names = [] + self.mergers = [ + mergers.string_extract_mergers(DEF_MERGE_TYPE), + ] def list_types(self): return [ @@ -48,50 +51,64 @@ class CloudConfigPartHandler(handlers.Handler): def _write_cloud_config(self, buf): if not self.cloud_fn: return - # Write the combined & merged dictionary/yaml out - lines = [ - "#cloud-config", - '', - ] - # Write which files we merged from + # Capture which files we merged from... + file_lines = [] if self.file_names: - lines.append("# from %s files" % (len(self.file_names))) + file_lines.append("# from %s files" % (len(self.file_names))) for fn in self.file_names: - lines.append("# %s" % (fn)) - lines.append("") - lines.append(util.yaml_dumps(self.cloud_buf)) + file_lines.append("# %s" % (fn)) + file_lines.append("") + if self.cloud_buf is not None: + lines = [ + "#cloud-config", + '', + ] + lines.extend(file_lines) + lines.append(util.yaml_dumps(self.cloud_buf)) + else: + lines = [] util.write_file(self.cloud_fn, "\n".join(lines), 0600) - def _merge_header_extract(self, payload_yaml): - merge_header_yaml = '' - for k in [MERGE_HEADER, MERGE_HEADER.lower(), - MERGE_HEADER.lower().replace("-", "_")]: - if k in payload_yaml: - merge_header_yaml = str(payload_yaml[k]) + def _extract_mergers(self, payload, headers): + merge_header_headers = '' + for h in [MERGE_HEADER, 'X-%s' % (MERGE_HEADER)]: + tmp_h = headers.get(h, '') + if tmp_h: + merge_header_headers = tmp_h break - return merge_header_yaml - - def _merge_part(self, payload, headers): - merge_header_headers = headers.get(MERGE_HEADER, '') - payload_yaml = util.load_yaml(payload) - merge_how = '' # Select either the merge-type from the content # or the merge type from the headers or default to our own set - # if neither exists (or is empty) from the later - merge_header_yaml = self._merge_header_extract(payload_yaml) - for merge_i in [merge_header_yaml, merge_header_headers]: - merge_i = merge_i.strip().lower() - if merge_i: - merge_how = merge_i - break - if not merge_how: - merge_how = DEF_MERGE_TYPE - merger = mergers.construct(merge_how) - self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml) + # if neither exists (or is empty) from the later. 
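+        # e.g. a part whose YAML carries a 'merger_how' (or
+        # 'merge_type') key contributes its mergers first, followed by
+        # any 'Merge-Type' / 'X-Merge-Type' MIME header on the part;
+        # DEF_MERGE_TYPE applies only when neither yields anything.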
+ payload_yaml = util.load_yaml(payload) + mergers_yaml = mergers.dict_extract_mergers(payload_yaml) + mergers_header = mergers.string_extract_mergers(merge_header_headers) + all_mergers = [] + all_mergers.extend(mergers_yaml) + all_mergers.extend(mergers_header) + if not all_mergers: + all_mergers = mergers.string_extract_mergers(DEF_MERGE_TYPE) + return all_mergers + + def _merge_part(self, payload, headers): + next_mergers = self._extract_mergers(payload, headers) + # Use the merger list from the last call, since it is the one + # that will be defining how to merge with the next payload. + curr_mergers = list(self.mergers[-1]) + LOG.debug("Merging with %s", curr_mergers) + self.mergers.append(next_mergers) + merger = mergers.construct(curr_mergers) + if self.cloud_buf is None: + # First time through, merge with an empty dict... + self.cloud_buf = {} + self.cloud_buf = merger.merge(self.cloud_buf, + util.load_yaml(payload)) def _reset(self): self.file_names = [] - self.cloud_buf = {} + self.cloud_buf = None + self.mergers = [ + mergers.string_extract_mergers(DEF_MERGE_TYPE), + ] def handle_part(self, _data, ctype, filename, payload, _freq, headers): if ctype == handlers.CONTENT_START: diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 20658edc..4a112165 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -34,6 +34,13 @@ class UnknownMerger(object): def _handle_unknown(self, meth_wanted, value, merge_with): return value + # This merging will attempt to look for a '_on_X' method + # in our own object for a given object Y with type X, + # if found it will be called to perform the merge of a source + # object and a object to merge_with. + # + # If not found the merge will be given to a '_handle_unknown' + # function which can decide what to do wit the 2 values. def merge(self, source, merge_with): type_name = util.obj_name(source) type_name = type_name.lower() @@ -56,6 +63,11 @@ class LookupMerger(UnknownMerger): else: self._lookups = lookups + # For items which can not be merged by the parent this object + # will lookup in a internally maintained set of objects and + # find which one of those objects can perform the merge. If + # any of the contained objects have the needed method, they + # will be called to perform the merge. 
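+    # A usage sketch (merge spec illustrative):
+    #
+    #   merger = construct(string_extract_mergers(
+    #       "list(extend)+dict()+str(append)"))
+    #   merger.merge({'a': [1]}, {'a': [2]})  # -> {'a': [1, 2]}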
def _handle_unknown(self, meth_wanted, value, merge_with): meth = None for merger in self._lookups: @@ -70,8 +82,33 @@ class LookupMerger(UnknownMerger): return meth(value, merge_with) -def _extract_merger_names(merge_how): - names = [] +def dict_extract_mergers(config): + parsed_mergers = [] + raw_mergers = config.get('merger_how') + if raw_mergers is None: + raw_mergers = config.get('merge_type') + if raw_mergers is None: + return parsed_mergers + if isinstance(raw_mergers, (str, basestring)): + return string_extract_mergers(raw_mergers) + for m in raw_mergers: + if isinstance(m, (dict)): + name = m['name'] + name = name.replace("-", "_").strip() + opts = m['settings'] + else: + name = m[0] + if len(m) >= 2: + opts = m[1:] + else: + opts = [] + if name: + parsed_mergers.append((name, opts)) + return parsed_mergers + + +def string_extract_mergers(merge_how): + parsed_mergers = [] for m_name in merge_how.split("+"): # Canonicalize the name (so that it can be found # even when users alter it in various ways) @@ -79,20 +116,20 @@ def _extract_merger_names(merge_how): m_name = m_name.replace("-", "_") if not m_name: continue - names.append(m_name) - return names - - -def construct(merge_how): - mergers_to_be = [] - for name in _extract_merger_names(merge_how): - match = NAME_MTCH.match(name) + match = NAME_MTCH.match(m_name) if not match: - msg = "Matcher identifer '%s' is not in the right format" % (name) + msg = "Matcher identifer '%s' is not in the right format" % (m_name) raise ValueError(msg) (m_name, m_ops) = match.groups() m_ops = m_ops.strip().split(",") m_ops = [m.strip().lower() for m in m_ops if m.strip()] + parsed_mergers.append((m_name, m_ops)) + return parsed_mergers + + +def construct(parsed_mergers): + mergers_to_be = [] + for (m_name, m_ops) in parsed_mergers: merger_locs = importer.find_module(m_name, [__name__], ['Merger']) diff --git a/cloudinit/mergers/dict.py b/cloudinit/mergers/dict.py index bc392afa..45a7d3a5 100644 --- a/cloudinit/mergers/dict.py +++ b/cloudinit/mergers/dict.py @@ -22,6 +22,17 @@ class Merger(object): self._merger = merger self._overwrite = 'overwrite' in opts + # This merging algorithm will attempt to merge with + # another dictionary, on encountering any other type of object + # it will not merge with said object, but will instead return + # the original value + # + # On encountering a dictionary, it will create a new dictionary + # composed of the original and the one to merge with, if 'overwrite' + # is enabled then keys that exist in the original will be overwritten + # by keys in the one to merge with (and associated values). Otherwise + # if not in overwrite mode the 2 conflicting keys themselves will + # be merged. def _on_dict(self, value, merge_with): if not isinstance(merge_with, (dict)): return value diff --git a/cloudinit/mergers/list.py b/cloudinit/mergers/list.py index a848b8d6..a56ff007 100644 --- a/cloudinit/mergers/list.py +++ b/cloudinit/mergers/list.py @@ -26,21 +26,24 @@ class Merger(object): def _on_tuple(self, value, merge_with): return self._on_list(list(value), merge_with) + # On encountering a list or tuple type this action will be applied + # a new list will be returned, if the value to merge with is itself + # a list and we have been told to 'extend', then the value here will + # be extended with the other list. If in 'extend' mode then we will + # attempt to merge instead, which means that values from the list + # to merge with will replace values in te original list (they will + # also be merged recursively). 
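+    # For example, in 'extend' mode ['a'] merged with ['b', 'c']
+    # yields ['a', 'b', 'c'] (values illustrative).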
+ # + # If the value to merge with is not a list, and we are set to discared + # then no modifications will take place, otherwise we will just append + # the value to merge with onto the end of our own list. def _on_list(self, value, merge_with): new_value = list(value) if isinstance(merge_with, (tuple, list)): if self._extend: new_value.extend(merge_with) else: - # Merge instead - for m_v in merge_with: - m_am = 0 - for (i, o_v) in enumerate(new_value): - if m_v == o_v: - new_value[i] = self._merger.merge(o_v, m_v) - m_am += 1 - if m_am == 0: - new_value.append(m_v) + return new_value else: if not self._discard_non: new_value.append(merge_with) diff --git a/cloudinit/mergers/str.py b/cloudinit/mergers/str.py index 14bc46ec..f1534c5b 100644 --- a/cloudinit/mergers/str.py +++ b/cloudinit/mergers/str.py @@ -21,9 +21,14 @@ class Merger(object): def __init__(self, merger, opts): self._append = 'append' in opts + # On encountering a unicode object to merge value with + # we will for now just proxy into the string method to let it handle it. def _on_unicode(self, value, merge_with): return self._on_str(value, merge_with) + # On encountering a string object to merge with we will + # perform the following action, if appending we will + # merge them together, otherwise we will just return value. def _on_str(self, value, merge_with): if not self._append: return value diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index ac082076..7924755a 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -22,8 +22,10 @@ class FakeModule(handlers.Handler): def list_types(self): return self.types - def _handle_part(self, data, ctype, filename, payload, frequency): + def handle_part(self, data, ctype, filename, payload, frequency): pass + + class TestWalkerHandleHandler(MockerTestCase): @@ -103,6 +105,9 @@ class TestHandlerHandlePart(MockerTestCase): self.filename = "fake filename" self.payload = "fake payload" self.frequency = settings.PER_INSTANCE + self.headers = { + 'Content-Type': self.ctype, + } def test_normal_version_1(self): """ @@ -118,8 +123,8 @@ class TestHandlerHandlePart(MockerTestCase): self.payload) self.mocker.replay() - handlers.run_part(mod_mock, self.data, self.ctype, self.filename, - self.payload, self.frequency) + handlers.run_part(mod_mock, self.data, self.filename, + self.payload, self.frequency, self.headers) def test_normal_version_2(self): """ @@ -135,8 +140,8 @@ class TestHandlerHandlePart(MockerTestCase): self.payload, self.frequency) self.mocker.replay() - handlers.run_part(mod_mock, self.data, self.ctype, self.filename, - self.payload, self.frequency) + handlers.run_part(mod_mock, self.data, self.filename, + self.payload, self.frequency, self.headers) def test_modfreq_per_always(self): """ @@ -152,8 +157,8 @@ class TestHandlerHandlePart(MockerTestCase): self.payload) self.mocker.replay() - handlers.run_part(mod_mock, self.data, self.ctype, self.filename, - self.payload, self.frequency) + handlers.run_part(mod_mock, self.data, self.filename, + self.payload, self.frequency, self.headers) def test_no_handle_when_modfreq_once(self): """C{handle_part} is not called if frequency is once.""" @@ -163,8 +168,8 @@ class TestHandlerHandlePart(MockerTestCase): self.mocker.result(settings.PER_ONCE) self.mocker.replay() - handlers.run_part(mod_mock, self.data, self.ctype, self.filename, - self.payload, self.frequency) + handlers.run_part(mod_mock, self.data, self.filename, + self.payload, self.frequency, self.headers) def 
test_exception_is_caught(self): """Exceptions within C{handle_part} are caught and logged.""" @@ -178,8 +183,8 @@ class TestHandlerHandlePart(MockerTestCase): self.mocker.throw(Exception()) self.mocker.replay() - handlers.run_part(mod_mock, self.data, self.ctype, self.filename, - self.payload, self.frequency) + handlers.run_part(mod_mock, self.data, self.filename, + self.payload, self.frequency, self.headers) class TestCmdlineUrl(MockerTestCase): diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 0037b966..fa7ee8e4 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -1,62 +1,143 @@ -from mocker import MockerTestCase - -from cloudinit import util - - -class TestMergeDict(MockerTestCase): - def test_simple_merge(self): - """Test simple non-conflict merge.""" - source = {"key1": "value1"} - candidate = {"key2": "value2"} - result = util.mergedict(source, candidate) - self.assertEqual({"key1": "value1", "key2": "value2"}, result) - - def test_nested_merge(self): - """Test nested merge.""" - source = {"key1": {"key1.1": "value1.1"}} - candidate = {"key1": {"key1.2": "value1.2"}} - result = util.mergedict(source, candidate) - self.assertEqual( - {"key1": {"key1.1": "value1.1", "key1.2": "value1.2"}}, result) - - def test_merge_does_not_override(self): - """Test that candidate doesn't override source.""" - source = {"key1": "value1", "key2": "value2"} - candidate = {"key1": "value2", "key2": "NEW VALUE"} - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_empty_candidate(self): - """Test empty candidate doesn't change source.""" - source = {"key": "value"} - candidate = {} - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_empty_source(self): - """Test empty source is replaced by candidate.""" - source = {} - candidate = {"key": "value"} - result = util.mergedict(source, candidate) - self.assertEqual(candidate, result) - - def test_non_dict_candidate(self): - """Test non-dict candidate is discarded.""" - source = {"key": "value"} - candidate = "not a dict" - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_non_dict_source(self): - """Test non-dict source is not modified with a dict candidate.""" - source = "not a dict" - candidate = {"key": "value"} - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_neither_dict(self): - """Test if neither candidate or source is dict source wins.""" - source = "source" - candidate = "candidate" - result = util.mergedict(source, candidate) - self.assertEqual(source, result) +import os + +from tests.unittests import helpers + +from cloudinit import mergers + + +class TestSimpleRun(helpers.MockerTestCase): + def test_basic_merge(self): + source = { + 'Blah': ['blah2'], + 'Blah3': 'c', + } + merge_with = { + 'Blah2': ['blah3'], + 'Blah3': 'b', + 'Blah': ['123'], + } + # Basic merge should not do thing special + merge_how = "list()+dict()+str()" + merger_set = mergers.string_extract_mergers(merge_how) + self.assertEquals(3, len(merger_set)) + merger = mergers.construct(merger_set) + merged = merger.merge(source, merge_with) + self.assertEquals(merged['Blah'], ['blah2']) + self.assertEquals(merged['Blah2'], ['blah3']) + self.assertEquals(merged['Blah3'], 'c') + + def test_dict_overwrite(self): + source = { + 'Blah': ['blah2'], + } + merge_with = { + 'Blah': ['123'], + } + # Now lets try a dict overwrite + merge_how = 
"list()+dict(overwrite)+str()" + merger_set = mergers.string_extract_mergers(merge_how) + self.assertEquals(3, len(merger_set)) + merger = mergers.construct(merger_set) + merged = merger.merge(source, merge_with) + self.assertEquals(merged['Blah'], ['123']) + + def test_string_append(self): + source = { + 'Blah': 'blah2', + } + merge_with = { + 'Blah': '345', + } + merge_how = "list()+dict()+str(append)" + merger_set = mergers.string_extract_mergers(merge_how) + self.assertEquals(3, len(merger_set)) + merger = mergers.construct(merger_set) + merged = merger.merge(source, merge_with) + self.assertEquals(merged['Blah'], 'blah2345') + + def test_list_extend(self): + source = ['abc'] + merge_with = ['123'] + merge_how = "list(extend)+dict()+str()" + merger_set = mergers.string_extract_mergers(merge_how) + self.assertEquals(3, len(merger_set)) + merger = mergers.construct(merger_set) + merged = merger.merge(source, merge_with) + self.assertEquals(merged, ['abc', '123']) + + def test_deep_merge(self): + source = { + 'a': [1, 'b', 2], + 'b': 'blahblah', + 'c': { + 'e': [1, 2, 3], + 'f': 'bigblobof', + 'iamadict': { + 'ok': 'ok', + } + }, + 'run': [ + 'runme', + 'runme2', + ], + 'runmereally': [ + 'e', ['a'], 'd', + ], + } + merge_with = { + 'a': ['e', 'f', 'g'], + 'b': 'more', + 'c': { + 'a': 'b', + 'f': 'stuff', + }, + 'run': [ + 'morecmd', + 'moremoremore', + ], + 'runmereally': [ + 'blah', ['b'], 'e', + ], + } + merge_how = "list(extend)+dict()+str(append)" + merger_set = mergers.string_extract_mergers(merge_how) + self.assertEquals(3, len(merger_set)) + merger = mergers.construct(merger_set) + merged = merger.merge(source, merge_with) + self.assertEquals(merged['a'], [1, 'b', 2, 'e', 'f', 'g']) + self.assertEquals(merged['b'], 'blahblahmore') + self.assertEquals(merged['c']['f'], 'bigblobofstuff') + self.assertEquals(merged['run'], ['runme', 'runme2', 'morecmd', 'moremoremore']) + self.assertEquals(merged['runmereally'], ['e', ['a'], 'd', 'blah', ['b'], 'e']) + + def test_dict_overwrite_layered(self): + source = { + 'Blah3': { + 'f': '3', + 'g': { + 'a': 'b', + } + } + } + merge_with = { + 'Blah3': { + 'e': '2', + 'g': { + 'e': 'f', + } + } + } + merge_how = "list()+dict()+str()" + merger_set = mergers.string_extract_mergers(merge_how) + self.assertEquals(3, len(merger_set)) + merger = mergers.construct(merger_set) + merged = merger.merge(source, merge_with) + self.assertEquals(merged['Blah3'], { + 'e': '2', + 'f': '3', + 'g': { + 'a': 'b', + 'e': 'f', + } + }) + diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py index 82a4c555..9e1fed7e 100644 --- a/tests/unittests/test_userdata.py +++ b/tests/unittests/test_userdata.py @@ -9,12 +9,17 @@ from email.mime.base import MIMEBase from mocker import MockerTestCase +from cloudinit import handlers +from cloudinit import helpers as c_helpers from cloudinit import log from cloudinit import sources from cloudinit import stages +from cloudinit import util INSTANCE_ID = "i-testing" +from tests.unittests import helpers + class FakeDataSource(sources.DataSource): @@ -26,22 +31,16 @@ class FakeDataSource(sources.DataSource): # FIXME: these tests shouldn't be checking log output?? # Weirddddd... - - -class TestConsumeUserData(MockerTestCase): +class TestConsumeUserData(helpers.FilesystemMockingTestCase): def setUp(self): - MockerTestCase.setUp(self) - # Replace the write so no actual files - # get written out... 
- self.mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) + helpers.FilesystemMockingTestCase.setUp(self) self._log = None self._log_file = None self._log_handler = None def tearDown(self): - MockerTestCase.tearDown(self) + helpers.FilesystemMockingTestCase.tearDown(self) if self._log_handler and self._log: self._log.removeHandler(self._log_handler) @@ -53,12 +52,71 @@ class TestConsumeUserData(MockerTestCase): self._log.addHandler(self._log_handler) return log_file + def test_merging_cloud_config(self): + blob = ''' +#cloud-config +a: b +e: f +run: + - b + - c +''' + message1 = MIMEBase("text", "cloud-config") + message1['Merge-Type'] = 'dict()+list(extend)+str(append)' + message1.set_payload(blob) + + blob2 = ''' +#cloud-config +a: e +e: g +run: + - stuff + - morestuff +''' + message2 = MIMEBase("text", "cloud-config") + message2['Merge-Type'] = 'dict()+list(extend)+str()' + message2.set_payload(blob2) + + blob3 = ''' +#cloud-config +e: + - 1 + - 2 + - 3 +''' + message3 = MIMEBase("text", "cloud-config") + message3['Merge-Type'] = 'dict()+list()+str()' + message3.set_payload(blob3) + + messages = [message1, message2, message3] + + paths = c_helpers.Paths({}, ds=FakeDataSource('')) + cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths) + + new_root = self.makeDir() + self.patchUtils(new_root) + self.patchOS(new_root) + cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None, None) + for i, m in enumerate(messages): + headers = dict(m) + fn = "part-%s" % (i + 1) + payload = m.get_payload(decode=True) + cloud_cfg.handle_part(None, headers['Content-Type'], + fn, payload, None, headers) + cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None, None) + contents = util.load_file(paths.get_ipath('cloud_config')) + contents = util.load_yaml(contents) + self.assertEquals(contents['run'], ['b', 'c', 'stuff', 'morestuff']) + self.assertEquals(contents['a'], 'be') + self.assertEquals(contents['e'], 'fg') + def test_unhandled_type_warning(self): """Raw text without magic is ignored but shows warning.""" ci = stages.Init() data = "arbitrary text\n" ci.datasource = FakeDataSource(data) + self.mock_write = self.mocker.replace("cloudinit.util.write_file", passthrough=False) self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mocker.replay() @@ -76,6 +134,7 @@ class TestConsumeUserData(MockerTestCase): message.set_payload("Just text") ci.datasource = FakeDataSource(message.as_string()) + self.mock_write = self.mocker.replace("cloudinit.util.write_file", passthrough=False) self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mocker.replay() @@ -93,6 +152,7 @@ class TestConsumeUserData(MockerTestCase): ci.datasource = FakeDataSource(script) outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") + self.mock_write = self.mocker.replace("cloudinit.util.write_file", passthrough=False) self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mock_write(outpath, script, 0700) self.mocker.replay() @@ -111,6 +171,7 @@ class TestConsumeUserData(MockerTestCase): ci.datasource = FakeDataSource(message.as_string()) outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") + self.mock_write = self.mocker.replace("cloudinit.util.write_file", passthrough=False) self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mock_write(outpath, script, 0700) self.mocker.replay() @@ -129,6 +190,7 @@ class TestConsumeUserData(MockerTestCase): ci.datasource = 
FakeDataSource(message.as_string()) outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") + self.mock_write = self.mocker.replace("cloudinit.util.write_file", passthrough=False) self.mock_write(outpath, script, 0700) self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mocker.replay() -- cgit v1.2.3 From fc6aa5aa54ee35ff0a3eff823bae0d3cf9b34bc1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 6 Mar 2013 19:24:05 -0800 Subject: Continue working on merging code. --- cloudinit/config/cc_landscape.py | 3 ++- cloudinit/config/cc_mounts.py | 3 ++- cloudinit/distros/__init__.py | 15 +++++++------ cloudinit/handlers/__init__.py | 3 ++- cloudinit/handlers/cloud_config.py | 15 ++++++------- cloudinit/helpers.py | 3 ++- cloudinit/mergers/__init__.py | 13 +++++++++--- cloudinit/sources/DataSourceAltCloud.py | 5 +++-- cloudinit/sources/DataSourceCloudStack.py | 3 --- cloudinit/sources/DataSourceConfigDrive.py | 4 +++- cloudinit/sources/DataSourceEc2.py | 3 --- cloudinit/sources/DataSourceMAAS.py | 3 ++- cloudinit/sources/DataSourceNoCloud.py | 5 ++--- cloudinit/sources/DataSourceNone.py | 3 --- cloudinit/sources/DataSourceOVF.py | 3 ++- cloudinit/sources/__init__.py | 10 ++++++--- cloudinit/stages.py | 9 ++++---- cloudinit/type_utils.py | 34 ++++++++++++++++++++++++++++++ cloudinit/util.py | 33 ++++++++++------------------- tests/unittests/test_userdata.py | 4 +++- 20 files changed, 104 insertions(+), 70 deletions(-) create mode 100644 cloudinit/type_utils.py diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 02610dd0..6734efee 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -24,6 +24,7 @@ from StringIO import StringIO from configobj import ConfigObj +from cloudinit import type_utils from cloudinit import util from cloudinit.settings import PER_INSTANCE @@ -58,7 +59,7 @@ def handle(_name, cfg, cloud, log, _args): if not isinstance(ls_cloudcfg, (dict)): raise RuntimeError(("'landscape' key existed in config," " but not a dictionary type," - " is a %s instead"), util.obj_name(ls_cloudcfg)) + " is a %s instead"), type_utils.obj_name(ls_cloudcfg)) if not ls_cloudcfg: return diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index cb772c86..6ebe563d 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -22,6 +22,7 @@ from string import whitespace # pylint: disable=W0402 import re +from cloudinit import type_utils from cloudinit import util # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1 @@ -60,7 +61,7 @@ def handle(_name, cfg, cloud, log, _args): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): log.warn("Mount option %s not a list, got a %s instead", - (i + 1), util.obj_name(cfgmnt[i])) + (i + 1), type_utils.obj_name(cfgmnt[i])) continue startname = str(cfgmnt[i][0]) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 6a684b89..eeea6af1 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -31,6 +31,7 @@ import re from cloudinit import importer from cloudinit import log as logging from cloudinit import ssh_util +from cloudinit import type_utils from cloudinit import util from cloudinit.distros.parsers import hosts @@ -427,7 +428,7 @@ class Distro(object): lines.append("%s %s" % (user, rules)) else: msg = "Can not create sudoers rule addition with type %r" - raise TypeError(msg % (util.obj_name(rules))) + raise TypeError(msg % 
(type_utils.obj_name(rules))) content = "\n".join(lines) content += "\n" # trailing newline @@ -550,7 +551,7 @@ def _normalize_groups(grp_cfg): c_grp_cfg[k] = [v] else: raise TypeError("Bad group member type %s" % - util.obj_name(v)) + type_utils.obj_name(v)) else: if isinstance(v, (list)): c_grp_cfg[k].extend(v) @@ -558,13 +559,13 @@ def _normalize_groups(grp_cfg): c_grp_cfg[k].append(v) else: raise TypeError("Bad group member type %s" % - util.obj_name(v)) + type_utils.obj_name(v)) elif isinstance(i, (str, basestring)): if i not in c_grp_cfg: c_grp_cfg[i] = [] else: raise TypeError("Unknown group name type %s" % - util.obj_name(i)) + type_utils.obj_name(i)) grp_cfg = c_grp_cfg groups = {} if isinstance(grp_cfg, (dict)): @@ -573,7 +574,7 @@ def _normalize_groups(grp_cfg): else: raise TypeError(("Group config must be list, dict " " or string types only and not %s") % - util.obj_name(grp_cfg)) + type_utils.obj_name(grp_cfg)) return groups @@ -604,7 +605,7 @@ def _normalize_users(u_cfg, def_user_cfg=None): ad_ucfg.append(v) else: raise TypeError(("Unmappable user value type %s" - " for key %s") % (util.obj_name(v), k)) + " for key %s") % (type_utils.obj_name(v), k)) u_cfg = ad_ucfg elif isinstance(u_cfg, (str, basestring)): u_cfg = util.uniq_merge_sorted(u_cfg) @@ -629,7 +630,7 @@ def _normalize_users(u_cfg, def_user_cfg=None): else: raise TypeError(("User config must be dictionary/list " " or string types only and not %s") % - util.obj_name(user_config)) + type_utils.obj_name(user_config)) # Ensure user options are in the right python friendly format if users: diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 63fdb948..924463ce 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -27,6 +27,7 @@ from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES) from cloudinit import importer from cloudinit import log as logging +from cloudinit import type_utils from cloudinit import util LOG = logging.getLogger(__name__) @@ -76,7 +77,7 @@ class Handler(object): self.frequency = frequency def __repr__(self): - return "%s: [%s]" % (util.obj_name(self), self.list_types()) + return "%s: [%s]" % (type_utils.obj_name(self), self.list_types()) @abc.abstractmethod def list_types(self): diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index d458dee2..5f519f78 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -29,8 +29,8 @@ from cloudinit.settings import (PER_ALWAYS) LOG = logging.getLogger(__name__) -DEF_MERGE_TYPE = "list(extend)+dict()+str(append)" MERGE_HEADER = 'Merge-Type' +DEF_MERGERS = mergers.default_mergers() class CloudConfigPartHandler(handlers.Handler): @@ -39,9 +39,7 @@ class CloudConfigPartHandler(handlers.Handler): self.cloud_buf = None self.cloud_fn = paths.get_ipath("cloud_config") self.file_names = [] - self.mergers = [ - mergers.string_extract_mergers(DEF_MERGE_TYPE), - ] + self.mergers = [DEF_MERGERS] def list_types(self): return [ @@ -59,6 +57,7 @@ class CloudConfigPartHandler(handlers.Handler): file_lines.append("# %s" % (fn)) file_lines.append("") if self.cloud_buf is not None: + # Something was actually gathered.... 
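+            # yielding a file of the shape (part names illustrative):
+            #
+            #   #cloud-config
+            #
+            #   # from 2 files
+            #   # part-001
+            #   # part-002
+            #
+            #   <merged yaml>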
lines = [ "#cloud-config", '', @@ -86,7 +85,7 @@ class CloudConfigPartHandler(handlers.Handler): all_mergers.extend(mergers_yaml) all_mergers.extend(mergers_header) if not all_mergers: - all_mergers = mergers.string_extract_mergers(DEF_MERGE_TYPE) + all_mergers = DEF_MERGERS return all_mergers def _merge_part(self, payload, headers): @@ -94,7 +93,7 @@ class CloudConfigPartHandler(handlers.Handler): # Use the merger list from the last call, since it is the one # that will be defining how to merge with the next payload. curr_mergers = list(self.mergers[-1]) - LOG.debug("Merging with %s", curr_mergers) + LOG.debug("Merging by applying %s", curr_mergers) self.mergers.append(next_mergers) merger = mergers.construct(curr_mergers) if self.cloud_buf is None: @@ -106,9 +105,7 @@ class CloudConfigPartHandler(handlers.Handler): def _reset(self): self.file_names = [] self.cloud_buf = None - self.mergers = [ - mergers.string_extract_mergers(DEF_MERGE_TYPE), - ] + self.mergers = [DEF_MERGERS] def handle_part(self, _data, ctype, filename, payload, _freq, headers): if ctype == handlers.CONTENT_START: diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 2077401c..a4e6fb03 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -32,6 +32,7 @@ from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, CFG_ENV_NAME) from cloudinit import log as logging +from cloudinit import type_utils from cloudinit import util LOG = logging.getLogger(__name__) @@ -68,7 +69,7 @@ class FileLock(object): self.fn = fn def __str__(self): - return "<%s using file %r>" % (util.obj_name(self), self.fn) + return "<%s using file %r>" % (type_utils.obj_name(self), self.fn) def canon_sem_name(name): diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 4a112165..453426af 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -20,11 +20,12 @@ import re from cloudinit import importer from cloudinit import log as logging -from cloudinit import util +from cloudinit import type_utils NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$") LOG = logging.getLogger(__name__) +DEF_MERGE_TYPE = "list(extend)+dict()+str(append)" class UnknownMerger(object): @@ -42,7 +43,7 @@ class UnknownMerger(object): # If not found the merge will be given to a '_handle_unknown' # function which can decide what to do wit the 2 values. 
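    # e.g. merging two dicts dispatches to '_on_dict', lists to
    # '_on_list' and strings to '_on_str', the method name being
    # derived from the source value's type name.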
def merge(self, source, merge_with): - type_name = util.obj_name(source) + type_name = type_utils.obj_name(source) type_name = type_name.lower() method_name = "_on_%s" % (type_name) meth = None @@ -127,6 +128,10 @@ def string_extract_mergers(merge_how): return parsed_mergers +def default_mergers(): + return tuple(string_extract_mergers(DEF_MERGE_TYPE)) + + def construct(parsed_mergers): mergers_to_be = [] for (m_name, m_ops) in parsed_mergers: @@ -145,4 +150,6 @@ def construct(parsed_mergers): root = LookupMerger(mergers) for (attr, opts) in mergers_to_be: mergers.append(attr(root, opts)) - return root \ No newline at end of file + return root + + diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 9812bdcb..64548d43 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -30,6 +30,7 @@ import os.path from cloudinit import log as logging from cloudinit import sources from cloudinit import util + from cloudinit.util import ProcessExecutionError LOG = logging.getLogger(__name__) @@ -91,8 +92,8 @@ class DataSourceAltCloud(sources.DataSource): self.supported_seed_starts = ("/", "file://") def __str__(self): - mstr = "%s [seed=%s]" % (util.obj_name(self), self.seed) - return mstr + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) def get_cloud_type(self): ''' diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 076dba5a..c0e1a23c 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -59,9 +59,6 @@ class DataSourceCloudStack(sources.DataSource): return gw return None - def __str__(self): - return util.obj_name(self) - def _get_url_settings(self): mcfg = self.ds_cfg if not mcfg: diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index c7826851..46abd772 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -51,7 +51,9 @@ class DataSourceConfigDrive(sources.DataSource): self.ec2_metadata = None def __str__(self): - mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode, + root = sources.DataSource.__str__(self) + mstr = "%s [%s,ver=%s]" % (root, + self.dsmode, self.version) mstr += "[source=%s]" % (self.source) return mstr diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 2db53446..f010e640 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -49,9 +49,6 @@ class DataSourceEc2(sources.DataSource): self.seed_dir = os.path.join(paths.seed_dir, "ec2") self.api_ver = DEF_MD_VERSION - def __str__(self): - return util.obj_name(self) - def get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index b55d8a21..612d8ffa 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -50,7 +50,8 @@ class DataSourceMAAS(sources.DataSource): self.oauth_clockskew = None def __str__(self): - return "%s [%s]" % (util.obj_name(self), self.base_url) + root = sources.DataSource.__str__(self) + return "%s [%s]" % (root, self.base_url) def get_data(self): mcfg = self.ds_cfg diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index bed500a2..9a770d38 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ 
b/cloudinit/sources/DataSourceNoCloud.py @@ -40,9 +40,8 @@ class DataSourceNoCloud(sources.DataSource): self.supported_seed_starts = ("/", "file://") def __str__(self): - mstr = "%s [seed=%s][dsmode=%s]" % (util.obj_name(self), - self.seed, self.dsmode) - return mstr + root = sources.DataSource.__str__(self) + return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) def get_data(self): defaults = { diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index c2125bee..e2175e1f 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -41,9 +41,6 @@ class DataSourceNone(sources.DataSource): def get_instance_id(self): return 'iid-datasource-none' - def __str__(self): - return util.obj_name(self) - @property def is_disconnected(self): return True diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index e90150c6..ae139074 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -43,7 +43,8 @@ class DataSourceOVF(sources.DataSource): self.supported_seed_starts = ("/", "file://") def __str__(self): - return "%s [seed=%s]" % (util.obj_name(self), self.seed) + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) def get_data(self): found = [] diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 96baff90..d8fbacdd 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -25,6 +25,7 @@ import os from cloudinit import importer from cloudinit import log as logging +from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util @@ -52,7 +53,7 @@ class DataSource(object): self.userdata = None self.metadata = None self.userdata_raw = None - name = util.obj_name(self) + name = type_utils.obj_name(self) if name.startswith(DS_PREFIX): name = name[len(DS_PREFIX):] self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, @@ -62,6 +63,9 @@ class DataSource(object): else: self.ud_proc = ud_proc + def __str__(self): + return type_utils.obj_name(self) + def get_userdata(self, apply_filter=False): if self.userdata is None: self.userdata = self.ud_proc.process(self.get_userdata_raw()) @@ -214,7 +218,7 @@ def normalize_pubkey_data(pubkey_data): def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list): ds_list = list_sources(cfg_list, ds_deps, pkg_list) - ds_names = [util.obj_name(f) for f in ds_list] + ds_names = [type_utils.obj_name(f) for f in ds_list] LOG.debug("Searching for data source in: %s", ds_names) for cls in ds_list: @@ -222,7 +226,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list): LOG.debug("Seeing if we can get any data from %s", cls) s = cls(sys_cfg, distro, paths) if s.get_data(): - return (s, util.obj_name(cls)) + return (s, type_utils.obj_name(cls)) except Exception: util.logexc(LOG, "Getting data from %s failed", cls) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 94a267df..531e7997 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -43,6 +43,7 @@ from cloudinit import helpers from cloudinit import importer from cloudinit import log as logging from cloudinit import sources +from cloudinit import type_utils from cloudinit import util LOG = logging.getLogger(__name__) @@ -220,7 +221,7 @@ class Init(object): # Any config provided??? 
pkg_list = self.cfg.get('datasource_pkg_list') or [] # Add the defaults at the end - for n in ['', util.obj_name(sources)]: + for n in ['', type_utils.obj_name(sources)]: if n not in pkg_list: pkg_list.append(n) cfg_list = self.cfg.get('datasource_list') or [] @@ -280,7 +281,7 @@ class Init(object): dp = self.paths.get_cpath('data') # Write what the datasource was and is.. - ds = "%s: %s" % (util.obj_name(self.datasource), self.datasource) + ds = "%s: %s" % (type_utils.obj_name(self.datasource), self.datasource) previous_ds = None ds_fn = os.path.join(idir, 'datasource') try: @@ -497,7 +498,7 @@ class Modules(object): else: raise TypeError(("Failed to read '%s' item in config," " unknown type %s") % - (item, util.obj_name(item))) + (item, type_utils.obj_name(item))) return module_list def _fixup_modules(self, raw_mods): @@ -515,7 +516,7 @@ class Modules(object): # Reset it so when ran it will get set to a known value freq = None mod_locs = importer.find_module(mod_name, - ['', util.obj_name(config)], + ['', type_utils.obj_name(config)], ['handle']) if not mod_locs: LOG.warn("Could not find module named %s", mod_name) diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py new file mode 100644 index 00000000..2decbfc5 --- /dev/null +++ b/cloudinit/type_utils.py @@ -0,0 +1,34 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Scott Moser +# Author: Juerg Haefliger +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +# pylint: disable=C0302 + +import types + + +def obj_name(obj): + if isinstance(obj, (types.TypeType, + types.ModuleType, + types.FunctionType, + types.LambdaType)): + return str(obj.__name__) + return obj_name(obj.__class__) diff --git a/cloudinit/util.py b/cloudinit/util.py index ab918433..73bf6304 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -43,14 +43,15 @@ import subprocess import sys import tempfile import time -import types import urlparse import yaml from cloudinit import importer from cloudinit import log as logging +from cloudinit import mergers from cloudinit import safeyaml +from cloudinit import type_utils from cloudinit import url_helper as uhelp from cloudinit import version @@ -194,11 +195,12 @@ def fork_cb(child_cb, *args): os._exit(0) # pylint: disable=W0212 except: logexc(LOG, ("Failed forking and" - " calling callback %s"), obj_name(child_cb)) + " calling callback %s"), + type_utils.obj_name(child_cb)) os._exit(1) # pylint: disable=W0212 else: LOG.debug("Forked child %s who will run callback %s", - fid, obj_name(child_cb)) + fid, type_utils.obj_name(child_cb)) def is_true(val, addons=None): @@ -513,15 +515,6 @@ def make_url(scheme, host, port=None, return urlparse.urlunparse(pieces) -def obj_name(obj): - if isinstance(obj, (types.TypeType, - types.ModuleType, - types.FunctionType, - types.LambdaType)): - return str(obj.__name__) - return obj_name(obj.__class__) - - def mergemanydict(srcs, reverse=False): if reverse: srcs = reversed(srcs) @@ -538,13 +531,9 @@ def mergedict(src, cand): If C{src} has a key C{cand} will not override. Nested dictionaries are merged recursively. """ - if isinstance(src, dict) and isinstance(cand, dict): - for (k, v) in cand.iteritems(): - if k not in src: - src[k] = v - else: - src[k] = mergedict(src[k], v) - return src + raw_mergers = mergers.default_mergers() + merger = mergers.construct(raw_mergers) + return merger.merge(src, cand) @contextlib.contextmanager @@ -645,7 +634,7 @@ def load_yaml(blob, default=None, allowed=(dict,)): # Yes this will just be caught, but thats ok for now... 
raise TypeError(("Yaml load allows %s root types," " but got %s instead") % - (allowed, obj_name(converted))) + (allowed, type_utils.obj_name(converted))) loaded = converted except (yaml.YAMLError, TypeError, ValueError): if len(blob) == 0: @@ -714,7 +703,7 @@ def read_conf_with_confd(cfgfile): if not isinstance(confd, (str, basestring)): raise TypeError(("Config file %s contains 'conf_d' " "with non-string type %s") % - (cfgfile, obj_name(confd))) + (cfgfile, type_utils.obj_name(confd))) else: confd = str(confd).strip() elif os.path.isdir("%s.d" % cfgfile): @@ -1472,7 +1461,7 @@ def shellify(cmdlist, add_header=True): else: raise RuntimeError(("Unable to shellify type %s" " which is not a list or string") - % (obj_name(args))) + % (type_utils.obj_name(args))) LOG.debug("Shellified %s commands.", cmds_made) return content diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py index 9e1fed7e..ef0dd7b8 100644 --- a/tests/unittests/test_userdata.py +++ b/tests/unittests/test_userdata.py @@ -74,7 +74,7 @@ run: - morestuff ''' message2 = MIMEBase("text", "cloud-config") - message2['Merge-Type'] = 'dict()+list(extend)+str()' + message2['X-Merge-Type'] = 'dict()+list(extend)+str()' message2.set_payload(blob2) blob3 = ''' @@ -83,6 +83,7 @@ e: - 1 - 2 - 3 +p: 1 ''' message3 = MIMEBase("text", "cloud-config") message3['Merge-Type'] = 'dict()+list()+str()' @@ -109,6 +110,7 @@ e: self.assertEquals(contents['run'], ['b', 'c', 'stuff', 'morestuff']) self.assertEquals(contents['a'], 'be') self.assertEquals(contents['e'], 'fg') + self.assertEquals(contents['p'], 1) def test_unhandled_type_warning(self): """Raw text without magic is ignored but shows warning.""" -- cgit v1.2.3 From 1e4f41e900a9c942354428b0f312428af00031ce Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 6 Mar 2013 19:36:31 -0800 Subject: Make conf.d and the default merging use the new merging algos. --- cloudinit/sources/DataSourceConfigDrive.py | 2 +- cloudinit/sources/DataSourceNoCloud.py | 8 ++++---- cloudinit/sources/DataSourceOVF.py | 4 ++-- cloudinit/util.py | 25 ++++++++++++------------- 4 files changed, 19 insertions(+), 20 deletions(-) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 46abd772..0216ed07 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -154,7 +154,7 @@ class DataSourceConfigDrive(sources.DataSource): return False md = results['metadata'] - md = util.mergedict(md, DEFAULT_METADATA) + md = util.mergemanydict([md, DEFAULT_METADATA]) # Perform some metadata 'fixups' # diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 9a770d38..7800812b 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -64,7 +64,7 @@ class DataSourceNoCloud(sources.DataSource): # Check to see if the seed dir has data. seedret = {} if util.read_optional_seed(seedret, base=self.seed_dir + "/"): - md = util.mergedict(md, seedret['meta-data']) + md = util.mergemanydict([md, seedret['meta-data']]) ud = seedret['user-data'] found.append(self.seed_dir) LOG.debug("Using seeded cache data from %s", self.seed_dir) @@ -88,7 +88,7 @@ class DataSourceNoCloud(sources.DataSource): LOG.debug("Attempting to use data from %s", dev) (newmd, newud) = util.mount_cb(dev, util.read_seeded) - md = util.mergedict(newmd, md) + md = util.mergemanydict([newmd, md]) ud = newud # For seed from a device, the default mode is 'net'. 
@@ -139,11 +139,11 @@ class DataSourceNoCloud(sources.DataSource): LOG.debug("Using seeded cache data from %s", seedfrom) # Values in the command line override those from the seed - md = util.mergedict(md, md_seed) + md = util.mergemanydict([md, md_seed]) found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults - md = util.mergedict(md, defaults) + md = util.mergemanydict([md, defaults]) # Update the network-interfaces if metadata had 'network-interfaces' # entry and this is the local datasource, or 'seedfrom' was used diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index ae139074..0530c4b7 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -94,11 +94,11 @@ class DataSourceOVF(sources.DataSource): (md_seed, ud) = util.read_seeded(seedfrom, timeout=None) LOG.debug("Using seeded cache data from %s", seedfrom) - md = util.mergedict(md, md_seed) + md = util.mergemanydict([md, md_seed]) found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults - md = util.mergedict(md, defaults) + md = util.mergemanydict([md, defaults]) self.seed = ",".join(found) self.metadata = md diff --git a/cloudinit/util.py b/cloudinit/util.py index 73bf6304..e5c6f4ea 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -519,23 +519,22 @@ def mergemanydict(srcs, reverse=False): if reverse: srcs = reversed(srcs) m_cfg = {} + merge_how = [mergers.default_mergers()] for a_cfg in srcs: if a_cfg: - m_cfg = mergedict(m_cfg, a_cfg) + # Take the last merger as the one that + # will define how to merge next... + mergers_to_apply = list(merge_how[-1]) + merger = mergers.construct(mergers_to_apply) + m_cfg = merger.merge(m_cfg, a_cfg) + # If the config has now has new merger set, + # extract them to be used next time... + new_mergers = mergers.dict_extract_mergers(m_cfg) + if new_mergers: + merge_how.append(new_mergers) return m_cfg -def mergedict(src, cand): - """ - Merge values from C{cand} into C{src}. - If C{src} has a key C{cand} will not override. - Nested dictionaries are merged recursively. - """ - raw_mergers = mergers.default_mergers() - merger = mergers.construct(raw_mergers) - return merger.merge(src, cand) - - @contextlib.contextmanager def chdir(ndir): curr = os.getcwd() @@ -714,7 +713,7 @@ def read_conf_with_confd(cfgfile): # Conf.d settings override input configuration confd_cfg = read_conf_d(confd) - return mergedict(confd_cfg, cfg) + return mergemanydict([confd_cfg, cfg]) def read_cc_from_cmdline(cmdline=None): -- cgit v1.2.3 From 21aec9e44c27b9bf1c96314f0449fd39793d1c73 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 6 Mar 2013 22:24:29 -0800 Subject: Add some nice docs on what this is. 
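
One of the things the doc covers is how a user-data part can pick the merge
algorithm that will be applied to it. A small illustrative sketch (mirroring
the headers exercised by tests/unittests/test_userdata.py in this series):

    from email.mime.base import MIMEBase

    part = MIMEBase("text", "cloud-config")
    # Either header works; 'Merge-Type' is looked up before 'X-Merge-Type'.
    part['Merge-Type'] = 'dict()+list(extend)+str()'
    part.set_payload("run_cmd:\n - bash3\n - bash4\n")
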
--- cloudinit/mergers/__init__.py | 2 +- doc/merging.txt | 179 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 180 insertions(+), 1 deletion(-) create mode 100644 doc/merging.txt diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 453426af..45e88fb3 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -85,7 +85,7 @@ class LookupMerger(UnknownMerger): def dict_extract_mergers(config): parsed_mergers = [] - raw_mergers = config.get('merger_how') + raw_mergers = config.get('merge_how') if raw_mergers is None: raw_mergers = config.get('merge_type') if raw_mergers is None: diff --git a/doc/merging.txt b/doc/merging.txt new file mode 100644 index 00000000..f719aec8 --- /dev/null +++ b/doc/merging.txt @@ -0,0 +1,179 @@ +Arriving in 0.7.2 is a new way to handle dictionary merging in cloud-init. +--- + +Overview +-------- + +This was done because it has been a common feature request that there be a +way to specify how cloud-config yaml "dictionaries" are merged together when +there are multiple yamls to merge together (say when performing an #include). + +Since previously the merging algorithm was very simple and would only overwrite +and not append lists, or strings, and so on it was decided to create a new and +improved way to merge dictionaries (and there contained objects) together in a +way that is customizable, thus allowing for users who provide cloud-config data +to determine exactly how there objects will be merged. + +For example. + +#cloud-config (1) +run_cmd: + - bash1 + - bash2 + +#cloud-config (2) +run_cmd: + - bash3 + - bash4 + +The previous way of merging the following 2 objects would result in a final +cloud-config object that contains the following. + +#cloud-config (merged) +run_cmd: + - bash3 + - bash4 + +Typically this is not what users want, instead they would likely prefer: + +#cloud-config (merged) +run_cmd: + - bash1 + - bash2 + - bash3 + - bash4 + +This way makes it easier to combine the various cloud-config objects you have +into a more useful list, thus reducing duplication that would have had to +occur in the previous method to accomplish the same result. + +Customizability +--------------- + +Since the above merging algorithm may not always be the desired merging +algorithm (like how the merging algorithm in < 0.7.2 was not always the preferred +one) the concept of customizing how merging can be done was introduced through +a new concept call 'merge classes'. + +A merge class is a class defintion which provides functions that can be used +to merge a given type with another given type. + +An example of one of these merging classes is the following: + +class Merger(object): + def __init__(self, merger, opts): + self._merger = merger + self._overwrite = 'overwrite' in opts + + # This merging algorithm will attempt to merge with + # another dictionary, on encountering any other type of object + # it will not merge with said object, but will instead return + # the original value + # + # On encountering a dictionary, it will create a new dictionary + # composed of the original and the one to merge with, if 'overwrite' + # is enabled then keys that exist in the original will be overwritten + # by keys in the one to merge with (and associated values). Otherwise + # if not in overwrite mode the 2 conflicting keys themselves will + # be merged. 
+ def _on_dict(self, value, merge_with): + if not isinstance(merge_with, (dict)): + return value + merged = dict(value) + for (k, v) in merge_with.items(): + if k in merged: + if not self._overwrite: + merged[k] = self._merger.merge(merged[k], v) + else: + merged[k] = v + else: + merged[k] = v + return merged + +As you can see there is a '_on_dict' method here that will be given a source value +and a value to merge with. The result will be the merged object. This code itself +is called by another merging class which 'directs' the merging to happen by +analyzing the types of the objects to merge and attempting to find a know object +that will merge that type. I will avoid pasting that here, but it can be found +in the mergers/__init__.py file (see LookupMerger and UnknownMerger). + +So following the typical cloud-init way of allowing source code to be downloaded +and used dynamically, it is possible for users to inject there own merging files +to handle specific types of merging as they choose (the basic ones included will +handle lists, dicts, and strings). Note how each merge can have options associated +with it which affect how the merging is performed, for example a dictionary merger +can be told to overwrite instead of attempt to merge, or a string merger can be +told to append strings instead of discarding other strings to merge with. + +How to activate +--------------- + +There are a few ways to activate the merging algorithms, and to customize them +for your own usage. + +1. The first way involves the usage of MIME messages in cloud-init to specify + multipart documents (this is one way in which multiple cloud-config is joined + together into a single cloud-config). Two new headers are looked for, both + of which can define the way merging is done (the first header to exist wins). + These new headers (in lookup order) are 'Merge-Type' and 'X-Merge-Type'. The value + should be a string which will satisfy the new merging format defintion (see + below for this format). +2. The second way is actually specifying the merge-type in the body of the + cloud-config dictionary. There are 2 ways to specify this, either as a string + or as a dictionary (see format below). The keys that are looked up for this + definition are the following (in order), 'merge_how', 'merge_type'. + +*String format* + +The string format that is expected is the following. + +"classname(option1,option2)+classname2(option3,option4)" (and so on) + +The class name there will be connected to class names used when looking for the +class that can be used to merge and options provided will be given to the class +on construction of that class. + +For example, the default string that is used when none is provided is the following: + +"list(extend)+dict()+str(append)" + +*Dictionary format* + +In cases where a dictionary can be used to specify the same information as the +string format (ie option #2 of above) it can be used, for example. + +merge_how: + - name: list + settings: [extend] + - name: dict + settings: [] + - name: str + settings: [append] + +This would be the equivalent format for default string format but in dictionary +form instead of string form. + +Specifying multiple types and its effect +---------------------------------------- + +Now you may be asking yourself, if I specify a merge-type header or dictionary +for every cloud-config that I provide, what exactly happens? 
+ +The answer is that when merging, a stack of 'merging classes' is kept, the +first one on that stack is the default merging classes, this set of mergers +will be used when the first cloud-config is merged with the initial empty +cloud-config dictionary. If the cloud-config that was just merged provided a +set of merging classes (via the above formats) then those merging classes will +be pushed onto the stack. Now if there is a second cloud-config to be merged then +the merging classes from the cloud-config before the first will be used (not the +default) and so on. This way a cloud-config can decide how it will merge with a +cloud-config dictionary coming after it. + +Other uses +---------- + +The default merging algorithm for merging conf.d yaml files (which form a initial +yaml config for cloud-init) was also changed to use this mechanism so its full +benefits (and customization) can also be used there as well. Other places that +used the previous merging are also similar now extensible (metadata merging for +example). -- cgit v1.2.3 From dca9b6c94e10f9f42ad0f129ae6fd38ebb44f4b5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 14:54:25 -0500 Subject: pep8 and pylint fixes --- cloudinit/config/cc_power_state_change.py | 2 +- cloudinit/distros/__init__.py | 6 +++--- cloudinit/distros/debian.py | 5 ++++- cloudinit/distros/rhel.py | 5 ++++- cloudinit/ssh_util.py | 10 ++++------ cloudinit/util.py | 2 +- doc/rtd/conf.py | 8 ++++---- tests/unittests/helpers.py | 1 + tests/unittests/test_datasource/test_nocloud.py | 2 +- .../test_handler/test_handler_growpart.py | 22 +++++++++++----------- tests/unittests/test_sshutil.py | 5 +++-- 11 files changed, 37 insertions(+), 31 deletions(-) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index aefa3aff..de0c0bbd 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -75,7 +75,7 @@ def load_power_state(cfg): ','.join(opt_map.keys())) delay = pstate.get("delay", "now") - if delay != "now" and not re.match("\+[0-9]+", delay): + if delay != "now" and not re.match(r"\+[0-9]+", delay): raise TypeError("power_state[delay] must be 'now' or '+m' (minutes).") args = ["shutdown", opt_map[mode], delay] diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 0db4aac7..2a2d8216 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -73,7 +73,7 @@ class Distro(object): self._apply_hostname(hostname) @abc.abstractmethod - def package_command(self, cmd, args=None): + def package_command(self, cmd, args=None, pkgs=None): raise NotImplementedError() @abc.abstractmethod @@ -370,7 +370,7 @@ class Distro(object): # Import SSH keys if 'ssh_authorized_keys' in kwargs: keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, key_prefix=None) + ssh_util.setup_user_keys(keys, name, options=None) return True @@ -776,7 +776,7 @@ def normalize_users_groups(cfg, distro): # Just add it on at the end... 
base_users.append({'name': 'default'}) elif isinstance(base_users, (dict)): - base_users['default'] = base_users.get('default', True) + base_users['default'] = dict(base_users).get('default', True) elif isinstance(base_users, (str, basestring)): # Just append it on to be re-parsed later base_users += ",default" diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 1a8e927b..1f2848d2 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -142,7 +142,10 @@ class Distro(distros.Distro): # This ensures that the correct tz will be used for the system util.copy(tz_file, self.tz_local_fn) - def package_command(self, command, args=None, pkgs=[]): + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + e = os.environ.copy() # See: http://tiny.cc/kg91fw # Or: http://tiny.cc/mh91fw diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 2f91e386..9fee5fd1 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -208,7 +208,10 @@ class Distro(distros.Distro): # This ensures that the correct tz will be used for the system util.copy(tz_file, self.tz_local_fn) - def package_command(self, command, args=None, pkgs=[]): + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + cmd = ['yum'] # If enabled, then yum will be tolerant of errors on the command line # with regard to packages. diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 65fab117..95133236 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -19,9 +19,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from StringIO import StringIO - -import csv import os import pwd @@ -42,6 +39,7 @@ VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", "ecdsa-sha2-nistp384-cert-v01@openssh.com", "ecdsa-sha2-nistp521-cert-v01@openssh.com") + class AuthKeyLine(object): def __init__(self, source, keytype=None, base64=None, comment=None, options=None): @@ -141,14 +139,14 @@ class AuthKeyLineParser(object): ent = line.strip() try: (keytype, base64, comment) = parse_ssh_key(ent) - except TypeError as e: + except TypeError: (keyopts, remain) = self._extract_options(ent) if options is None: options = keyopts - + try: (keytype, base64, comment) = parse_ssh_key(remain) - except TypeError as e: + except TypeError: return AuthKeyLine(src_line) return AuthKeyLine(src_line, keytype=keytype, base64=base64, diff --git a/cloudinit/util.py b/cloudinit/util.py index d0a6f81c..afde2066 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1530,7 +1530,7 @@ def get_proc_env(pid): fn = os.path.join("/proc/", str(pid), "environ") try: contents = load_file(fn) - toks = contents.split("\0") + toks = contents.split("\x00") for tok in toks: if tok == "": continue diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 87fc40ab..c9ae79f4 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -17,13 +17,13 @@ from cloudinit import version # General information about the project. project = 'Cloud-Init' -# -- General configuration ----------------------------------------------------- +# -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
+# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.intersphinx', ] @@ -55,7 +55,7 @@ exclude_patterns = [] # output. They are ignored by default. show_authors = False -# -- Options for HTML output --------------------------------------------------- +# -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 91a50e18..904677f1 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -183,6 +183,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): setattr(mod, f, trap_func) self.patched_funcs.append((mod, f, func)) + def populate_dir(path, files): os.makedirs(path) for (name, content) in files.iteritems(): diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 28e0a472..62fc5358 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -1,7 +1,7 @@ from cloudinit import helpers -from tests.unittests.helpers import populate_dir from cloudinit.sources import DataSourceNoCloud from cloudinit import util +from tests.unittests.helpers import populate_dir from mocker import MockerTestCase import os diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 74c254e0..325244f2 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -1,7 +1,6 @@ from mocker import MockerTestCase from cloudinit import cloud -from cloudinit import helpers from cloudinit import util from cloudinit.config import cc_growpart @@ -9,9 +8,7 @@ from cloudinit.config import cc_growpart import errno import logging import os -import mocker import re -import stat # growpart: # mode: auto # off, on, auto, 'growpart', 'parted' @@ -85,6 +82,7 @@ growpart disk partition Resize partition 1 on /dev/sda """ + class TestDisabled(MockerTestCase): def setUp(self): super(TestDisabled, self).setUp() @@ -106,6 +104,7 @@ class TestDisabled(MockerTestCase): self.handle(self.name, config, self.cloud_init, self.log, self.args) + class TestConfig(MockerTestCase): def setUp(self): super(TestConfig, self).setUp() @@ -125,9 +124,9 @@ class TestConfig(MockerTestCase): def test_no_resizers_auto_is_fine(self): subp = self.mocker.replace(util.subp, passthrough=False) subp(['parted', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_PARTED_NO_RESIZE,"")) + self.mocker.result((HELP_PARTED_NO_RESIZE, "")) subp(['growpart', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_GROWPART_NO_RESIZE,"")) + self.mocker.result((HELP_GROWPART_NO_RESIZE, "")) self.mocker.replay() config = {'growpart': {'mode': 'auto'}} @@ -136,7 +135,7 @@ class TestConfig(MockerTestCase): def test_no_resizers_mode_growpart_is_exception(self): subp = self.mocker.replace(util.subp, passthrough=False) subp(['growpart', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_GROWPART_NO_RESIZE,"")) + self.mocker.result((HELP_GROWPART_NO_RESIZE, "")) self.mocker.replay() config = {'growpart': {'mode': "growpart"}} @@ -146,7 +145,7 @@ class TestConfig(MockerTestCase): def test_mode_auto_prefers_parted(self): subp = self.mocker.replace(util.subp, passthrough=False) 
subp(['parted', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_PARTED_RESIZE,"")) + self.mocker.result((HELP_PARTED_RESIZE, "")) self.mocker.replay() ret = cc_growpart.resizer_factory(mode="auto") @@ -173,7 +172,7 @@ class TestConfig(MockerTestCase): self.handle(self.name, {}, self.cloud_init, self.log, self.args) finally: cc_growpart.RESIZERS = orig_resizers - + class TestResize(MockerTestCase): def setUp(self): @@ -196,7 +195,7 @@ class TestResize(MockerTestCase): real_stat = os.stat resize_calls = [] - class myresizer(): + class myresizer(object): def resize(self, diskdev, partnum, partdev): resize_calls.append((diskdev, partnum, partdev)) if partdev == "/dev/YYda2": @@ -224,7 +223,7 @@ class TestResize(MockerTestCase): if f[0] == name: return f return None - + self.assertEqual(cc_growpart.RESIZE.NOCHANGE, find("/dev/XXda1", resized)[1]) self.assertEqual(cc_growpart.RESIZE.CHANGED, @@ -244,7 +243,8 @@ def simple_device_part_info(devpath): ret = re.search("([^0-9]*)([0-9]*)$", devpath) x = (ret.group(1), ret.group(2)) return x - + + class Bunch: def __init__(self, **kwds): self.__dict__.update(kwds) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 2415d06f..d8662cac 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -1,5 +1,5 @@ -from unittest import TestCase from cloudinit import ssh_util +from unittest import TestCase VALID_CONTENT = { @@ -34,6 +34,7 @@ TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding," 'command="echo \'Please login as the user \"ubuntu\" rather than the' 'user \"root\".\';echo;sleep 10"') + class TestAuthKeyLineParser(TestCase): def test_simple_parse(self): # test key line with common 3 fields (keytype, base64, comment) @@ -61,7 +62,7 @@ class TestAuthKeyLineParser(TestCase): self.assertFalse(key.options) self.assertFalse(key.comment) self.assertEqual(key.keytype, ktype) - + def test_parse_with_keyoptions(self): # test key line with options in it parser = ssh_util.AuthKeyLineParser() -- cgit v1.2.3 From 6586b35f348ba089bba00e6bebb4ca1b14f41a19 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 15:30:38 -0500 Subject: allow customization of apt-get command, add --force-unsafe-io This allows the customization of the apt-get command used for installing packages, and also adds '--force-unsafe-io'. Because this is spawned from cloud-init, it seems to make sense as a first boot package installation option. --- ChangeLog | 1 + cloudinit/distros/debian.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 5ff305a1..d035a7a3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -47,6 +47,7 @@ - upstart: cloud-init-nonet.conf trap the TERM signal, so that dmesg or other output does not get a 'killed by TERM signal' message. 
- support resizing partitions via growpart or parted (LP: #1136936) + - allow specifying apt-get command in distro config ('apt_get_command') 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 1f2848d2..4b779d57 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -33,6 +33,10 @@ from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) +APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold', + '--option=Dpkg::options::=--force-unsafe-io', + '--assume-yes', '--quiet') + class Distro(distros.Distro): hostname_conf_fn = "/etc/hostname" @@ -150,8 +154,7 @@ class Distro(distros.Distro): # See: http://tiny.cc/kg91fw # Or: http://tiny.cc/mh91fw e['DEBIAN_FRONTEND'] = 'noninteractive' - cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confold', - '--assume-yes', '--quiet'] + cmd = list(self.get_option("apt_get_command", APT_GET_COMMAND)) if args and isinstance(args, str): cmd.append(args) -- cgit v1.2.3 From be8953bf9a27462adb5ce0c5ef6485f0cee47b48 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 15:54:59 -0500 Subject: fix a pylint complaint in test_handler_growpart E1103: 81,44:TestWriteFile.test_basic_usage: Instance of 'Bunch' has no 'st_mode' member (but some types could not be inferred) so, if it wants st_mode, for now just give it one. --- tests/unittests/test_handler/test_handler_growpart.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 325244f2..5df93570 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -246,6 +246,7 @@ def simple_device_part_info(devpath): class Bunch: + st_mode = None # fix pylint complaint def __init__(self, **kwds): self.__dict__.update(kwds) -- cgit v1.2.3 From 8013c284e82349246b2274f5475c138323fd7c55 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 16:00:35 -0500 Subject: pep8 --- tests/unittests/test_handler/test_handler_growpart.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 5df93570..b1b872b0 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -247,6 +247,7 @@ def simple_device_part_info(devpath): class Bunch: st_mode = None # fix pylint complaint + def __init__(self, **kwds): self.__dict__.update(kwds) -- cgit v1.2.3 From 9a771ec66f4e79bcd30f7cad7ef4b67e9cc7512d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 16:28:09 -0500 Subject: change default merge type the default merge type here was appending to strings and extending lists. Instead we want the same default that cloud-init had previously, which was to overwrite lists and strings. 
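
A minimal sketch of the difference (illustrative only, using the mergers API
introduced earlier in this series):

    from cloudinit import mergers

    old = mergers.construct(
        mergers.string_extract_mergers("list(extend)+dict()+str(append)"))
    new = mergers.construct(
        mergers.string_extract_mergers("list()+dict()+str()"))

    a = {'run_cmd': ['bash1']}
    b = {'run_cmd': ['bash2']}

    # Old default: 'list(extend)' appends the incoming items, giving
    # {'run_cmd': ['bash1', 'bash2']}.
    old.merge(a, b)

    # New default: a plain 'list()' no longer extends, so conflicting
    # lists (and strings) are not combined, restoring the behavior
    # cloud-init had before the new merging algorithms.
    new.merge(a, b)
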
--- cloudinit/mergers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 45e88fb3..3b56686f 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -25,7 +25,7 @@ from cloudinit import type_utils NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$") LOG = logging.getLogger(__name__) -DEF_MERGE_TYPE = "list(extend)+dict()+str(append)" +DEF_MERGE_TYPE = "list()+dict()+str()" class UnknownMerger(object): -- cgit v1.2.3 From aae7fe638f61aaf02c6579d5b691a8641455c875 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 16:47:54 -0500 Subject: fix pep8 and pylint --- cloudinit/config/cc_landscape.py | 3 ++- cloudinit/distros/__init__.py | 4 ++-- cloudinit/mergers/__init__.py | 5 +++-- cloudinit/mergers/str.py | 2 +- cloudinit/sources/DataSourceNone.py | 1 - tests/unittests/test__init__.py | 2 -- tests/unittests/test_merging.py | 8 ++++---- tests/unittests/test_userdata.py | 39 +++++++++++++++++++++---------------- 8 files changed, 34 insertions(+), 30 deletions(-) diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 47c10a97..8a709677 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -59,7 +59,8 @@ def handle(_name, cfg, cloud, log, _args): if not isinstance(ls_cloudcfg, (dict)): raise RuntimeError(("'landscape' key existed in config," " but not a dictionary type," - " is a %s instead"), type_utils.obj_name(ls_cloudcfg)) + " is a %s instead"), + type_utils.obj_name(ls_cloudcfg)) if not ls_cloudcfg: return diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 7b6276c5..50d52594 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -741,7 +741,7 @@ def normalize_users_groups(cfg, distro): } if not isinstance(old_user, (dict)): LOG.warn(("Format for 'user' key must be a string or " - "dictionary and not %s"), util.obj_name(old_user)) + "dictionary and not %s"), type_utils.obj_name(old_user)) old_user = {} # If no old user format, then assume the distro @@ -767,7 +767,7 @@ def normalize_users_groups(cfg, distro): if not isinstance(base_users, (list, dict, str, basestring)): LOG.warn(("Format for 'users' key must be a comma separated string" " or a dictionary or a list and not %s"), - util.obj_name(base_users)) + type_utils.obj_name(base_users)) base_users = [] if old_user: diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 3b56686f..ac16f143 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -32,7 +32,7 @@ class UnknownMerger(object): # Named differently so auto-method finding # doesn't pick this up if there is ever a type # named "unknown" - def _handle_unknown(self, meth_wanted, value, merge_with): + def _handle_unknown(self, _meth_wanted, value, _merge_with): return value # This merging will attempt to look for a '_on_X' method @@ -119,7 +119,8 @@ def string_extract_mergers(merge_how): continue match = NAME_MTCH.match(m_name) if not match: - msg = "Matcher identifer '%s' is not in the right format" % (m_name) + msg = ("Matcher identifer '%s' is not in the right format" % + (m_name)) raise ValueError(msg) (m_name, m_ops) = match.groups() m_ops = m_ops.strip().split(",") diff --git a/cloudinit/mergers/str.py b/cloudinit/mergers/str.py index f1534c5b..291c91c2 100644 --- a/cloudinit/mergers/str.py +++ b/cloudinit/mergers/str.py @@ -18,7 +18,7 @@ class Merger(object): - def 
__init__(self, merger, opts): + def __init__(self, _merger, opts): self._append = 'append' in opts # On encountering a unicode object to merge value with diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index e2175e1f..12a8a992 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -18,7 +18,6 @@ from cloudinit import log as logging from cloudinit import sources -from cloudinit import util LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 7924755a..56ccbcfb 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -24,8 +24,6 @@ class FakeModule(handlers.Handler): def handle_part(self, data, ctype, filename, payload, frequency): pass - - class TestWalkerHandleHandler(MockerTestCase): diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index fa7ee8e4..591a99c8 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -1,5 +1,3 @@ -import os - from tests.unittests import helpers from cloudinit import mergers @@ -107,8 +105,10 @@ class TestSimpleRun(helpers.MockerTestCase): self.assertEquals(merged['a'], [1, 'b', 2, 'e', 'f', 'g']) self.assertEquals(merged['b'], 'blahblahmore') self.assertEquals(merged['c']['f'], 'bigblobofstuff') - self.assertEquals(merged['run'], ['runme', 'runme2', 'morecmd', 'moremoremore']) - self.assertEquals(merged['runmereally'], ['e', ['a'], 'd', 'blah', ['b'], 'e']) + self.assertEquals(merged['run'], ['runme', 'runme2', 'morecmd', + 'moremoremore']) + self.assertEquals(merged['runmereally'], ['e', ['a'], 'd', 'blah', + ['b'], 'e']) def test_dict_overwrite_layered(self): source = { diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py index ef0dd7b8..48ad9c5f 100644 --- a/tests/unittests/test_userdata.py +++ b/tests/unittests/test_userdata.py @@ -7,8 +7,6 @@ import os from email.mime.base import MIMEBase -from mocker import MockerTestCase - from cloudinit import handlers from cloudinit import helpers as c_helpers from cloudinit import log @@ -97,14 +95,16 @@ p: 1 new_root = self.makeDir() self.patchUtils(new_root) self.patchOS(new_root) - cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None, None) + cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None, + None) for i, m in enumerate(messages): headers = dict(m) fn = "part-%s" % (i + 1) payload = m.get_payload(decode=True) cloud_cfg.handle_part(None, headers['Content-Type'], fn, payload, None, headers) - cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None, None) + cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None, + None) contents = util.load_file(paths.get_ipath('cloud_config')) contents = util.load_yaml(contents) self.assertEquals(contents['run'], ['b', 'c', 'stuff', 'morestuff']) @@ -118,8 +118,9 @@ p: 1 data = "arbitrary text\n" ci.datasource = FakeDataSource(data) - self.mock_write = self.mocker.replace("cloudinit.util.write_file", passthrough=False) - self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) + mock_write = self.mocker.replace("cloudinit.util.write_file", + passthrough=False) + mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mocker.replay() log_file = self.capture_log(logging.WARNING) @@ -136,8 +137,9 @@ p: 1 message.set_payload("Just text") ci.datasource = FakeDataSource(message.as_string()) - self.mock_write = self.mocker.replace("cloudinit.util.write_file", 
passthrough=False) - self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) + mock_write = self.mocker.replace("cloudinit.util.write_file", + passthrough=False) + mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mocker.replay() log_file = self.capture_log(logging.WARNING) @@ -154,9 +156,10 @@ p: 1 ci.datasource = FakeDataSource(script) outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") - self.mock_write = self.mocker.replace("cloudinit.util.write_file", passthrough=False) - self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) - self.mock_write(outpath, script, 0700) + mock_write = self.mocker.replace("cloudinit.util.write_file", + passthrough=False) + mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) + mock_write(outpath, script, 0700) self.mocker.replay() log_file = self.capture_log(logging.WARNING) @@ -173,9 +176,10 @@ p: 1 ci.datasource = FakeDataSource(message.as_string()) outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") - self.mock_write = self.mocker.replace("cloudinit.util.write_file", passthrough=False) - self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) - self.mock_write(outpath, script, 0700) + mock_write = self.mocker.replace("cloudinit.util.write_file", + passthrough=False) + mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) + mock_write(outpath, script, 0700) self.mocker.replay() log_file = self.capture_log(logging.WARNING) @@ -192,9 +196,10 @@ p: 1 ci.datasource = FakeDataSource(message.as_string()) outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") - self.mock_write = self.mocker.replace("cloudinit.util.write_file", passthrough=False) - self.mock_write(outpath, script, 0700) - self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) + mock_write = self.mocker.replace("cloudinit.util.write_file", + passthrough=False) + mock_write(outpath, script, 0700) + mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mocker.replay() log_file = self.capture_log(logging.WARNING) -- cgit v1.2.3 From 5da3984c2ca9e94b2483ab89ecdb5c93b5afb9f8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 17:13:05 -0500 Subject: more pep8/pylint. 
all clean now --- cloudinit/handlers/boot_hook.py | 3 ++- cloudinit/handlers/cloud_config.py | 7 ++++--- cloudinit/handlers/shell_script.py | 3 ++- cloudinit/handlers/upstart_job.py | 3 ++- cloudinit/mergers/__init__.py | 2 -- tests/unittests/test__init__.py | 3 ++- tests/unittests/test_merging.py | 1 - tests/unittests/test_userdata.py | 6 +++--- 8 files changed, 15 insertions(+), 13 deletions(-) diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py index bf313f10..bf2899ab 100644 --- a/cloudinit/handlers/boot_hook.py +++ b/cloudinit/handlers/boot_hook.py @@ -56,7 +56,8 @@ class BootHookPartHandler(handlers.Handler): util.write_file(filepath, contents, 0700) return filepath - def handle_part(self, _data, ctype, filename, payload, _frequency): + def handle_part(self, _data, ctype, filename, # pylint: disable=W0221 + payload, frequency): # pylint: disable=W0613 if ctype in handlers.CONTENT_SIGNALS: return diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 5f519f78..d30d6338 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -46,7 +46,7 @@ class CloudConfigPartHandler(handlers.Handler): handlers.type_from_starts_with("#cloud-config"), ] - def _write_cloud_config(self, buf): + def _write_cloud_config(self): if not self.cloud_fn: return # Capture which files we merged from... @@ -107,12 +107,13 @@ class CloudConfigPartHandler(handlers.Handler): self.cloud_buf = None self.mergers = [DEF_MERGERS] - def handle_part(self, _data, ctype, filename, payload, _freq, headers): + def handle_part(self, _data, ctype, filename, # pylint: disable=W0221 + payload, _frequency, headers): # pylint: disable=W0613 if ctype == handlers.CONTENT_START: self._reset() return if ctype == handlers.CONTENT_END: - self._write_cloud_config(self.cloud_buf) + self._write_cloud_config() self._reset() return try: diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py index 2a87e8dd..b185c374 100644 --- a/cloudinit/handlers/shell_script.py +++ b/cloudinit/handlers/shell_script.py @@ -41,7 +41,8 @@ class ShellScriptPartHandler(handlers.Handler): handlers.type_from_starts_with("#!"), ] - def handle_part(self, _data, ctype, filename, payload, _frequency): + def handle_part(self, _data, ctype, filename, # pylint: disable=W0221 + payload, frequency): # pylint: disable=W0613 if ctype in handlers.CONTENT_SIGNALS: # TODO(harlowja): maybe delete existing things here return diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 3d8833a1..edd56527 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -42,7 +42,8 @@ class UpstartJobPartHandler(handlers.Handler): handlers.type_from_starts_with("#upstart-job"), ] - def handle_part(self, _data, ctype, filename, payload, frequency): + def handle_part(self, _data, ctype, filename, # pylint: disable=W0221 + payload, frequency): if ctype in handlers.CONTENT_SIGNALS: return diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index ac16f143..e1ff57ba 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -152,5 +152,3 @@ def construct(parsed_mergers): for (attr, opts) in mergers_to_be: mergers.append(attr(root, opts)) return root - - diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 56ccbcfb..2c0abfbc 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -22,7 +22,8 @@ class 
FakeModule(handlers.Handler):
     def list_types(self):
         return self.types
 
-    def handle_part(self, data, ctype, filename, payload, frequency):
+    def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221
+                    payload, frequency):
         pass

diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index 591a99c8..ad137e85 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -140,4 +140,3 @@ class TestSimpleRun(helpers.MockerTestCase):
                 'e': 'f',
             }
         })
-
diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py
index 48ad9c5f..fdfe2542 100644
--- a/tests/unittests/test_userdata.py
+++ b/tests/unittests/test_userdata.py
@@ -55,7 +55,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
 #cloud-config
 a: b
 e: f
-run: 
+run:
  - b
  - c
 '''
@@ -67,7 +67,7 @@ run:
 #cloud-config
 a: e
 e: g
-run: 
+run:
  - stuff
  - morestuff
 '''
@@ -77,7 +77,7 @@ run:
 
 blob3 = '''
 #cloud-config
-e: 
+e:
  - 1
  - 2
  - 3
--
cgit v1.2.3


From 73cba9d1f841020b0ee1304f204923d994dd5363 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 8 Mar 2013 15:57:05 -0800
Subject: Enable the merging.txt to be in .rst format for public viewing

---
 doc/merging.rst                          | 188 +++++++++++++++++++++++++++++++
 doc/merging.txt                          | 179 -----------------------------
 doc/rtd/index.rst                        |   1 +
 doc/rtd/topics/merging.rst               |   5 +
 tests/unittests/helpers.py               |   1 +
 tests/unittests/test_builtin_handlers.py |  24 ++--
 6 files changed, 209 insertions(+), 189 deletions(-)
 create mode 100644 doc/merging.rst
 delete mode 100644 doc/merging.txt
 create mode 100644 doc/rtd/topics/merging.rst

diff --git a/doc/merging.rst b/doc/merging.rst
new file mode 100644
index 00000000..6344facd
--- /dev/null
+++ b/doc/merging.rst
@@ -0,0 +1,188 @@
+Overview
+--------
+
+This was done because it has been a common feature request that there be a
+way to specify how cloud-config yaml "dictionaries" are merged together when
+there are multiple yamls to merge together (say when performing an #include).
+
+Since the previous merging algorithm was very simple and would only overwrite
+(and never append to) lists, strings, and so on, it was decided to create a
+new and improved way to merge dictionaries (and their contained objects)
+together in a way that is customizable, thus allowing users who provide
+cloud-config data to determine exactly how their objects will be merged.
+
+For example:
+
+.. code-block:: yaml
+
+    #cloud-config (1)
+    run_cmd:
+     - bash1
+     - bash2
+
+    #cloud-config (2)
+    run_cmd:
+     - bash3
+     - bash4
+
+The previous way of merging the above 2 objects would result in a final
+cloud-config object that contains the following:
+
+.. code-block:: yaml
+
+    #cloud-config (merged)
+    run_cmd:
+     - bash3
+     - bash4
+
+Typically this is not what users want; instead they would likely prefer:
+
+.. code-block:: yaml
+
+    #cloud-config (merged)
+    run_cmd:
+     - bash1
+     - bash2
+     - bash3
+     - bash4
+
+This way makes it easier to combine the various cloud-config objects you have
+into a more useful list, thus reducing duplication that would have had to
+occur in the previous method to accomplish the same result.
+
+Customizability
+---------------
+
+Since the above merging algorithm may not always be the desired merging
+algorithm (just as the previous merging algorithm was not always the preferred
+one) the concept of customizing how merging can be done was introduced through
+a new concept called 'merge classes'.
+
+A merge class is a class definition which provides functions that can be used
+to merge a given type with another given type.
+
+An example of one of these merging classes is the following:
+
+.. code-block:: python
+
+    class Merger(object):
+        def __init__(self, merger, opts):
+            self._merger = merger
+            self._overwrite = 'overwrite' in opts
+
+        # This merging algorithm will attempt to merge with
+        # another dictionary; on encountering any other type of object
+        # it will not merge with said object, but will instead return
+        # the original value.
+        #
+        # On encountering a dictionary, it will create a new dictionary
+        # composed of the original and the one to merge with. If 'overwrite'
+        # is enabled then keys that exist in the original will be overwritten
+        # by keys in the one to merge with (and associated values). Otherwise,
+        # if not in overwrite mode, the 2 conflicting keys themselves will
+        # be merged.
+        def _on_dict(self, value, merge_with):
+            if not isinstance(merge_with, (dict)):
+                return value
+            merged = dict(value)
+            for (k, v) in merge_with.items():
+                if k in merged:
+                    if not self._overwrite:
+                        merged[k] = self._merger.merge(merged[k], v)
+                    else:
+                        merged[k] = v
+                else:
+                    merged[k] = v
+            return merged
+
+As you can see there is a '_on_dict' method here that will be given a source value
+and a value to merge with. The result will be the merged object. This code itself
+is called by another merging class which 'directs' the merging to happen by
+analyzing the types of the objects to merge and attempting to find a known object
+that will merge that type. I will avoid pasting that here, but it can be found
+in the `mergers/__init__.py` file (see `LookupMerger` and `UnknownMerger`).
+
+So following the typical cloud-init way of allowing source code to be downloaded
+and used dynamically, it is possible for users to inject their own merging files
+to handle specific types of merging as they choose (the basic ones included will
+handle lists, dicts, and strings). Note how each merger can have options associated
+with it which affect how the merging is performed; for example, a dictionary merger
+can be told to overwrite instead of attempt to merge, or a string merger can be
+told to append strings instead of discarding other strings to merge with.
+
+How to activate
+---------------
+
+There are a few ways to activate the merging algorithms, and to customize them
+for your own usage.
+
+1. The first way involves the usage of MIME messages in cloud-init to specify
+   multipart documents (this is one way in which multiple cloud-configs are joined
+   together into a single cloud-config). Two new headers are looked for, both
+   of which can define the way merging is done (the first header to exist wins).
+   These new headers (in lookup order) are 'Merge-Type' and 'X-Merge-Type'. The value
+   should be a string which will satisfy the new merging format definition (see
+   below for this format).
+2. The second way is actually specifying the merge-type in the body of the
+   cloud-config dictionary. There are 2 ways to specify this, either as a string
+   or as a dictionary (see format below). The keys that are looked up for this
+   definition are the following (in order): 'merge_how', 'merge_type'.
+
+String format
+*************
+
+The string format that is expected is the following:
+
+::
+
+    classname1(option1,option2)+classname2(option3,option4)....
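+
+For example, here is an illustrative sketch (using the in-tree ``mergers``
+module) of how such a string is parsed and then used:
+
+.. code-block:: python
+
+    from cloudinit import mergers
+
+    # Parse the string into (name, options) pairs and construct the
+    # chain of merger classes that it describes.
+    parsed = mergers.string_extract_mergers("list(extend)+dict()+str(append)")
+    merger = mergers.construct(parsed)
+
+    # 'list(extend)' appends the incoming list items instead of
+    # overwriting them.
+    merged = merger.merge({'run_cmd': ['bash1']}, {'run_cmd': ['bash2']})
+    # merged == {'run_cmd': ['bash1', 'bash2']}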
+
+The class name there will be connected to class names used when looking for
+the class that can be used to merge, and options provided will be given to
+the class on construction of that class.
+
+For example, the default string that is used when none is provided is the
+following:
+
+::
+
+    list(extend)+dict()+str(append)
+
+Dictionary format
+*****************
+
+A dictionary can be used to specify the same information as the string
+format (i.e. option #2 above), for example:
+
+.. code-block:: python
+
+    {'merge_how': [{'name': 'list', 'settings': ['extend']},
+                   {'name': 'dict', 'settings': []},
+                   {'name': 'str', 'settings': ['append']}]}
+
+This would be the equivalent of the default string format, but in dictionary
+form instead of string form.
+
+Specifying multiple types and its effect
+----------------------------------------
+
+Now you may be asking yourself, if I specify a merge-type header or
+dictionary for every cloud-config that I provide, what exactly happens?
+
+The answer is that when merging, a stack of 'merging classes' is kept. The
+first set on that stack is the default merging classes; this set of mergers
+will be used when the first cloud-config is merged with the initial empty
+cloud-config dictionary. If the cloud-config that was just merged provided a
+set of merging classes (via the above formats) then those merging classes
+will be pushed onto the stack. Now if there is a second cloud-config to be
+merged then the merging classes from the preceding cloud-config will be used
+(not the default) and so on. This way a cloud-config can decide how it will
+merge with a cloud-config dictionary coming after it.
+
+Other uses
+----------
+
+The default merging algorithm for merging 'conf.d' yaml files (which form an
+initial yaml config for cloud-init) was also changed to use this mechanism,
+so its full benefits (and customization) can be used there as well. Other
+places that used the previous merging are now similarly extensible (metadata
+merging, for example).
diff --git a/doc/merging.txt b/doc/merging.txt
deleted file mode 100644
index f719aec8..00000000
--- a/doc/merging.txt
+++ /dev/null
@@ -1,179 +0,0 @@
-Arriving in 0.7.2 is a new way to handle dictionary merging in cloud-init.
----
-
-Overview
---------
-
-This was done because it has been a common feature request that there be a
-way to specify how cloud-config yaml "dictionaries" are merged together when
-there are multiple yamls to merge together (say when performing an #include).
-
-Since previously the merging algorithm was very simple and would only overwrite
-and not append lists, or strings, and so on it was decided to create a new and
-improved way to merge dictionaries (and there contained objects) together in a
-way that is customizable, thus allowing for users who provide cloud-config data
-to determine exactly how there objects will be merged.
-
-For example.
-
-#cloud-config (1)
-run_cmd:
- - bash1
- - bash2
-
-#cloud-config (2)
-run_cmd:
- - bash3
- - bash4
-
-The previous way of merging the following 2 objects would result in a final
-cloud-config object that contains the following.
- -#cloud-config (merged) -run_cmd: - - bash3 - - bash4 - -Typically this is not what users want, instead they would likely prefer: - -#cloud-config (merged) -run_cmd: - - bash1 - - bash2 - - bash3 - - bash4 - -This way makes it easier to combine the various cloud-config objects you have -into a more useful list, thus reducing duplication that would have had to -occur in the previous method to accomplish the same result. - -Customizability ---------------- - -Since the above merging algorithm may not always be the desired merging -algorithm (like how the merging algorithm in < 0.7.2 was not always the preferred -one) the concept of customizing how merging can be done was introduced through -a new concept call 'merge classes'. - -A merge class is a class defintion which provides functions that can be used -to merge a given type with another given type. - -An example of one of these merging classes is the following: - -class Merger(object): - def __init__(self, merger, opts): - self._merger = merger - self._overwrite = 'overwrite' in opts - - # This merging algorithm will attempt to merge with - # another dictionary, on encountering any other type of object - # it will not merge with said object, but will instead return - # the original value - # - # On encountering a dictionary, it will create a new dictionary - # composed of the original and the one to merge with, if 'overwrite' - # is enabled then keys that exist in the original will be overwritten - # by keys in the one to merge with (and associated values). Otherwise - # if not in overwrite mode the 2 conflicting keys themselves will - # be merged. - def _on_dict(self, value, merge_with): - if not isinstance(merge_with, (dict)): - return value - merged = dict(value) - for (k, v) in merge_with.items(): - if k in merged: - if not self._overwrite: - merged[k] = self._merger.merge(merged[k], v) - else: - merged[k] = v - else: - merged[k] = v - return merged - -As you can see there is a '_on_dict' method here that will be given a source value -and a value to merge with. The result will be the merged object. This code itself -is called by another merging class which 'directs' the merging to happen by -analyzing the types of the objects to merge and attempting to find a know object -that will merge that type. I will avoid pasting that here, but it can be found -in the mergers/__init__.py file (see LookupMerger and UnknownMerger). - -So following the typical cloud-init way of allowing source code to be downloaded -and used dynamically, it is possible for users to inject there own merging files -to handle specific types of merging as they choose (the basic ones included will -handle lists, dicts, and strings). Note how each merge can have options associated -with it which affect how the merging is performed, for example a dictionary merger -can be told to overwrite instead of attempt to merge, or a string merger can be -told to append strings instead of discarding other strings to merge with. - -How to activate ---------------- - -There are a few ways to activate the merging algorithms, and to customize them -for your own usage. - -1. The first way involves the usage of MIME messages in cloud-init to specify - multipart documents (this is one way in which multiple cloud-config is joined - together into a single cloud-config). Two new headers are looked for, both - of which can define the way merging is done (the first header to exist wins). - These new headers (in lookup order) are 'Merge-Type' and 'X-Merge-Type'. 
The value - should be a string which will satisfy the new merging format defintion (see - below for this format). -2. The second way is actually specifying the merge-type in the body of the - cloud-config dictionary. There are 2 ways to specify this, either as a string - or as a dictionary (see format below). The keys that are looked up for this - definition are the following (in order), 'merge_how', 'merge_type'. - -*String format* - -The string format that is expected is the following. - -"classname(option1,option2)+classname2(option3,option4)" (and so on) - -The class name there will be connected to class names used when looking for the -class that can be used to merge and options provided will be given to the class -on construction of that class. - -For example, the default string that is used when none is provided is the following: - -"list(extend)+dict()+str(append)" - -*Dictionary format* - -In cases where a dictionary can be used to specify the same information as the -string format (ie option #2 of above) it can be used, for example. - -merge_how: - - name: list - settings: [extend] - - name: dict - settings: [] - - name: str - settings: [append] - -This would be the equivalent format for default string format but in dictionary -form instead of string form. - -Specifying multiple types and its effect ----------------------------------------- - -Now you may be asking yourself, if I specify a merge-type header or dictionary -for every cloud-config that I provide, what exactly happens? - -The answer is that when merging, a stack of 'merging classes' is kept, the -first one on that stack is the default merging classes, this set of mergers -will be used when the first cloud-config is merged with the initial empty -cloud-config dictionary. If the cloud-config that was just merged provided a -set of merging classes (via the above formats) then those merging classes will -be pushed onto the stack. Now if there is a second cloud-config to be merged then -the merging classes from the cloud-config before the first will be used (not the -default) and so on. This way a cloud-config can decide how it will merge with a -cloud-config dictionary coming after it. - -Other uses ----------- - -The default merging algorithm for merging conf.d yaml files (which form a initial -yaml config for cloud-init) was also changed to use this mechanism so its full -benefits (and customization) can also be used there as well. Other places that -used the previous merging are also similar now extensible (metadata merging for -example). diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index 619bb5dc..fe04b1a9 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -24,6 +24,7 @@ Summary topics/examples topics/datasources topics/modules + topics/merging topics/moreinfo topics/hacking diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst new file mode 100644 index 00000000..8a03f3c7 --- /dev/null +++ b/doc/rtd/topics/merging.rst @@ -0,0 +1,5 @@ +========= +Merging +========= + +.. 
include:: ../../merging.rst diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 904677f1..e020a3ec 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -175,6 +175,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def patchOS(self, new_root): patch_funcs = { os.path: ['isfile', 'exists', 'islink', 'isdir'], + os: ['listdir'], } for (mod, funcs) in patch_funcs.items(): for f in funcs: diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index da52f15b..dace486a 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -1,9 +1,8 @@ """Tests of the built-in user data handlers.""" import os -import unittest -from mocker import MockerTestCase +from tests.unittests import helpers as test_helpers from cloudinit import handlers from cloudinit import helpers @@ -14,7 +13,7 @@ from cloudinit.handlers import upstart_job from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE) -class TestBuiltins(MockerTestCase): +class TestBuiltins(test_helpers.FilesystemMockingTestCase): def test_upstart_frequency_no_out(self): c_root = self.makeDir() @@ -35,16 +34,20 @@ class TestBuiltins(MockerTestCase): None, None, None) self.assertEquals(0, len(os.listdir(up_root))) - @unittest.skip("until LP: #1124384 fixed") def test_upstart_frequency_single(self): # files should be written out when frequency is ! per-instance - c_root = self.makeDir() - up_root = self.makeDir() + new_root = self.makeDir() + freq = PER_INSTANCE + + self.patchOS(new_root) + self.patchUtils(new_root) paths = helpers.Paths({ - 'cloud_dir': c_root, - 'upstart_dir': up_root, + 'upstart_dir': "/etc/upstart", }) - freq = PER_INSTANCE + + util.ensure_dir("/run") + util.ensure_dir("/etc/upstart") + util.write_file("/run/cloud-init-upstart-reload", 'test') mock_subp = self.mocker.replace(util.subp, passthrough=False) mock_subp(["initctl", "reload-configuration"], capture=False) @@ -57,4 +60,5 @@ class TestBuiltins(MockerTestCase): 'test.conf', 'blah', freq) h.handle_part('', handlers.CONTENT_END, None, None, None) - self.assertEquals(1, len(os.listdir(up_root))) + + self.assertEquals(1, len(os.listdir('/etc/upstart'))) -- cgit v1.2.3 From eab08ade4bc56219e98bcc1d5568b75b6f4bb6ea Mon Sep 17 00:00:00 2001 From: Blair Zajac Date: Sun, 10 Mar 2013 19:43:54 -0700 Subject: Refactor util.get_mount_info() to facilitate unit testing. Refactor the parsing portion of util.get_mount_info() into a new util.parse_mount_info() method. Now util.get_mount_info() opens /proc/$$/mountinfo, splits on newlines and passes the lines to util.parse_mount_info(). --- cloudinit/util.py | 77 +++++++++++++++++++++++++++++++------------------------ 1 file changed, 43 insertions(+), 34 deletions(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index 709d5cca..0c30f771 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1576,42 +1576,16 @@ def expand_package_list(version_fmt, pkgs): return pkglist -def get_mount_info(path, log=LOG): - # Use /proc/$$/mountinfo to find the device where path is mounted. - # This is done because with a btrfs filesystem using os.stat(path) - # does not return the ID of the device. - # - # Here, / has a device of 18 (decimal). 
- # - # $ stat / - # File: '/' - # Size: 234 Blocks: 0 IO Block: 4096 directory - # Device: 12h/18d Inode: 256 Links: 1 - # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) - # Access: 2013-01-13 07:31:04.358011255 +0000 - # Modify: 2013-01-13 18:48:25.930011255 +0000 - # Change: 2013-01-13 18:48:25.930011255 +0000 - # Birth: - - # - # Find where / is mounted: - # - # $ mount | grep ' / ' - # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) - # - # And the device ID for /dev/vda1 is not 18: - # - # $ ls -l /dev/vda1 - # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 - # - # So use /proc/$$/mountinfo to find the device underlying the - # input path. +def parse_mount_info(path, mountinfo_lines, log=LOG): + """Return the mount information for PATH given the lines from + /proc/$$/mountinfo.""" + path_elements = [e for e in path.split('/') if e] devpth = None fs_type = None match_mount_point = None match_mount_point_elements = None - mountinfo_path = '/proc/%s/mountinfo' % os.getpid() - for line in load_file(mountinfo_path).splitlines(): + for i, line in enumerate(mountinfo_lines): parts = line.split() mount_point = parts[4] @@ -1638,8 +1612,8 @@ def get_mount_info(path, log=LOG): try: i = parts.index('-') except ValueError: - log.debug("Did not find column named '-' in %s", - mountinfo_path) + log.debug("Did not find column named '-' in line %d: %s", + i + 1, line) return None # Get the path to the device. @@ -1647,7 +1621,8 @@ def get_mount_info(path, log=LOG): fs_type = parts[i + 1] devpth = parts[i + 2] except IndexError: - log.debug("Too few columns in %s after '-' column", mountinfo_path) + log.debug("Too few columns after '-' column in line %d: %s", + i + 1, line) return None match_mount_point = mount_point @@ -1657,3 +1632,37 @@ def get_mount_info(path, log=LOG): return (devpth, fs_type, match_mount_point) else: return None + + +def get_mount_info(path, log=LOG): + # Use /proc/$$/mountinfo to find the device where path is mounted. + # This is done because with a btrfs filesystem using os.stat(path) + # does not return the ID of the device. + # + # Here, / has a device of 18 (decimal). + # + # $ stat / + # File: '/' + # Size: 234 Blocks: 0 IO Block: 4096 directory + # Device: 12h/18d Inode: 256 Links: 1 + # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) + # Access: 2013-01-13 07:31:04.358011255 +0000 + # Modify: 2013-01-13 18:48:25.930011255 +0000 + # Change: 2013-01-13 18:48:25.930011255 +0000 + # Birth: - + # + # Find where / is mounted: + # + # $ mount | grep ' / ' + # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) + # + # And the device ID for /dev/vda1 is not 18: + # + # $ ls -l /dev/vda1 + # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 + # + # So use /proc/$$/mountinfo to find the device underlying the + # input path. + mountinfo_path = '/proc/%s/mountinfo' % os.getpid() + lines = load_file(mountinfo_path).splitlines() + return parse_mount_info(path, lines, log) -- cgit v1.2.3 From 335aded5400d6eb019cd0ee68dac2b643398240c Mon Sep 17 00:00:00 2001 From: Blair Zajac Date: Sun, 10 Mar 2013 19:45:42 -0700 Subject: util.parse_mount_info(): handle short lines. 
---
 cloudinit/util.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 0c30f771..a1f6e004 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1588,6 +1588,17 @@ def parse_mount_info(path, mountinfo_lines, log=LOG):
     for i, line in enumerate(mountinfo_lines):
         parts = line.split()

+        # Completely fail if there is anything in any line that is
+        # unexpected, as continuing to parse past a bad line could
+        # cause an incorrect result to be returned, so it's better to
+        # return nothing than an incorrect result.
+
+        # The minimum number of elements in a valid line is 10.
+        if len(parts) < 10:
+            log.debug("Line %d has too few columns (%d): %s",
+                      i + 1, len(parts), line)
+            return None
+
         mount_point = parts[4]
         mount_point_elements = [e for e in mount_point.split('/') if e]
-- 
cgit v1.2.3


From 64c8f384ffb81f34357f2b917b08b7851c470552 Mon Sep 17 00:00:00 2001
From: Blair Zajac
Date: Sun, 10 Mar 2013 19:47:46 -0700
Subject: util.parse_mount_info(): add unit tests.

---
 tests/unittests/test_util.py | 95 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 95 insertions(+)

diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 02611581..7ff9a57f 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -248,4 +248,99 @@ class TestLoadYaml(TestCase):
                          myobj)


+class TestMountinfoParsing(TestCase):
+    precise_ext4_mountinfo = \
+"""15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
+16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
+17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=16422216k,nr_inodes=4105554,mode=755
+18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
+19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=6572812k,mode=755
+20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered
+21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs cgroup rw,mode=755
+22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
+23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
+25 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
+26 19 0:19 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
+27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
+28 19 0:21 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
+24 21 0:18 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset
+29 21 0:22 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu
+30 21 0:23 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct
+31 21 0:24 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory
+32 21 0:25 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices
+33 21 0:26 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer
+34 21 0:27 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio
+35 21 0:28 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event
+36 20 9:0 / /boot rw,relatime - ext4 /dev/md0 rw,data=ordered
+37 16 0:29 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw
+39 28 0:30 / /run/user/foobar/gvfs rw,nosuid,nodev,relatime - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000"""

+    raring_btrfs_mountinfo = \
+"""15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
+16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
+17 20 0:5 / /dev rw,relatime - devtmpfs udev 
rw,size=865556k,nr_inodes=216389,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=348196k,mode=755 +20 1 0:16 /@ / rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache +21 15 0:19 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +22 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +23 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +24 19 0:20 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +25 19 0:21 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +26 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +27 20 0:16 /@home /home rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache""" + + def test_invalid_mountinfo(self): + line = "20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered" + elements = line.split() + for i in range(len(elements) + 1): + lines = [' '.join(elements[0:i])] + if i < 10: + expected = None + else: + expected = ('/dev/mapper/vg0-root', 'ext4', '/') + self.assertEqual(expected, util.parse_mount_info('/', lines)) + + def test_precise_ext4_root(self): + lines = TestMountinfoParsing.precise_ext4_mountinfo.splitlines() + + expected = ('/dev/mapper/vg0-root', 'ext4', '/') + self.assertEqual(expected, util.parse_mount_info('/', lines)) + self.assertEqual(expected, util.parse_mount_info('/usr', lines)) + self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines)) + + expected = ('/dev/md0', 'ext4', '/boot') + self.assertEqual(expected, util.parse_mount_info('/boot', lines)) + self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines)) + + expected = ('/dev/mapper/vg0-root', 'ext4', '/') + self.assertEqual(expected, util.parse_mount_info('/home', lines)) + self.assertEqual(expected, util.parse_mount_info('/home/me', lines)) + + expected = ('tmpfs', 'tmpfs', '/run') + self.assertEqual(expected, util.parse_mount_info('/run', lines)) + + expected = ('none', 'tmpfs', '/run/lock') + self.assertEqual(expected, util.parse_mount_info('/run/lock', lines)) + + def test_raring_btrfs_root(self): + lines = TestMountinfoParsing.raring_btrfs_mountinfo.splitlines() + + expected = ('/dev/vda1', 'btrfs', '/') + self.assertEqual(expected, util.parse_mount_info('/', lines)) + self.assertEqual(expected, util.parse_mount_info('/usr', lines)) + self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines)) + self.assertEqual(expected, util.parse_mount_info('/boot', lines)) + self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines)) + + expected = ('/dev/vda1', 'btrfs', '/home') + self.assertEqual(expected, util.parse_mount_info('/home', lines)) + self.assertEqual(expected, util.parse_mount_info('/home/me', lines)) + + expected = ('tmpfs', 'tmpfs', '/run') + self.assertEqual(expected, util.parse_mount_info('/run', lines)) + + expected = ('none', 'tmpfs', '/run/lock') + self.assertEqual(expected, util.parse_mount_info('/run/lock', lines)) + # vi: ts=4 expandtab -- cgit v1.2.3 From cad31255aff2b3b7d0d640bf58649aeca43b7263 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 12 Mar 2013 08:56:28 -0400 Subject: skip unit test due to LP: #1124384 This re-applies the change in revno 785. A merge made this test pass rather than skip, but I'd rather have it skip for now, as we really hope to have the upstart bug fixed. 
---
 tests/unittests/test_builtin_handlers.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index dace486a..9cf28215 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -1,6 +1,7 @@
 """Tests of the built-in user data handlers."""

 import os
+import unittest

 from tests.unittests import helpers as test_helpers

@@ -34,6 +35,7 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase):
                       None, None, None)
         self.assertEquals(0, len(os.listdir(up_root)))

+    @unittest.skip("until LP: #1124384 fixed")
     def test_upstart_frequency_single(self):
         # files should be written out when frequency is ! per-instance
         new_root = self.makeDir()
@@ -47,7 +49,6 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase):

         util.ensure_dir("/run")
         util.ensure_dir("/etc/upstart")
-        util.write_file("/run/cloud-init-upstart-reload", 'test')

         mock_subp = self.mocker.replace(util.subp, passthrough=False)
         mock_subp(["initctl", "reload-configuration"], capture=False)
-- 
cgit v1.2.3


From ae0f94c8f39a234d73ab8e2caf24d73439c8b5ee Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 13 Mar 2013 10:43:40 -0400
Subject: fix / workaround potential for socket.getaddrinfo to raise socket.error

As reported in bug 1154599, I'm seeing this on my desktop system:

  $ python -c \
     'from cloudinit import util; print util.is_resolvable("brickies.neiit")'
  Traceback (most recent call last):
    File "<string>", line 1, in <module>
    File "cloudinit/util.py", line 865, in is_resolvable
      socket.SOCK_STREAM, socket.AI_CANONNAME)

LP: #1154599
---
 cloudinit/util.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cloudinit/util.py b/cloudinit/util.py
index a1f6e004..10297ca2 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -867,7 +867,7 @@ def is_resolvable(name):
             for (_fam, _stype, _proto, cname, sockaddr) in result:
                 badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
                 badips.add(sockaddr[0])
-        except socket.gaierror:
+        except (socket.gaierror, socket.error):
             pass
     _DNS_REDIRECT_IP = badips
     if badresults:
@@ -880,7 +880,7 @@ def is_resolvable(name):
         if addr in _DNS_REDIRECT_IP:
             return False
         return True
-    except socket.gaierror:
+    except (socket.gaierror, socket.error):
         return False
-- 
cgit v1.2.3


From 6ded151bd19d27cd03e22dbf2e98914b12504c78 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Wed, 13 Mar 2013 22:28:39 -0700
Subject: Update to handle requests >= 1.0 which doesn't use the config dict.
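
In effect the version gate in the patch below reduces to something like
this (a minimal sketch mirroring the patch, not the literal code):

    import pkg_resources
    from distutils.version import LooseVersion

    _ver = LooseVersion(pkg_resources.get_distribution('requests').version)
    # config support appeared in 0.7.0 but was removed again in the 1.0 rewrite
    CONFIG_ENABLED = LooseVersion('0.7.0') <= _ver < LooseVersion('1.0.0')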
--- cloudinit/url_helper.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 6f06761a..08e5f01b 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -34,7 +34,7 @@ LOG = logging.getLogger(__name__) # Check if requests has ssl support (added in requests >= 0.8.8) SSL_ENABLED = False -CONFIG_ENABLED = False # This was added in 0.7 +CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) try: import pkg_resources from distutils.version import LooseVersion @@ -42,7 +42,7 @@ try: _REQ_VER = LooseVersion(_REQ.version) if _REQ_VER >= LooseVersion('0.8.8'): SSL_ENABLED = True - if _REQ_VER >= LooseVersion('0.7.0'): + if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'): CONFIG_ENABLED = True except: pass @@ -129,8 +129,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, if data: req_args['method'] = 'POST' # It doesn't seem like config - # was added in older library versions, thus we - # need to manually do the retries if it wasn't + # was added in older library versions (or newer ones either), thus we + # need to manually do the retries if it wasn't... if CONFIG_ENABLED: req_config = { 'store_cookies': False, -- cgit v1.2.3 From 204e79b93c882e17df63b24f7f682c0dbefb482d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 13 Mar 2013 22:33:18 -0700 Subject: Fix how the http error doesn't always have the response attached in earlier versions of requests (pre 0.10.8). --- cloudinit/url_helper.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 08e5f01b..bfc5cfdd 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -170,7 +170,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # attrs return UrlResponse(r) except exceptions.RequestException as e: - if isinstance(e, (exceptions.HTTPError)) and e.response: + if (isinstance(e, (exceptions.HTTPError)) + and hasattr(e, 'response') # This appeared in v 0.10.8 + and e.response): excps.append(UrlError(e, code=e.response.status_code, headers=e.response.headers)) else: -- cgit v1.2.3 From f8318f8eec9c8f1c1676ce6a5b5c2c77fa2f7cc5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 19 Mar 2013 09:06:31 -0400 Subject: pylint fixes a.) appease pylint on raring, as it doesn't like subprocess pylint: 0.26.0-1ubuntu1 This is mentioned in comments at http://www.logilab.org/ticket/46273 b.) tests/unittests/test_util.py: the mountinfo lines are longer than 80 chars. Just disable long lines complaints for this file. 
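
A sketch of the defensive pattern this applies (illustrative, not the patch
itself): only trust e.response when the running requests version actually
attaches it.

    from requests import exceptions

    def error_details(e):
        # e.response is only reliably attached from requests 0.10.8 on,
        # so probe for it before touching status_code or headers.
        if (isinstance(e, exceptions.HTTPError)
                and hasattr(e, 'response') and e.response):
            return (e.response.status_code, e.response.headers)
        return (None, None)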
--- cloudinit/config/cc_power_state_change.py | 2 +- cloudinit/util.py | 6 +++--- setup.py | 5 +++-- tests/unittests/test_util.py | 2 ++ 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index de0c0bbd..188047e5 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -100,7 +100,7 @@ def execmd(exe_args, output=None, data_in=None): proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, stdout=output, stderr=subprocess.STDOUT) proc.communicate(data_in) - ret = proc.returncode + ret = proc.returncode # pylint: disable=E1101 except Exception: doexit(EXIT_FAIL) doexit(ret) diff --git a/cloudinit/util.py b/cloudinit/util.py index 10297ca2..636ed20e 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -462,7 +462,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): new_fp = open(arg, owith) elif mode == "|": proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) - new_fp = proc.stdin + new_fp = proc.stdin # pylint: disable=E1101 else: raise TypeError("Invalid type for output format: %s" % outfmt) @@ -484,7 +484,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): new_fp = open(arg, owith) elif mode == "|": proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) - new_fp = proc.stdin + new_fp = proc.stdin # pylint: disable=E1101 else: raise TypeError("Invalid type for error format: %s" % errfmt) @@ -1409,7 +1409,7 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, (out, err) = sp.communicate(data) except OSError as e: raise ProcessExecutionError(cmd=args, reason=e) - rc = sp.returncode + rc = sp.returncode # pylint: disable=E1101 if rc not in rcs: raise ProcessExecutionError(stdout=out, stderr=err, exit_code=rc, diff --git a/setup.py b/setup.py index b30cd53b..4aa1a47c 100755 --- a/setup.py +++ b/setup.py @@ -61,9 +61,10 @@ def tiny_p(cmd, capture=True): sp = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, stdin=None) (out, err) = sp.communicate() - if sp.returncode not in [0]: + ret = sp.returncode # pylint: disable=E1101 + if ret not in [0]: raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" - % (cmd, sp.returncode, out, err)) + % (cmd, ret, out, err)) return (out, err) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 7ff9a57f..5853cb0f 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -1,3 +1,5 @@ +# pylint: disable=C0301 +# the mountinfo data lines are too long import os import stat import yaml -- cgit v1.2.3 From 8fbe938228909e153afb88f189b269df60501510 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 19 Mar 2013 09:32:04 -0400 Subject: appease pylint and pep8 * cloudinit/distros/parsers/resolv_conf.py added some pylint overrides with 'plXXXXX' syntax. example: # pl51222 pylint: disable=E0102 The pl51222 there means: http://www.logilab.org/ticket/51222 This specific issue is present in 12.04 pylint, but not 13.04. * pylint doesn't like the requests special handling we have. which makes sense as it is only checking versus one specific version. * general pep8 and pylint cleanups. 
--- cloudinit/distros/parsers/resolv_conf.py | 4 ++-- cloudinit/ec2_utils.py | 1 + cloudinit/url_helper.py | 22 ++++++++++++---------- cloudinit/util.py | 4 ++-- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index 5733c25a..1be9d46b 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -137,8 +137,8 @@ class ResolvConf(object): self._contents.append(('option', ['search', s_list, ''])) return flat_sds - @local_domain.setter - def local_domain(self, domain): + @local_domain.setter # pl51222 pylint: disable=E1101 + def local_domain(self, domain): # pl51222 pylint: disable=E0102 self.parse() self._remove_option('domain') self._contents.append(('option', ['domain', str(domain), ''])) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 4812eccb..6b2754aa 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -145,6 +145,7 @@ def get_instance_userdata(api_version, metadata_address, ssl_details=None): util.logexc(LOG, "Failed fetching userdata from url %s", ud_url) return None + def get_instance_metadata(api_version, metadata_address, ssl_details=None): md_url = combine_url(metadata_address, api_version) md_url = combine_url(md_url, 'meta-data') diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index bfc5cfdd..de73cc84 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -34,12 +34,12 @@ LOG = logging.getLogger(__name__) # Check if requests has ssl support (added in requests >= 0.8.8) SSL_ENABLED = False -CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) +CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) try: - import pkg_resources from distutils.version import LooseVersion + import pkg_resources _REQ = pkg_resources.get_distribution('requests') - _REQ_VER = LooseVersion(_REQ.version) + _REQ_VER = LooseVersion(_REQ.version) # pylint: disable=E1103 if _REQ_VER >= LooseVersion('0.8.8'): SSL_ENABLED = True if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'): @@ -49,7 +49,7 @@ except: def _cleanurl(url): - parsed_url = list(urlparse(url, scheme='http')) + parsed_url = list(urlparse(url, scheme='http')) # pylint: disable=E1123 if not parsed_url[1] and parsed_url[2]: # Swap these since this seems to be a common # occurrence when given urls like 'www.google.com' @@ -108,7 +108,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, req_args = { 'url': url, } - if urlparse(url).scheme == 'https' and ssl_details: + scheme = urlparse(url).scheme # pylint: disable=E1101 + if scheme == 'https' and ssl_details: if not SSL_ENABLED: LOG.warn("SSL is not enabled, cert. 
verification can not occur!") else: @@ -121,7 +122,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, ssl_details['key_file']] elif 'cert_file' in ssl_details: req_args['cert'] = str(ssl_details['cert_file']) - + req_args['allow_redirects'] = allow_redirects req_args['method'] = 'GET' if timeout is not None: @@ -162,16 +163,17 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, try: r = requests.request(**req_args) if check_status: - r.raise_for_status() + r.raise_for_status() # pylint: disable=E1103 LOG.debug("Read from %s (%s, %sb) after %s attempts", url, - r.status_code, len(r.content), (i + 1)) + r.status_code, len(r.content), # pylint: disable=E1103 + (i + 1)) # Doesn't seem like we can make it use a different # subclass for responses, so add our own backward-compat # attrs return UrlResponse(r) except exceptions.RequestException as e: if (isinstance(e, (exceptions.HTTPError)) - and hasattr(e, 'response') # This appeared in v 0.10.8 + and hasattr(e, 'response') # This appeared in v 0.10.8 and e.response): excps.append(UrlError(e, code=e.response.status_code, headers=e.response.headers)) @@ -183,7 +185,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, time.sleep(sec_between) if excps: raise excps[-1] - return None # Should throw before this... + return None # Should throw before this... def wait_for_url(urls, max_wait=None, timeout=None, diff --git a/cloudinit/util.py b/cloudinit/util.py index 52b528ea..36e9b83b 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -51,8 +51,8 @@ from cloudinit import importer from cloudinit import log as logging from cloudinit import mergers from cloudinit import safeyaml -from cloudinit import url_helper from cloudinit import type_utils +from cloudinit import url_helper from cloudinit import version from cloudinit.settings import (CFG_BUILTIN) @@ -81,7 +81,7 @@ class StringResponse(object): self.contents = contents self.url = None - def ok(self, *args, **kwargs): + def ok(self, *args, **kwargs): # pylint: disable=W0613 if self.code != 200: return False return True -- cgit v1.2.3 From fc77e3f4bc9b0736246abd05bfca8dda04cff0eb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 19 Mar 2013 09:51:02 -0400 Subject: do not bother retrying on ssl errors if the error is an ssl error, its extremely unlikely that it would be fixed by waiting a few seconds and trying again. --- cloudinit/url_helper.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index de73cc84..ac6f25db 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -179,6 +179,10 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers=e.response.headers)) else: excps.append(UrlError(e)) + if SSL_ENABLED and isinstance(e, exceptions.SSLError): + # ssl exceptions are not going to get fixed by waiting a + # few seconds + break if i + 1 < manual_tries and sec_between > 0: LOG.debug("Please wait %s seconds while we wait to try again", sec_between) -- cgit v1.2.3 From 92b23e3d27623440b3b37ccb9d865b235a99f5f1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 19 Mar 2013 10:05:51 -0400 Subject: set 'allow_redirects' to True by default the previous implementation of url_helper.readurl() would default to allow_redirects being true. So, for backwards compat, we should keep that behavior. 
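
The effect on callers looks roughly like this (hypothetical snippet, the URL
is only an example): redirect following is once again opt-out rather than
opt-in.

    from cloudinit.url_helper import readurl

    # redirects are followed by default again, as the pre-requests readurl() did
    resp = readurl("http://169.254.169.254/latest/meta-data/")

    # callers that must not follow redirects now have to say so explicitly
    resp = readurl("http://169.254.169.254/latest/meta-data/",
                   allow_redirects=False)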
--- cloudinit/url_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index ac6f25db..6b4516e0 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -103,7 +103,7 @@ class UrlError(IOError): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers=None, ssl_details=None, check_status=True, - allow_redirects=False): + allow_redirects=True): url = _cleanurl(url) req_args = { 'url': url, -- cgit v1.2.3 From 0d325536ea06a8511da04f57260ab08e8b4786d9 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 19 Mar 2013 10:35:13 -0400 Subject: make get_instance_userdata and get_instance_metadata more like botos this shouldn't change anything, only the signatures of the methods. --- cloudinit/ec2_utils.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 6b2754aa..87644c40 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -135,8 +135,9 @@ class MetadataMaterializer(object): return joined -def get_instance_userdata(api_version, metadata_address, ssl_details=None): - ud_url = combine_url(metadata_address, api_version) +def get_instance_userdata(version='latest', url='http://169.254.169.254', + ssl_details=None): + ud_url = combine_url(url, version) ud_url = combine_url(ud_url, 'user-data') try: response = util.read_file_or_url(ud_url, ssl_details=ssl_details) @@ -146,11 +147,13 @@ def get_instance_userdata(api_version, metadata_address, ssl_details=None): return None -def get_instance_metadata(api_version, metadata_address, ssl_details=None): - md_url = combine_url(metadata_address, api_version) +def get_instance_metadata(version='latest', url='http://169.254.169.254', + ssl_details=None, timeout=5, num_retries=5) + md_url = combine_url(metadata_address, version) md_url = combine_url(md_url, 'meta-data') try: - response = util.read_file_or_url(md_url, ssl_details=ssl_details) + response = util.read_file_or_url(md_url, ssl_details=ssl_details, + timeout=timeout, retries=num_retries) materializer = MetadataMaterializer(str(response), md_url, ssl_details) return materializer.materialize() except Exception: -- cgit v1.2.3 From 5f3aaf5300825a3e586c9369aa4c1d917b448811 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 19 Mar 2013 11:12:59 -0400 Subject: fix typo --- cloudinit/ec2_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 87644c40..71c84206 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -148,7 +148,7 @@ def get_instance_userdata(version='latest', url='http://169.254.169.254', def get_instance_metadata(version='latest', url='http://169.254.169.254', - ssl_details=None, timeout=5, num_retries=5) + ssl_details=None, timeout=5, num_retries=5): md_url = combine_url(metadata_address, version) md_url = combine_url(md_url, 'meta-data') try: -- cgit v1.2.3 From 20a2d9961697fbd6ef0e74cd3770b6601b141bcd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 19 Mar 2013 15:53:16 -0700 Subject: Move back to using boto for now. --- Requires | 3 + cloudinit/ec2_utils.py | 183 +++++++----------------------- cloudinit/sources/DataSourceCloudStack.py | 7 +- cloudinit/sources/DataSourceEc2.py | 7 +- 4 files changed, 48 insertions(+), 152 deletions(-) diff --git a/Requires b/Requires index 0313d569..de51a4e4 100644 --- a/Requires +++ b/Requires @@ -24,3 +24,6 @@ argparse # Requests handles ssl correctly! 
requests + +# Boto for ec2 +boto diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 71c84206..29393ce1 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -16,146 +16,45 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from urlparse import (urlparse, urlunparse) +import boto.utils as boto_utils + +# Versions of boto >= 2.6.0 (and possibly 2.5.2) +# try to lazily load the metadata backing, which +# doesn't work so well in cloud-init especially +# since the metadata is serialized and actions are +# performed where the metadata server may be blocked +# (thus the datasource will start failing) resulting +# in url exceptions when fields that do exist (or +# would have existed) do not exist due to the blocking +# that occurred. + + +def _unlazy_dict(mp): + if not isinstance(mp, (dict)): + return mp + # Walk over the keys/values which + # forces boto to unlazy itself and + # has no effect on dictionaries that + # already have there items. + for (_k, v) in mp.items(): + _unlazy_dict(v) + return mp + + +def get_instance_userdata(api_version, metadata_address): + # Note: boto.utils.get_instance_metadata returns '' for empty string + # so the change from non-true to '' is not specifically necessary, but + # this way cloud-init will get consistent behavior even if boto changed + # in the future to return a None on "no user-data provided". + ud = boto_utils.get_instance_userdata(api_version, None, metadata_address) + if not ud: + ud = '' + return ud + + +def get_instance_metadata(api_version, metadata_address): + metadata = boto_utils.get_instance_metadata(api_version, metadata_address) + if not isinstance(metadata, (dict)): + metadata = {} + return _unlazy_dict(metadata) -import json -import urllib - -from cloudinit import log as logging -from cloudinit import util - -LOG = logging.getLogger(__name__) - - -def combine_url(base, add_on): - base_parsed = list(urlparse(base)) - path = base_parsed[2] - if path and not path.endswith("/"): - path += "/" - path += urllib.quote(str(add_on), safe="/:") - base_parsed[2] = path - return urlunparse(base_parsed) - - -# See: http://bit.ly/TyoUQs -# -# Since boto metadata reader uses the old urllib which does not -# support ssl, we need to ahead and create our own reader which -# works the same as the boto one (for now). -class MetadataMaterializer(object): - def __init__(self, blob, base_url, ssl_details): - self._blob = blob - self._md = None - self._base_url = base_url - self._ssl_details = ssl_details - - def _parse(self, blob): - leaves = {} - children = [] - if not blob: - return (leaves, children) - - def has_children(item): - if item.endswith("/"): - return True - else: - return False - - def get_name(item): - if item.endswith("/"): - return item.rstrip("/") - return item - - for field in blob.splitlines(): - field = field.strip() - field_name = get_name(field) - if not field or not field_name: - continue - if has_children(field): - if field_name not in children: - children.append(field_name) - else: - contents = field.split("=", 1) - resource = field_name - if len(contents) > 1: - # What a PITA... 
- (ident, sub_contents) = contents - checked_ident = util.safe_int(ident) - if checked_ident is not None: - resource = "%s/openssh-key" % (checked_ident) - field_name = sub_contents - leaves[field_name] = resource - return (leaves, children) - - def materialize(self): - if self._md is not None: - return self._md - self._md = self._materialize(self._blob, self._base_url) - return self._md - - def _fetch_url(self, url): - response = util.read_file_or_url(url, ssl_details=self._ssl_details) - return str(response) - - def _decode_leaf_blob(self, blob): - if not blob: - return blob - stripped_blob = blob.strip() - if stripped_blob.startswith("{") and stripped_blob.endswith("}"): - # Assume and try with json - try: - return json.loads(blob) - except (ValueError, TypeError): - pass - if blob.find("\n") != -1: - return blob.splitlines() - return blob - - def _materialize(self, blob, base_url): - (leaves, children) = self._parse(blob) - child_contents = {} - for c in children: - child_url = combine_url(base_url, c) - if not child_url.endswith("/"): - child_url += "/" - child_blob = self._fetch_url(child_url) - child_contents[c] = self._materialize(child_blob, child_url) - leaf_contents = {} - for (field, resource) in leaves.items(): - leaf_url = combine_url(base_url, resource) - leaf_blob = self._fetch_url(leaf_url) - leaf_contents[field] = self._decode_leaf_blob(leaf_blob) - joined = {} - joined.update(child_contents) - for field in leaf_contents.keys(): - if field in joined: - LOG.warn("Duplicate key found in results from %s", base_url) - else: - joined[field] = leaf_contents[field] - return joined - - -def get_instance_userdata(version='latest', url='http://169.254.169.254', - ssl_details=None): - ud_url = combine_url(url, version) - ud_url = combine_url(ud_url, 'user-data') - try: - response = util.read_file_or_url(ud_url, ssl_details=ssl_details) - return str(response) - except Exception: - util.logexc(LOG, "Failed fetching userdata from url %s", ud_url) - return None - - -def get_instance_metadata(version='latest', url='http://169.254.169.254', - ssl_details=None, timeout=5, num_retries=5): - md_url = combine_url(metadata_address, version) - md_url = combine_url(md_url, 'meta-data') - try: - response = util.read_file_or_url(md_url, ssl_details=ssl_details, - timeout=timeout, retries=num_retries) - materializer = MetadataMaterializer(str(response), md_url, ssl_details) - return materializer.materialize() - except Exception: - util.logexc(LOG, "Failed fetching metadata from url %s", md_url) - return None diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 7fd38982..b4ca6d93 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -102,13 +102,10 @@ class DataSourceCloudStack(sources.DataSource): return False start_time = time.time() md_addr = self.metadata_address - ssl_details = util.fetch_ssl_details(self.paths) self.userdata_raw = ec2_utils.get_instance_userdata(self.api_ver, - md_addr, - ssl_details) + md_addr) self.metadata = ec2_utils.get_instance_metadata(self.api_ver, - md_addr, - ssl_details) + md_addr) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) return True diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 0d62cf01..bd35c8b0 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -62,13 +62,10 @@ class DataSourceEc2(sources.DataSource): return False start_time = 
time.time() md_addr = self.metadata_address - ssl_details = util.fetch_ssl_details(self.paths) self.userdata_raw = ec2_utils.get_instance_userdata(self.api_ver, - md_addr, - ssl_details) + md_addr) self.metadata = ec2_utils.get_instance_metadata(self.api_ver, - md_addr, - ssl_details) + md_addr) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) return True -- cgit v1.2.3 From 1c88411ed2692f2dacb0c6e15f27b8dca64e7089 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 19 Mar 2013 15:55:58 -0700 Subject: Add doc about issue 1401 in boto. --- cloudinit/ec2_utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 29393ce1..175f96aa 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -28,6 +28,10 @@ import boto.utils as boto_utils # would have existed) do not exist due to the blocking # that occurred. +# TODO: https://github.com/boto/boto/issues/1401 +# When boto finally moves to using requests, we should be able +# to provide it ssl details, it does not yet, so we can't provide them... + def _unlazy_dict(mp): if not isinstance(mp, (dict)): -- cgit v1.2.3 From c49b92e2e1f32cbb32c856ba246ef97026318dbf Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 20 Mar 2013 08:30:43 -0400 Subject: remove some churn cloudinit/sources/DataSourceCloudStack.py and cloudinit/sources/DataSourceEc2.py are reverted entirely back to trunk versions now, rather than the non-behavior change that was left in place. Also, remove inadvertantly added trailing newline from cloudinit/ec2_utils.py Overall, this just makes the diff when merged to trunk carry more focused changes. --- cloudinit/ec2_utils.py | 1 - cloudinit/sources/DataSourceCloudStack.py | 11 +++++------ cloudinit/sources/DataSourceEc2.py | 13 ++++++------- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 175f96aa..5fa8c5ad 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -61,4 +61,3 @@ def get_instance_metadata(api_version, metadata_address): if not isinstance(metadata, (dict)): metadata = {} return _unlazy_dict(metadata) - diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index b4ca6d93..81c8cda9 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -25,7 +25,7 @@ import os import time -from cloudinit import ec2_utils +from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper as uhelp @@ -101,11 +101,10 @@ class DataSourceCloudStack(sources.DataSource): if not self.wait_for_metadata_service(): return False start_time = time.time() - md_addr = self.metadata_address - self.userdata_raw = ec2_utils.get_instance_userdata(self.api_ver, - md_addr) - self.metadata = ec2_utils.get_instance_metadata(self.api_ver, - md_addr) + self.userdata_raw = ec2.get_instance_userdata(self.api_ver, + self.metadata_address) + self.metadata = ec2.get_instance_metadata(self.api_ver, + self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) return True diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index bd35c8b0..f010e640 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -23,7 +23,7 @@ import os import time -from cloudinit import ec2_utils +from cloudinit import ec2_utils 
as ec2 from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper as uhelp @@ -61,11 +61,10 @@ class DataSourceEc2(sources.DataSource): if not self.wait_for_metadata_service(): return False start_time = time.time() - md_addr = self.metadata_address - self.userdata_raw = ec2_utils.get_instance_userdata(self.api_ver, - md_addr) - self.metadata = ec2_utils.get_instance_metadata(self.api_ver, - md_addr) + self.userdata_raw = ec2.get_instance_userdata(self.api_ver, + self.metadata_address) + self.metadata = ec2.get_instance_metadata(self.api_ver, + self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) return True @@ -134,7 +133,7 @@ class DataSourceEc2(sources.DataSource): start_time = time.time() url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, - timeout=timeout, status_cb=LOG.warn) + timeout=timeout, status_cb=LOG.warn) if url: LOG.debug("Using metadata source: '%s'", url2base[url]) -- cgit v1.2.3 From ab71760d7e127ae9a3cf31a4f6d7600c945f0dd7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 20 Mar 2013 08:34:19 -0400 Subject: pep8 --- cloudinit/ec2_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 5fa8c5ad..fcd511c5 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -28,7 +28,7 @@ import boto.utils as boto_utils # would have existed) do not exist due to the blocking # that occurred. -# TODO: https://github.com/boto/boto/issues/1401 +# TODO(harlowja): https://github.com/boto/boto/issues/1401 # When boto finally moves to using requests, we should be able # to provide it ssl details, it does not yet, so we can't provide them... -- cgit v1.2.3 From a2113a70e9cf4c1cc00ec67fe3411b5696686f46 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 26 Mar 2013 14:32:12 -0400 Subject: fix brpm and bddeb by knowing about 'python-requests' --- packages/bddeb | 1 + packages/brpm | 1 + 2 files changed, 2 insertions(+) diff --git a/packages/bddeb b/packages/bddeb index 61399739..00bc717e 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -35,6 +35,7 @@ PKG_MP = { 'oauth': 'python-oauth', 'prettytable': 'python-prettytable', 'pyyaml': 'python-yaml', + 'requests': 'python-requests', } DEBUILD_ARGS = ["-us", "-S", "-uc", "-d"] diff --git a/packages/brpm b/packages/brpm index eea2a046..53de802c 100755 --- a/packages/brpm +++ b/packages/brpm @@ -41,6 +41,7 @@ PKG_MP = { 'oauth': 'python-oauth', 'prettytable': 'python-prettytable', 'pyyaml': 'PyYAML', + 'requests': 'python-requests', } # Subdirectories of the ~/rpmbuild dir -- cgit v1.2.3 From be0041e7c7fd6ce5ffc1c9c54893b715bcab6358 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 26 Mar 2013 15:40:29 -0400 Subject: cloud-init-nonet.conf: handle case where sleep died In starting containers in lxc, I was seeing errors like: /proc/self/fd/9: 24: kill: No such process Which indicated the sleep pid had already died. I'm not sure how or why it was dead, but this just is less annoying in that case. 
---
 upstart/cloud-init-nonet.conf | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf
index 36b99fb5..a94b1474 100644
--- a/upstart/cloud-init-nonet.conf
+++ b/upstart/cloud-init-nonet.conf
@@ -31,7 +31,12 @@ script
    handle_sigterm() {
       # if we received sigterm and static networking is up then it probably
       # came from upstart as a result of 'stop on static-network-up'
-      [ -z "$SLEEP_CHILD" ] || kill $SLEEP_CHILD
+      if [ -n "$SLEEP_CHILD" ]; then
+         if ! kill $SLEEP_CHILD 2>/dev/null; then
+            [ ! -d "/proc/$SLEEP_CHILD" ] ||
+               msg "hm.. failed to kill sleep pid $SLEEP_CHILD"
+         fi
+      fi
       if static_network_up; then
          msg "static networking is now up"
          exit 0
-- 
cgit v1.2.3


From 19b11d7e269360880d11d883a59b80b2909cee0f Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Tue, 26 Mar 2013 15:50:25 -0400
Subject: handle errors in cc_resizefs better

Now, errors will not be so annoying if the device doesn't exist.
Specifically, if there is no device in a container, only debug messages
will be logged.

LP: #1160462
---
 cloudinit/config/cc_resizefs.py | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)

diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 51dead2f..b4ee16b2 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -18,6 +18,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

+import errno
 import os
 import stat
 import time
@@ -75,9 +76,29 @@ def handle(name, cfg, _cloud, log, args):
     (devpth, fs_type, mount_point) = result

     # Ensure the path is a block device.
-    if not stat.S_ISBLK(os.stat(devpth).st_mode):
-        log.debug("The %s device which was found for mount point %s for %s "
-                  "is not a block device" % (devpth, mount_point, resize_what))
+    info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
+    log.debug("resize_info: %s" % info)
+
+    try:
+        statret = os.stat(devpth)
+    except OSError as exc:
+        if util.is_container() and exc.errno == errno.ENOENT:
+            log.debug("Device '%s' did not exist in container. "
+                      "cannot resize: %s" % (devpth, info))
+        elif exc.errno == errno.ENOENT:
+            log.warn("Device '%s' did not exist. cannot resize: %s" %
+                     (devpth, info))
+        else:
+            raise exc
+        return
+
+    if not stat.S_ISBLK(statret.st_mode):
+        if util.is_container():
+            log.debug("device '%s' not a block device in container."
+                      " cannot resize: %s" % (devpth, info))
+        else:
+            log.warn("device '%s' not a block device. cannot resize: %s" %
+                     (devpth, info))
         return

     resizer = None
-- 
cgit v1.2.3


From 984c72e522c585c6d3f6b3d3aec39fb21dd84028 Mon Sep 17 00:00:00 2001
From: Greg Padgett
Date: Tue, 26 Mar 2013 17:48:40 -0400
Subject: compatibility fixes for Fedora and RHEL

This patch fixes issues in Fedora 18 (and upcoming RHEL 7) which are
present due to their use of systemd:
- store locale configuration in /etc/locale.conf
- store hostname in /etc/hostname
- use a symlink for /etc/localtime (prior code would set the timezone
  but corrupt data in /usr/share/zoneinfo due to presence of symlink)

It also contains fixes for issues unrelated to systemd adoption:
- explicitly scan /dev/sr0 with blkid in order to get the optical drive
  in the blkid cache. This prevents an issue on systems running 2.6
  kernels (such as RHEL 6) in which config disks on some devices won't
  be detected unless the device has previously been queried.
  (For reference, see https://patchwork.kernel.org/patch/1770241/)
- append a newline when rewriting sysconfig files, as this is customary
  text configuration file formatting and is expected by some parsers
  (such as the ifcfg-rh plugin for NetworkManager)
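
A rough sketch of the symlink approach described above (assumed details, not
the literal patch; the helper name and unlink handling are illustrative):
point /etc/localtime at the zone file instead of copying over it, so nothing
under /usr/share/zoneinfo is ever written through.

    import os

    def set_timezone_symlink(tz, zoneinfo_dir='/usr/share/zoneinfo',
                             tz_local_fn='/etc/localtime'):
        tz_file = os.path.join(zoneinfo_dir, tz)
        if not os.path.isfile(tz_file):
            raise RuntimeError("Invalid timezone %s, no file found at %s"
                               % (tz, tz_file))
        if os.path.lexists(tz_local_fn):
            os.unlink(tz_local_fn)  # drop any stale file or link first
        os.symlink(tz_file, tz_local_fn)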
(For reference, see https://patchwork.kernel.org/patch/1770241/) - append a newline when rewriting sysconfig files, as this is customary text configuration file formatting and is expected by some parsers (such as the ifcfg-rh plugin for NetworkManager) --- cloudinit/distros/rhel.py | 73 ++++++++++++++++------ cloudinit/sources/DataSourceConfigDrive.py | 3 + cloudinit/sources/DataSourceNoCloud.py | 3 + cloudinit/util.py | 1 + .../unittests/test_datasource/test_configdrive.py | 5 +- 5 files changed, 63 insertions(+), 22 deletions(-) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 9fee5fd1..174da3ab 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -47,8 +47,10 @@ class Distro(distros.Distro): # See: http://tiny.cc/6r99fw clock_conf_fn = "/etc/sysconfig/clock" locale_conf_fn = '/etc/sysconfig/i18n' + systemd_locale_conf_fn = '/etc/locale.conf' network_conf_fn = "/etc/sysconfig/network" hostname_conf_fn = "/etc/sysconfig/network" + systemd_hostname_conf_fn = "/etc/hostname" network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s' resolve_conf_fn = "/etc/resolv.conf" tz_local_fn = "/etc/localtime" @@ -143,21 +145,36 @@ class Distro(distros.Distro): ] if not exists: lines.insert(0, util.make_header()) - util.write_file(fn, "\n".join(lines), 0644) + util.write_file(fn, "\n".join(lines) + "\n", 0644) + + def _dist_uses_systemd(self): + # Fedora 18 and RHEL 7 were the first adopters in their series + (dist, vers) = util.system_info()['dist'][:2] + major = (int)(vers.split('.')[0]) + return ((dist.startswith('Red Hat Enterprise Linux') and major >= 7) + or (dist.startswith('Fedora') and major >= 18)) def apply_locale(self, locale, out_fn=None): - if not out_fn: - out_fn = self.locale_conf_fn + if self._dist_uses_systemd(): + if not out_fn: + out_fn = self.systemd_locale_conf_fn + out_fn = self.systemd_locale_conf_fn + else: + if not out_fn: + out_fn = self.locale_conf_fn locale_cfg = { 'LANG': locale, } self._update_sysconfig_file(out_fn, locale_cfg) def _write_hostname(self, hostname, out_fn): - host_cfg = { - 'HOSTNAME': hostname, - } - self._update_sysconfig_file(out_fn, host_cfg) + if self._dist_uses_systemd(): + util.subp(['hostnamectl', 'set-hostname', str(hostname)]) + else: + host_cfg = { + 'HOSTNAME': hostname, + } + self._update_sysconfig_file(out_fn, host_cfg) def _select_hostname(self, hostname, fqdn): # See: http://bit.ly/TwitgL @@ -167,15 +184,25 @@ class Distro(distros.Distro): return hostname def _read_system_hostname(self): - return (self.network_conf_fn, - self._read_hostname(self.network_conf_fn)) + if self._dist_uses_systemd(): + host_fn = self.systemd_hostname_conf_fn + else: + host_fn = self.hostname_conf_fn + return (host_fn, self._read_hostname(host_fn)) def _read_hostname(self, filename, default=None): - (_exists, contents) = self._read_conf(filename) - if 'HOSTNAME' in contents: - return contents['HOSTNAME'] + if self._dist_uses_systemd(): + (out, _err) = util.subp(['hostname']) + if len(out): + return out + else: + return default else: - return default + (_exists, contents) = self._read_conf(filename) + if 'HOSTNAME' in contents: + return contents['HOSTNAME'] + else: + return default def _read_conf(self, fn): exists = False @@ -200,13 +227,19 @@ class Distro(distros.Distro): if not os.path.isfile(tz_file): raise RuntimeError(("Invalid timezone %s," " no file found at %s") % (tz, tz_file)) - # Adjust the sysconfig clock zone setting - clock_cfg = { - 'ZONE': str(tz), - } - self._update_sysconfig_file(self.clock_conf_fn, 
clock_cfg) - # This ensures that the correct tz will be used for the system - util.copy(tz_file, self.tz_local_fn) + if self._dist_uses_systemd(): + # Currently, timedatectl complains if invoked during startup + # so for compatibility, create the link manually. + util.del_file(self.tz_local_fn) + util.sym_link(tz_file, self.tz_local_fn) + else: + # Adjust the sysconfig clock zone setting + clock_cfg = { + 'ZONE': str(tz), + } + self._update_sysconfig_file(self.clock_conf_fn, clock_cfg) + # This ensures that the correct tz will be used for the system + util.copy(tz_file, self.tz_local_fn) def package_command(self, command, args=None, pkgs=None): if pkgs is None: diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 5f152299..d3443c2b 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -258,6 +258,9 @@ def find_candidate_devs(): * labeled with 'config-2' """ + # Query optical drive to get it in blkid cache for 2.6 kernels + util.find_devs_with(path="/dev/sr0") + by_fstype = (util.find_devs_with("TYPE=vfat") + util.find_devs_with("TYPE=iso9660")) by_label = util.find_devs_with("LABEL=config-2") diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 08a853cc..01c99028 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -87,6 +87,9 @@ class DataSourceNoCloud(sources.DataSource): label = self.ds_cfg.get('fs_label', "cidata") if label is not None: + # Query optical drive to get it in blkid cache for 2.6 kernels + util.find_devs_with(path="/dev/sr0") + fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) diff --git a/cloudinit/util.py b/cloudinit/util.py index 36e9b83b..50de55fe 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -408,6 +408,7 @@ def system_info(): 'release': platform.release(), 'python': platform.python_version(), 'uname': platform.uname(), + 'dist': platform.linux_distribution(), } diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 930086db..d5935294 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -259,8 +259,9 @@ class TestConfigDriveDataSource(MockerTestCase): def test_find_candidates(self): devs_with_answers = {} - def my_devs_with(criteria): - return devs_with_answers[criteria] + def my_devs_with(*args, **kwargs): + criteria = args[0] if len(args) else kwargs.pop('criteria', None) + return devs_with_answers.get(criteria, []) def my_is_partition(dev): return dev[-1] in "0123456789" and not dev.startswith("sr") -- cgit v1.2.3 From 7dac7bbd48bb56971c2fddfcf13d439d577740c1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 2 Apr 2013 11:51:54 -0700 Subject: Fix the default string used for merging. It had been changed in code, but not in docs. So we needed to reflect the change in docs as well so that both are in sync. 
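The corrected string is what user data hands to the merger machinery; a
minimal sketch of how it is consumed, using the same helpers the in-tree
unit tests call (the sample inputs here are made up for illustration):

    from cloudinit import mergers

    merge_how = "list()+dict()+str()"  # the documented default
    # parse into (name, options) pairs, then build the combined merger
    merger_set = mergers.string_extract_mergers(merge_how)
    merger = mergers.construct(merger_set)
    merged = merger.merge({'a': ['1']}, {'a': ['2'], 'b': 'x'})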
---
 doc/merging.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/merging.rst b/doc/merging.rst
index 6344facd..d4d5cd05 100644
--- a/doc/merging.rst
+++ b/doc/merging.rst
@@ -145,7 +145,7 @@ For example, the default string that is used when none is provided is the follow
 
 ::
 
-    list(extend)+dict()+str(append)
+    list()+dict()+str()
 
 Dictionary format
 ********
--
cgit v1.2.3


From 05c22e4f202332332de051e6849bbf5210aa19f6 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 3 Apr 2013 16:06:43 -0500
Subject: invoke 'dist-upgrade' instead of 'upgrade' for upgrades.

In general, dist-upgrade is the correct behavior here. It will get a
new kernel, though, which could be annoying. So, allow a way to turn
it off (by setting 'apt_get_upgrade_subcommand: upgrade').

LP: #1164147
---
 cloudinit/distros/debian.py   |  8 +++++++-
 doc/examples/cloud-config.txt | 13 +++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 4b779d57..a1e28ad5 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -161,7 +161,13 @@ class Distro(distros.Distro):
         elif args and isinstance(args, list):
             cmd.extend(args)
 
-        cmd.append(command)
+
+        subcmd = command
+        if command == "upgrade":
+            subcmd = self.get_option("apt_get_upgrade_subcommand",
+                                     "dist-upgrade")
+
+        cmd.append(subcmd)
 
         pkglist = util.expand_package_list('%s=%s', pkgs)
         cmd.extend(pkglist)
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 09298655..b8abb67a 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -125,6 +125,19 @@ apt_sources:
       =Y2oI
       -----END PGP PUBLIC KEY BLOCK-----
 
+# apt_get_command: [command, argument, argument]
+# Specify a different 'apt-get' command. must be a list. subcommands are
+# appended to it. default is:
+# ['apt-get', '--option=Dpkg::Options::=--force-confold',
+# '--option=Dpkg::options::=--force-unsafe-io', '--assume-yes', '--quiet']
+#
+# apt_get_upgrade_subcommand:
+# Specify a different 'apt-get upgrade' subcommand. when 'apt_upgrade' or
+# package_upgrade is set to true above, then this subcommand will be invoked.
+# default is 'dist-upgrade'. For example, you could set this to 'upgrade'.
+apt_get_upgrade_subcommand: dist-upgrade + + # Install additional packages on first boot # # Default: none -- cgit v1.2.3 From 0d3c21c53369673529301f2c4e23bdb7bae7495b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 3 Apr 2013 17:39:32 -0500 Subject: add merge debug tool --- tools/ccfg-merge-debug | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100755 tools/ccfg-merge-debug diff --git a/tools/ccfg-merge-debug b/tools/ccfg-merge-debug new file mode 100755 index 00000000..aac60528 --- /dev/null +++ b/tools/ccfg-merge-debug @@ -0,0 +1,81 @@ +#!/usr/bin/python + +from cloudinit import handlers +from cloudinit.handlers import cloud_config as cc_part +from cloudinit import helpers +from cloudinit.settings import PER_INSTANCE +from cloudinit import user_data as ud + +import argparse +import os +import shutil +import tempfile + + +def main(): + parser = argparse.ArgumentParser( + description='test cloud-config merging') + parser.add_argument("--output", "-o", metavar="file", + help="specify output file", default="-") + parser.add_argument('files', nargs='+') + + args = parser.parse_args() + outfile = args.output + if args.output == "-": + outfile = "/dev/stdout" + + tempd = tempfile.mkdtemp() + handler_dir = os.path.join(tempd, "hdir") + data = None # the 'init' object + frequency = PER_INSTANCE + + paths = helpers.Paths({}) + + # make a '#include ' style + udproc = ud.UserDataProcessor(paths=paths) + user_data_msg = udproc.process("#include\n" + + '\n'.join([os.path.abspath(f) for f in args.files])) + + ccph = cc_part.CloudConfigPartHandler(paths=paths) + ccph.cloud_fn = outfile + + c_handlers = helpers.ContentHandlers() + c_handlers.register_defaults([ccph]) + + called = [] + for (_ctype, mod) in c_handlers.iteritems(): + if mod in called: + continue + handlers.call_begin(mod, data, frequency) + called.append(mod) + + # Walk the user data + part_data = { + 'handlers': c_handlers, + # Any new handlers that are encountered get writen here + 'handlerdir': handler_dir, + 'data': data, + # The default frequency if handlers don't have one + 'frequency': frequency, + # This will be used when new handlers are found + # to help write there contents to files with numbered + # names... + 'handlercount': 0, + } + + handlers.walk(user_data_msg, handlers.walker_callback, data=part_data) + + # Give callbacks opportunity to finalize + called = [] + for (_ctype, mod) in c_handlers.iteritems(): + if mod in called: + continue + handlers.call_end(mod, data, frequency) + called.append(mod) + + shutil.rmtree(tempd) + +if __name__ == "__main__": + main() + +# vi: ts=4 expandtab -- cgit v1.2.3 From 90a6bbda5c181569a969edb0d191d19b6110755b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Apr 2013 17:57:41 -0400 Subject: tools: fix [some] shell quoting problems There were problems with these tools if the path had a space. This should make these tools safe. There are others that still have problems. 
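The same rule carries over to cloud-init's Python code: pass argv as a
list so a path containing whitespace stays a single argument. A sketch of
that principle (illustrative only; the path shown is hypothetical and bzr
must be installed for it to run):

    import subprocess

    root_dir = "/tmp/cloud init"  # hypothetical path containing a space
    # argv as a list: the path is one argument, no shell word-splitting
    subprocess.check_call(["bzr", "revno", root_dir])
    # by contrast, unquoted interpolation through a shell would split it:
    # subprocess.check_call("bzr revno %s" % root_dir, shell=True)  # broken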
---
 tools/make-dist-tarball | 14 +++++---------
 tools/make-tarball      | 12 +++++-------
 tools/read-dependencies |  8 ++------
 tools/read-version      |  4 ++--
 4 files changed, 14 insertions(+), 24 deletions(-)

diff --git a/tools/make-dist-tarball b/tools/make-dist-tarball
index 7742caea..5b078515 100755
--- a/tools/make-dist-tarball
+++ b/tools/make-dist-tarball
@@ -10,16 +10,12 @@ EOF
 }
 
 topdir="$PWD"
-tag=${1}
+tag="$1"
 
 [ -n "$tag" ] || { Usage 1>&2 ; exit 1; }
 
-tmpd=$(mktemp -d );
-trap "rm -Rf '${tmpd}'" 0
+out="${topdir}/cloud-init-${tag}.tar.gz"
 
-out=${topdir}/cloud-init-${tag}.tar.gz
-
-cd ${tmpd} &&
-   bzr branch -r "tag:${tag}" "${topdir}" ./cloud-init-${tag} &&
-   tar czf "${out}" cloud-init-${tag}/ --exclude cloud-init-${tag}/.bzr &&
-   echo "Wrote ${out}"
+bzr export --format=tgz --root="cloud-init-$tag" \
+    "--revision=tag:${tag}" "$out" "$topdir" &&
+    echo "Wrote ${out}"
diff --git a/tools/make-tarball b/tools/make-tarball
index 47979f5b..27f5f374 100755
--- a/tools/make-tarball
+++ b/tools/make-tarball
@@ -18,18 +18,16 @@ if ! find_root; then
     exit 1;
 fi
 
+REVNO=$(bzr revno "$ROOT_DIR")
+
 if [ ! -z "$1" ]; then
     ARCHIVE_FN="$1"
 else
-    REVNO=$(bzr revno $ROOT_DIR)
-    VERSION=$($ROOT_DIR/tools/read-version)
+    VERSION=$("$ROOT_DIR/tools/read-version")
     ARCHIVE_FN="$PWD/cloud-init-$VERSION~bzr$REVNO.tar.gz"
 fi
 
-FILES=$(cd $ROOT_DIR && bzr ls --versioned --recursive)
-echo "$FILES" | tar czf $ARCHIVE_FN \
-    -C "$ROOT_DIR" \
-    --transform "s,^,cloud-init-$VERSION~bzr$REVNO/," \
-    --no-recursion --files-from -
+bzr export --format=tgz --root="cloud-init-$VERSION~bzr$REVNO" \
+    "--revision=${REVNO}" "${ARCHIVE_FN}" "$ROOT_DIR"
 
 echo "$ARCHIVE_FN"
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 4c88aa87..cadb09a8 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -21,15 +21,11 @@ fi
 
 REQUIRES="$ROOT_DIR/Requires"
 
-if [ ! -e "$REQUIRES" ]
-then
+if [ ! -e "$REQUIRES" ]; then
     echo "Unable to find 'Requires' file located at $REQUIRES"
     exit 1
 fi
 
 # Filter out comments and empty liens
-DEPS=$(cat $REQUIRES | grep -Pv "^\s*#" | grep -Pv '^\s*$')
+DEPS=$(grep -Pv "^\s*#" "$REQUIRES" | grep -Pv '^\s*$')
 echo "$DEPS" | sort -d -f
-
-
-
diff --git a/tools/read-version b/tools/read-version
index 323357fe..c76b24a9 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -27,5 +27,5 @@ then
     exit 1
 fi
 
-VERSION=$(grep -P "\d+.\d+.\d+:" $CHNG_LOG | cut -f1 -d ":" | head -n 1)
-echo $VERSION
+VERSION=$(grep -P "\d+.\d+.\d+:" "$CHNG_LOG" | cut -f1 -d ":" | head -n 1)
+echo "$VERSION"
--
cgit v1.2.3


From 87963dd237ff2080be62cc6a8afb4138471e4f20 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Wed, 17 Apr 2013 08:46:58 -0700
Subject: Handle namespacing issues.

Using the built-in type names ('dict', 'list', 'str') as merger module
names turned out to be a bad idea, since it causes naming conflicts in
the __init__ module. Move away from those names and reduce the chance
of such conflicts by enforcing an 'm_' prefix for merging modules.
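A sketch of the naming rule this introduces (it mirrors the construct()
change below; the helper function here is hypothetical, the prefix
constant comes from the patch):

    MERGER_PREFIX = 'm_'

    def merger_module_name(m_name):
        # a merger named "dict" in user-supplied merge_how now resolves
        # to module "m_dict", so merger modules can no longer shadow the
        # builtin type names
        if not m_name.startswith(MERGER_PREFIX):
            m_name = MERGER_PREFIX + str(m_name)
        return m_name

    assert merger_module_name('dict') == 'm_dict'
    assert merger_module_name('m_list') == 'm_list'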
--- cloudinit/mergers/__init__.py | 11 +++++++--- cloudinit/mergers/dict.py | 48 ----------------------------------------- cloudinit/mergers/list.py | 50 ------------------------------------------- cloudinit/mergers/m_dict.py | 48 +++++++++++++++++++++++++++++++++++++++++ cloudinit/mergers/m_list.py | 50 +++++++++++++++++++++++++++++++++++++++++++ cloudinit/mergers/m_str.py | 39 +++++++++++++++++++++++++++++++++ cloudinit/mergers/str.py | 39 --------------------------------- 7 files changed, 145 insertions(+), 140 deletions(-) delete mode 100644 cloudinit/mergers/dict.py delete mode 100644 cloudinit/mergers/list.py create mode 100644 cloudinit/mergers/m_dict.py create mode 100644 cloudinit/mergers/m_list.py create mode 100644 cloudinit/mergers/m_str.py delete mode 100644 cloudinit/mergers/str.py diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index e1ff57ba..f504e15f 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -26,6 +26,8 @@ NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$") LOG = logging.getLogger(__name__) DEF_MERGE_TYPE = "list()+dict()+str()" +MERGER_PREFIX = 'm_' +MERGER_ATTR = 'Merger' class UnknownMerger(object): @@ -136,15 +138,18 @@ def default_mergers(): def construct(parsed_mergers): mergers_to_be = [] for (m_name, m_ops) in parsed_mergers: + if not m_name.startswith(MERGER_PREFIX): + m_name = MERGER_PREFIX + str(m_name) merger_locs = importer.find_module(m_name, [__name__], - ['Merger']) + [MERGER_ATTR]) if not merger_locs: - msg = "Could not find merger named '%s'" % (m_name) + msg = ("Could not find merger module named '%s' " + "with attribute '%s'") % (m_name, MERGER_ATTR) raise ImportError(msg) else: mod = importer.import_module(merger_locs[0]) - mod_attr = getattr(mod, 'Merger') + mod_attr = getattr(mod, MERGER_ATTR) mergers_to_be.append((mod_attr, m_ops)) # Now form them... mergers = [] diff --git a/cloudinit/mergers/dict.py b/cloudinit/mergers/dict.py deleted file mode 100644 index 45a7d3a5..00000000 --- a/cloudinit/mergers/dict.py +++ /dev/null @@ -1,48 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joshua Harlow -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - - -class Merger(object): - def __init__(self, merger, opts): - self._merger = merger - self._overwrite = 'overwrite' in opts - - # This merging algorithm will attempt to merge with - # another dictionary, on encountering any other type of object - # it will not merge with said object, but will instead return - # the original value - # - # On encountering a dictionary, it will create a new dictionary - # composed of the original and the one to merge with, if 'overwrite' - # is enabled then keys that exist in the original will be overwritten - # by keys in the one to merge with (and associated values). Otherwise - # if not in overwrite mode the 2 conflicting keys themselves will - # be merged. 
- def _on_dict(self, value, merge_with): - if not isinstance(merge_with, (dict)): - return value - merged = dict(value) - for (k, v) in merge_with.items(): - if k in merged: - if not self._overwrite: - merged[k] = self._merger.merge(merged[k], v) - else: - merged[k] = v - else: - merged[k] = v - return merged diff --git a/cloudinit/mergers/list.py b/cloudinit/mergers/list.py deleted file mode 100644 index a56ff007..00000000 --- a/cloudinit/mergers/list.py +++ /dev/null @@ -1,50 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joshua Harlow -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - - -class Merger(object): - def __init__(self, merger, opts): - self._merger = merger - self._discard_non = 'discard_non_list' in opts - self._extend = 'extend' in opts - - def _on_tuple(self, value, merge_with): - return self._on_list(list(value), merge_with) - - # On encountering a list or tuple type this action will be applied - # a new list will be returned, if the value to merge with is itself - # a list and we have been told to 'extend', then the value here will - # be extended with the other list. If in 'extend' mode then we will - # attempt to merge instead, which means that values from the list - # to merge with will replace values in te original list (they will - # also be merged recursively). - # - # If the value to merge with is not a list, and we are set to discared - # then no modifications will take place, otherwise we will just append - # the value to merge with onto the end of our own list. - def _on_list(self, value, merge_with): - new_value = list(value) - if isinstance(merge_with, (tuple, list)): - if self._extend: - new_value.extend(merge_with) - else: - return new_value - else: - if not self._discard_non: - new_value.append(merge_with) - return new_value diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py new file mode 100644 index 00000000..45a7d3a5 --- /dev/null +++ b/cloudinit/mergers/m_dict.py @@ -0,0 +1,48 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ + +class Merger(object): + def __init__(self, merger, opts): + self._merger = merger + self._overwrite = 'overwrite' in opts + + # This merging algorithm will attempt to merge with + # another dictionary, on encountering any other type of object + # it will not merge with said object, but will instead return + # the original value + # + # On encountering a dictionary, it will create a new dictionary + # composed of the original and the one to merge with, if 'overwrite' + # is enabled then keys that exist in the original will be overwritten + # by keys in the one to merge with (and associated values). Otherwise + # if not in overwrite mode the 2 conflicting keys themselves will + # be merged. + def _on_dict(self, value, merge_with): + if not isinstance(merge_with, (dict)): + return value + merged = dict(value) + for (k, v) in merge_with.items(): + if k in merged: + if not self._overwrite: + merged[k] = self._merger.merge(merged[k], v) + else: + merged[k] = v + else: + merged[k] = v + return merged diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py new file mode 100644 index 00000000..a56ff007 --- /dev/null +++ b/cloudinit/mergers/m_list.py @@ -0,0 +1,50 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +class Merger(object): + def __init__(self, merger, opts): + self._merger = merger + self._discard_non = 'discard_non_list' in opts + self._extend = 'extend' in opts + + def _on_tuple(self, value, merge_with): + return self._on_list(list(value), merge_with) + + # On encountering a list or tuple type this action will be applied + # a new list will be returned, if the value to merge with is itself + # a list and we have been told to 'extend', then the value here will + # be extended with the other list. If in 'extend' mode then we will + # attempt to merge instead, which means that values from the list + # to merge with will replace values in te original list (they will + # also be merged recursively). + # + # If the value to merge with is not a list, and we are set to discared + # then no modifications will take place, otherwise we will just append + # the value to merge with onto the end of our own list. + def _on_list(self, value, merge_with): + new_value = list(value) + if isinstance(merge_with, (tuple, list)): + if self._extend: + new_value.extend(merge_with) + else: + return new_value + else: + if not self._discard_non: + new_value.append(merge_with) + return new_value diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py new file mode 100644 index 00000000..291c91c2 --- /dev/null +++ b/cloudinit/mergers/m_str.py @@ -0,0 +1,39 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. 
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class Merger(object):
+    def __init__(self, _merger, opts):
+        self._append = 'append' in opts
+
+    # On encountering a unicode object to merge value with
+    # we will for now just proxy into the string method to let it handle it.
+    def _on_unicode(self, value, merge_with):
+        return self._on_str(value, merge_with)
+
+    # On encountering a string object to merge with we will
+    # perform the following action, if appending we will
+    # merge them together, otherwise we will just return value.
+    def _on_str(self, value, merge_with):
+        if not self._append:
+            return value
+        else:
+            if isinstance(value, (unicode)):
+                return value + unicode(merge_with)
+            else:
+                return value + str(merge_with)
diff --git a/cloudinit/mergers/str.py b/cloudinit/mergers/str.py
deleted file mode 100644
index 291c91c2..00000000
--- a/cloudinit/mergers/str.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class Merger(object):
-    def __init__(self, _merger, opts):
-        self._append = 'append' in opts
-
-    # On encountering a unicode object to merge value with
-    # we will for now just proxy into the string method to let it handle it.
-    def _on_unicode(self, value, merge_with):
-        return self._on_str(value, merge_with)
-
-    # On encountering a string object to merge with we will
-    # perform the following action, if appending we will
-    # merge them together, otherwise we will just return value.
-    def _on_str(self, value, merge_with):
-        if not self._append:
-            return value
-        else:
-            if isinstance(value, (unicode)):
-                return value + unicode(merge_with)
-            else:
-                return value + str(merge_with)
--
cgit v1.2.3


From 66ea1ae9599d27686db2510f3a079485ea8292c3 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 17 Apr 2013 09:42:55 -0700
Subject: add debug output to ccfg-merge-debug

Exceptions were being swallowed completely, and there was no way to even
see them other than in the log.
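The -v handling added below maps repeated flags onto log levels by
indexing a tuple; the same trick in isolation (a sketch, with a
hypothetical helper name):

    import logging

    def level_for(verbose_count):
        # 1 -> INFO, 2 or more -> DEBUG; min() caps the tuple index
        levels = (logging.WARN, logging.INFO, logging.DEBUG)
        return levels[min(verbose_count, 2)]

    assert level_for(1) == logging.INFO
    assert level_for(5) == logging.DEBUG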
--- cloudinit/log.py | 6 +++--- tools/ccfg-merge-debug | 8 ++++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/cloudinit/log.py b/cloudinit/log.py index da6c2851..622c946c 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -44,13 +44,13 @@ NOTSET = logging.NOTSET DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s' -def setupBasicLogging(): +def setupBasicLogging(level=DEBUG): root = logging.getLogger() console = logging.StreamHandler(sys.stderr) console.setFormatter(logging.Formatter(DEF_CON_FORMAT)) - console.setLevel(DEBUG) + console.setLevel(level) root.addHandler(console) - root.setLevel(DEBUG) + root.setLevel(level) def flushLoggers(root): diff --git a/tools/ccfg-merge-debug b/tools/ccfg-merge-debug index aac60528..5b6b050a 100755 --- a/tools/ccfg-merge-debug +++ b/tools/ccfg-merge-debug @@ -3,6 +3,7 @@ from cloudinit import handlers from cloudinit.handlers import cloud_config as cc_part from cloudinit import helpers +from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import user_data as ud @@ -17,9 +18,16 @@ def main(): description='test cloud-config merging') parser.add_argument("--output", "-o", metavar="file", help="specify output file", default="-") + parser.add_argument('--verbose', '-v', action='count', default=0) parser.add_argument('files', nargs='+') args = parser.parse_args() + + if args.verbose: + level = (logging.WARN, logging.INFO, + logging.DEBUG)[min(args.verbose, 2)] + logging.setupBasicLogging(level) + outfile = args.output if args.output == "-": outfile = "/dev/stdout" -- cgit v1.2.3 From 6ad068d1ae175d784481fe8f8e190b2721a221f5 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 17 Apr 2013 10:17:09 -0700 Subject: Altering the order of merging. --- cloudinit/handlers/cloud_config.py | 12 +++--------- cloudinit/util.py | 23 +++++++++-------------- 2 files changed, 12 insertions(+), 23 deletions(-) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index d30d6338..7678a5b0 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -39,7 +39,6 @@ class CloudConfigPartHandler(handlers.Handler): self.cloud_buf = None self.cloud_fn = paths.get_ipath("cloud_config") self.file_names = [] - self.mergers = [DEF_MERGERS] def list_types(self): return [ @@ -89,13 +88,9 @@ class CloudConfigPartHandler(handlers.Handler): return all_mergers def _merge_part(self, payload, headers): - next_mergers = self._extract_mergers(payload, headers) - # Use the merger list from the last call, since it is the one - # that will be defining how to merge with the next payload. - curr_mergers = list(self.mergers[-1]) - LOG.debug("Merging by applying %s", curr_mergers) - self.mergers.append(next_mergers) - merger = mergers.construct(curr_mergers) + my_mergers = self._extract_mergers(payload, headers) + LOG.debug("Merging by applying %s", my_mergers) + merger = mergers.construct(my_mergers) if self.cloud_buf is None: # First time through, merge with an empty dict... 
self.cloud_buf = {} @@ -105,7 +100,6 @@ class CloudConfigPartHandler(handlers.Handler): def _reset(self): self.file_names = [] self.cloud_buf = None - self.mergers = [DEF_MERGERS] def handle_part(self, _data, ctype, filename, # pylint: disable=W0221 payload, _frequency, headers): # pylint: disable=W0613 diff --git a/cloudinit/util.py b/cloudinit/util.py index 50de55fe..f7ff28cc 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -543,21 +543,16 @@ def make_url(scheme, host, port=None, def mergemanydict(srcs, reverse=False): if reverse: srcs = reversed(srcs) - m_cfg = {} - merge_how = [mergers.default_mergers()] - for a_cfg in srcs: - if a_cfg: - # Take the last merger as the one that - # will define how to merge next... - mergers_to_apply = list(merge_how[-1]) + merged_cfg = {} + for cfg in srcs: + if cfg: + # Figure out which mergers to apply... + mergers_to_apply = mergers.dict_extract_mergers(cfg) + if not mergers_to_apply: + mergers_to_apply = mergers.default_mergers() merger = mergers.construct(mergers_to_apply) - m_cfg = merger.merge(m_cfg, a_cfg) - # If the config has now has new merger set, - # extract them to be used next time... - new_mergers = mergers.dict_extract_mergers(m_cfg) - if new_mergers: - merge_how.append(new_mergers) - return m_cfg + merged_cfg = merger.merge(merged_cfg, cfg) + return merged_cfg @contextlib.contextmanager -- cgit v1.2.3 From 0eabf9cdc2870982bcabc6e5d05c80078fa100cb Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 18 Apr 2013 13:29:39 -0700 Subject: Adjust how merging is done. --- cloudinit/mergers/dict.py | 5 +++-- cloudinit/mergers/list.py | 18 +++--------------- 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/cloudinit/mergers/dict.py b/cloudinit/mergers/dict.py index 45a7d3a5..929d3865 100644 --- a/cloudinit/mergers/dict.py +++ b/cloudinit/mergers/dict.py @@ -20,7 +20,7 @@ class Merger(object): def __init__(self, merger, opts): self._merger = merger - self._overwrite = 'overwrite' in opts + self._not_overwrite = 'not_overwrite' in opts # This merging algorithm will attempt to merge with # another dictionary, on encountering any other type of object @@ -39,7 +39,8 @@ class Merger(object): merged = dict(value) for (k, v) in merge_with.items(): if k in merged: - if not self._overwrite: + if self._not_overwrite: + # Attempt to merge them.... merged[k] = self._merger.merge(merged[k], v) else: merged[k] = v diff --git a/cloudinit/mergers/list.py b/cloudinit/mergers/list.py index a56ff007..7c8b2e2a 100644 --- a/cloudinit/mergers/list.py +++ b/cloudinit/mergers/list.py @@ -20,7 +20,6 @@ class Merger(object): def __init__(self, merger, opts): self._merger = merger - self._discard_non = 'discard_non_list' in opts self._extend = 'extend' in opts def _on_tuple(self, value, merge_with): @@ -33,18 +32,7 @@ class Merger(object): # attempt to merge instead, which means that values from the list # to merge with will replace values in te original list (they will # also be merged recursively). - # - # If the value to merge with is not a list, and we are set to discared - # then no modifications will take place, otherwise we will just append - # the value to merge with onto the end of our own list. 
def _on_list(self, value, merge_with): - new_value = list(value) - if isinstance(merge_with, (tuple, list)): - if self._extend: - new_value.extend(merge_with) - else: - return new_value - else: - if not self._discard_non: - new_value.append(merge_with) - return new_value + if not self._extend or not isinstance(merge_with, (tuple, list)): + return merge_with + return list(value).extend(merge_with) -- cgit v1.2.3 From 50f91a1bca166b5e815a722aca573672b269bacb Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 18 Apr 2013 14:15:37 -0700 Subject: Remove str merging for now. --- cloudinit/mergers/__init__.py | 2 +- cloudinit/mergers/m_str.py | 39 --------------------------------------- 2 files changed, 1 insertion(+), 40 deletions(-) delete mode 100644 cloudinit/mergers/m_str.py diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index f504e15f..2702496b 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -25,7 +25,7 @@ from cloudinit import type_utils NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$") LOG = logging.getLogger(__name__) -DEF_MERGE_TYPE = "list()+dict()+str()" +DEF_MERGE_TYPE = "list()+dict()" MERGER_PREFIX = 'm_' MERGER_ATTR = 'Merger' diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py deleted file mode 100644 index a0c57b53..00000000 --- a/cloudinit/mergers/m_str.py +++ /dev/null @@ -1,39 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joshua Harlow -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - - -class Merger(object): - def __init__(self, _merger, opts): - self._append = 'append' in opts - - # On encountering a unicode object to merge value with - # we will for now just proxy into the string method to let it handle it. - def _on_unicode(self, value, merge_with): - return self._on_str(value, merge_with) - - # On encountering a string object to merge with we will - # perform the following action, if appending we will - # merge them together, otherwise we will just return value. - def _on_str(self, value, merge_with): - if not self._append: - return merge_with - else: - if isinstance(value, (unicode)): - return value + unicode(merge_with) - else: - return value + str(merge_with) -- cgit v1.2.3 From 8441fe20fdd1d8bb195bc7d354c9e87d2f446ccd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 22 Apr 2013 12:43:35 -0700 Subject: Continue working on merging. 
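The option syntax being reworked here can be exercised directly; a sketch
using a merge_how string taken from the test data added later in this
series (the expected result is quoted from
tests/data/merge_sources/expected1.yaml, not re-derived here):

    from cloudinit import mergers

    merge_how = 'dict(recurse_array,no_replace)+list(append)'
    merger = mergers.construct(mergers.string_extract_mergers(merge_how))
    # lists found under matching dict keys are appended, not replaced
    merged = merger.merge({'Blah': ['blah2']}, {'Blah': ['b']})
    # per tests/data/merge_sources/expected1.yaml:
    #   {'Blah': ['blah2', 'b']}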
--- cloudinit/handlers/cloud_config.py | 7 ++-- cloudinit/mergers/__init__.py | 12 +++++-- cloudinit/mergers/m_dict.py | 73 ++++++++++++++++++++++++++------------ cloudinit/mergers/m_list.py | 59 ++++++++++++++++++++++++------ 4 files changed, 112 insertions(+), 39 deletions(-) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 7678a5b0..2ae9b226 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -85,17 +85,16 @@ class CloudConfigPartHandler(handlers.Handler): all_mergers.extend(mergers_header) if not all_mergers: all_mergers = DEF_MERGERS - return all_mergers + return (payload_yaml, all_mergers) def _merge_part(self, payload, headers): - my_mergers = self._extract_mergers(payload, headers) + (payload_yaml, my_mergers) = self._extract_mergers(payload, headers) LOG.debug("Merging by applying %s", my_mergers) merger = mergers.construct(my_mergers) if self.cloud_buf is None: # First time through, merge with an empty dict... self.cloud_buf = {} - self.cloud_buf = merger.merge(self.cloud_buf, - util.load_yaml(payload)) + self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml) def _reset(self): self.file_names = [] diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 2702496b..221e93b5 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -55,6 +55,9 @@ class UnknownMerger(object): if not meth: meth = self._handle_unknown args.insert(0, method_name) + LOG.debug("Merging '%s' into '%s' using method '%s' of '%s'", + type_name, type_utils.obj_name(merge_with), + meth.__name__, self) return meth(*args) @@ -66,6 +69,9 @@ class LookupMerger(UnknownMerger): else: self._lookups = lookups + def __str__(self): + return 'LookupMerger: (%s)' % (len(self._lookups)) + # For items which can not be merged by the parent this object # will lookup in a internally maintained set of objects and # find which one of those objects can perform the merge. If @@ -78,6 +84,8 @@ class LookupMerger(UnknownMerger): # First one that has that method/attr gets to be # the one that will be called meth = getattr(merger, meth_wanted) + LOG.debug(("Merging using located merger '%s'" + " since it had method '%s'"), merger, meth_wanted) break if not meth: return UnknownMerger._handle_unknown(self, meth_wanted, @@ -87,9 +95,9 @@ class LookupMerger(UnknownMerger): def dict_extract_mergers(config): parsed_mergers = [] - raw_mergers = config.get('merge_how') + raw_mergers = config.pop('merge_how', None) if raw_mergers is None: - raw_mergers = config.get('merge_type') + raw_mergers = config.pop('merge_type', None) if raw_mergers is None: return parsed_mergers if isinstance(raw_mergers, (str, basestring)): diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py index 929d3865..2c1c845f 100644 --- a/cloudinit/mergers/m_dict.py +++ b/cloudinit/mergers/m_dict.py @@ -20,30 +20,59 @@ class Merger(object): def __init__(self, merger, opts): self._merger = merger - self._not_overwrite = 'not_overwrite' in opts + # Affects merging behavior... 
+ self._method = 'replace' + for m in ['replace', 'no_replace']: + if m in opts: + self._method = m + break + # Affect how recursive merging is done on other primitives + self._recurse_str = 'recurse_str' in opts + self._recurse_dict = True + self._recurse_array = 'recurse_array' in opts + self._allow_delete = 'allow_delete' in opts + + def __str__(self): + s = ('DictMerger: (method=%s,recurse_str=%s,' + 'recurse_dict=%s,recurse_array=%s,allow_delete=%s)') + s = s % (self._method, + self._recurse_str, + self._recurse_dict, + self._recurse_array, + self._allow_delete) + return s + + def _do_dict_replace(self, value, merge_with, do_replace=True): + + def merge_same_key(old_v, new_v): + if do_replace: + return new_v + if isinstance(new_v, (list, tuple)) and self._recurse_array: + return self._merger.merge(old_v, new_v) + if isinstance(new_v, (str, basestring)) and self._recurse_str: + return self._merger.merge(old_v, new_v) + if isinstance(new_v, (dict)) and self._recurse_dict: + return self._merger.merge(old_v, new_v) + # Otherwise leave it be... + return old_v - # This merging algorithm will attempt to merge with - # another dictionary, on encountering any other type of object - # it will not merge with said object, but will instead return - # the original value - # - # On encountering a dictionary, it will create a new dictionary - # composed of the original and the one to merge with, if 'overwrite' - # is enabled then keys that exist in the original will be overwritten - # by keys in the one to merge with (and associated values). Otherwise - # if not in overwrite mode the 2 conflicting keys themselves will - # be merged. - def _on_dict(self, value, merge_with): - if not isinstance(merge_with, (dict)): - return value - merged = dict(value) for (k, v) in merge_with.items(): - if k in merged: - if self._not_overwrite: - # Attempt to merge them.... - merged[k] = self._merger.merge(merged[k], v) + if k in value: + if v is None and self._allow_delete: + value.pop(k) else: - merged[k] = v + value[k] = merge_same_key(value[k], v) else: - merged[k] = v + value[k] = v + return value + + def _on_dict(self, value, merge_with): + if not isinstance(merge_with, (dict)): + return value + if self._method == 'replace': + merged = self._do_dict_replace(dict(value), merge_with) + elif self._method == 'no_replace': + merged = self._do_dict_replace(dict(value), merge_with, False) + else: + raise NotImplementedError("Unknown merge type %s" % (self._method)) return merged diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py index 208c5f52..c6a23d85 100644 --- a/cloudinit/mergers/m_list.py +++ b/cloudinit/mergers/m_list.py @@ -20,18 +20,55 @@ class Merger(object): def __init__(self, merger, opts): self._merger = merger - self._extend = 'extend' in opts + # Affects merging behavior... 
+ self._method = 'replace' + for m in ['append', 'prepend', 'replace']: + if m in opts: + self._method = m + break + # Affect how recursive merging is done on other primitives + self._recurse_str = 'recurse_str' in opts + self._recurse_dict = 'recurse_dict' in opts + self._recurse_array = 'recurse_array' in opts + + def __str__(self): + return 'ListMerger: (m=%s,rs=%s,rd=%s,ra=%s)' % (self._method, + self._recurse_str, + self._recurse_dict, + self._recurse_array) def _on_tuple(self, value, merge_with): - return self._on_list(list(value), merge_with) + return tuple(self._on_list(list(value), merge_with)) - # On encountering a list or tuple type this action will be applied - # a new list will be returned, if the value to merge with is itself - # a list and we have been told to 'extend', then the value here will - # be extended with the other list. def _on_list(self, value, merge_with): - if not self._extend or not isinstance(merge_with, (tuple, list)): - return merge_with - # Leave the original list alone... - value = list(value) - return value.extend(merge_with) + if (self._method == 'replace' and + not isinstance(merge_with, (tuple, list))): + return merge_with + + # Ok we now know that what we are merging with is a list or tuple. + merged_list = [] + if self._method == 'prepend': + merged_list.extend(merge_with) + merged_list.extend(value) + return merged_list + elif self._method == 'append': + merged_list.extend(value) + merged_list.extend(merge_with) + return merged_list + + def merge_same_index(old_v, new_v): + if isinstance(new_v, (list, tuple)) and self._recurse_array: + return self._merger.merge(old_v, new_v) + if isinstance(new_v, (str, basestring)) and self._recurse_str: + return self._merger.merge(old_v, new_v) + if isinstance(new_v, (dict)) and self._recurse_dict: + return self._merger.merge(old_v, new_v) + # Otherwise leave it be... + return old_v + + # Ok now we are replacing same indexes + merged_list.extend(value) + common_len = min(len(merged_list), len(merge_with)) + for i in xrange(0, common_len): + merged_list[i] = merge_same_index(merged_list[i], merge_with[i]) + return merged_list -- cgit v1.2.3 From e14d64a03c6aa3e567b57f0c0a003ca2185f4493 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 23 Apr 2013 22:53:53 -0700 Subject: Add a bunch of new merging test files + runner. 
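The runner added below pairs inputs with expectations purely by file
name; a sketch of the convention (the regex is the one the test code
uses, the assertions are illustrative):

    import re

    # source<group>-<part>.yaml inputs are merged in <part> order and
    # must produce the matching expected<group>.yaml
    SOURCE_RE = re.compile(r"source(\d+)\-(\d+)[.]yaml")

    m = SOURCE_RE.match("source1-2.yaml")
    assert m and m.group(1) == "1" and m.group(2) == "2"
    assert SOURCE_RE.match("expected1.yaml") is None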
--- tests/data/merge_sources/expected1.yaml | 1 + tests/data/merge_sources/expected2.yaml | 3 + tests/data/merge_sources/expected3.yaml | 1 + tests/data/merge_sources/expected4.yaml | 2 + tests/data/merge_sources/source1-1.yaml | 3 + tests/data/merge_sources/source1-2.yaml | 5 + tests/data/merge_sources/source2-1.yaml | 6 + tests/data/merge_sources/source2-2.yaml | 5 + tests/data/merge_sources/source3-1.yaml | 4 + tests/data/merge_sources/source3-2.yaml | 4 + tests/data/merge_sources/source4-1.yaml | 3 + tests/data/merge_sources/source4-2.yaml | 6 + tests/unittests/test_merging.py | 194 +++++++++----------------------- 13 files changed, 97 insertions(+), 140 deletions(-) create mode 100644 tests/data/merge_sources/expected1.yaml create mode 100644 tests/data/merge_sources/expected2.yaml create mode 100644 tests/data/merge_sources/expected3.yaml create mode 100644 tests/data/merge_sources/expected4.yaml create mode 100644 tests/data/merge_sources/source1-1.yaml create mode 100644 tests/data/merge_sources/source1-2.yaml create mode 100644 tests/data/merge_sources/source2-1.yaml create mode 100644 tests/data/merge_sources/source2-2.yaml create mode 100644 tests/data/merge_sources/source3-1.yaml create mode 100644 tests/data/merge_sources/source3-2.yaml create mode 100644 tests/data/merge_sources/source4-1.yaml create mode 100644 tests/data/merge_sources/source4-2.yaml diff --git a/tests/data/merge_sources/expected1.yaml b/tests/data/merge_sources/expected1.yaml new file mode 100644 index 00000000..640d282b --- /dev/null +++ b/tests/data/merge_sources/expected1.yaml @@ -0,0 +1 @@ +Blah: ['blah2', 'b'] diff --git a/tests/data/merge_sources/expected2.yaml b/tests/data/merge_sources/expected2.yaml new file mode 100644 index 00000000..6eccc2cf --- /dev/null +++ b/tests/data/merge_sources/expected2.yaml @@ -0,0 +1,3 @@ +Blah: 3 +Blah2: 2 +Blah3: [1] diff --git a/tests/data/merge_sources/expected3.yaml b/tests/data/merge_sources/expected3.yaml new file mode 100644 index 00000000..32d9ad48 --- /dev/null +++ b/tests/data/merge_sources/expected3.yaml @@ -0,0 +1 @@ +Blah: [blah2, 'blah1'] diff --git a/tests/data/merge_sources/expected4.yaml b/tests/data/merge_sources/expected4.yaml new file mode 100644 index 00000000..d88d8f73 --- /dev/null +++ b/tests/data/merge_sources/expected4.yaml @@ -0,0 +1,2 @@ +#cloud-config +Blah: {} diff --git a/tests/data/merge_sources/source1-1.yaml b/tests/data/merge_sources/source1-1.yaml new file mode 100644 index 00000000..38e4e5e0 --- /dev/null +++ b/tests/data/merge_sources/source1-1.yaml @@ -0,0 +1,3 @@ +#cloud-config +Blah: ['blah2'] + diff --git a/tests/data/merge_sources/source1-2.yaml b/tests/data/merge_sources/source1-2.yaml new file mode 100644 index 00000000..2cd0e0e5 --- /dev/null +++ b/tests/data/merge_sources/source1-2.yaml @@ -0,0 +1,5 @@ +#cloud-config + +Blah: ['b'] + +merge_how: 'dict(recurse_array,no_replace)+list(append)' diff --git a/tests/data/merge_sources/source2-1.yaml b/tests/data/merge_sources/source2-1.yaml new file mode 100644 index 00000000..c7a33aaa --- /dev/null +++ b/tests/data/merge_sources/source2-1.yaml @@ -0,0 +1,6 @@ +#cloud-config + + +Blah: 1 +Blah2: 2 +Blah3: 3 diff --git a/tests/data/merge_sources/source2-2.yaml b/tests/data/merge_sources/source2-2.yaml new file mode 100644 index 00000000..8f2fdc1a --- /dev/null +++ b/tests/data/merge_sources/source2-2.yaml @@ -0,0 +1,5 @@ +#cloud-config + +Blah: 3 +Blah2: 2 +Blah3: [1] diff --git a/tests/data/merge_sources/source3-1.yaml b/tests/data/merge_sources/source3-1.yaml new file mode 
100644 index 00000000..2303e906 --- /dev/null +++ b/tests/data/merge_sources/source3-1.yaml @@ -0,0 +1,4 @@ +#cloud-config +Blah: ['blah1'] + + diff --git a/tests/data/merge_sources/source3-2.yaml b/tests/data/merge_sources/source3-2.yaml new file mode 100644 index 00000000..dca2ad10 --- /dev/null +++ b/tests/data/merge_sources/source3-2.yaml @@ -0,0 +1,4 @@ +#cloud-config +Blah: ['blah2'] + +merge_how: 'dict(recurse_array,no_replace)+list(prepend)' diff --git a/tests/data/merge_sources/source4-1.yaml b/tests/data/merge_sources/source4-1.yaml new file mode 100644 index 00000000..e5b16872 --- /dev/null +++ b/tests/data/merge_sources/source4-1.yaml @@ -0,0 +1,3 @@ +#cloud-config +Blah: + b: 1 diff --git a/tests/data/merge_sources/source4-2.yaml b/tests/data/merge_sources/source4-2.yaml new file mode 100644 index 00000000..1844e0f8 --- /dev/null +++ b/tests/data/merge_sources/source4-2.yaml @@ -0,0 +1,6 @@ +#cloud-config +Blah: + b: null + + +merge_how: 'dict(allow_delete,no_replace)+list()' diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index ad137e85..470b18c7 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -1,142 +1,56 @@ from tests.unittests import helpers -from cloudinit import mergers - - -class TestSimpleRun(helpers.MockerTestCase): - def test_basic_merge(self): - source = { - 'Blah': ['blah2'], - 'Blah3': 'c', - } - merge_with = { - 'Blah2': ['blah3'], - 'Blah3': 'b', - 'Blah': ['123'], - } - # Basic merge should not do thing special - merge_how = "list()+dict()+str()" - merger_set = mergers.string_extract_mergers(merge_how) - self.assertEquals(3, len(merger_set)) - merger = mergers.construct(merger_set) - merged = merger.merge(source, merge_with) - self.assertEquals(merged['Blah'], ['blah2']) - self.assertEquals(merged['Blah2'], ['blah3']) - self.assertEquals(merged['Blah3'], 'c') - - def test_dict_overwrite(self): - source = { - 'Blah': ['blah2'], - } - merge_with = { - 'Blah': ['123'], - } - # Now lets try a dict overwrite - merge_how = "list()+dict(overwrite)+str()" - merger_set = mergers.string_extract_mergers(merge_how) - self.assertEquals(3, len(merger_set)) - merger = mergers.construct(merger_set) - merged = merger.merge(source, merge_with) - self.assertEquals(merged['Blah'], ['123']) - - def test_string_append(self): - source = { - 'Blah': 'blah2', - } - merge_with = { - 'Blah': '345', - } - merge_how = "list()+dict()+str(append)" - merger_set = mergers.string_extract_mergers(merge_how) - self.assertEquals(3, len(merger_set)) - merger = mergers.construct(merger_set) - merged = merger.merge(source, merge_with) - self.assertEquals(merged['Blah'], 'blah2345') - - def test_list_extend(self): - source = ['abc'] - merge_with = ['123'] - merge_how = "list(extend)+dict()+str()" - merger_set = mergers.string_extract_mergers(merge_how) - self.assertEquals(3, len(merger_set)) - merger = mergers.construct(merger_set) - merged = merger.merge(source, merge_with) - self.assertEquals(merged, ['abc', '123']) - - def test_deep_merge(self): - source = { - 'a': [1, 'b', 2], - 'b': 'blahblah', - 'c': { - 'e': [1, 2, 3], - 'f': 'bigblobof', - 'iamadict': { - 'ok': 'ok', - } - }, - 'run': [ - 'runme', - 'runme2', - ], - 'runmereally': [ - 'e', ['a'], 'd', - ], - } - merge_with = { - 'a': ['e', 'f', 'g'], - 'b': 'more', - 'c': { - 'a': 'b', - 'f': 'stuff', - }, - 'run': [ - 'morecmd', - 'moremoremore', - ], - 'runmereally': [ - 'blah', ['b'], 'e', - ], - } - merge_how = "list(extend)+dict()+str(append)" - merger_set = 
mergers.string_extract_mergers(merge_how) - self.assertEquals(3, len(merger_set)) - merger = mergers.construct(merger_set) - merged = merger.merge(source, merge_with) - self.assertEquals(merged['a'], [1, 'b', 2, 'e', 'f', 'g']) - self.assertEquals(merged['b'], 'blahblahmore') - self.assertEquals(merged['c']['f'], 'bigblobofstuff') - self.assertEquals(merged['run'], ['runme', 'runme2', 'morecmd', - 'moremoremore']) - self.assertEquals(merged['runmereally'], ['e', ['a'], 'd', 'blah', - ['b'], 'e']) - - def test_dict_overwrite_layered(self): - source = { - 'Blah3': { - 'f': '3', - 'g': { - 'a': 'b', - } - } - } - merge_with = { - 'Blah3': { - 'e': '2', - 'g': { - 'e': 'f', - } - } - } - merge_how = "list()+dict()+str()" - merger_set = mergers.string_extract_mergers(merge_how) - self.assertEquals(3, len(merger_set)) - merger = mergers.construct(merger_set) - merged = merger.merge(source, merge_with) - self.assertEquals(merged['Blah3'], { - 'e': '2', - 'f': '3', - 'g': { - 'a': 'b', - 'e': 'f', - } - }) +from cloudinit.handlers import cloud_config +from cloudinit.handlers import (CONTENT_START, CONTENT_END) + +from cloudinit import helpers as c_helpers +from cloudinit import util + +import collections +import glob +import os +import re + + +class TestSimpleRun(helpers.ResourceUsingTestCase): + def _load_merge_files(self, data_dir): + merge_root = self.resourceLocation(data_dir) + tests = [] + source_ids = collections.defaultdict(list) + expected_files = {} + for fn in glob.glob(os.path.join(merge_root, "source*.*yaml")): + base_fn = os.path.basename(fn) + file_id = re.match(r"source(\d+)\-(\d+)[.]yaml", base_fn) + if not file_id: + raise IOError("File %s does not have a numeric identifier" + % (fn)) + file_id = int(file_id.group(1)) + source_ids[file_id].append(fn) + expected_fn = os.path.join(merge_root, + "expected%s.yaml" % (file_id)) + if not os.path.isfile(expected_fn): + raise IOError("No expected file found at %s" % (expected_fn)) + expected_files[file_id] = expected_fn + for id in sorted(source_ids.keys()): + source_file_contents = [] + for fn in sorted(source_ids[id]): + source_file_contents.append(util.load_file(fn)) + expected = util.load_yaml(util.load_file(expected_files[id])) + tests.append((source_file_contents, expected)) + return tests + + def test_merge_samples(self): + tests = self._load_merge_files('merge_sources') + paths = c_helpers.Paths({}) + cc_handler = cloud_config.CloudConfigPartHandler(paths) + cc_handler.cloud_fn = None + for (payloads, expected_merge) in tests: + cc_handler.handle_part(None, CONTENT_START, None, + None, None, None) + for (i, p) in enumerate(payloads): + cc_handler.handle_part(None, None, "t-%s.yaml" % (i + 1), + p, None, {}) + merged_buf = cc_handler.cloud_buf + cc_handler.handle_part(None, CONTENT_END, None, + None, None, None) + self.assertEquals(expected_merge, merged_buf) -- cgit v1.2.3 From 1b7e36a966ce1a0964e93eefa98c9efcbc4c323d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 25 Apr 2013 11:58:38 -0400 Subject: re-work maas datasource and headers call backs Couple of things here: * do not re-try on user-data (404 means 'not here') * re-generate headers on retry requests LP: #1172742 --- cloudinit/sources/DataSourceMAAS.py | 16 +++++++++++----- cloudinit/url_helper.py | 18 +++++++++++++----- cloudinit/util.py | 4 +++- 3 files changed, 27 insertions(+), 11 deletions(-) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index de822924..dfe90bc6 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ 
b/cloudinit/sources/DataSourceMAAS.py @@ -218,14 +218,20 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, md = {} for name in file_order: url = files.get(name) - if header_cb: - headers = header_cb(url) + if not header_cb: + def _cb(url): + return {} + header_cb = _cb + + if name == 'user-data': + retries = 0 else: - headers = {} + retries = None + try: ssl_details = util.fetch_ssl_details(paths) - resp = util.read_file_or_url(url, - headers=headers, + resp = util.read_file_or_url(url, retries=retries, + headers_cb=header_cb, timeout=timeout, ssl_details=ssl_details) if resp.ok(): diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 6b4516e0..24ce6871 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -102,8 +102,8 @@ class UrlError(IOError): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, - headers=None, ssl_details=None, check_status=True, - allow_redirects=True): + headers=None, headers_cb=None, ssl_details=None, + check_status=True, allow_redirects=True): url = _cleanurl(url) req_args = { 'url': url, @@ -149,8 +149,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers = { 'User-Agent': 'Cloud-Init/%s' % (version.version_string()), } - req_args['headers'] = headers - LOG.debug("Attempting to open '%s' with %s configuration", url, req_args) + if not headers_cb: + def _cb(url): + return headers + headers_cb = _cb + if data: # Do this after the log (it might be large) req_args['data'] = data @@ -161,6 +164,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # doesn't handle sleeping between tries... for i in range(0, manual_tries): try: + req_args['headers'] = headers_cb(url) + LOG.debug("[%s/%s] open '%s' with %s configuration", i, + manual_tries, url, + {k: req_args[k] for k in req_args if k != 'data'}) + r = requests.request(**req_args) if check_status: r.raise_for_status() # pylint: disable=E1103 @@ -174,7 +182,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, except exceptions.RequestException as e: if (isinstance(e, (exceptions.HTTPError)) and hasattr(e, 'response') # This appeared in v 0.10.8 - and e.response): + and hasattr(e.response, 'status_code')): excps.append(UrlError(e, code=e.response.status_code, headers=e.response.headers)) else: diff --git a/cloudinit/util.py b/cloudinit/util.py index 50de55fe..053fa95d 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -671,7 +671,8 @@ def fetch_ssl_details(paths=None): def read_file_or_url(url, timeout=5, retries=10, - headers=None, data=None, sec_between=1, ssl_details=None): + headers=None, data=None, sec_between=1, ssl_details=None, + headers_cb=None): url = url.lstrip() if url.startswith("/"): url = "file://%s" % url @@ -685,6 +686,7 @@ def read_file_or_url(url, timeout=5, retries=10, timeout=timeout, retries=retries, headers=headers, + headers_cb=headers_cb, data=data, sec_between=sec_between, ssl_details=ssl_details) -- cgit v1.2.3 From a69c9c8a35b61f69f6e959448af8d0619989589f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 25 Apr 2013 12:13:08 -0400 Subject: fix test --- tests/unittests/test_datasource/test_maas.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 47f8caa4..2007a6df 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -116,9 +116,10 @@ class 
TestMAASDataSource(mocker.MockerTestCase): for key in valid_order: url = "%s/%s/%s" % (my_seed, my_ver, key) - mock_request(url, headers=my_headers, timeout=mocker.ANY, + mock_request(url, headers=None, timeout=mocker.ANY, data=mocker.ANY, sec_between=mocker.ANY, - ssl_details=mocker.ANY, retries=mocker.ANY) + ssl_details=mocker.ANY, retries=mocker.ANY, + headers_cb=my_headers_cb) resp = valid.get(key) self.mocker.result(util.StringResponse(resp)) self.mocker.replay() -- cgit v1.2.3 From 4a669649b17cf01b6f89f7902b6683d02ef0bee1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 3 May 2013 14:41:28 -0700 Subject: More merging adjustments. Looks like this should be in pretty good shape and has passed some of the basic backwards compat. merging tests that I added. --- cloudinit/mergers/__init__.py | 2 +- cloudinit/mergers/m_dict.py | 27 ++++--- cloudinit/mergers/m_list.py | 9 +-- cloudinit/mergers/m_str.py | 44 ++++++++++++ tests/data/merge_sources/expected2.yaml | 4 +- tests/data/merge_sources/expected5.yaml | 7 ++ tests/data/merge_sources/source5-1.yaml | 6 ++ tests/data/merge_sources/source5-2.yaml | 8 +++ tests/unittests/test_merging.py | 123 +++++++++++++++++++++++++++----- tests/unittests/test_userdata.py | 5 +- 10 files changed, 201 insertions(+), 34 deletions(-) create mode 100644 cloudinit/mergers/m_str.py create mode 100644 tests/data/merge_sources/expected5.yaml create mode 100644 tests/data/merge_sources/source5-1.yaml create mode 100644 tests/data/merge_sources/source5-2.yaml diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 221e93b5..0978b2c6 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -25,7 +25,7 @@ from cloudinit import type_utils NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$") LOG = logging.getLogger(__name__) -DEF_MERGE_TYPE = "list()+dict()" +DEF_MERGE_TYPE = "list()+dict()+str()" MERGER_PREFIX = 'm_' MERGER_ATTR = 'Merger' diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py index 2c1c845f..82caa004 100644 --- a/cloudinit/mergers/m_dict.py +++ b/cloudinit/mergers/m_dict.py @@ -16,21 +16,32 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +DEF_MERGE_TYPE = 'no_replace' +MERGE_TYPES = ('replace', DEF_MERGE_TYPE,) + + +def _has_any(what, *keys): + for k in keys: + if k in what: + return True + return False + class Merger(object): def __init__(self, merger, opts): self._merger = merger # Affects merging behavior... - self._method = 'replace' - for m in ['replace', 'no_replace']: + self._method = DEF_MERGE_TYPE + for m in MERGE_TYPES: if m in opts: self._method = m break - # Affect how recursive merging is done on other primitives + # Affect how recursive merging is done on other primitives. self._recurse_str = 'recurse_str' in opts - self._recurse_dict = True - self._recurse_array = 'recurse_array' in opts + self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list') self._allow_delete = 'allow_delete' in opts + # Backwards compat require this to be on. 
+ self._recurse_dict = True def __str__(self): s = ('DictMerger: (method=%s,recurse_str=%s,' @@ -42,14 +53,14 @@ class Merger(object): self._allow_delete) return s - def _do_dict_replace(self, value, merge_with, do_replace=True): + def _do_dict_replace(self, value, merge_with, do_replace): def merge_same_key(old_v, new_v): if do_replace: return new_v if isinstance(new_v, (list, tuple)) and self._recurse_array: return self._merger.merge(old_v, new_v) - if isinstance(new_v, (str, basestring)) and self._recurse_str: + if isinstance(new_v, (basestring)) and self._recurse_str: return self._merger.merge(old_v, new_v) if isinstance(new_v, (dict)) and self._recurse_dict: return self._merger.merge(old_v, new_v) @@ -70,7 +81,7 @@ class Merger(object): if not isinstance(merge_with, (dict)): return value if self._method == 'replace': - merged = self._do_dict_replace(dict(value), merge_with) + merged = self._do_dict_replace(dict(value), merge_with, True) elif self._method == 'no_replace': merged = self._do_dict_replace(dict(value), merge_with, False) else: diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py index c6a23d85..50f279e8 100644 --- a/cloudinit/mergers/m_list.py +++ b/cloudinit/mergers/m_list.py @@ -32,10 +32,11 @@ class Merger(object): self._recurse_array = 'recurse_array' in opts def __str__(self): - return 'ListMerger: (m=%s,rs=%s,rd=%s,ra=%s)' % (self._method, - self._recurse_str, - self._recurse_dict, - self._recurse_array) + return ('ListMerger: (method=%s,recurse_str=%s,' + 'recurse_dict=%s,recurse_array=%s)') % (self._method, + self._recurse_str, + self._recurse_dict, + self._recurse_array) def _on_tuple(self, value, merge_with): return tuple(self._on_list(list(value), merge_with)) diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py new file mode 100644 index 00000000..e22ce28a --- /dev/null +++ b/cloudinit/mergers/m_str.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +class Merger(object): + def __init__(self, _merger, opts): + self._append = 'append' in opts + + def __str__(self): + return 'StringMerger: (append=%s)' % (self._append) + + # On encountering a unicode object to merge value with + # we will for now just proxy into the string method to let it handle it. + def _on_unicode(self, value, merge_with): + return self._on_str(value, merge_with) + + # On encountering a string object to merge with we will + # perform the following action, if appending we will + # merge them together, otherwise we will just return value. 
+ def _on_str(self, value, merge_with): + if not isinstance(value, (basestring)): + return merge_with + if not self._append: + return merge_with + if isinstance(value, unicode): + return value + unicode(merge_with) + else: + return value + str(merge_with) diff --git a/tests/data/merge_sources/expected2.yaml b/tests/data/merge_sources/expected2.yaml index 6eccc2cf..f5312eb1 100644 --- a/tests/data/merge_sources/expected2.yaml +++ b/tests/data/merge_sources/expected2.yaml @@ -1,3 +1,3 @@ -Blah: 3 +Blah: 1 Blah2: 2 -Blah3: [1] +Blah3: 3 diff --git a/tests/data/merge_sources/expected5.yaml b/tests/data/merge_sources/expected5.yaml new file mode 100644 index 00000000..628f5878 --- /dev/null +++ b/tests/data/merge_sources/expected5.yaml @@ -0,0 +1,7 @@ +#cloud-config + +Blah: 3 +Blah2: 2 +Blah3: [1] + + diff --git a/tests/data/merge_sources/source5-1.yaml b/tests/data/merge_sources/source5-1.yaml new file mode 100644 index 00000000..c7a33aaa --- /dev/null +++ b/tests/data/merge_sources/source5-1.yaml @@ -0,0 +1,6 @@ +#cloud-config + + +Blah: 1 +Blah2: 2 +Blah3: 3 diff --git a/tests/data/merge_sources/source5-2.yaml b/tests/data/merge_sources/source5-2.yaml new file mode 100644 index 00000000..f61c96a2 --- /dev/null +++ b/tests/data/merge_sources/source5-2.yaml @@ -0,0 +1,8 @@ +#cloud-config + +Blah: 3 +Blah2: 2 +Blah3: [1] + + +merge_how: 'dict(replace)+list(append)' diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 470b18c7..f83522d7 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -11,14 +11,39 @@ import glob import os import re +SOURCE_PAT = "source*.*yaml" +EXPECTED_PAT = "expected%s.yaml" + + +def _old_mergedict(src, cand): + """ + Merge values from C{cand} into C{src}. + If C{src} has a key C{cand} will not override. + Nested dictionaries are merged recursively. 
+ """ + if isinstance(src, dict) and isinstance(cand, dict): + for (k, v) in cand.iteritems(): + if k not in src: + src[k] = v + else: + src[k] = _old_mergedict(src[k], v) + return src + + +def _old_mergemanydict(*args): + out = {} + for a in args: + out = _old_mergedict(out, a) + return out + class TestSimpleRun(helpers.ResourceUsingTestCase): - def _load_merge_files(self, data_dir): - merge_root = self.resourceLocation(data_dir) + def _load_merge_files(self): + merge_root = self.resourceLocation('merge_sources') tests = [] source_ids = collections.defaultdict(list) expected_files = {} - for fn in glob.glob(os.path.join(merge_root, "source*.*yaml")): + for fn in glob.glob(os.path.join(merge_root, SOURCE_PAT)): base_fn = os.path.basename(fn) file_id = re.match(r"source(\d+)\-(\d+)[.]yaml", base_fn) if not file_id: @@ -26,31 +51,97 @@ class TestSimpleRun(helpers.ResourceUsingTestCase): % (fn)) file_id = int(file_id.group(1)) source_ids[file_id].append(fn) - expected_fn = os.path.join(merge_root, - "expected%s.yaml" % (file_id)) + expected_fn = os.path.join(merge_root, EXPECTED_PAT % (file_id)) if not os.path.isfile(expected_fn): raise IOError("No expected file found at %s" % (expected_fn)) expected_files[file_id] = expected_fn - for id in sorted(source_ids.keys()): + for i in sorted(source_ids.keys()): source_file_contents = [] - for fn in sorted(source_ids[id]): - source_file_contents.append(util.load_file(fn)) - expected = util.load_yaml(util.load_file(expected_files[id])) - tests.append((source_file_contents, expected)) + for fn in sorted(source_ids[i]): + source_file_contents.append([fn, util.load_file(fn)]) + expected = util.load_yaml(util.load_file(expected_files[i])) + entry = [source_file_contents, [expected, expected_files[i]]] + tests.append(entry) return tests def test_merge_samples(self): - tests = self._load_merge_files('merge_sources') + tests = self._load_merge_files() paths = c_helpers.Paths({}) cc_handler = cloud_config.CloudConfigPartHandler(paths) cc_handler.cloud_fn = None - for (payloads, expected_merge) in tests: + for (payloads, (expected_merge, expected_fn)) in tests: cc_handler.handle_part(None, CONTENT_START, None, None, None, None) - for (i, p) in enumerate(payloads): - cc_handler.handle_part(None, None, "t-%s.yaml" % (i + 1), - p, None, {}) + merging_fns = [] + for (fn, contents) in payloads: + cc_handler.handle_part(None, None, "%s.yaml" % (fn), + contents, None, {}) + merging_fns.append(fn) merged_buf = cc_handler.cloud_buf cc_handler.handle_part(None, CONTENT_END, None, None, None, None) - self.assertEquals(expected_merge, merged_buf) + fail_msg = "Equality failure on checking %s with %s: %s != %s" + fail_msg = fail_msg % (expected_fn, + ",".join(merging_fns), merged_buf, + expected_merge) + self.assertEquals(expected_merge, merged_buf, msg=fail_msg) + + def test_compat_merges_dict(self): + a = { + '1': '2', + 'b': 'c', + } + b = { + 'b': 'e', + } + c = _old_mergedict(a, b) + d = util.mergemanydict([a, b]) + self.assertEquals(c, d) + + def test_compat_merges_list(self): + a = {'b': [1, 2, 3]} + b = {'b': [4, 5]} + c = {'b': [6, 7]} + e = _old_mergemanydict(a, b, c) + f = util.mergemanydict([a, b, c]) + self.assertEquals(e, f) + + def test_compat_merges_str(self): + a = {'b': "hi"} + b = {'b': "howdy"} + c = {'b': "hallo"} + e = _old_mergemanydict(a, b, c) + f = util.mergemanydict([a, b, c]) + self.assertEquals(e, f) + + def test_compat_merge_sub_dict(self): + a = { + '1': '2', + 'b': { + 'f': 'g', + } + } + b = { + 'b': { + 'e': 'c', + } + } + c = 
_old_mergedict(a, b) + d = util.mergemanydict([a, b]) + self.assertEquals(c, d) + + def test_compat_merge_sub_list(self): + a = { + '1': '2', + 'b': { + 'f': ['1'], + } + } + b = { + 'b': { + 'f': [], + } + } + c = _old_mergedict(a, b) + d = util.mergemanydict([a, b]) + self.assertEquals(c, d) diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py index fdfe2542..5fb9acd9 100644 --- a/tests/unittests/test_userdata.py +++ b/tests/unittests/test_userdata.py @@ -60,7 +60,6 @@ run: - c ''' message1 = MIMEBase("text", "cloud-config") - message1['Merge-Type'] = 'dict()+list(extend)+str(append)' message1.set_payload(blob) blob2 = ''' @@ -72,7 +71,8 @@ run: - morestuff ''' message2 = MIMEBase("text", "cloud-config") - message2['X-Merge-Type'] = 'dict()+list(extend)+str()' + message2['X-Merge-Type'] = ('dict(recurse_array,' + 'recurse_str)+list(append)+str(append)') message2.set_payload(blob2) blob3 = ''' @@ -84,7 +84,6 @@ e: p: 1 ''' message3 = MIMEBase("text", "cloud-config") - message3['Merge-Type'] = 'dict()+list()+str()' message3.set_payload(blob3) messages = [message1, message2, message3] -- cgit v1.2.3 From c972396ecccb4b67eafc038a482ffeaa1df2c93e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 3 May 2013 14:46:52 -0700 Subject: A few more tests + cleanings. --- cloudinit/mergers/m_list.py | 7 +++++-- tests/unittests/test_merging.py | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py index 50f279e8..34b32379 100644 --- a/cloudinit/mergers/m_list.py +++ b/cloudinit/mergers/m_list.py @@ -16,13 +16,16 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +DEF_MERGE_TYPE = 'replace' +MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE,) + class Merger(object): def __init__(self, merger, opts): self._merger = merger # Affects merging behavior... - self._method = 'replace' - for m in ['append', 'prepend', 'replace']: + self._method = DEF_MERGE_TYPE + for m in MERGE_TYPES: if m in opts: self._method = m break diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index f83522d7..cff8ac12 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -115,6 +115,30 @@ class TestSimpleRun(helpers.ResourceUsingTestCase): self.assertEquals(e, f) def test_compat_merge_sub_dict(self): + a = { + '1': '2', + 'b': { + 'f': 'g', + 'e': 'c', + 'h': 'd', + 'hh': { + '1': 2, + }, + } + } + b = { + 'b': { + 'e': 'c', + 'hh': { + '3': 4, + } + } + } + c = _old_mergedict(a, b) + d = util.mergemanydict([a, b]) + self.assertEquals(c, d) + + def test_compat_merge_sub_dict2(self): a = { '1': '2', 'b': { -- cgit v1.2.3 From 5118a33b22a376954bd048c3142f2d3f7f55d003 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 3 May 2013 14:49:16 -0700 Subject: Rename the merge cc sample function name. 
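
For context: the renamed test feeds the merge_sources samples through the cloud-config part handler, which underneath uses the merger API added in this series. A minimal sketch of that API (names as used in the tests above; the merge_how string here is illustrative only):

    from cloudinit import mergers

    # Parse a merge_how string into per-type merger declarations and
    # build one merger that dispatches on the type of value being merged.
    merger_set = mergers.string_extract_mergers(
        'dict(recurse_array)+list(append)+str()')
    merger = mergers.construct(merger_set)

    # dict(recurse_array) recurses into conflicting list values,
    # list(append) concatenates them, and keys new to the source
    # are carried over.
    merged = merger.merge({'run': ['a']}, {'run': ['b'], 'x': 1})
    # merged == {'run': ['a', 'b'], 'x': 1}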
--- tests/unittests/test_merging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index cff8ac12..dd8c2eee 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -64,7 +64,7 @@ class TestSimpleRun(helpers.ResourceUsingTestCase): tests.append(entry) return tests - def test_merge_samples(self): + def test_merge_cc_samples(self): tests = self._load_merge_files() paths = c_helpers.Paths({}) cc_handler = cloud_config.CloudConfigPartHandler(paths) -- cgit v1.2.3 From 9a1584b701cecbbba4a9371542114bcc806ec596 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 3 May 2013 15:05:45 -0700 Subject: A few pep8/pylint cleanups. --- cloudinit/mergers/m_dict.py | 7 ++----- cloudinit/mergers/m_list.py | 2 +- tests/unittests/test_merging.py | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py index 82caa004..a16141fa 100644 --- a/cloudinit/mergers/m_dict.py +++ b/cloudinit/mergers/m_dict.py @@ -46,11 +46,8 @@ class Merger(object): def __str__(self): s = ('DictMerger: (method=%s,recurse_str=%s,' 'recurse_dict=%s,recurse_array=%s,allow_delete=%s)') - s = s % (self._method, - self._recurse_str, - self._recurse_dict, - self._recurse_array, - self._allow_delete) + s = s % (self._method, self._recurse_str, + self._recurse_dict, self._recurse_array, self._allow_delete) return s def _do_dict_replace(self, value, merge_with, do_replace): diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py index 34b32379..8a0b5827 100644 --- a/cloudinit/mergers/m_list.py +++ b/cloudinit/mergers/m_list.py @@ -47,7 +47,7 @@ class Merger(object): def _on_list(self, value, merge_with): if (self._method == 'replace' and not isinstance(merge_with, (tuple, list))): - return merge_with + return merge_with # Ok we now know that what we are merging with is a list or tuple. merged_list = [] diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index dd8c2eee..ac2ccad4 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -12,7 +12,7 @@ import os import re SOURCE_PAT = "source*.*yaml" -EXPECTED_PAT = "expected%s.yaml" +EXPECTED_PAT = "expected%s.yaml" def _old_mergedict(src, cand): -- cgit v1.2.3 From e4677e5ef69ff523459d97405dcf90fe6818555e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 3 May 2013 15:11:32 -0700 Subject: Fix a feature that doesn't yet exist on python 2.6 --- cloudinit/url_helper.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 24ce6871..19a30409 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -165,9 +165,14 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, for i in range(0, manual_tries): try: req_args['headers'] = headers_cb(url) + filtered_req_args = {} + for (k, v) in req_args.items(): + if k == 'data': + continue + filtered_req_args[k] = v + LOG.debug("[%s/%s] open '%s' with %s configuration", i, - manual_tries, url, - {k: req_args[k] for k in req_args if k != 'data'}) + manual_tries, url, filtered_req_args) r = requests.request(**req_args) if check_status: -- cgit v1.2.3 From 563af0754dc53fe4a95a4dee8ed18282f7a38104 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 4 May 2013 12:57:15 -0700 Subject: Add a set of randomized (seeded) dict tests. 
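
The baseline being pinned down is the historical mergedict behavior: on a conflicting key the earlier source wins, and nested dicts merge recursively. A small worked example using the _old_mergemanydict helper defined in test_merging.py:

    a = {'x': 1, 'sub': {'p': 1}}
    b = {'x': 2, 'sub': {'p': 2, 'q': 3}, 'y': 4}
    # 'x' keeps the first value, 'sub' merges recursively, 'y' is added:
    # _old_mergemanydict(a, b) == {'x': 1, 'sub': {'p': 1, 'q': 3}, 'y': 4}

The test below builds nested structures from fixed seeds (so any failure is reproducible) and asserts that util.mergemanydict agrees with that old behavior on every one of them.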
--- tests/unittests/test_merging.py | 69 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index ac2ccad4..dddf8c6c 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -9,10 +9,13 @@ from cloudinit import util import collections import glob import os +import random import re +import string SOURCE_PAT = "source*.*yaml" EXPECTED_PAT = "expected%s.yaml" +TYPES = [long, int, dict, str, list, tuple, None] def _old_mergedict(src, cand): @@ -37,6 +40,60 @@ def _old_mergemanydict(*args): return out +def _random_str(rand): + base = '' + for _i in xrange(rand.randint(1, 2**8)): + base += rand.choice(string.letters + string.digits) + return base + + +class _NoMoreException(Exception): + pass + + +def _make_dict(current_depth, max_depth, rand): + if current_depth >= max_depth: + raise _NoMoreException() + if current_depth == 0: + t = dict + else: + t = rand.choice(TYPES) + base = None + if t in [None]: + return base + if t in [dict, list, tuple]: + if t in [dict]: + amount = rand.randint(0, 5) + keys = [_random_str(rand) for _i in xrange(0, amount)] + base = {} + for k in keys: + try: + base[k] = _make_dict(current_depth + 1, max_depth, rand) + except _NoMoreException: + pass + elif t in [list, tuple]: + base = [] + amount = rand.randint(0, 5) + for _i in xrange(0, amount): + try: + base.append(_make_dict(current_depth + 1, max_depth, rand)) + except _NoMoreException: + pass + if t in [tuple]: + base = tuple(base) + elif t in [long, int]: + base = rand.randint(0, 2**8) + elif t in [str]: + base = _random_str(rand) + return base + + +def make_dict(max_depth, seed=None): + max_depth = max(1, max_depth) + rand = random.Random(seed) + return _make_dict(0, max_depth, rand) + + class TestSimpleRun(helpers.ResourceUsingTestCase): def _load_merge_files(self): merge_root = self.resourceLocation('merge_sources') @@ -64,6 +121,18 @@ class TestSimpleRun(helpers.ResourceUsingTestCase): tests.append(entry) return tests + def test_seed_runs(self): + test_dicts = [] + for i in range(1, 50): + base_dicts = [] + for j in range(1, 50): + base_dicts.append(make_dict(5, i * j)) + test_dicts.append(base_dicts) + for test in test_dicts: + c = _old_mergemanydict(*test) + d = util.mergemanydict(test) + self.assertEquals(c, d) + def test_merge_cc_samples(self): tests = self._load_merge_files() paths = c_helpers.Paths({}) -- cgit v1.2.3 From 229df67191e7dff058151f1e1f6e007667d55d9c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 7 May 2013 21:38:06 -0700 Subject: A few more test files. 
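
Each pair follows the convention of the earlier samples: the sourceN-*.yaml parts merge in order, the later part may carry a merge_how (or merge_type) key selecting the mergers, and expectedN.yaml holds the result. The source6 pair added below, for example, reduces to roughly this (a sketch reusing the merger API from this series):

    merger = mergers.construct(mergers.string_extract_mergers(
        'list(append)+dict(recurse_array)+str()'))
    merged = merger.merge({'run_cmds': ['bash', 'top']},
                          {'run_cmds': ['ps', 'vi', 'emacs']})
    # merged['run_cmds'] == ['bash', 'top', 'ps', 'vi', 'emacs']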
--- cloudinit/mergers/m_list.py | 12 ++++++++--- tests/data/merge_sources/expected6.yaml | 9 ++++++++ tests/data/merge_sources/expected7.yaml | 38 +++++++++++++++++++++++++++++++++ tests/data/merge_sources/expected8.yaml | 7 ++++++ tests/data/merge_sources/expected9.yaml | 5 +++++ tests/data/merge_sources/source6-1.yaml | 5 +++++ tests/data/merge_sources/source6-2.yaml | 8 +++++++ tests/data/merge_sources/source7-1.yaml | 27 +++++++++++++++++++++++ tests/data/merge_sources/source7-2.yaml | 17 +++++++++++++++ tests/data/merge_sources/source8-1.yaml | 7 ++++++ tests/data/merge_sources/source8-2.yaml | 6 ++++++ tests/data/merge_sources/source9-1.yaml | 5 +++++ tests/data/merge_sources/source9-2.yaml | 6 ++++++ 13 files changed, 149 insertions(+), 3 deletions(-) create mode 100644 tests/data/merge_sources/expected6.yaml create mode 100644 tests/data/merge_sources/expected7.yaml create mode 100644 tests/data/merge_sources/expected8.yaml create mode 100644 tests/data/merge_sources/expected9.yaml create mode 100644 tests/data/merge_sources/source6-1.yaml create mode 100644 tests/data/merge_sources/source6-2.yaml create mode 100644 tests/data/merge_sources/source7-1.yaml create mode 100644 tests/data/merge_sources/source7-2.yaml create mode 100644 tests/data/merge_sources/source8-1.yaml create mode 100644 tests/data/merge_sources/source8-2.yaml create mode 100644 tests/data/merge_sources/source9-1.yaml create mode 100644 tests/data/merge_sources/source9-2.yaml diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py index 8a0b5827..1184ded7 100644 --- a/cloudinit/mergers/m_list.py +++ b/cloudinit/mergers/m_list.py @@ -19,6 +19,12 @@ DEF_MERGE_TYPE = 'replace' MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE,) +def _has_any(what, *keys): + for k in keys: + if k in what: + return True + return False + class Merger(object): def __init__(self, merger, opts): @@ -30,9 +36,9 @@ class Merger(object): self._method = m break # Affect how recursive merging is done on other primitives - self._recurse_str = 'recurse_str' in opts - self._recurse_dict = 'recurse_dict' in opts - self._recurse_array = 'recurse_array' in opts + self._recurse_str = _has_any(opts, 'recurse_str') + self._recurse_dict = _has_any(opts, 'recurse_dict') + self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list') def __str__(self): return ('ListMerger: (method=%s,recurse_str=%s,' diff --git a/tests/data/merge_sources/expected6.yaml b/tests/data/merge_sources/expected6.yaml new file mode 100644 index 00000000..7afe1d7c --- /dev/null +++ b/tests/data/merge_sources/expected6.yaml @@ -0,0 +1,9 @@ +#cloud-config + +run_cmds: + - bash + - top + - ps + - vi + - emacs + diff --git a/tests/data/merge_sources/expected7.yaml b/tests/data/merge_sources/expected7.yaml new file mode 100644 index 00000000..25284f04 --- /dev/null +++ b/tests/data/merge_sources/expected7.yaml @@ -0,0 +1,38 @@ +#cloud-config + +users: + - default + - name: foobar + gecos: Foo B. Bar + primary-group: foobar + groups: users + selinux-user: staff_u + expiredate: 2012-09-01 + ssh-import-id: foobar + lock-passwd: false + passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ + - name: barfoo + gecos: Bar B. Foo + sudo: ALL=(ALL) NOPASSWD:ALL + groups: users, admin + ssh-import-id: None + lock-passwd: true + ssh-authorized-keys: + - + - + - name: cloudy + gecos: Magic Cloud App Daemon User + inactive: true + system: true + - bob + - joe + - sue + - name: foobar_jr + gecos: Foo B. 
Bar Jr + primary-group: foobar + groups: users + selinux-user: staff_u + expiredate: 2012-09-01 + ssh-import-id: foobar + lock-passwd: false + passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ diff --git a/tests/data/merge_sources/expected8.yaml b/tests/data/merge_sources/expected8.yaml new file mode 100644 index 00000000..69ca562d --- /dev/null +++ b/tests/data/merge_sources/expected8.yaml @@ -0,0 +1,7 @@ +#cloud-config + +mounts: + - [ ephemeral22, /mnt, auto, "defaults,noexec" ] + - [ sdc, /opt/data ] + - [ xvdh, /opt/data, "auto", "defaults,nobootwait", "0", "0" ] + - [ dd, /dev/zero ] diff --git a/tests/data/merge_sources/expected9.yaml b/tests/data/merge_sources/expected9.yaml new file mode 100644 index 00000000..00f91ca0 --- /dev/null +++ b/tests/data/merge_sources/expected9.yaml @@ -0,0 +1,5 @@ +#cloud-config + +phone_home: + url: http://my.example.com/$INSTANCE_ID/$BLAH_BLAH + post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ] diff --git a/tests/data/merge_sources/source6-1.yaml b/tests/data/merge_sources/source6-1.yaml new file mode 100644 index 00000000..519f7309 --- /dev/null +++ b/tests/data/merge_sources/source6-1.yaml @@ -0,0 +1,5 @@ +#cloud-config + +run_cmds: + - bash + - top diff --git a/tests/data/merge_sources/source6-2.yaml b/tests/data/merge_sources/source6-2.yaml new file mode 100644 index 00000000..d8fac446 --- /dev/null +++ b/tests/data/merge_sources/source6-2.yaml @@ -0,0 +1,8 @@ +#cloud-config + +run_cmds: + - ps + - vi + - emacs + +merge_type: 'list(append)+dict(recurse_array)+str()' diff --git a/tests/data/merge_sources/source7-1.yaml b/tests/data/merge_sources/source7-1.yaml new file mode 100644 index 00000000..8fb9b32a --- /dev/null +++ b/tests/data/merge_sources/source7-1.yaml @@ -0,0 +1,27 @@ +#cloud-config + +users: + - default + - name: foobar + gecos: Foo B. Bar + primary-group: foobar + groups: users + selinux-user: staff_u + expiredate: 2012-09-01 + ssh-import-id: foobar + lock-passwd: false + passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ + - name: barfoo + gecos: Bar B. Foo + sudo: ALL=(ALL) NOPASSWD:ALL + groups: users, admin + ssh-import-id: None + lock-passwd: true + ssh-authorized-keys: + - + - + - name: cloudy + gecos: Magic Cloud App Daemon User + inactive: true + system: true + diff --git a/tests/data/merge_sources/source7-2.yaml b/tests/data/merge_sources/source7-2.yaml new file mode 100644 index 00000000..1e26201b --- /dev/null +++ b/tests/data/merge_sources/source7-2.yaml @@ -0,0 +1,17 @@ +#cloud-config + +users: + - bob + - joe + - sue + - name: foobar_jr + gecos: Foo B. 
Bar Jr + primary-group: foobar + groups: users + selinux-user: staff_u + expiredate: 2012-09-01 + ssh-import-id: foobar + lock-passwd: false + passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ + +merge_how: "dict(recurse_array)+list(append)" diff --git a/tests/data/merge_sources/source8-1.yaml b/tests/data/merge_sources/source8-1.yaml new file mode 100644 index 00000000..5ea51c2c --- /dev/null +++ b/tests/data/merge_sources/source8-1.yaml @@ -0,0 +1,7 @@ +#cloud-config + +mounts: + - [ ephemeral0, /mnt, auto, "defaults,noexec" ] + - [ sdc, /opt/data ] + - [ xvdh, /opt/data, "auto", "defaults,nobootwait", "0", "0" ] + - [ dd, /dev/zero ] diff --git a/tests/data/merge_sources/source8-2.yaml b/tests/data/merge_sources/source8-2.yaml new file mode 100644 index 00000000..7fa3262b --- /dev/null +++ b/tests/data/merge_sources/source8-2.yaml @@ -0,0 +1,6 @@ +#cloud-config + +mounts: + - [ ephemeral22, /mnt, auto, "defaults,noexec" ] + +merge_how: 'dict(recurse_array)+list(recurse_list,recurse_str)+str()' diff --git a/tests/data/merge_sources/source9-1.yaml b/tests/data/merge_sources/source9-1.yaml new file mode 100644 index 00000000..0b102ba6 --- /dev/null +++ b/tests/data/merge_sources/source9-1.yaml @@ -0,0 +1,5 @@ +#cloud-config + +phone_home: + url: http://my.example.com/$INSTANCE_ID/ + post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ] diff --git a/tests/data/merge_sources/source9-2.yaml b/tests/data/merge_sources/source9-2.yaml new file mode 100644 index 00000000..ac85afc6 --- /dev/null +++ b/tests/data/merge_sources/source9-2.yaml @@ -0,0 +1,6 @@ +#cloud-config + +phone_home: + url: $BLAH_BLAH + +merge_how: 'dict(recurse_str)+str(append)' -- cgit v1.2.3 From ff232886555964220769da6d8b73198b5d51ef16 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 7 May 2013 21:42:32 -0700 Subject: 1 more test that does some list appending. --- tests/data/merge_sources/expected10.yaml | 7 +++++++ tests/data/merge_sources/source10-1.yaml | 6 ++++++ tests/data/merge_sources/source10-2.yaml | 6 ++++++ 3 files changed, 19 insertions(+) create mode 100644 tests/data/merge_sources/expected10.yaml create mode 100644 tests/data/merge_sources/source10-1.yaml create mode 100644 tests/data/merge_sources/source10-2.yaml diff --git a/tests/data/merge_sources/expected10.yaml b/tests/data/merge_sources/expected10.yaml new file mode 100644 index 00000000..b865db16 --- /dev/null +++ b/tests/data/merge_sources/expected10.yaml @@ -0,0 +1,7 @@ +#cloud-config + +power_state: + delay: 30 + mode: poweroff + message: [Bye, Bye, Pew, Pew] + diff --git a/tests/data/merge_sources/source10-1.yaml b/tests/data/merge_sources/source10-1.yaml new file mode 100644 index 00000000..6ae72a13 --- /dev/null +++ b/tests/data/merge_sources/source10-1.yaml @@ -0,0 +1,6 @@ +#cloud-config + +power_state: + delay: 30 + mode: poweroff + message: [Bye, Bye] diff --git a/tests/data/merge_sources/source10-2.yaml b/tests/data/merge_sources/source10-2.yaml new file mode 100644 index 00000000..a38cf1c5 --- /dev/null +++ b/tests/data/merge_sources/source10-2.yaml @@ -0,0 +1,6 @@ +#cloud-config + +power_state: + message: [Pew, Pew] + +merge_how: 'dict(recurse_list)+list(append)' -- cgit v1.2.3 From 2b351c5435939d16ba06ec0c45847d47f4b21d51 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 9 May 2013 22:34:31 -0700 Subject: Fix the cloud config merging so that it is backwards compat. 
The new change for merging works well in the mergedict case but the default merging type for cloud config needs to reflect how yaml was loaded in bulk, which is the same as the replacing keys merging type that is now provided. --- cloudinit/handlers/cloud_config.py | 15 +++++++++++++-- tests/data/merge_sources/expected11.yaml | 5 +++++ tests/data/merge_sources/expected12.yaml | 5 +++++ tests/data/merge_sources/expected2.yaml | 4 ++-- tests/data/merge_sources/source11-1.yaml | 5 +++++ tests/data/merge_sources/source11-2.yaml | 3 +++ tests/data/merge_sources/source11-3.yaml | 3 +++ tests/data/merge_sources/source12-1.yaml | 8 ++++++++ tests/data/merge_sources/source12-2.yaml | 5 +++++ tests/unittests/test_merging.py | 15 +++++++++++++++ tests/unittests/test_userdata.py | 2 +- 11 files changed, 65 insertions(+), 5 deletions(-) create mode 100644 tests/data/merge_sources/expected11.yaml create mode 100644 tests/data/merge_sources/expected12.yaml create mode 100644 tests/data/merge_sources/source11-1.yaml create mode 100644 tests/data/merge_sources/source11-2.yaml create mode 100644 tests/data/merge_sources/source11-3.yaml create mode 100644 tests/data/merge_sources/source12-1.yaml create mode 100644 tests/data/merge_sources/source12-2.yaml diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 2ae9b226..529109ce 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -30,7 +30,13 @@ from cloudinit.settings import (PER_ALWAYS) LOG = logging.getLogger(__name__) MERGE_HEADER = 'Merge-Type' -DEF_MERGERS = mergers.default_mergers() + +# Due to the way the loading of yaml configuration was done previously, +# where previously each cloud config part was appended to a larger yaml +# file and then finally that file was loaded as one big yaml file we need +# to mimic that behavior by altering the default strategy to be replacing +# keys of later mergers. +DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()') class CloudConfigPartHandler(handlers.Handler): @@ -53,6 +59,8 @@ class CloudConfigPartHandler(handlers.Handler): if self.file_names: file_lines.append("# from %s files" % (len(self.file_names))) for fn in self.file_names: + if not fn: + fn = '?' 
file_lines.append("# %s" % (fn)) file_lines.append("") if self.cloud_buf is not None: @@ -111,7 +119,10 @@ class CloudConfigPartHandler(handlers.Handler): return try: self._merge_part(payload, headers) - self.file_names.append(filename) + # Ensure filename is ok to store + for i in ("\n", "\r", "\t"): + filename = filename.replace(i, " ") + self.file_names.append(filename.strip()) except: util.logexc(LOG, "Failed at merging in cloud config part from %s", filename) diff --git a/tests/data/merge_sources/expected11.yaml b/tests/data/merge_sources/expected11.yaml new file mode 100644 index 00000000..c0530dc3 --- /dev/null +++ b/tests/data/merge_sources/expected11.yaml @@ -0,0 +1,5 @@ +#cloud-config + +a: 22 +b: 4 +c: 3 diff --git a/tests/data/merge_sources/expected12.yaml b/tests/data/merge_sources/expected12.yaml new file mode 100644 index 00000000..0421d2c8 --- /dev/null +++ b/tests/data/merge_sources/expected12.yaml @@ -0,0 +1,5 @@ +#cloud-config + +a: + e: + y: 2 diff --git a/tests/data/merge_sources/expected2.yaml b/tests/data/merge_sources/expected2.yaml index f5312eb1..6eccc2cf 100644 --- a/tests/data/merge_sources/expected2.yaml +++ b/tests/data/merge_sources/expected2.yaml @@ -1,3 +1,3 @@ -Blah: 1 +Blah: 3 Blah2: 2 -Blah3: 3 +Blah3: [1] diff --git a/tests/data/merge_sources/source11-1.yaml b/tests/data/merge_sources/source11-1.yaml new file mode 100644 index 00000000..ee29d681 --- /dev/null +++ b/tests/data/merge_sources/source11-1.yaml @@ -0,0 +1,5 @@ +#cloud-config + +a: 1 +b: 2 +c: 3 diff --git a/tests/data/merge_sources/source11-2.yaml b/tests/data/merge_sources/source11-2.yaml new file mode 100644 index 00000000..a9914c34 --- /dev/null +++ b/tests/data/merge_sources/source11-2.yaml @@ -0,0 +1,3 @@ +#cloud-config + +b: 4 diff --git a/tests/data/merge_sources/source11-3.yaml b/tests/data/merge_sources/source11-3.yaml new file mode 100644 index 00000000..8f2b8944 --- /dev/null +++ b/tests/data/merge_sources/source11-3.yaml @@ -0,0 +1,3 @@ +#cloud-config + +a: 22 diff --git a/tests/data/merge_sources/source12-1.yaml b/tests/data/merge_sources/source12-1.yaml new file mode 100644 index 00000000..09e7c899 --- /dev/null +++ b/tests/data/merge_sources/source12-1.yaml @@ -0,0 +1,8 @@ +#cloud-config + +a: + c: 1 + d: 2 + e: + z: a + y: b diff --git a/tests/data/merge_sources/source12-2.yaml b/tests/data/merge_sources/source12-2.yaml new file mode 100644 index 00000000..0421d2c8 --- /dev/null +++ b/tests/data/merge_sources/source12-2.yaml @@ -0,0 +1,5 @@ +#cloud-config + +a: + e: + y: 2 diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index dddf8c6c..ba1c67d7 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -167,6 +167,21 @@ class TestSimpleRun(helpers.ResourceUsingTestCase): d = util.mergemanydict([a, b]) self.assertEquals(c, d) + def test_compat_merges_dict(self): + a = { + 'Blah': 1, + 'Blah2': 2, + 'Blah3': 3, + } + b = { + 'Blah': 1, + 'Blah2': 2, + 'Blah3': [1], + } + c = _old_mergedict(a, b) + d = util.mergemanydict([a, b]) + self.assertEquals(c, d) + def test_compat_merges_list(self): a = {'b': [1, 2, 3]} b = {'b': [4, 5]} diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py index 5fb9acd9..0ebb0484 100644 --- a/tests/unittests/test_userdata.py +++ b/tests/unittests/test_userdata.py @@ -108,7 +108,7 @@ p: 1 contents = util.load_yaml(contents) self.assertEquals(contents['run'], ['b', 'c', 'stuff', 'morestuff']) self.assertEquals(contents['a'], 'be') - self.assertEquals(contents['e'], 'fg') 
+ self.assertEquals(contents['e'], [1, 2, 3]) self.assertEquals(contents['p'], 1) def test_unhandled_type_warning(self): -- cgit v1.2.3 From b22a82787378e442cb477d2368ccd7653fd95594 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 9 May 2013 22:47:14 -0700 Subject: Adjust comment on why we are merging cloud config the way we are. --- cloudinit/handlers/cloud_config.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 529109ce..c97ca3e8 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -35,7 +35,19 @@ MERGE_HEADER = 'Merge-Type' # where previously each cloud config part was appended to a larger yaml # file and then finally that file was loaded as one big yaml file we need # to mimic that behavior by altering the default strategy to be replacing -# keys of later mergers. +# keys of prior merges. +# +# +# For example +# #file 1 +# a: 3 +# #file 2 +# a: 22 +# #combined file (comments not included) +# a: 3 +# a: 22 +# +# This gets loaded into yaml with final result {'a': 22} DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()') -- cgit v1.2.3 From 670b46d151477d32056f3fa4eb968c7960f3b472 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 9 May 2013 23:04:03 -0700 Subject: Allow lists to have no_replace option. --- cloudinit/mergers/m_list.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py index 1184ded7..76591bea 100644 --- a/cloudinit/mergers/m_list.py +++ b/cloudinit/mergers/m_list.py @@ -17,7 +17,7 @@ # along with this program. If not, see . DEF_MERGE_TYPE = 'replace' -MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE,) +MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace') def _has_any(what, *keys): for k in keys: @@ -67,14 +67,16 @@ class Merger(object): return merged_list def merge_same_index(old_v, new_v): + if self._method == 'no_replace': + # Leave it be... + return old_v if isinstance(new_v, (list, tuple)) and self._recurse_array: return self._merger.merge(old_v, new_v) if isinstance(new_v, (str, basestring)) and self._recurse_str: return self._merger.merge(old_v, new_v) if isinstance(new_v, (dict)) and self._recurse_dict: return self._merger.merge(old_v, new_v) - # Otherwise leave it be... 
-        return old_v
+        return new_v
 
         # Ok now we are replacing same indexes
         merged_list.extend(value)
-- cgit v1.2.3


From 4d83822985bc45dacd611859d76aa5cc3e35e3bc Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 10 May 2013 14:15:44 -0700
Subject: fix pep8

---
 tests/unittests/test_merging.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index ba1c67d7..4c28f955 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -42,7 +42,7 @@ def _old_mergemanydict(*args):
 
 def _random_str(rand):
     base = ''
-    for _i in xrange(rand.randint(1, 2**8)):
+    for _i in xrange(rand.randint(1, 2 ** 8)):
         base += rand.choice(string.letters + string.digits)
     return base
 
@@ -82,7 +82,7 @@ def _make_dict(current_depth, max_depth, rand):
         if t in [tuple]:
             base = tuple(base)
     elif t in [long, int]:
-        base = rand.randint(0, 2**8)
+        base = rand.randint(0, 2 ** 8)
     elif t in [str]:
         base = _random_str(rand)
     return base
-- cgit v1.2.3


From 9f866ff5540558bab56f10e38481e4ad2efa079b Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 10 May 2013 14:25:13 -0700
Subject: fix pylint

---
 tests/unittests/test_merging.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index 4c28f955..486b9158 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -11,7 +11,7 @@ import glob
 import os
 import random
 import re
-import string
+import string  # pylint: disable=W0402
 
 SOURCE_PAT = "source*.*yaml"
 EXPECTED_PAT = "expected%s.yaml"
@@ -167,7 +167,7 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
         d = util.mergemanydict([a, b])
         self.assertEquals(c, d)
 
-    def test_compat_merges_dict(self):
+    def test_compat_merges_dict2(self):
         a = {
             'Blah': 1,
             'Blah2': 2,
-- cgit v1.2.3


From 1b58359c03943ff9356482419adda12a36bf931d Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Mon, 13 May 2013 15:12:30 -0700
Subject: Fix how the 'dist' is incorrectly returned when patching the os
 functions.

---
 tests/unittests/test_handler/test_handler_set_hostname.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
index a1aba62f..b2f01cdb 100644
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ b/tests/unittests/test_handler/test_handler_set_hostname.py
@@ -35,7 +35,6 @@ class TestHostname(t_help.FilesystemMockingTestCase):
         ds = None
         cc = cloud.Cloud(ds, paths, {}, distro, None)
         self.patchUtils(self.tmp)
-        self.patchOS(self.tmp)
         cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, [])
         contents = util.load_file("/etc/sysconfig/network")
-- cgit v1.2.3


From 59100ebb25781e2dfc6d9ccb21cee5a07bc03443 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 15 May 2013 15:31:19 -0400
Subject: mention new merge format in ChangeLog

---
 ChangeLog | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 06c9de25..5c25f0cc 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -56,6 +56,8 @@
    package_upgrade. (LP: #1164147)
  - improvements for systemd with Fedora 18
  - workaround 2.6 kernel issue that stopped blkid from showing /dev/sr0
+ - add new, backwards compatible merging syntax so merging of cloud-config
+   can be more useful.
0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 -- cgit v1.2.3 From 812b11146e35851aca8bb76482658bd85853ee0e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 16 May 2013 12:44:28 -0400 Subject: remove executable on logo.svg --- doc/rtd/static/logo.svg | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 doc/rtd/static/logo.svg diff --git a/doc/rtd/static/logo.svg b/doc/rtd/static/logo.svg old mode 100755 new mode 100644 -- cgit v1.2.3 From 9a72f2e9d7f4e5fcfbd40fc7f0ecc86680f995df Mon Sep 17 00:00:00 2001 From: Chris Wing Date: Mon, 20 May 2013 21:18:13 +0000 Subject: Fix Chef client 'omnibus' install; util.write_file() expects a string not UrlResponse --- cloudinit/config/cc_chef.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 607f789e..727769cd 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -110,7 +110,7 @@ def handle(name, cfg, cloud, log, _args): with util.tempdir() as tmpd: # use tmpd over tmpfile to avoid 'Text file busy' on execute tmpf = "%s/chef-omnibus-install" % tmpd - util.write_file(tmpf, content, mode=0700) + util.write_file(tmpf, str(content), mode=0700) util.subp([tmpf], capture=False) else: log.warn("Unknown chef install type %s", install_type) -- cgit v1.2.3 From 4231e5df67160362d40c5f2976e8965a13de8ef0 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 20 May 2013 20:43:09 -0400 Subject: open 0.7.3 --- ChangeLog | 1 + 1 file changed, 1 insertion(+) diff --git a/ChangeLog b/ChangeLog index 5c25f0cc..d03f8878 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,4 @@ +0.7.3: 0.7.2: - add a debian watch file - add 'sudo' entry to ubuntu's default user (LP: #1080717) -- cgit v1.2.3 From 48a3b4ca0ba55699825d6eabe75d843286c23545 Mon Sep 17 00:00:00 2001 From: Greg Padgett Date: Tue, 4 Jun 2013 20:42:55 -0400 Subject: support optical drives with dev node /dev/sr1 Extend a prior fix which helped discovery of media on systems using 2.6 kernels. /dev/sr0 covers only some of the use cases, /dev/sr1 is also common. 
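
Both device nodes are primed the same way; the change below just adds a second explicit call next to the existing one. Were more nodes ever needed, the same could be written as a loop (a sketch only, not what this patch does):

    # Query optical drives so blkid on 2.6 kernels has them in its cache.
    for dev in ('/dev/sr0', '/dev/sr1'):
        util.find_devs_with(path=dev)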
--- cloudinit/sources/DataSourceConfigDrive.py | 1 + cloudinit/sources/DataSourceNoCloud.py | 1 + 2 files changed, 2 insertions(+) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index d3443c2b..835f2a9a 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -260,6 +260,7 @@ def find_candidate_devs(): # Query optical drive to get it in blkid cache for 2.6 kernels util.find_devs_with(path="/dev/sr0") + util.find_devs_with(path="/dev/sr1") by_fstype = (util.find_devs_with("TYPE=vfat") + util.find_devs_with("TYPE=iso9660")) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 01c99028..084abca7 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -89,6 +89,7 @@ class DataSourceNoCloud(sources.DataSource): if label is not None: # Query optical drive to get it in blkid cache for 2.6 kernels util.find_devs_with(path="/dev/sr0") + util.find_devs_with(path="/dev/sr1") fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) -- cgit v1.2.3 From 233761a7fe4a995fbe0c8f65f512172a2b64090c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 7 Jun 2013 13:30:03 -0400 Subject: DataSourceOVF: small fix for iso9660 transport This is not really a problem, because nothing would call transport_iso9660 with 'require_iso' as False, but if it did, then we would have still required iso9660 filesystem on the mount. --- ChangeLog | 1 + cloudinit/sources/DataSourceOVF.py | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index adc8b954..6495a19b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,6 @@ 0.7.3: - fix omnibus chef installer (LP: #1182265) [Chris Wing] + - small fix for OVF datasource for iso transport on non-iso9660 filesystem 0.7.2: - add a debian watch file - add 'sudo' entry to ubuntu's default user (LP: #1080717) diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 0530c4b7..77b43e17 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -194,6 +194,11 @@ def transport_iso9660(require_iso=True): if contents is not False: return (contents, dev, fname) + if require_iso: + mtype = "iso9660" + else: + mtype = None + devs = os.listdir("/dev/") devs.sort() for dev in devs: @@ -211,7 +216,7 @@ def transport_iso9660(require_iso=True): try: (fname, contents) = util.mount_cb(fullp, - get_ovf_env, mtype="iso9660") + get_ovf_env, mtype=mtype) except util.MountFailedError: LOG.debug("%s not mountable as iso9660" % fullp) continue -- cgit v1.2.3 From 944623f4ad3e4c7319758c64053d06a3b05555a2 Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Wed, 19 Jun 2013 08:44:00 +0200 Subject: fix and cleanup usage of util.logexc --- cloudinit/config/cc_bootcmd.py | 5 ++--- cloudinit/config/cc_growpart.py | 8 +++++--- cloudinit/config/cc_phone_home.py | 14 +++++++------- cloudinit/config/cc_rightscale_userdata.py | 10 +++++----- cloudinit/config/cc_set_hostname.py | 6 +++--- cloudinit/config/cc_set_passwords.py | 6 +++--- cloudinit/config/cc_ssh.py | 10 +++++----- cloudinit/config/cc_ssh_import_id.py | 6 +++--- cloudinit/config/cc_update_hostname.py | 6 +++--- cloudinit/distros/__init__.py | 26 ++++++++++++------------- cloudinit/distros/rhel.py | 7 +++---- cloudinit/handlers/__init__.py | 13 ++++++------- cloudinit/handlers/boot_hook.py | 4 ++-- cloudinit/helpers.py | 18 
++++++++--------- cloudinit/sources/DataSourceAltCloud.py | 31 +++++++++++++++--------------- cloudinit/sources/DataSourceCloudStack.py | 6 ++++-- cloudinit/sources/DataSourceNoCloud.py | 6 +++--- cloudinit/ssh_util.py | 8 +++----- cloudinit/stages.py | 7 +++---- cloudinit/util.py | 5 ++--- 20 files changed, 100 insertions(+), 102 deletions(-) diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 896cb4d0..3ac22967 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -50,6 +50,5 @@ def handle(name, cfg, cloud, log, _args): cmd = ['/bin/sh', tmpf.name] util.subp(cmd, env=env, capture=False) except: - util.logexc(log, - ("Failed to run bootcmd module %s"), name) + util.logexc(log, "Failed to run bootcmd module %s", name) raise diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index b6e1fd37..4f8c8f80 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -1,8 +1,10 @@ # vi: ts=4 expandtab # # Copyright (C) 2011 Canonical Ltd. +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser +# Author: Juerg Haefliger # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -121,15 +123,15 @@ class ResizeGrowPart(object): util.subp(["growpart", '--dry-run', diskdev, partnum]) except util.ProcessExecutionError as e: if e.exit_code != 1: - util.logexc(LOG, ("Failed growpart --dry-run for (%s, %s)" % - (diskdev, partnum))) + util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", + diskdev, partnum) raise ResizeFailedException(e) return (before, before) try: util.subp(["growpart", diskdev, partnum]) except util.ProcessExecutionError as e: - util.logexc(LOG, "Failed: growpart %s %s" % (diskdev, partnum)) + util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum) raise ResizeFailedException(e) return (before, get_size(partdev)) diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index c873c8a8..2e058ccd 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. 
# # Author: Scott Moser # Author: Juerg Haefliger @@ -65,8 +65,8 @@ def handle(name, cfg, cloud, log, args): tries = int(tries) except: tries = 10 - util.logexc(log, ("Configuration entry 'tries'" - " is not an integer, using %s instead"), tries) + util.logexc(log, "Configuration entry 'tries' is not an integer, " + "using %s instead", tries) if post_list == "all": post_list = POST_LIST_ALL @@ -85,8 +85,8 @@ def handle(name, cfg, cloud, log, args): try: all_keys[n] = util.load_file(path) except: - util.logexc(log, ("%s: failed to open, can not" - " phone home that data!"), path) + util.logexc(log, "%s: failed to open, can not phone home that " + "data!", path) submit_keys = {} for k in post_list: @@ -115,5 +115,5 @@ def handle(name, cfg, cloud, log, args): retries=tries, sec_between=3, ssl_details=util.fetch_ssl_details(cloud.paths)) except: - util.logexc(log, ("Failed to post phone home data to" - " %s in %s tries"), url, tries) + util.logexc(log, "Failed to post phone home data to %s in %s tries", + url, tries) diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py index 4bf18516..c771728d 100644 --- a/cloudinit/config/cc_rightscale_userdata.py +++ b/cloudinit/config/cc_rightscale_userdata.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -64,8 +64,8 @@ def handle(name, _cfg, cloud, log, _args): " raw userdata"), name, MY_HOOKNAME) return except: - util.logexc(log, ("Failed to parse query string %s" - " into a dictionary"), ud) + util.logexc(log, "Failed to parse query string %s into a dictionary", + ud) raise wrote_fns = [] @@ -86,8 +86,8 @@ def handle(name, _cfg, cloud, log, _args): wrote_fns.append(fname) except Exception as e: captured_excps.append(e) - util.logexc(log, "%s failed to read %s and write %s", - MY_NAME, url, fname) + util.logexc(log, "%s failed to read %s and write %s", MY_NAME, url, + fname) if wrote_fns: log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns)) diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index 2b32fc94..5d7f4331 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -32,6 +32,6 @@ def handle(name, cfg, cloud, log, _args): log.debug("Setting the hostname to %s (%s)", fqdn, hostname) cloud.distro.set_hostname(hostname, fqdn) except Exception: - util.logexc(log, "Failed to set the hostname to %s (%s)", - fqdn, hostname) + util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn, + hostname) raise diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index c6bf62fd..e93c8c6f 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. 
# # Author: Scott Moser # Author: Juerg Haefliger @@ -81,8 +81,8 @@ def handle(_name, cfg, cloud, log, args): util.subp(['chpasswd'], ch_in) except Exception as e: errors.append(e) - util.logexc(log, - "Failed to set passwords with chpasswd for %s", users) + util.logexc(log, "Failed to set passwords with chpasswd for %s", + users) if len(randlist): blurb = ("Set the following 'random' passwords\n", diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 7ef20d9f..64a5e3cb 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -85,8 +85,8 @@ def handle(_name, cfg, cloud, log, _args): util.subp(cmd, capture=False) log.debug("Generated a key for %s from %s", pair[0], pair[1]) except: - util.logexc(log, ("Failed generated a key" - " for %s from %s"), pair[0], pair[1]) + util.logexc(log, "Failed generated a key for %s from %s", + pair[0], pair[1]) else: # if not, generate them genkeys = util.get_cfg_option_list(cfg, @@ -102,8 +102,8 @@ def handle(_name, cfg, cloud, log, _args): with util.SeLinuxGuard("/etc/ssh", recursive=True): util.subp(cmd, capture=False) except: - util.logexc(log, ("Failed generating key type" - " %s to file %s"), keytype, keyfile) + util.logexc(log, "Failed generating key type %s to " + "file %s", keytype, keyfile) try: (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index 83af36e9..50d96e15 100644 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -71,8 +71,8 @@ def handle(_name, cfg, cloud, log, args): try: import_ssh_ids(import_ids, user, log) except Exception as exc: - util.logexc(log, "ssh-import-id failed for: %s %s" % - (user, import_ids), exc) + util.logexc(log, "ssh-import-id failed for: %s %s", user, + import_ids) elist.append(exc) if len(elist): diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index 52225cd8..e396ba13 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger @@ -38,6 +38,6 @@ def handle(name, cfg, cloud, log, _args): log.debug("Updating hostname to %s (%s)", fqdn, hostname) cloud.distro.update_hostname(hostname, fqdn, prev_fn) except Exception: - util.logexc(log, "Failed to update the hostname to %s (%s)", - fqdn, hostname) + util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn, + hostname) raise diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 50d52594..e99cb16f 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. 
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Scott Moser @@ -142,8 +142,8 @@ class Distro(object): try: util.subp(['hostname', hostname]) except util.ProcessExecutionError: - util.logexc(LOG, ("Failed to non-persistently adjust" - " the system hostname to %s"), hostname) + util.logexc(LOG, "Failed to non-persistently adjust the system " + "hostname to %s", hostname) @abc.abstractmethod def _select_hostname(self, hostname, fqdn): @@ -200,8 +200,8 @@ class Distro(object): try: self._write_hostname(hostname, fn) except IOError: - util.logexc(LOG, "Failed to write hostname %s to %s", - hostname, fn) + util.logexc(LOG, "Failed to write hostname %s to %s", hostname, + fn) if (sys_hostname and prev_hostname and sys_hostname != prev_hostname): @@ -347,7 +347,7 @@ class Distro(object): try: util.subp(adduser_cmd, logstring=x_adduser_cmd) except Exception as e: - util.logexc(LOG, "Failed to create user %s due to error.", e) + util.logexc(LOG, "Failed to create user %s", name) raise e # Set password if plain-text password provided @@ -360,8 +360,8 @@ class Distro(object): try: util.subp(['passwd', '--lock', name]) except Exception as e: - util.logexc(LOG, ("Failed to disable password logins for" - "user %s" % name), e) + util.logexc(LOG, "Failed to disable password logins for " + "user %s", name) raise e # Configure sudo access @@ -385,7 +385,7 @@ class Distro(object): try: util.subp(cmd, pass_string, logstring="chpasswd for %s" % user) except Exception as e: - util.logexc(LOG, "Failed to set password for %s" % user) + util.logexc(LOG, "Failed to set password for %s", user) raise e return True @@ -427,7 +427,7 @@ class Distro(object): util.append_file(sudo_base, sudoers_contents) LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base)) except IOError as e: - util.logexc(LOG, "Failed to write %s" % sudo_base, e) + util.logexc(LOG, "Failed to write %s", sudo_base) raise e util.ensure_dir(path, 0750) @@ -478,15 +478,15 @@ class Distro(object): try: util.subp(group_add_cmd) LOG.info("Created new group %s" % name) - except Exception as e: - util.logexc("Failed to create group %s" % name, e) + except Exception: + util.logexc("Failed to create group %s", name) # Add members to the group, if so defined if len(members) > 0: for member in members: if not util.is_user(member): LOG.warn("Unable to add group member '%s' to group '%s'" - "; user does not exist." % (member, name)) + "; user does not exist.", member, name) continue util.subp(['usermod', '-a', '-G', name, member]) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 174da3ab..0727ecd1 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. 
# # Author: Scott Moser @@ -72,9 +72,8 @@ class Distro(distros.Distro): r_conf = ResolvConf(util.load_file(self.resolve_conf_fn)) r_conf.parse() except IOError: - util.logexc(LOG, - "Failed at parsing %s reverting to an empty instance", - self.resolve_conf_fn) + util.logexc(LOG, "Failed at parsing %s reverting to an empty " + "instance", self.resolve_conf_fn) r_conf = ResolvConf('') r_conf.parse() if dns_servers: diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 924463ce..497d68c5 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Scott Moser @@ -117,10 +117,9 @@ def run_part(mod, data, filename, payload, frequency, headers): else: raise ValueError("Unknown module version %s" % (mod_ver)) except: - util.logexc(LOG, ("Failed calling handler %s (%s, %s, %s)" - " with frequency %s"), - mod, content_type, filename, - mod_ver, frequency) + util.logexc(LOG, "Failed calling handler %s (%s, %s, %s) with " + "frequency %s", mod, content_type, filename, mod_ver, + frequency) def call_begin(mod, data, frequency): @@ -158,8 +157,8 @@ def walker_handle_handler(pdata, _ctype, _filename, payload): handlers.register(mod) pdata['handlercount'] = curcount + 1 except: - util.logexc(LOG, ("Failed at registering python file: %s" - " (part handler %s)"), modfname, curcount) + util.logexc(LOG, "Failed at registering python file: %s (part " + "handler %s)", modfname, curcount) def _extract_first_or_bytes(blob, size): diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py index bf2899ab..11ac4fe5 100644 --- a/cloudinit/handlers/boot_hook.py +++ b/cloudinit/handlers/boot_hook.py @@ -70,5 +70,5 @@ class BootHookPartHandler(handlers.Handler): except util.ProcessExecutionError: util.logexc(LOG, "Boothooks script %s execution error", filepath) except Exception: - util.logexc(LOG, ("Boothooks unknown " - "error when running %s"), filepath) + util.logexc(LOG, "Boothooks unknown error when running %s", + filepath) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index a4e6fb03..b91c1290 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Scott Moser @@ -216,8 +216,8 @@ class ConfigMerger(object): if ds_cfg and isinstance(ds_cfg, (dict)): d_cfgs.append(ds_cfg) except: - util.logexc(LOG, ("Failed loading of datasource" - " config object from %s"), self._ds) + util.logexc(LOG, "Failed loading of datasource config object " + "from %s", self._ds) return d_cfgs def _get_env_configs(self): @@ -227,8 +227,8 @@ class ConfigMerger(object): try: e_cfgs.append(util.read_conf(e_fn)) except: - util.logexc(LOG, ('Failed loading of env. config' - ' from %s'), e_fn) + util.logexc(LOG, 'Failed loading of env. 
config from %s', + e_fn) return e_cfgs def _get_instance_configs(self): @@ -242,8 +242,8 @@ class ConfigMerger(object): try: i_cfgs.append(util.read_conf(cc_fn)) except: - util.logexc(LOG, ('Failed loading of cloud-config' - ' from %s'), cc_fn) + util.logexc(LOG, 'Failed loading of cloud-config from %s', + cc_fn) return i_cfgs def _read_cfg(self): @@ -259,8 +259,8 @@ class ConfigMerger(object): try: cfgs.append(util.read_conf(c_fn)) except: - util.logexc(LOG, ("Failed loading of configuration" - " from %s"), c_fn) + util.logexc(LOG, "Failed loading of configuration from %s", + c_fn) cfgs.extend(self._get_env_configs()) cfgs.extend(self._get_instance_configs()) diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 64548d43..a834f8eb 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -1,10 +1,11 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Joe VLcek +# Author: Juerg Haefliger # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -79,7 +80,7 @@ def read_user_data_callback(mount_dir): try: user_data = util.load_file(user_data_file).strip() except IOError: - util.logexc(LOG, ('Failed accessing user data file.')) + util.logexc(LOG, 'Failed accessing user data file.') return None return user_data @@ -178,7 +179,7 @@ class DataSourceAltCloud(sources.DataSource): return False # No user data found - util.logexc(LOG, ('Failed accessing user data.')) + util.logexc(LOG, 'Failed accessing user data.') return False def user_data_rhevm(self): @@ -205,12 +206,12 @@ class DataSourceAltCloud(sources.DataSource): (cmd_out, _err) = util.subp(cmd) LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out)) except ProcessExecutionError, _err: - util.logexc(LOG, (('Failed command: %s\n%s') % \ - (' '.join(cmd), _err.message))) + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), + _err.message) return False except OSError, _err: - util.logexc(LOG, (('Failed command: %s\n%s') % \ - (' '.join(cmd), _err.message))) + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), + _err.message) return False floppy_dev = '/dev/fd0' @@ -222,12 +223,12 @@ class DataSourceAltCloud(sources.DataSource): (cmd_out, _err) = util.subp(cmd) LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out)) except ProcessExecutionError, _err: - util.logexc(LOG, (('Failed command: %s\n%s') % \ - (' '.join(cmd), _err.message))) + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), + _err.message) return False except OSError, _err: - util.logexc(LOG, (('Failed command: %s\n%s') % \ - (' '.join(cmd), _err.message))) + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), + _err.message) return False try: @@ -236,8 +237,8 @@ class DataSourceAltCloud(sources.DataSource): if err.errno != errno.ENOENT: raise except util.MountFailedError: - util.logexc(LOG, ("Failed to mount %s" - " when looking for user data"), floppy_dev) + util.logexc(LOG, "Failed to mount %s when looking for user data", + floppy_dev) self.userdata_raw = return_str self.metadata = META_DATA_NOT_SUPPORTED @@ -272,8 +273,8 @@ class DataSourceAltCloud(sources.DataSource): if err.errno != errno.ENOENT: raise except util.MountFailedError: - util.logexc(LOG, ("Failed to mount 
%s" - " when looking for user data"), cdrom_dev) + util.logexc(LOG, "Failed to mount %s when looking for user " + "data", cdrom_dev) self.userdata_raw = return_str self.metadata = META_DATA_NOT_SUPPORTED diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 81c8cda9..08f661e4 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -4,11 +4,13 @@ # Copyright (C) 2012 Cosmin Luta # Copyright (C) 2012 Yahoo! Inc. # Copyright (C) 2012 Gerard Dethier +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # # Author: Cosmin Luta # Author: Scott Moser # Author: Joshua Harlow # Author: Gerard Dethier +# Author: Juerg Haefliger # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -109,8 +111,8 @@ class DataSourceCloudStack(sources.DataSource): int(time.time() - start_time)) return True except Exception: - util.logexc(LOG, ('Failed fetching from metadata ' - 'service %s'), self.metadata_address) + util.logexc(LOG, 'Failed fetching from metadata service %s', + self.metadata_address) return False def get_instance_id(self): diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 084abca7..4ef92a56 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Scott Moser @@ -119,8 +119,8 @@ class DataSourceNoCloud(sources.DataSource): if e.errno != errno.ENOENT: raise except util.MountFailedError: - util.logexc(LOG, ("Failed to mount %s" - " when looking for data"), dev) + util.logexc(LOG, "Failed to mount %s when looking for " + "data", dev) # There was no indication on kernel cmdline or data # in the seeddir suggesting this handler should be used. diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 95133236..70a577bc 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -229,11 +229,9 @@ def extract_authorized_keys(username): except (IOError, OSError): # Give up and use a default key filename auth_key_fn = os.path.join(ssh_dir, 'authorized_keys') - util.logexc(LOG, ("Failed extracting 'AuthorizedKeysFile'" - " in ssh config" - " from %r, using 'AuthorizedKeysFile' file" - " %r instead"), - DEF_SSHD_CFG, auth_key_fn) + util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh " + "config from %r, using 'AuthorizedKeysFile' file " + "%r instead", DEF_SSHD_CFG, auth_key_fn) return (auth_key_fn, parse_authorized_keys(auth_key_fn)) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 543d247f..df49cabb 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -1,7 +1,7 @@ # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. 
 #
 # Author: Scott Moser
@@ -154,9 +154,8 @@ class Init(object):
             try:
                 util.chownbyname(log_file, u, g)
             except OSError:
-                util.logexc(LOG, ("Unable to change the ownership"
-                                  " of %s to user %s, group %s"),
-                            log_file, u, g)
+                util.logexc(LOG, "Unable to change the ownership of %s to "
+                            "user %s, group %s", log_file, u, g)
 
     def read_cfg(self, extra_fns=None):
         # None check so that we don't keep on re-loading if empty
diff --git a/cloudinit/util.py b/cloudinit/util.py
index b27b3567..c45aae06 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1,7 +1,7 @@
 # vi: ts=4 expandtab
 #
 # Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
 # Copyright (C) 2012 Yahoo! Inc.
 #
 # Author: Scott Moser
@@ -219,8 +219,7 @@ def fork_cb(child_cb, *args):
             child_cb(*args)
             os._exit(0)  # pylint: disable=W0212
         except:
-            logexc(LOG, ("Failed forking and"
-                         " calling callback %s"),
+            logexc(LOG, "Failed forking and calling callback %s",
                    type_utils.obj_name(child_cb))
             os._exit(1)  # pylint: disable=W0212
     else:
-- 
cgit v1.2.3

From 2f853948f269f50038533782ee45e381d60d02bf Mon Sep 17 00:00:00 2001
From: Juerg Haefliger
Date: Wed, 19 Jun 2013 08:46:54 +0200
Subject: fix pep8 errors.

---
 cloudinit/mergers/m_list.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
index 76591bea..62999b4e 100644
--- a/cloudinit/mergers/m_list.py
+++ b/cloudinit/mergers/m_list.py
@@ -19,6 +19,7 @@
 DEF_MERGE_TYPE = 'replace'
 MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')
 
+
 def _has_any(what, *keys):
     for k in keys:
         if k in what:
-- 
cgit v1.2.3

From d445836b3ec9ca94b26edd3eb4df9f4a53e67bc6 Mon Sep 17 00:00:00 2001
From: Juerg Haefliger
Date: Thu, 20 Jun 2013 15:53:16 +0200
Subject: Cleanup Distro.create_user() method

Move adding of a user and locking of a password to their own methods so
that distro handlers can override them.
---
 cloudinit/distros/__init__.py | 101 ++++++++++++++++++++++++------------------
 1 file changed, 57 insertions(+), 44 deletions(-)

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index e99cb16f..c5990960 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -281,15 +281,16 @@ class Distro(object):
     def get_default_user(self):
         return self.get_option('default_user')
 
-    def create_user(self, name, **kwargs):
+    def add_user(self, name, **kwargs):
         """
-        Creates users for the system using the GNU passwd tools. This
-        will work on an GNU system. This should be overriden on
-        distros where useradd is not desirable or not available.
+        Add a user to the system using standard GNU tools
         """
+        if util.is_user(name):
+            LOG.info("User %s already exists, skipping." % name)
+            return
 
         adduser_cmd = ['useradd', name]
-        x_adduser_cmd = ['useradd', name]
+        log_adduser_cmd = ['useradd', name]
 
         # Since we are creating users, we want to carefully validate the
         # inputs. If something goes wrong, we can end up with a system
@@ -306,63 +307,65 @@ class Distro(object):
             "selinux_user": '--selinux-user',
         }
 
-        adduser_opts_flags = {
+        adduser_flags = {
             "no_user_group": '--no-user-group',
             "system": '--system',
             "no_log_init": '--no-log-init',
-            "no_create_home": "-M",
         }
 
-        redact_fields = ['passwd']
+        redact_opts = ['passwd']
+
+        # Check the values and create the command
+        for key, val in kwargs.iteritems():
+
+            if key in adduser_opts and val and isinstance(val, str):
+                adduser_cmd.extend([adduser_opts[key], val])
 
-        # Now check the value and create the command
-        for option in kwargs:
-            value = kwargs[option]
-            if option in adduser_opts and value \
-                and isinstance(value, str):
-                adduser_cmd.extend([adduser_opts[option], value])
-                # Redact certain fields from the logs
-                if option in redact_fields:
-                    x_adduser_cmd.extend([adduser_opts[option], 'REDACTED'])
-                else:
-                    x_adduser_cmd.extend([adduser_opts[option], value])
-            elif option in adduser_opts_flags and value:
-                adduser_cmd.append(adduser_opts_flags[option])
                 # Redact certain fields from the logs
-                if option in redact_fields:
-                    x_adduser_cmd.append('REDACTED')
+                if key in redact_opts:
+                    log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
                 else:
-                    x_adduser_cmd.append(adduser_opts_flags[option])
+                    log_adduser_cmd.extend([adduser_opts[key], val])
 
-        # Default to creating home directory unless otherwise directed
-        # Also, we do not create home directories for system users.
-        if "no_create_home" not in kwargs and "system" not in kwargs:
-            adduser_cmd.append('-m')
+            elif key in adduser_flags and val:
+                adduser_cmd.append(adduser_flags[key])
+                log_adduser_cmd.append(adduser_flags[key])
 
-        # Create the user
-        if util.is_user(name):
-            LOG.warn("User %s already exists, skipping." % name)
+        # Don't create the home directory if so directed or if the user is a
+        # system user
+        if 'no_create_home' in kwargs or 'system' in kwargs:
+            adduser_cmd.append('-M')
+            log_adduser_cmd.append('-M')
         else:
-            LOG.debug("Adding user named %s", name)
-            try:
-                util.subp(adduser_cmd, logstring=x_adduser_cmd)
-            except Exception as e:
-                util.logexc(LOG, "Failed to create user %s", name)
-                raise e
+            adduser_cmd.append('-m')
+            log_adduser_cmd.append('-m')
+
+        # Run the command
+        LOG.debug("Adding user %s", name)
+        try:
+            util.subp(adduser_cmd, logstring=log_adduser_cmd)
+        except Exception as e:
+            util.logexc(LOG, "Failed to create user %s", name)
+            raise e
+
+    def create_user(self, name, **kwargs):
+        """
+        Creates users for the system using the GNU passwd tools. This
+        will work on a GNU system. This should be overridden on
+        distros where useradd is not desirable or not available.
+        """
+
+        # Add the user
+        self.add_user(name, **kwargs)
 
         # Set password if plain-text password provided
-        if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
+        if 'plain_text_passwd' in kwargs:
             self.set_passwd(name, kwargs['plain_text_passwd'])
 
         # Default locking down the account. 'lock_passwd' defaults to True.
         # lock account unless lock_password is False.
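(Aside: a minimal sketch of what this split enables; the subclass name is
hypothetical, and the method body mirrors the short-option variant the SLES
handler adopts later in this series:)

    class MyDistro(distros.Distro):
        # Override only the locking step; add_user()/create_user() are
        # inherited unchanged from the base class.
        def lock_passwd(self, name):
            util.subp(['passwd', '-l', name])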
         if kwargs.get('lock_passwd', True):
-            try:
-                util.subp(['passwd', '--lock', name])
-            except Exception as e:
-                util.logexc(LOG, "Failed to disable password logins for "
-                            "user %s", name)
-                raise e
+            self.lock_passwd(name)
 
         # Configure sudo access
         if 'sudo' in kwargs:
@@ -375,6 +378,16 @@ class Distro(object):
 
         return True
 
+    def lock_passwd(self, name):
+        """
+        Lock the password of a user, i.e., disable password logins
+        """
+        try:
+            util.subp(['passwd', '--lock', name])
+        except Exception as e:
+            util.logexc(LOG, 'Failed to disable password for user %s', name)
+            raise e
+
     def set_passwd(self, user, passwd, hashed=False):
         pass_string = '%s:%s' % (user, passwd)
         cmd = ['chpasswd']
-- 
cgit v1.2.3

From ff4e9912bde07fb88de90f2dda2e8657ef779679 Mon Sep 17 00:00:00 2001
From: Juerg Haefliger
Date: Tue, 25 Jun 2013 08:49:35 +0200
Subject: Move some RHEL distro methods to their own new file

So that they can be used by other handlers.
---
 cloudinit/distros/rhel.py      | 165 +++----------------------------------
 cloudinit/distros/rhel_util.py | 179 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 191 insertions(+), 153 deletions(-)
 create mode 100644 cloudinit/distros/rhel_util.py

diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 0727ecd1..4fd4239c 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -3,10 +3,12 @@
 # Copyright (C) 2012 Canonical Ltd.
 # Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
 # Copyright (C) 2012 Yahoo! Inc.
+# Copyright (C) 2013 SUSE LLC
 #
 # Author: Scott Moser
 # Author: Juerg Haefliger
 # Author: Joshua Harlow
+# Author: Robert Schweikert
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 3, as
@@ -23,14 +25,11 @@
 import os
 
 from cloudinit import distros
-
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-from cloudinit.distros.parsers.sys_conf import SysConf
-
 from cloudinit import helpers
 from cloudinit import log as logging
 from cloudinit import util
 
+from cloudinit.distros import rhel_util
 from cloudinit.settings import PER_INSTANCE
 
 LOG = logging.getLogger(__name__)
@@ -67,32 +66,9 @@ class Distro(distros.Distro):
     def install_packages(self, pkglist):
         self.package_command('install', pkgs=pkglist)
 
-    def _adjust_resolve(self, dns_servers, search_servers):
-        try:
-            r_conf = ResolvConf(util.load_file(self.resolve_conf_fn))
-            r_conf.parse()
-        except IOError:
-            util.logexc(LOG, "Failed at parsing %s reverting to an empty "
-                        "instance", self.resolve_conf_fn)
-            r_conf = ResolvConf('')
-            r_conf.parse()
-        if dns_servers:
-            for s in dns_servers:
-                try:
-                    r_conf.add_nameserver(s)
-                except ValueError:
-                    util.logexc(LOG, "Failed at adding nameserver %s", s)
-        if search_servers:
-            for s in search_servers:
-                try:
-                    r_conf.add_search_domain(s)
-                except ValueError:
-                    util.logexc(LOG, "Failed at adding search domain %s", s)
-        util.write_file(self.resolve_conf_fn, str(r_conf), 0644)
-
     def _write_network(self, settings):
         # TODO(harlowja) fix this... since this is the ubuntu format
-        entries = translate_network(settings)
+        entries = rhel_util.translate_network(settings)
         LOG.debug("Translated ubuntu style network settings %s into %s",
                   settings, entries)
         # Make the intermediate format as the rhel format...
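(For orientation, a sketch of the intermediate map translate_network()
hands back for a Debian-style stanza; device name and addresses invented:)

    settings = '\n'.join(['auto eth0',
                          'iface eth0 inet static',
                          '    address 10.0.0.5',
                          '    netmask 255.255.255.0',
                          '    gateway 10.0.0.1'])
    rhel_util.translate_network(settings)
    # -> {'eth0': {'bootproto': 'static', 'address': '10.0.0.5',
    #              'netmask': '255.255.255.0', 'gateway': '10.0.0.1',
    #              'auto': True}}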
@@ -111,41 +87,21 @@ class Distro(distros.Distro): 'MACADDR': info.get('hwaddress'), 'ONBOOT': _make_sysconfig_bool(info.get('auto')), } - self._update_sysconfig_file(net_fn, net_cfg) + rhel_util.update_sysconfig_file(net_fn, net_cfg) if 'dns-nameservers' in info: nameservers.extend(info['dns-nameservers']) if 'dns-search' in info: searchservers.extend(info['dns-search']) if nameservers or searchservers: - self._adjust_resolve(nameservers, searchservers) + rhel_util.update_resolve_conf_file(self.resolve_conf_fn, + nameservers, searchservers) if dev_names: net_cfg = { 'NETWORKING': _make_sysconfig_bool(True), } - self._update_sysconfig_file(self.network_conf_fn, net_cfg) + rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg) return dev_names - def _update_sysconfig_file(self, fn, adjustments, allow_empty=False): - if not adjustments: - return - (exists, contents) = self._read_conf(fn) - updated_am = 0 - for (k, v) in adjustments.items(): - if v is None: - continue - v = str(v) - if len(v) == 0 and not allow_empty: - continue - contents[k] = v - updated_am += 1 - if updated_am: - lines = [ - str(contents), - ] - if not exists: - lines.insert(0, util.make_header()) - util.write_file(fn, "\n".join(lines) + "\n", 0644) - def _dist_uses_systemd(self): # Fedora 18 and RHEL 7 were the first adopters in their series (dist, vers) = util.system_info()['dist'][:2] @@ -164,7 +120,7 @@ class Distro(distros.Distro): locale_cfg = { 'LANG': locale, } - self._update_sysconfig_file(out_fn, locale_cfg) + rhel_util.update_sysconfig_file(out_fn, locale_cfg) def _write_hostname(self, hostname, out_fn): if self._dist_uses_systemd(): @@ -173,7 +129,7 @@ class Distro(distros.Distro): host_cfg = { 'HOSTNAME': hostname, } - self._update_sysconfig_file(out_fn, host_cfg) + rhel_util.update_sysconfig_file(out_fn, host_cfg) def _select_hostname(self, hostname, fqdn): # See: http://bit.ly/TwitgL @@ -197,22 +153,12 @@ class Distro(distros.Distro): else: return default else: - (_exists, contents) = self._read_conf(filename) + (_exists, contents) = rhel_util.read_sysconfig_file(filename) if 'HOSTNAME' in contents: return contents['HOSTNAME'] else: return default - def _read_conf(self, fn): - exists = False - try: - contents = util.load_file(fn).splitlines() - exists = True - except IOError: - contents = [] - return (exists, - SysConf(contents)) - def _bring_up_interfaces(self, device_names): if device_names and 'all' in device_names: raise RuntimeError(('Distro %s can not translate ' @@ -236,7 +182,7 @@ class Distro(distros.Distro): clock_cfg = { 'ZONE': str(tz), } - self._update_sysconfig_file(self.clock_conf_fn, clock_cfg) + rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg) # This ensures that the correct tz will be used for the system util.copy(tz_file, self.tz_local_fn) @@ -271,90 +217,3 @@ class Distro(distros.Distro): def update_package_sources(self): self._runner.run("update-sources", self.package_command, ["makecache"], freq=PER_INSTANCE) - - -# This is a util function to translate a ubuntu /etc/network/interfaces 'blob' -# to a rhel equiv. that can then be written to /etc/sysconfig/network-scripts/ -# TODO(harlowja) remove when we have python-netcf active... 
-def translate_network(settings): - # Get the standard cmd, args from the ubuntu format - entries = [] - for line in settings.splitlines(): - line = line.strip() - if not line or line.startswith("#"): - continue - split_up = line.split(None, 1) - if len(split_up) <= 1: - continue - entries.append(split_up) - # Figure out where each iface section is - ifaces = [] - consume = {} - for (cmd, args) in entries: - if cmd == 'iface': - if consume: - ifaces.append(consume) - consume = {} - consume[cmd] = args - else: - consume[cmd] = args - # Check if anything left over to consume - absorb = False - for (cmd, args) in consume.iteritems(): - if cmd == 'iface': - absorb = True - if absorb: - ifaces.append(consume) - # Now translate - real_ifaces = {} - for info in ifaces: - if 'iface' not in info: - continue - iface_details = info['iface'].split(None) - dev_name = None - if len(iface_details) >= 1: - dev = iface_details[0].strip().lower() - if dev: - dev_name = dev - if not dev_name: - continue - iface_info = {} - if len(iface_details) >= 3: - proto_type = iface_details[2].strip().lower() - # Seems like this can be 'loopback' which we don't - # really care about - if proto_type in ['dhcp', 'static']: - iface_info['bootproto'] = proto_type - # These can just be copied over - for k in ['netmask', 'address', 'gateway', 'broadcast']: - if k in info: - val = info[k].strip().lower() - if val: - iface_info[k] = val - # Name server info provided?? - if 'dns-nameservers' in info: - iface_info['dns-nameservers'] = info['dns-nameservers'].split() - # Name server search info provided?? - if 'dns-search' in info: - iface_info['dns-search'] = info['dns-search'].split() - # Is any mac address spoofing going on?? - if 'hwaddress' in info: - hw_info = info['hwaddress'].lower().strip() - hw_split = hw_info.split(None, 1) - if len(hw_split) == 2 and hw_split[0].startswith('ether'): - hw_addr = hw_split[1] - if hw_addr: - iface_info['hwaddress'] = hw_addr - real_ifaces[dev_name] = iface_info - # Check for those that should be started on boot via 'auto' - for (cmd, args) in entries: - if cmd == 'auto': - # Seems like auto can be like 'auto eth0 eth0:1' so just get the - # first part out as the device name - args = args.split(None) - if not args: - continue - dev_name = args[0].strip().lower() - if dev_name in real_ifaces: - real_ifaces[dev_name]['auto'] = True - return real_ifaces diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py new file mode 100644 index 00000000..504b5d2c --- /dev/null +++ b/cloudinit/distros/rhel_util.py @@ -0,0 +1,179 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. +# Copyright (C) 2013 SUSE LLC +# +# Author: Scott Moser +# Author: Juerg Haefliger +# Author: Joshua Harlow +# Author: Robert Schweikert +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +from cloudinit.distros.parsers.resolv_conf import ResolvConf +from cloudinit.distros.parsers.sys_conf import SysConf + +from cloudinit import log as logging +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +# This is a util function to translate Debian based distro interface blobs as +# given in /etc/network/interfaces to an equivalent format for distributions +# that use ifcfg-* style (Red Hat and SUSE). +# TODO(harlowja) remove when we have python-netcf active... +def translate_network(settings): + # Get the standard cmd, args from the ubuntu format + entries = [] + for line in settings.splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + split_up = line.split(None, 1) + if len(split_up) <= 1: + continue + entries.append(split_up) + # Figure out where each iface section is + ifaces = [] + consume = {} + for (cmd, args) in entries: + if cmd == 'iface': + if consume: + ifaces.append(consume) + consume = {} + consume[cmd] = args + else: + consume[cmd] = args + # Check if anything left over to consume + absorb = False + for (cmd, args) in consume.iteritems(): + if cmd == 'iface': + absorb = True + if absorb: + ifaces.append(consume) + # Now translate + real_ifaces = {} + for info in ifaces: + if 'iface' not in info: + continue + iface_details = info['iface'].split(None) + dev_name = None + if len(iface_details) >= 1: + dev = iface_details[0].strip().lower() + if dev: + dev_name = dev + if not dev_name: + continue + iface_info = {} + if len(iface_details) >= 3: + proto_type = iface_details[2].strip().lower() + # Seems like this can be 'loopback' which we don't + # really care about + if proto_type in ['dhcp', 'static']: + iface_info['bootproto'] = proto_type + # These can just be copied over + for k in ['netmask', 'address', 'gateway', 'broadcast']: + if k in info: + val = info[k].strip().lower() + if val: + iface_info[k] = val + # Name server info provided?? + if 'dns-nameservers' in info: + iface_info['dns-nameservers'] = info['dns-nameservers'].split() + # Name server search info provided?? + if 'dns-search' in info: + iface_info['dns-search'] = info['dns-search'].split() + # Is any mac address spoofing going on?? 
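# (Illustration, address invented: a stanza line such as
# 'hwaddress ether 00:16:3e:aa:bb:cc' passes the ether/length checks below
# and is recorded as iface_info['hwaddress'] = '00:16:3e:aa:bb:cc'.)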
+ if 'hwaddress' in info: + hw_info = info['hwaddress'].lower().strip() + hw_split = hw_info.split(None, 1) + if len(hw_split) == 2 and hw_split[0].startswith('ether'): + hw_addr = hw_split[1] + if hw_addr: + iface_info['hwaddress'] = hw_addr + real_ifaces[dev_name] = iface_info + # Check for those that should be started on boot via 'auto' + for (cmd, args) in entries: + if cmd == 'auto': + # Seems like auto can be like 'auto eth0 eth0:1' so just get the + # first part out as the device name + args = args.split(None) + if not args: + continue + dev_name = args[0].strip().lower() + if dev_name in real_ifaces: + real_ifaces[dev_name]['auto'] = True + return real_ifaces + + +# Helper function to update a RHEL/SUSE /etc/sysconfig/* file +def update_sysconfig_file(fn, adjustments, allow_empty=False): + if not adjustments: + return + (exists, contents) = read_sysconfig_file(fn) + updated_am = 0 + for (k, v) in adjustments.items(): + if v is None: + continue + v = str(v) + if len(v) == 0 and not allow_empty: + continue + contents[k] = v + updated_am += 1 + if updated_am: + lines = [ + str(contents), + ] + if not exists: + lines.insert(0, util.make_header()) + util.write_file(fn, "\n".join(lines) + "\n", 0644) + + +# Helper function to read a RHEL/SUSE /etc/sysconfig/* file +def read_sysconfig_file(fn): + exists = False + try: + contents = util.load_file(fn).splitlines() + exists = True + except IOError: + contents = [] + return (exists, SysConf(contents)) + + +# Helper function to update RHEL/SUSE /etc/resolv.conf +def update_resolve_conf_file(fn, dns_servers, search_servers): + try: + r_conf = ResolvConf(util.load_file(fn)) + r_conf.parse() + except IOError: + util.logexc(LOG, "Failed at parsing %s reverting to an empty " + "instance", fn) + r_conf = ResolvConf('') + r_conf.parse() + if dns_servers: + for s in dns_servers: + try: + r_conf.add_nameserver(s) + except ValueError: + util.logexc(LOG, "Failed at adding nameserver %s", s) + if search_servers: + for s in search_servers: + try: + r_conf.add_search_domain(s) + except ValueError: + util.logexc(LOG, "Failed at adding search domain %s", s) + util.write_file(fn, str(r_conf), 0644) -- cgit v1.2.3 From e05e747a7293f991843ca4b7ba5e0736cb5f043f Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Tue, 25 Jun 2013 08:51:21 +0200 Subject: Add a SLES distro handler --- cloudinit/config/cc_resolv_conf.py | 6 +- cloudinit/distros/__init__.py | 5 +- cloudinit/distros/sles.py | 226 +++++++++++++++++++++++++++++++++++++ 3 files changed, 235 insertions(+), 2 deletions(-) create mode 100644 cloudinit/distros/sles.py diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index 8a460f7e..d4fead12 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -1,8 +1,12 @@ # vi: ts=4 expandtab # # Copyright (C) 2013 Craig Tracey +# Copyright (C) 2013 SUSE LLC +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. 
# # Author: Craig Tracey +# Author: Robert Schweikert +# Author: Juerg Haefliger # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -53,7 +57,7 @@ from cloudinit import util frequency = PER_INSTANCE -distros = ['fedora', 'rhel'] +distros = ['fedora', 'rhel', 'sles'] def generate_resolv_conf(cloud, log, params): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index c5990960..ef5db86b 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -3,11 +3,13 @@ # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. +# Copyright (C) 2013 SUSE LLC # # Author: Scott Moser # Author: Juerg Haefliger # Author: Joshua Harlow # Author: Ben Howard +# Author: Robert Schweikert # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -38,7 +40,8 @@ from cloudinit.distros.parsers import hosts OSFAMILIES = { 'debian': ['debian', 'ubuntu'], - 'redhat': ['fedora', 'rhel'] + 'redhat': ['fedora', 'rhel'], + 'suse': ['sles'] } LOG = logging.getLogger(__name__) diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py new file mode 100644 index 00000000..e068b4bd --- /dev/null +++ b/cloudinit/distros/sles.py @@ -0,0 +1,226 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2013 SUSE LLC +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +# Author: Robert Schweikert +# Author: Juerg Haefliger +# +# Leaning very heavily on the RHEL and Debian implementation +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os + +from cloudinit import distros + +from cloudinit.distros.parsers.hostname import HostnameConf + +from cloudinit import helpers +from cloudinit import log as logging +from cloudinit import util + +from cloudinit.distros import rhel_util +from cloudinit.settings import PER_INSTANCE + +LOG = logging.getLogger(__name__) + + +class Distro(distros.Distro): + clock_conf_fn = '/etc/sysconfig/clock' + locale_conf_fn = '/etc/sysconfig/language' + network_conf_fn = '/etc/sysconfig/network' + hostname_conf_fn = '/etc/HOSTNAME' + network_script_tpl = '/etc/sysconfig/network/ifcfg-%s' + resolve_conf_fn = '/etc/resolv.conf' + tz_local_fn = '/etc/localtime' + tz_zone_dir = '/usr/share/zoneinfo' + + def __init__(self, name, cfg, paths): + distros.Distro.__init__(self, name, cfg, paths) + # This will be used to restrict certain + # calls from repeatly happening (when they + # should only happen say once per instance...) 
+ self._runner = helpers.Runners(paths) + self.osfamily = 'suse' + + def install_packages(self, pkglist): + self.package_command('install', args='-l', pkgs=pkglist) + + def _write_network(self, settings): + # Convert debian settings to ifcfg format + entries = rhel_util.translate_network(settings) + LOG.debug("Translated ubuntu style network settings %s into %s", + settings, entries) + # Make the intermediate format as the suse format... + nameservers = [] + searchservers = [] + dev_names = entries.keys() + for (dev, info) in entries.iteritems(): + net_fn = self.network_script_tpl % (dev) + mode = info.get('auto') + if mode and mode.lower() == 'true': + mode = 'auto' + else: + mode = 'manual' + net_cfg = { + 'BOOTPROTO': info.get('bootproto'), + 'BROADCAST': info.get('broadcast'), + 'GATEWAY': info.get('gateway'), + 'IPADDR': info.get('address'), + 'LLADDR': info.get('hwaddress'), + 'NETMASK': info.get('netmask'), + 'STARTMODE': mode, + 'USERCONTROL': 'no' + } + if dev != 'lo': + net_cfg['ETHERDEVICE'] = dev + net_cfg['ETHTOOL_OPTIONS'] = '' + else: + net_cfg['FIREWALL'] = 'no' + rhel_util.update_sysconfig_file(net_fn, net_cfg, True) + if 'dns-nameservers' in info: + nameservers.extend(info['dns-nameservers']) + if 'dns-search' in info: + searchservers.extend(info['dns-search']) + if nameservers or searchservers: + rhel_util.update_resolve_conf_file(self.resolve_conf_fn, + nameservers, searchservers) + return dev_names + + def apply_locale(self, locale, out_fn=None): + if not out_fn: + out_fn = self.locale_conf_fn + locale_cfg = { + 'RC_LANG': locale, + } + rhel_util.update_sysconfig_file(out_fn, locale_cfg) + + def _write_hostname(self, hostname, out_fn): + conf = None + try: + # Try to update the previous one + # so lets see if we can read it first. + conf = self._read_hostname_conf(out_fn) + except IOError: + pass + if not conf: + conf = HostnameConf('') + conf.set_hostname(hostname) + util.write_file(out_fn, str(conf), 0644) + + def _select_hostname(self, hostname, fqdn): + # Prefer the short hostname over the long + # fully qualified domain name + if not hostname: + return fqdn + return hostname + + def _read_system_hostname(self): + host_fn = self.hostname_conf_fn + return (host_fn, self._read_hostname(host_fn)) + + def _read_hostname_conf(self, filename): + conf = HostnameConf(util.load_file(filename)) + conf.parse() + return conf + + def _read_hostname(self, filename, default=None): + hostname = None + try: + conf = self._read_hostname_conf(filename) + hostname = conf.hostname + except IOError: + pass + if not hostname: + return default + return hostname + + def _bring_up_interfaces(self, device_names): + if device_names and 'all' in device_names: + raise RuntimeError(('Distro %s can not translate ' + 'the device name "all"') % (self.name)) + return distros.Distro._bring_up_interfaces(self, device_names) + + def set_timezone(self, tz): + # TODO(harlowja): move this code into + # the parent distro... 
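# (Sketch of the effect, timezone value assumed: set_timezone() below
# funnels the change through
#     rhel_util.update_sysconfig_file('/etc/sysconfig/clock',
#                                     {'TIMEZONE': 'Europe/Zurich'})
# leaving a TIMEZONE=Europe/Zurich line in /etc/sysconfig/clock, the same
# key/value shape the new unit tests assert on.)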
+ tz_file = os.path.join(self.tz_zone_dir, str(tz)) + if not os.path.isfile(tz_file): + raise RuntimeError(("Invalid timezone %s," + " no file found at %s") % (tz, tz_file)) + # Adjust the sysconfig clock zone setting + clock_cfg = { + 'TIMEZONE': str(tz), + } + rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg) + # This ensures that the correct tz will be used for the system + util.copy(tz_file, self.tz_local_fn) + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + cmd = ['zypper'] + # No user interaction possible, enable non-interactive mode + cmd.append('-t') + # Do not check the keys, we assume that the initial repos configured + # in the image can be trusted + cmd.append('--no-gpg-checks') + + # Comand is the operation, such as install + cmd.append(command) + + # args are the arguments to the command, not global options + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): + cmd.extend(args) + + pkglist = util.expand_package_list('%s-%s', pkgs) + cmd.extend(pkglist) + + # Allow the output of this to flow outwards (ie not be captured) + util.subp(cmd, capture=False) + + def update_package_sources(self): + self._runner.run("update-sources", self.package_command, + ['refresh'], freq=PER_INSTANCE) + + # Copied from parent class and modified to use short option names since + # the SLES command doesn't support long names (yet). This method can be + # removed when SLES finally catches up. + def lock_passwd(self, name): + """ + Lock the password of a user, i.e., disable password logins + """ + try: + util.subp(['passwd', '-l', name]) + except Exception as e: + util.logexc(LOG, 'Failed to disable password for user %s', name) + raise e + + # Copied from parent class and modified to use short option names since + # the SLES command doesn't support long names (yet). This method can be + # removed when SLES finally catches up. + def set_passwd(self, user, passwd, hashed=False): + pass_string = '%s:%s' % (user, passwd) + cmd = ['chpasswd'] + if hashed: + cmd.append('-e') + try: + util.subp(cmd, pass_string, logstring="chpasswd for %s" % user) + except Exception as e: + util.logexc(LOG, "Failed to set password for %s", user) + raise e + return True -- cgit v1.2.3 From 99baf9641689cf67389f46f1cb8bb09451d6f5ae Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Tue, 25 Jun 2013 08:56:57 +0200 Subject: Add SLES hosts template file --- templates/hosts.suse.tmpl | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 templates/hosts.suse.tmpl diff --git a/templates/hosts.suse.tmpl b/templates/hosts.suse.tmpl new file mode 100644 index 00000000..5d3d57e4 --- /dev/null +++ b/templates/hosts.suse.tmpl @@ -0,0 +1,24 @@ +#* + This file /etc/cloud/templates/hosts.suse.tmpl is only utilized + if enabled in cloud-config. Specifically, in order to enable it + you need to add the following to config: + manage_etc_hosts: True +*# +# Your system has configured 'manage_etc_hosts' as True. +# As a result, if you wish for changes to this file to persist +# then you will need to either +# a.) make changes to the master file in /etc/cloud/templates/hosts.suse.tmpl +# b.) 
change or remove the value of 'manage_etc_hosts' in +# /etc/cloud/cloud.cfg or cloud-config from user-data +# +# The following lines are desirable for IPv4 capable hosts +127.0.0.1 localhost + +# The following lines are desirable for IPv6 capable hosts +::1 localhost ipv6-localhost ipv6-loopback +fe00::0 ipv6-localnet + +ff00::0 ipv6-mcastprefix +ff02::1 ipv6-allnodes +ff02::2 ipv6-allrouters +ff02::3 ipv6-allhosts -- cgit v1.2.3 From a8e22f5707248671116b6cfea42608137e1c1873 Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Tue, 25 Jun 2013 08:57:27 +0200 Subject: Add unit tests for SLES handler --- tests/unittests/helpers.py | 3 +- .../unittests/test_handler/test_handler_locale.py | 64 ++++++++++++++++++ .../test_handler/test_handler_set_hostname.py | 13 ++++ .../test_handler/test_handler_timezone.py | 75 ++++++++++++++++++++++ 4 files changed, 154 insertions(+), 1 deletion(-) create mode 100644 tests/unittests/test_handler/test_handler_locale.py create mode 100644 tests/unittests/test_handler/test_handler_timezone.py diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index e020a3ec..c0da0983 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -146,7 +146,8 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): ('chmod', 1), ('delete_dir_contents', 1), ('del_file', 1), - ('sym_link', -1)], + ('sym_link', -1), + ('copy', -1)], } for (mod, funcs) in patch_funcs.items(): for (f, am) in funcs: diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py new file mode 100644 index 00000000..72ad00fd --- /dev/null +++ b/tests/unittests/test_handler/test_handler_locale.py @@ -0,0 +1,64 @@ +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +# Author: Juerg Haefliger +# +# Based on test_handler_set_hostname.py +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
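(In brief, the pattern both new test modules follow; the locale value here
is invented, helper names are from tests/unittests/helpers.py:)

    self.patchUtils(self.new_root)  # cloudinit.util file ops -> temp root
    cc_locale.handle('cc_locale', {'locale': 'en_US.UTF-8'}, cc, LOG, [])
    n_cfg = ConfigObj(StringIO(util.load_file('/etc/sysconfig/language')))
    assert dict(n_cfg) == {'RC_LANG': 'en_US.UTF-8'}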
+ +from cloudinit.config import cc_locale + +from cloudinit import cloud +from cloudinit import distros +from cloudinit import helpers +from cloudinit import util + +from cloudinit.sources import DataSourceNoCloud + +from tests.unittests import helpers as t_help + +from configobj import ConfigObj + +from StringIO import StringIO + +import logging + +LOG = logging.getLogger(__name__) + + +class TestLocale(t_help.FilesystemMockingTestCase): + def setUp(self): + super(TestLocale, self).setUp() + self.new_root = self.makeDir(prefix="unittest_") + + def _get_cloud(self, distro): + self.patchUtils(self.new_root) + paths = helpers.Paths({}) + + cls = distros.fetch(distro) + d = cls(distro, {}, paths) + ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths) + cc = cloud.Cloud(ds, paths, {}, d, None) + return cc + + def test_set_locale_sles(self): + + cfg = { + 'locale': 'My.Locale', + } + cc = self._get_cloud('sles') + cc_locale.handle('cc_locale', cfg, cc, LOG, []) + + contents = util.load_file('/etc/sysconfig/language') + n_cfg = ConfigObj(StringIO(contents)) + self.assertEquals({'RC_LANG': cfg['locale']}, dict(n_cfg)) diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py index b2f01cdb..6344ec0c 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -55,3 +55,16 @@ class TestHostname(t_help.FilesystemMockingTestCase): cfg, cc, LOG, []) contents = util.load_file("/etc/hostname") self.assertEquals('blah', contents.strip()) + + def test_write_hostname_sles(self): + cfg = { + 'hostname': 'blah.blah.blah.suse.com', + } + distro = self._fetch_distro('sles') + paths = helpers.Paths({}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, []) + contents = util.load_file("/etc/HOSTNAME") + self.assertEquals('blah', contents.strip()) diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py new file mode 100644 index 00000000..40b69773 --- /dev/null +++ b/tests/unittests/test_handler/test_handler_timezone.py @@ -0,0 +1,75 @@ +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +# Author: Juerg Haefliger +# +# Based on test_handler_set_hostname.py +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from cloudinit.config import cc_timezone + +from cloudinit import cloud +from cloudinit import distros +from cloudinit import helpers +from cloudinit import util + +from cloudinit.sources import DataSourceNoCloud + +from tests.unittests import helpers as t_help + +from configobj import ConfigObj + +from StringIO import StringIO + +import logging + +LOG = logging.getLogger(__name__) + + +class TestTimezone(t_help.FilesystemMockingTestCase): + def setUp(self): + super(TestTimezone, self).setUp() + self.new_root = self.makeDir(prefix="unittest_") + + def _get_cloud(self, distro): + self.patchUtils(self.new_root) + self.patchOS(self.new_root) + + paths = helpers.Paths({}) + + cls = distros.fetch(distro) + d = cls(distro, {}, paths) + ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths) + cc = cloud.Cloud(ds, paths, {}, d, None) + return cc + + def test_set_timezone_sles(self): + + cfg = { + 'timezone': 'Tatooine/Bestine', + } + cc = self._get_cloud('sles') + + # Create a dummy timezone file + dummy_contents = '0123456789abcdefgh' + util.write_file('/usr/share/zoneinfo/%s' % cfg['timezone'], + dummy_contents) + + cc_timezone.handle('cc_timezone', cfg, cc, LOG, []) + + contents = util.load_file('/etc/sysconfig/clock') + n_cfg = ConfigObj(StringIO(contents)) + self.assertEquals({'TIMEZONE': cfg['timezone']}, dict(n_cfg)) + + contents = util.load_file('/etc/localtime') + self.assertEquals(dummy_contents, contents.strip()) -- cgit v1.2.3 From c8eb622ae0c3f9fab2b25112aa87a2dbf39788db Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Wed, 26 Jun 2013 09:51:59 +0200 Subject: Use short option names for passwd utilities SLES 11 doesn't support long option names for the passwd utilities. Use the short option names in the parent distro class and remove the custom SLES methods. --- cloudinit/distros/__init__.py | 10 ++++++++-- cloudinit/distros/sles.py | 28 ---------------------------- 2 files changed, 8 insertions(+), 30 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index ef5db86b..f8727fc1 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -386,7 +386,10 @@ class Distro(object): Lock the password of a user, i.e., disable password logins """ try: - util.subp(['passwd', '--lock', name]) + # Need to use the short option name '-l' instead of '--lock' + # (which would be more descriptive) since SLES 11 doesn't know + # about long names. + util.subp(['passwd', '-l', name]) except Exception as e: util.logexc(LOG, 'Failed to disable password for user %s', name) raise e @@ -396,7 +399,10 @@ class Distro(object): cmd = ['chpasswd'] if hashed: - cmd.append('--encrypted') + # Need to use the short option name '-e' instead of '--encrypted' + # (which would be more descriptive) since SLES 11 doesn't know + # about long names. + cmd.append('-e') try: util.subp(cmd, pass_string, logstring="chpasswd for %s" % user) diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py index e068b4bd..95bc411a 100644 --- a/cloudinit/distros/sles.py +++ b/cloudinit/distros/sles.py @@ -196,31 +196,3 @@ class Distro(distros.Distro): def update_package_sources(self): self._runner.run("update-sources", self.package_command, ['refresh'], freq=PER_INSTANCE) - - # Copied from parent class and modified to use short option names since - # the SLES command doesn't support long names (yet). This method can be - # removed when SLES finally catches up. 
- def lock_passwd(self, name): - """ - Lock the password of a user, i.e., disable password logins - """ - try: - util.subp(['passwd', '-l', name]) - except Exception as e: - util.logexc(LOG, 'Failed to disable password for user %s', name) - raise e - - # Copied from parent class and modified to use short option names since - # the SLES command doesn't support long names (yet). This method can be - # removed when SLES finally catches up. - def set_passwd(self, user, passwd, hashed=False): - pass_string = '%s:%s' % (user, passwd) - cmd = ['chpasswd'] - if hashed: - cmd.append('-e') - try: - util.subp(cmd, pass_string, logstring="chpasswd for %s" % user) - except Exception as e: - util.logexc(LOG, "Failed to set password for %s", user) - raise e - return True -- cgit v1.2.3 From 9a0c412be667c2b0b235ceef920ebd2df72c1d2f Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Thu, 27 Jun 2013 13:46:56 +0200 Subject: Add support for building a SLES rpm package --- Makefile | 10 ++- packages/brpm | 47 ++++++++---- packages/suse/cloud-init.spec.in | 162 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 202 insertions(+), 17 deletions(-) create mode 100644 packages/suse/cloud-init.spec.in diff --git a/Makefile b/Makefile index b659836f..29bfe0bd 100644 --- a/Makefile +++ b/Makefile @@ -8,6 +8,10 @@ YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f ) CHANGELOG_VERSION=$(shell $(CWD)/tools/read-version) CODE_VERSION=$(shell python -c "from cloudinit import version; print version.version_string()") +ifeq ($(distro),) + distro = redhat +endif + all: test check_version pep8: @@ -25,7 +29,7 @@ test: check_version: @if [ "$(CHANGELOG_VERSION)" != "$(CODE_VERSION)" ]; then \ echo "Error: ChangeLog version $(CHANGELOG_VERSION)" \ - "not equal to code version $(CODE_VERSION)"; exit 2; \ + "not equal to code version $(CODE_VERSION)"; exit 2; \ else true; fi 2to3: @@ -37,9 +41,9 @@ clean: yaml: @$(CWD)/tools/validate-yaml.py $(YAML_FILES) - + rpm: - ./packages/brpm + ./packages/brpm --distro $(distro) deb: ./packages/bddeb diff --git a/packages/brpm b/packages/brpm index 53de802c..14faea4f 100755 --- a/packages/brpm +++ b/packages/brpm @@ -34,14 +34,26 @@ from cloudinit import util # this is a translation of the 'requires' # file pypi package name to a redhat/fedora package name. 
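# (Resulting workflow, per the Makefile hunk above and the argparse option
# added to main() below:
#
#     make rpm                # defaults to distro=redhat
#     make rpm distro=suse    # runs ./packages/brpm --distro suse and
#                             # renders packages/suse/cloud-init.spec.in
# )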
PKG_MP = { - 'argparse': 'python-argparse', - 'boto': 'python-boto', - 'cheetah': 'python-cheetah', - 'configobj': 'python-configobj', - 'oauth': 'python-oauth', - 'prettytable': 'python-prettytable', - 'pyyaml': 'PyYAML', - 'requests': 'python-requests', + 'redhat': { + 'argparse': 'python-argparse', + 'boto': 'python-boto', + 'cheetah': 'python-cheetah', + 'configobj': 'python-configobj', + 'oauth': 'python-oauth', + 'prettytable': 'python-prettytable', + 'pyyaml': 'PyYAML', + 'requests': 'python-requests', + }, + 'suse': { + 'argparse': 'python-argparse', + 'boto': 'python-boto', + 'cheetah': 'python-cheetah', + 'configobj': 'python-configobj', + 'oauth': 'python-oauth', + 'prettytable': 'python-prettytable', + 'pyyaml': 'python-yaml', + 'requests': 'python-requests', + } } # Subdirectories of the ~/rpmbuild dir @@ -120,7 +132,7 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn): # Map to known packages requires = [] for p in pkgs: - tgt_pkg = PKG_MP.get(p) + tgt_pkg = PKG_MP[args.distro].get(p) if not tgt_pkg: raise RuntimeError(("Do not know how to translate pypi dependency" " %r to a known package") % (p)) @@ -142,10 +154,11 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn): missing_versions += 1 if missing_versions == 1: # Must be using a new 'dev'/'trunk' release - changelog_lines.append(format_change_line(datetime.now(), '??')) + changelog_lines.append(format_change_line(datetime.now(), + '??')) else: - sys.stderr.write(("Changelog version line %s " - "does not have a corresponding tag!\n") % (line)) + sys.stderr.write(("Changelog version line %s does not " + "have a corresponding tag!\n") % (line)) else: changelog_lines.append(header) else: @@ -171,6 +184,10 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn): def main(): parser = argparse.ArgumentParser() + parser.add_argument("-d", "--distro", dest="distro", + help="select distro (default: %(default)s)", + metavar="DISTRO", default='redhat', + choices=('redhat', 'suse')) parser.add_argument("-b", "--boot", dest="boot", help="select boot type (default: %(default)s)", metavar="TYPE", default='sysvinit', @@ -218,7 +235,7 @@ def main(): # Form the spec file to be used tmpl_fn = util.abs_join(find_root(), 'packages', - 'redhat', 'cloud-init.spec.in') + args.distro, 'cloud-init.spec.in') contents = generate_spec_contents(args, tmpl_fn, root_dir, os.path.basename(archive_fn)) spec_fn = util.abs_join(root_dir, 'cloud-init.spec') @@ -236,6 +253,8 @@ def main(): globs = [] globs.extend(glob.glob("%s/*.rpm" % (util.abs_join(root_dir, 'RPMS', 'noarch')))) + globs.extend(glob.glob("%s/*.rpm" % + (util.abs_join(root_dir, 'RPMS', 'x86_64')))) globs.extend(glob.glob("%s/*.rpm" % (util.abs_join(root_dir, 'RPMS')))) globs.extend(glob.glob("%s/*.rpm" % @@ -243,7 +262,7 @@ def main(): for rpm_fn in globs: tgt_fn = util.abs_join(os.getcwd(), os.path.basename(rpm_fn)) shutil.move(rpm_fn, tgt_fn) - print("Wrote out redhat package %r" % (tgt_fn)) + print("Wrote out %s package %r" % (args.distro, tgt_fn)) return 0 diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in new file mode 100644 index 00000000..296505c6 --- /dev/null +++ b/packages/suse/cloud-init.spec.in @@ -0,0 +1,162 @@ +## This is a cheetah template + +# See: http://www.zarb.org/~jasonc/macros.php +# Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets +# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html + +#for $d in $defines +%define ${d} +#end for + +Name: cloud-init +Version: ${version} +Release: 
${release}${subrelease}%{?dist} +Summary: Cloud instance init scripts + +Group: System/Management +License: GPLv3 +URL: http://launchpad.net/cloud-init + +Source0: ${archive_name} +BuildRoot: %{_tmppath}/%{name}-%{version}-build + +%if 0%{?suse_version} && 0%{?suse_version} <= 1110 +%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} +%else +BuildArch: noarch +%endif + +BuildRequires: fdupes +BuildRequires: filesystem +BuildRequires: python-devel +BuildRequires: python-setuptools +BuildRequires: python-cheetah + +%if 0%{?suse_version} && 0%{?suse_version} <= 1210 + %define initsys sysvinit +%else + %define initsys systemd +%endif + +# System util packages needed +Requires: iproute2 +Requires: e2fsprogs +Requires: net-tools +Requires: procps + +# Install pypi 'dynamic' requirements +#for $r in $requires +Requires: ${r} +#end for + +# Custom patches +#set $size = 0 +#for $p in $patches +Patch${size}: $p +#set $size += 1 +#end for + +%description +Cloud-init is a set of init scripts for cloud instances. Cloud instances +need special scripts to run during initialization to retrieve and install +ssh keys and to let the user run various scripts. + +%prep +%setup -q -n %{name}-%{version}~${release} + +# Custom patches activation +#set $size = 0 +#for $p in $patches +%patch${size} -p1 +#set $size += 1 +#end for + +%build +%{__python} setup.py build + +%install +%{__python} setup.py install \ + --skip-build --root=%{buildroot} --prefix=%{_prefix} \ + --record-rpm=INSTALLED_FILES --install-lib=%{python_sitelib} \ + --init-system=%{initsys} + +# Remove non-SUSE templates +rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.* +rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.* +rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.* + +# Remove cloud-init tests +rm -r %{buildroot}/%{python_sitelib}/tests + +# Move sysvinit scripts to the correct place and create symbolic links +%if %{initsys} == sysvinit + mkdir -p %{buildroot}/%{_initddir} + mv %{buildroot}%{_sysconfdir}/rc.d/init.d/* %{buildroot}%{_initddir}/ + rmdir %{buildroot}%{_sysconfdir}/rc.d/init.d + rmdir %{buildroot}%{_sysconfdir}/rc.d + + mkdir -p %{buildroot}/%{_sbindir} + pushd %{buildroot}/%{_initddir} + for file in * ; do + ln -s %{_initddir}/\${file} %{buildroot}/%{_sbindir}/rc\${file} + done + popd +%endif + +# Move documentation +mkdir -p %{buildroot}/%{_defaultdocdir} +mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir} +for doc in TODO LICENSE ChangeLog Requires ; do + cp \${doc} %{buildroot}/%{_defaultdocdir}/cloud-init +done + +# Remove duplicate files +%if 0%{?suse_version} + %fdupes %{buildroot}/%{python_sitelib} +%endif + +mkdir -p %{buildroot}/var/lib/cloud + +%postun +%insserv_cleanup + +%files + +# Sysvinit scripts +%if %{initsys} == sysvinit + %attr(0755, root, root) %{_initddir}/cloud-config + %attr(0755, root, root) %{_initddir}/cloud-final + %attr(0755, root, root) %{_initddir}/cloud-init-local + %attr(0755, root, root) %{_initddir}/cloud-init + + %{_sbindir}/rccloud-* +%endif + +# Program binaries +%{_bindir}/cloud-init* + +# There doesn't seem to be an agreed upon place for these +# although it appears the standard says /usr/lib but rpmbuild +# will try /usr/lib64 ?? 
+/usr/lib/%{name}/uncloud-init +/usr/lib/%{name}/write-ssh-key-fingerprints + +# Docs +%doc %{_defaultdocdir}/cloud-init/* + +# Configs +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg +%dir %{_sysconfdir}/cloud/cloud.cfg.d +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/README +%dir %{_sysconfdir}/cloud/templates +%config(noreplace) %{_sysconfdir}/cloud/templates/* + +# Python code is here... +%{python_sitelib}/* + +/var/lib/cloud + +%changelog + +${changelog} -- cgit v1.2.3 From 467d45906c4575c1d231af268c47e812356657b8 Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Thu, 27 Jun 2013 13:50:33 +0200 Subject: Remove 'Copyright SUSE' from the headers Per discussion with Robert @ SUSE since he can't sign the CCA. --- cloudinit/config/cc_resolv_conf.py | 2 -- cloudinit/distros/__init__.py | 2 -- cloudinit/distros/rhel.py | 2 -- cloudinit/distros/rhel_util.py | 2 -- cloudinit/distros/sles.py | 2 -- 5 files changed, 10 deletions(-) diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index d4fead12..879b62b1 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -1,11 +1,9 @@ # vi: ts=4 expandtab # # Copyright (C) 2013 Craig Tracey -# Copyright (C) 2013 SUSE LLC # Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # # Author: Craig Tracey -# Author: Robert Schweikert # Author: Juerg Haefliger # # This program is free software: you can redistribute it and/or modify diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index f8727fc1..cda2c6af 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -3,13 +3,11 @@ # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. -# Copyright (C) 2013 SUSE LLC # # Author: Scott Moser # Author: Juerg Haefliger # Author: Joshua Harlow # Author: Ben Howard -# Author: Robert Schweikert # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 4fd4239c..a022ca60 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -3,12 +3,10 @@ # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. -# Copyright (C) 2013 SUSE LLC # # Author: Scott Moser # Author: Juerg Haefliger # Author: Joshua Harlow -# Author: Robert Schweikert # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py index 504b5d2c..1aba58b8 100644 --- a/cloudinit/distros/rhel_util.py +++ b/cloudinit/distros/rhel_util.py @@ -3,12 +3,10 @@ # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. 
-# Copyright (C) 2013 SUSE LLC # # Author: Scott Moser # Author: Juerg Haefliger # Author: Joshua Harlow -# Author: Robert Schweikert # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py index 95bc411a..d0c15feb 100644 --- a/cloudinit/distros/sles.py +++ b/cloudinit/distros/sles.py @@ -1,9 +1,7 @@ # vi: ts=4 expandtab # -# Copyright (C) 2013 SUSE LLC # Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # -# Author: Robert Schweikert # Author: Juerg Haefliger # # Leaning very heavily on the RHEL and Debian implementation -- cgit v1.2.3 From 0c7c5b999e09acf8795c6db2f1b50a801c0eae8f Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Thu, 27 Jun 2013 14:14:51 +0200 Subject: Fix SLES zypper command usage --- cloudinit/distros/sles.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py index d0c15feb..904e931a 100644 --- a/cloudinit/distros/sles.py +++ b/cloudinit/distros/sles.py @@ -171,10 +171,7 @@ class Distro(distros.Distro): cmd = ['zypper'] # No user interaction possible, enable non-interactive mode - cmd.append('-t') - # Do not check the keys, we assume that the initial repos configured - # in the image can be trusted - cmd.append('--no-gpg-checks') + cmd.append('--non-interactive') # Comand is the operation, such as install cmd.append(command) -- cgit v1.2.3 From 691fe6d4ef3dad5d77e1b250d05bb0858234afee Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 8 Jul 2013 17:04:53 -0400 Subject: commit dev state --- cloudinit/sources/DataSourceAzure.py | 205 ++++++++++++++++++++++++++++++ doc/examples/cloud-config-datasources.txt | 4 + 2 files changed, 209 insertions(+) create mode 100644 cloudinit/sources/DataSourceAzure.py diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py new file mode 100644 index 00000000..83c4603c --- /dev/null +++ b/cloudinit/sources/DataSourceAzure.py @@ -0,0 +1,205 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2013 Canonical Ltd. +# +# Author: Scott Moser +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import base64 +import os +from xml.dom import minidom + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util + +LOG = logging.getLogger(__name__) + +DS_NAME = 'Azure' +DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} +AGENT_START = ['service', 'walinuxagent', 'start'] +DEFAULT_DS_CONFIG = {'datasource': {DS_NAME: {'agent_command': AGENT_START}}} + + +class DataSourceAzureNet(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.seed_dir = os.path.join(paths.seed_dir, 'azure') + self.cfg = {} + self.seed = None + + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) + + def get_data(self): + candidates = [self.seed_dir] + candidates.extend(list_possible_azure_ds_devs()) + found = None + + for cdev in candidates: + try: + if cdev.startswith("/dev/"): + ret = util.mount_cb(cdev, load_azure_ds_dir) + else: + ret = load_azure_ds_dir(cdev) + + except NonAzureDataSource: + pass + except BrokenAzureDataSource as exc: + raise exc + except util.MountFailedError: + LOG.warn("%s was not mountable" % cdev) + + (md, self.userdata_raw, cfg) = ret + self.seed = cdev + self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) + self.cfg = util.mergemanydict([cfg, DEFAULT_DS_CONFIG]) + found = cdev + + LOG.debug("found datasource in %s", cdev) + break + + if not found: + return False + + path = ['datasource', DS_NAME, 'agent_command'] + cmd = None + for cfg in (self.cfg, self.sys_cfg): + cmd = util.get_cfg_by_path(cfg, keyp=path) + if cmd is not None: + break + invoke_agent(cmd) + + def get_config_obj(self): + return self.cfg + + +def invoke_agent(cmd): + if cmd: + LOG.debug("invoking agent: %s" % cmd) + util.subp(cmd, shell=(not isinstance(cmd, list))) + else: + LOG.debug("not invoking agent") + + +def find_child(node, filter_func): + ret = [] + if not node.hasChildNodes(): + return ret + for child in node.childNodes: + if filter_func(child): + ret.append(child) + return ret + + +def read_azure_ovf(contents): + dom = minidom.parseString(contents) + results = find_child(dom.documentElement, + lambda n: n.localName == "ProvisioningSection") + + if len(results) == 0: + raise NonAzureDataSource("No ProvisioningSection") + if len(results) > 1: + raise BrokenAzureDataSource("found '%d' ProvisioningSection items" % + len(results)) + provSection = results[0] + + lpcs_nodes = find_child(provSection, + lambda n: n.localName == "LinuxProvisioningConfigurationSet") + + if len(results) == 0: + raise NonAzureDataSource("No LinuxProvisioningConfigurationSet") + if len(results) > 1: + raise BrokenAzureDataSource("found '%d' %ss" % + ("LinuxProvisioningConfigurationSet", + len(results))) + lpcs = lpcs_nodes[0] + + if not lpcs.hasChildNodes(): + raise BrokenAzureDataSource("no child nodes of configuration set") + + md_props = 'seedfrom' + md = {'azure_data': {}} + cfg = {} + ud = "" + + for child in lpcs.childNodes: + if child.nodeType == dom.TEXT_NODE or not child.localName: + continue + + name = child.localName.lower() + + simple = False + if (len(child.childNodes) == 1 and + child.childNodes[0].nodeType == dom.TEXT_NODE): + simple = True + value = child.childNodes[0].wholeText + + if name == "userdata": + ud = base64.b64decode(''.join(value.split())) + elif name == "username": + cfg['system_info'] = {'default_user': {'name': value}} + elif name == "hostname": + md['local-hostname'] = value + elif name == "dscfg": + cfg['datasource'] = 
{DS_NAME: util.load_yaml(value, default={})} + elif simple: + if name in md_props: + md[name] = value + else: + md['azure_data'][name] = value + + return (md, ud, cfg) + + +def list_possible_azure_ds_devs(): + # return a sorted list of devices that might have a azure datasource + devlist = [] + for fstype in ("iso9660", "udf"): + devlist.extend(util.find_devs_with("TYPE=%s" % fstype)) + + devlist.sort(reverse=True) + return devlist + + +def load_azure_ds_dir(source_dir): + ovf_file = os.path.join(source_dir, "ovf-env.xml") + + if not os.path.isfile(ovf_file): + raise NonAzureDataSource("No ovf-env file found") + + with open(ovf_file, "r") as fp: + contents = fp.read() + + return read_azure_ovf(contents) + + +class BrokenAzureDataSource(Exception): + pass + + +class NonAzureDataSource(Exception): + pass + + +# Used to match classes to dependencies +datasources = [ + (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index fc8c22d4..fbabcad9 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -42,3 +42,7 @@ datasource: meta-data: instance-id: i-87018aed local-hostname: myhost.internal + + Azure: + agent_command: [service, walinuxagent, start] + -- cgit v1.2.3 From 4afa528dcf2938e209b6dc852f6d4c4076084fa5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 8 Jul 2013 23:21:29 -0400 Subject: fix test_nocloud testcase --- tests/unittests/test_datasource/test_nocloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 62fc5358..7328b240 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -22,7 +22,7 @@ class TestNoCloudDataSource(MockerTestCase): def tearDown(self): apply_patches([i for i in reversed(self.unapply)]) - super(TestNoCloudDataSource, self).setUp() + super(TestNoCloudDataSource, self).tearDown() def apply_patches(self, patches): ret = apply_patches(patches) -- cgit v1.2.3 From 21ea6154ab2aafbe51c7b23fd56e43bd1cc26b00 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Jul 2013 01:35:49 -0400 Subject: add some unit tests, fix things found by doing so --- cloudinit/sources/DataSourceAzure.py | 26 +++- tests/unittests/test_datasource/test_azure.py | 168 ++++++++++++++++++++++++++ 2 files changed, 188 insertions(+), 6 deletions(-) create mode 100644 tests/unittests/test_datasource/test_azure.py diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 83c4603c..6a04b333 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -18,6 +18,7 @@ import base64 import os +import os.path from xml.dom import minidom from cloudinit import log as logging @@ -29,7 +30,7 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] -DEFAULT_DS_CONFIG = {'datasource': {DS_NAME: {'agent_command': AGENT_START}}} +BUILTIN_DS_CONFIG = {'datasource': {DS_NAME: {'agent_command': AGENT_START}}} class DataSourceAzureNet(sources.DataSource): @@ -56,16 +57,17 @@ class DataSourceAzureNet(sources.DataSource): ret = 
load_azure_ds_dir(cdev) except NonAzureDataSource: - pass + continue except BrokenAzureDataSource as exc: raise exc except util.MountFailedError: LOG.warn("%s was not mountable" % cdev) + continue (md, self.userdata_raw, cfg) = ret self.seed = cdev self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) - self.cfg = util.mergemanydict([cfg, DEFAULT_DS_CONFIG]) + self.cfg = cfg found = cdev LOG.debug("found datasource in %s", cdev) @@ -76,17 +78,25 @@ class DataSourceAzureNet(sources.DataSource): path = ['datasource', DS_NAME, 'agent_command'] cmd = None - for cfg in (self.cfg, self.sys_cfg): + for cfg in (self.cfg, self.sys_cfg, BUILTIN_DS_CONFIG): cmd = util.get_cfg_by_path(cfg, keyp=path) if cmd is not None: break - invoke_agent(cmd) + + try: + invoke_agent(cmd) + except util.ProcessExecutionError: + # claim the datasource even if the command failed + util.logexc(LOG, "agent command '%s' failed.", cmd) + + return True def get_config_obj(self): return self.cfg def invoke_agent(cmd): + # this is a function itself to simplify patching it for test if cmd: LOG.debug("invoking agent: %s" % cmd) util.subp(cmd, shell=(not isinstance(cmd, list))) @@ -105,7 +115,11 @@ def find_child(node, filter_func): def read_azure_ovf(contents): - dom = minidom.parseString(contents) + try: + dom = minidom.parseString(contents) + except Exception as e: + raise NonAzureDataSource("invalid xml: %s" % e) + results = find_child(dom.documentElement, lambda n: n.localName == "ProvisioningSection") diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py new file mode 100644 index 00000000..179fb50a --- /dev/null +++ b/tests/unittests/test_datasource/test_azure.py @@ -0,0 +1,168 @@ +from cloudinit import helpers +from cloudinit.sources import DataSourceAzure +from tests.unittests.helpers import populate_dir + +import base64 +from mocker import MockerTestCase +import os +import yaml + + +def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): + if data is None: + data = {'HostName': 'FOOHOST'} + if pubkeys is None: + pubkeys = {} + + content = """ + + + 1.0 + + LinuxProvisioningConfiguration + """ + for key, val in data.items(): + content += "<%s>%s\n" % (key, val, key) + + if userdata: + content += "%s\n" % (base64.b64encode(userdata)) + + if pubkeys: + content += "\n" + for fp, path in pubkeys.items(): + content += " " + content += ("%s%s" % + (fp, path)) + content += " " + content += """ + + + 1.0 + + kms.core.windows.net + false + + + + """ + + return content + + +class TestAzureDataSource(MockerTestCase): + + def setUp(self): + # makeDir comes from MockerTestCase + self.tmp = self.makeDir() + + # patch cloud_dir, so our 'seed_dir' is guaranteed empty + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + + self.unapply = [] + super(TestAzureDataSource, self).setUp() + + def tearDown(self): + apply_patches([i for i in reversed(self.unapply)]) + super(TestAzureDataSource, self).tearDown() + + def apply_patches(self, patches): + ret = apply_patches(patches) + self.unapply += ret + + def _get_ds(self, data): + + def dsdevs(): + return data.get('dsdevs', []) + + def invoker(cmd): + data['agent_invoked'] = cmd + + if data.get('ovfcontent') is not None: + populate_dir(os.path.join(self.paths.seed_dir, "azure"), + {'ovf-env.xml': data['ovfcontent']}) + + mod = DataSourceAzure + + if data.get('dsdevs'): + self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)]) + + self.apply_patches([(mod, 'invoke_agent', invoker)]) + + dsrc = 
mod.DataSourceAzureNet( + data.get('sys_cfg', {}), distro=None, paths=self.paths) + + return dsrc + + def test_basic_seed_dir(self): + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(dsrc.userdata_raw, "") + self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName']) + + def test_user_cfg_set_agent_command(self): + cfg = {'agent_command': "my_command"} + odata = {'HostName': "myhost", 'UserName': "myuser", + 'dscfg': yaml.dump(cfg)} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(data['agent_invoked'], cfg['agent_command']) + + def test_sys_cfg_set_agent_command(self): + sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}} + data = {'ovfcontent': construct_valid_ovf_env(data={}), + 'sys_cfg': sys_cfg} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(data['agent_invoked'], '_COMMAND') + + def test_userdata_found(self): + mydata = "FOOBAR" + odata = {'UserData': base64.b64encode(mydata)} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(dsrc.userdata_raw, mydata) + + def test_no_datasource_expected(self): + #no source should be found if no seed_dir and no devs + data = {} + dsrc = self._get_ds({}) + ret = dsrc.get_data() + self.assertFalse(ret) + self.assertFalse('agent_invoked' in data) + + +class TestReadAzureOvf(MockerTestCase): + def test_invalid_xml_raises_non_azure_ds(self): + invalid_xml = "" + construct_valid_ovf_env(data={}) + self.assertRaises(DataSourceAzure.NonAzureDataSource, + DataSourceAzure.read_azure_ovf, invalid_xml) + + +def apply_patches(patches): + ret = [] + for (ref, name, replace) in patches: + if replace is None: + continue + orig = getattr(ref, name) + setattr(ref, name, replace) + ret.append((ref, name, orig)) + return ret -- cgit v1.2.3 From 6cb13e582c66068b26231d31c5db6a987fc4a2e6 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Jul 2013 02:26:28 -0400 Subject: handlers/upstart_job.py: invoke reload-configuration if we can. Previously we'd not invoked reload-configuration because doing so caused issues with upstart. Now, instead, determine if we can invoke it. LP: #1124384 --- ChangeLog | 2 ++ cloudinit/handlers/upstart_job.py | 60 ++++++++++++++++++++++++++++++++------- 2 files changed, 52 insertions(+), 10 deletions(-) diff --git a/ChangeLog b/ChangeLog index 6495a19b..40c8912a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,8 @@ 0.7.3: - fix omnibus chef installer (LP: #1182265) [Chris Wing] - small fix for OVF datasource for iso transport on non-iso9660 filesystem + - determine if upstart version is suitable for + 'initctl reload-configuration' (LP: #1124384). If so, then invoke it. 
0.7.2: - add a debian watch file - add 'sudo' entry to ubuntu's default user (LP: #1080717) diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index edd56527..b3d60d6e 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -22,6 +22,7 @@ import os +import re from cloudinit import handlers from cloudinit import log as logging @@ -66,14 +67,53 @@ class UpstartJobPartHandler(handlers.Handler): path = os.path.join(self.upstart_dir, filename) util.write_file(path, payload, 0644) - # FIXME LATER (LP: #1124384) - # a bug in upstart means that invoking reload-configuration - # at this stage in boot causes havoc. So, until that is fixed - # we will not do that. However, I'd like to be able to easily - # test to see if this bug is still present in an image with - # a newer upstart. So, a boot hook could easiliy write this file. - if os.path.exists("/run/cloud-init-upstart-reload"): - # if inotify support is not present in the root filesystem - # (overlayroot) then we need to tell upstart to re-read /etc - + if SUITABLE_UPSTART: util.subp(["initctl", "reload-configuration"], capture=False) + + +def _has_suitable_upstart(): + # (LP: #1124384) + # a bug in upstart means that invoking reload-configuration + # at this stage in boot causes havoc. So, try to determine if upstart + # is installed, and reloading configuration is OK. + if not os.path.exists("/sbin/initctl"): + return False + try: + (version_out, _err) = util.subp(["initctl", "version"]) + except: + util.logexc(LOG, "initctl version failed") + return False + + # expecting 'initctl version' to output something like: init (upstart X.Y) + if re.match("upstart 1.[0-7][\)]", version_out): + return False + if "upstart 0." in version_out: + return False + elif "upstart 1.8" in version_out: + if not os.path.exists("/usr/bin/dpkg-query"): + return False + try: + (dpkg_ver, _err) = util.subp(["dpkg-query", + "--showformat=${Version}", + "--show", "upstart"], rcs=[0, 1]) + except Exception: + util.logexc(LOG, "dpkg-query failed") + return False + + try: + util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good]) + print "good version" + return True + except util.ProcessExecutionError as e: + if e.exit_code is 1: + pass + else: + util.logexc(LOG, "dpkg --compare-versions failed [%s]", + e.exit_code) + except Exception as e: + util.logexc(LOG, "dpkg --compare-versions failed") + return False + else: + return True + +SUITABLE_UPSTART = _has_suitable_upstart() -- cgit v1.2.3 From 1c76b49ccbe0c31187aeea12b0e395774aa90faa Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Jul 2013 02:33:32 -0400 Subject: re-enable test case because 1124384 is fixed --- tests/unittests/test_builtin_handlers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index 9cf28215..e25a5144 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -35,7 +35,6 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase): None, None, None) self.assertEquals(0, len(os.listdir(up_root))) - @unittest.skip("until LP: #1124384 fixed") def test_upstart_frequency_single(self): # files should be written out when frequency is ! 
per-instance new_root = self.makeDir() @@ -47,6 +46,7 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase): 'upstart_dir': "/etc/upstart", }) + upstart_job.SUITABLE_UPSTART = True util.ensure_dir("/run") util.ensure_dir("/etc/upstart") -- cgit v1.2.3 From 00319aaf5777883cff311778744e4cd72d42a496 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Jul 2013 13:58:49 -0400 Subject: add azure to ds list --- cloudinit/settings.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 8cc9e3b4..dc371cd2 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -31,6 +31,7 @@ CFG_BUILTIN = { 'datasource_list': [ 'NoCloud', 'ConfigDrive', + 'Azure', 'AltCloud', 'OVF', 'MAAS', -- cgit v1.2.3 From d1d96451a420672a01d9097cc0b14ee13e8a9256 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Jul 2013 14:13:24 -0400 Subject: add stub for ssh keys The idea is that in the future, the DataSources get_data could check to see if there were ssh keys provided. if there were, it could poll and wait (or inotify) until the .crt files appeared in the /var/lib/walinux directory. Once they did, it'd populate the metadata's public keys. --- cloudinit/sources/DataSourceAzure.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 6a04b333..143b7e4a 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -114,6 +114,18 @@ def find_child(node, filter_func): return ret +def load_azure_ovf_pubkeys(sshnode): + # in the future this would return a list of dicts like: + # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', + # 'path': 'where/to/go'}] + # + # + # ABC/ABC + # ... + # + return [] + + def read_azure_ovf(contents): try: dom = minidom.parseString(contents) @@ -169,6 +181,8 @@ def read_azure_ovf(contents): md['local-hostname'] = value elif name == "dscfg": cfg['datasource'] = {DS_NAME: util.load_yaml(value, default={})} + elif name == "ssh": + cfg['_pubkeys'] = loadAzurePubkeys(child) elif simple: if name in md_props: md[name] = value -- cgit v1.2.3 From 6bea1cb867c13e05e3548c648d5f051d2c49f07b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Jul 2013 14:41:55 -0400 Subject: better handling for user/password --- cloudinit/sources/DataSourceAzure.py | 21 ++++++++++++++++++++- tests/unittests/test_datasource/test_azure.py | 27 +++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 143b7e4a..5037c1a3 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -160,6 +160,8 @@ def read_azure_ovf(contents): md = {'azure_data': {}} cfg = {} ud = "" + password = None + username = None for child in lpcs.childNodes: if child.nodeType == dom.TEXT_NODE or not child.localName: @@ -176,19 +178,36 @@ def read_azure_ovf(contents): if name == "userdata": ud = base64.b64decode(''.join(value.split())) elif name == "username": - cfg['system_info'] = {'default_user': {'name': value}} + username = value + elif name == "userpassword": + password = value elif name == "hostname": md['local-hostname'] = value elif name == "dscfg": cfg['datasource'] = {DS_NAME: util.load_yaml(value, default={})} elif name == "ssh": cfg['_pubkeys'] = loadAzurePubkeys(child) + elif name == "disablesshpasswordauthentication": + cfg['ssh_pwauth'] = util.is_true(value) elif simple: if name in md_props: 
md[name] = value else: md['azure_data'][name] = value + defuser = {} + if username: + defuser['name'] = username + if password: + defuser['password'] = password + defuser['lock_passwd'] = False + + if defuser: + cfg['system_info'] = {'default_user': defuser} + + if 'ssh_pwauth' not in cfg and password: + cfg['ssh_pwauth'] = True + return (md, ud, cfg) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 179fb50a..a2347f1b 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -131,6 +131,33 @@ class TestAzureDataSource(MockerTestCase): self.assertTrue(ret) self.assertEqual(data['agent_invoked'], '_COMMAND') + def test_username_used(self): + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(dsrc.cfg['system_info']['default_user']['name'], + "myuser") + + def test_password_given(self): + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserPassword': "mypass"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue('default_user' in dsrc.cfg['system_info']) + defuser = dsrc.cfg['system_info']['default_user'] + + # default user shoudl be updated for password and username + # and should not be locked. + self.assertEqual(defuser['name'], odata['UserName']) + self.assertEqual(defuser['password'], odata['UserPassword']) + self.assertFalse(defuser['lock_passwd']) + def test_userdata_found(self): mydata = "FOOBAR" odata = {'UserData': base64.b64encode(mydata)} -- cgit v1.2.3 From cf1b10900626dfa6194c77b6720291e7edbaf9f6 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Jul 2013 15:07:38 -0400 Subject: populate /var/lib/waagent with ovf-env.xml this will copy the ovf-env.xml file that was found to the configured directory (default /var/lib/waagent) --- cloudinit/sources/DataSourceAzure.py | 43 +++++++++++++++++++-------- tests/unittests/test_datasource/test_azure.py | 8 +++++ 2 files changed, 39 insertions(+), 12 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 5037c1a3..f1c7c771 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -30,7 +30,9 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] -BUILTIN_DS_CONFIG = {'datasource': {DS_NAME: {'agent_command': AGENT_START}}} +BUILTIN_DS_CONFIG = {'datasource': {DS_NAME: { + 'agent_command': AGENT_START, + 'data_dir': "/var/lib/waagent"}}} class DataSourceAzureNet(sources.DataSource): @@ -64,7 +66,7 @@ class DataSourceAzureNet(sources.DataSource): LOG.warn("%s was not mountable" % cdev) continue - (md, self.userdata_raw, cfg) = ret + (md, self.userdata_raw, cfg, files) = ret self.seed = cdev self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) self.cfg = cfg @@ -76,18 +78,24 @@ class DataSourceAzureNet(sources.DataSource): if not found: return False - path = ['datasource', DS_NAME, 'agent_command'] - cmd = None + fields = [('cmd', ['datasource', DS_NAME, 'agent_command']), + ('datadir', ['datasource', DS_NAME, 'data_dir'])] + mycfg = {} for cfg in (self.cfg, self.sys_cfg, BUILTIN_DS_CONFIG): - cmd = util.get_cfg_by_path(cfg, keyp=path) - if cmd is not None: 
- break + for name, path in fields: + if name in mycfg: + continue + value = util.get_cfg_by_path(cfg, keyp=path) + if value is not None: + mycfg[name] = value + + write_files(mycfg['datadir'], files) try: - invoke_agent(cmd) + invoke_agent(mycfg['cmd']) except util.ProcessExecutionError: # claim the datasource even if the command failed - util.logexc(LOG, "agent command '%s' failed.", cmd) + util.logexc(LOG, "agent command '%s' failed.", mycfg['cmd']) return True @@ -95,6 +103,16 @@ class DataSourceAzureNet(sources.DataSource): return self.cfg +def write_files(datadir, files): + if not datadir: + return + if not files: + files = {} + for (name, content) in files.items(): + util.write_file(filename=os.path.join(datadir, name), + content=content, mode=0600) + + def invoke_agent(cmd): # this is a function itself to simplify patching it for test if cmd: @@ -114,7 +132,7 @@ def find_child(node, filter_func): return ret -def load_azure_ovf_pubkeys(sshnode): +def load_azure_ovf_pubkeys(_sshnode): # in the future this would return a list of dicts like: # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', # 'path': 'where/to/go'}] @@ -186,7 +204,7 @@ def read_azure_ovf(contents): elif name == "dscfg": cfg['datasource'] = {DS_NAME: util.load_yaml(value, default={})} elif name == "ssh": - cfg['_pubkeys'] = loadAzurePubkeys(child) + cfg['_pubkeys'] = load_azure_ovf_pubkeys(child) elif name == "disablesshpasswordauthentication": cfg['ssh_pwauth'] = util.is_true(value) elif simple: @@ -230,7 +248,8 @@ def load_azure_ds_dir(source_dir): with open(ovf_file, "r") as fp: contents = fp.read() - return read_azure_ovf(contents) + md, ud, cfg = read_azure_ovf(contents) + return (md, ud, cfg, {'ovf-env.xml': contents}) class BrokenAzureDataSource(Exception): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index a2347f1b..68f4bcca 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -83,6 +83,12 @@ class TestAzureDataSource(MockerTestCase): def invoker(cmd): data['agent_invoked'] = cmd + def file_writer(datadir, files): + data['files'] = {} + data['datadir'] = datadir + for (fname, content) in files.items(): + data['files'][fname] = content + if data.get('ovfcontent') is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) @@ -93,6 +99,7 @@ class TestAzureDataSource(MockerTestCase): self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)]) self.apply_patches([(mod, 'invoke_agent', invoker)]) + self.apply_patches([(mod, 'write_files', file_writer)]) dsrc = mod.DataSourceAzureNet( data.get('sys_cfg', {}), distro=None, paths=self.paths) @@ -109,6 +116,7 @@ class TestAzureDataSource(MockerTestCase): self.assertTrue(ret) self.assertEqual(dsrc.userdata_raw, "") self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName']) + self.assertTrue('ovf-env.xml' in data['files']) def test_user_cfg_set_agent_command(self): cfg = {'agent_command': "my_command"} -- cgit v1.2.3 From c12845193066ac2eb14b9bbef75657d579b696b5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Jul 2013 15:49:55 -0400 Subject: search in the default storage directory for cached ovf-env.xml since azure ejects the disk on reboot, we need to look there to find this datasource. 
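
For illustration, a minimal sketch of the lookup order this change introduces
(resolve_data_dir is a hypothetical helper name; the assumptions are that
util.get_cfg_by_path returns None for a missing key path, and that the result
is appended last to the candidate list so the cache is only consulted when
nothing else matches):

    from cloudinit import util

    # simplified builtin default, for illustration only
    BUILTIN_DS_CONFIG = {'datasource': {'Azure': {'data_dir': '/var/lib/waagent'}}}

    def resolve_data_dir(sys_cfg):
        # system configuration wins; fall back to the builtin default
        path = ['datasource', 'Azure', 'data_dir']
        ddir = util.get_cfg_by_path(sys_cfg, keyp=path)
        if ddir is None:
            ddir = util.get_cfg_by_path(BUILTIN_DS_CONFIG, keyp=path)
        return ddir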
--- cloudinit/sources/DataSourceAzure.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index f1c7c771..92b6172b 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -47,8 +47,19 @@ class DataSourceAzureNet(sources.DataSource): return "%s [seed=%s]" % (root, self.seed) def get_data(self): + ddir_cfgpath = ['datasource', DS_NAME, 'data_dir'] + # azure removes/ejects the cdrom containing the ovf-env.xml + # file on reboot. So, in order to successfully reboot we + # need to look in the datadir and consider that valid + ddir = util.get_cfg_by_path(self.sys_cfg, ddir_cfgpath) + if ddir is None: + ddir = util.get_cfg_by_path(BUILTIN_DS_CONFIG, ddir_cfgpath) + candidates = [self.seed_dir] candidates.extend(list_possible_azure_ds_devs()) + if ddir: + candidates.append(ddir) + found = None for cdev in candidates: @@ -79,7 +90,7 @@ class DataSourceAzureNet(sources.DataSource): return False fields = [('cmd', ['datasource', DS_NAME, 'agent_command']), - ('datadir', ['datasource', DS_NAME, 'data_dir'])] + ('datadir', ddir_cfgpath)] mycfg = {} for cfg in (self.cfg, self.sys_cfg, BUILTIN_DS_CONFIG): for name, path in fields: -- cgit v1.2.3 From 4368b264be42472c53bc3333587c7029373ad56a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Jul 2013 16:03:22 -0400 Subject: mention using cached --- cloudinit/sources/DataSourceAzure.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 92b6172b..d8e39392 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -89,6 +89,9 @@ class DataSourceAzureNet(sources.DataSource): if not found: return False + if found == ddir: + LOG.debug("using cached datasource in %s", ddir) + fields = [('cmd', ['datasource', DS_NAME, 'agent_command']), ('datadir', ddir_cfgpath)] mycfg = {} -- cgit v1.2.3 From 950762bb008d25f529c71aae4c0b04f6b0134abb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Jul 2013 20:20:55 -0400 Subject: fill out load_azure_ovf_pubkeys now if there are pubkeys, the cfg['_pubkeys'] entry will have a list of dicts where each dict has 'fingerprint' and 'path' entries. The next thing to do is to block waiting for the .crt files to appear in /var/lib/waagent. --- cloudinit/sources/DataSourceAzure.py | 40 +++++++++++++++++++++++++-- tests/unittests/test_datasource/test_azure.py | 13 +++++++-- 2 files changed, 49 insertions(+), 4 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d8e39392..43a963ad 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -146,7 +146,7 @@ def find_child(node, filter_func): return ret -def load_azure_ovf_pubkeys(_sshnode): +def load_azure_ovf_pubkeys(sshnode): # in the future this would return a list of dicts like: # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', # 'path': 'where/to/go'}] @@ -155,7 +155,43 @@ def load_azure_ovf_pubkeys(_sshnode): # ABC/ABC # ... 
# - return [] + results = find_child(sshnode, lambda n: n.localName == "PublicKeys") + if len(results) == 0: + return [] + if len(results) > 1: + raise BrokenAzureDataSource("Multiple 'PublicKeys'(%s) in SSH node" % + len(results)) + + pubkeys_node = results[0] + pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey") + + if len(pubkeys) == 0: + return [] + + found = [] + text_node = minidom.Document.TEXT_NODE + + for pk_node in pubkeys: + if not pk_node.hasChildNodes(): + continue + cur = {'fingerprint': "", 'path': ""} + for child in pk_node.childNodes: + if (child.nodeType == text_node or not child.localName): + continue + + name = child.localName.lower() + + if name not in cur.keys(): + continue + + if (len(child.childNodes) != 1 or + child.childNodes[0].nodeType != text_node): + continue + + cur[name] = child.childNodes[0].wholeText.strip() + found.append(cur) + + return found def read_azure_ovf(contents): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 68f4bcca..be6fab70 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -34,11 +34,12 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): if pubkeys: content += "\n" - for fp, path in pubkeys.items(): + for fp, path in pubkeys: content += " " content += ("%s%s" % (fp, path)) - content += " " + content += "\n" + content += "" content += """ @@ -191,6 +192,14 @@ class TestReadAzureOvf(MockerTestCase): self.assertRaises(DataSourceAzure.NonAzureDataSource, DataSourceAzure.read_azure_ovf, invalid_xml) + def test_load_with_pubkeys(self): + mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}] + pubkeys = [(x['fingerprint'], x['path']) for x in mypklist] + content = construct_valid_ovf_env(pubkeys=pubkeys) + (md, ud, cfg) = DataSourceAzure.read_azure_ovf(content) + for mypk in mypklist: + self.assertIn(mypk, cfg['_pubkeys']) + def apply_patches(patches): ret = [] -- cgit v1.2.3 From ec22feeefe309187107e0fb5471136f1c8a646c9 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 9 Jul 2013 20:36:28 -0400 Subject: build up the 'wait_for' list including fingerprint.crt files --- cloudinit/sources/DataSourceAzure.py | 6 ++++++ tests/unittests/test_datasource/test_azure.py | 13 +++++++++++++ 2 files changed, 19 insertions(+) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 43a963ad..ab570344 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -111,6 +111,12 @@ class DataSourceAzureNet(sources.DataSource): # claim the datasource even if the command failed util.logexc(LOG, "agent command '%s' failed.", mycfg['cmd']) + wait_for = [os.path.join(mycfg['datadir'], "SharedConfig.xml")] + + for pk in self.cfg.get('_pubkeys', []): + bname = pk['fingerprint'] + ".crt" + wait_for += [os.path.join(mycfg['datadir'], bname)] + return True def get_config_obj(self): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index be6fab70..a7094ec6 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -185,6 +185,19 @@ class TestAzureDataSource(MockerTestCase): self.assertFalse(ret) self.assertFalse('agent_invoked' in data) + def test_cfg_has_pubkeys(self): + odata = {'HostName': "myhost", 'UserName': "myuser"} + mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}] + pubkeys = [(x['fingerprint'], x['path']) for 
x in mypklist] + data = {'ovfcontent': construct_valid_ovf_env(data=odata, + pubkeys=pubkeys)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + for mypk in mypklist: + self.assertIn(mypk, dsrc.cfg['_pubkeys']) + class TestReadAzureOvf(MockerTestCase): def test_invalid_xml_raises_non_azure_ds(self): -- cgit v1.2.3 From ce949d5b4c94caf9c1df6393abe86de2872e05ae Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 10 Jul 2013 13:08:23 -0400 Subject: add waiting for files and reading of crt keys --- cloudinit/sources/DataSourceAzure.py | 46 ++++++++++++++++++++++++++- packages/debian/changelog.in | 2 +- tests/unittests/test_datasource/test_azure.py | 21 +++++++++--- 3 files changed, 62 insertions(+), 7 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index ab570344..200bede5 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -19,6 +19,7 @@ import base64 import os import os.path +import time from xml.dom import minidom from cloudinit import log as logging @@ -113,9 +114,18 @@ class DataSourceAzureNet(sources.DataSource): wait_for = [os.path.join(mycfg['datadir'], "SharedConfig.xml")] + fp_files = [] for pk in self.cfg.get('_pubkeys', []): bname = pk['fingerprint'] + ".crt" - wait_for += [os.path.join(mycfg['datadir'], bname)] + fp_files += [os.path.join(mycfg['datadir'], bname)] + + missing = wait_for_files(wait_for + fp_files) + if len(missing): + LOG.warn("Did not find files, but going on: %s" % missing) + + pubkeys = pubkeys_from_crt_files(fp_files) + + self.metadata['public-keys'] = pubkeys return True @@ -123,6 +133,40 @@ class DataSourceAzureNet(sources.DataSource): return self.cfg +def crtfile_to_pubkey(fname): + pipeline = ('openssl x509 -noout -pubkey < "$0" |' + 'ssh-keygen -i -m PKCS8 -f /dev/stdin') + (out, _err) = util.subp(['sh', '-c', pipeline, fname], capture=True) + return out.rstrip() + + +def pubkeys_from_crt_files(flist): + pubkeys = [] + errors = [] + for fname in flist: + try: + pubkeys.append(crtfile_to_pubkey(fname)) + except util.ProcessExecutionError: + errors.extend(fname) + + if errors: + LOG.warn("failed to convert the crt files to pubkey: %s" % errors) + + return pubkeys + + +def wait_for_files(flist, maxwait=60, naplen=.5): + need = set(flist) + waited = 0 + while waited < maxwait: + need -= set([f for f in need if os.path.exists(f)]) + if len(need) == 0: + return [] + time.sleep(naplen) + waited += naplen + return need + + def write_files(datadir, files): if not datadir: return diff --git a/packages/debian/changelog.in b/packages/debian/changelog.in index e3e94f54..4944230b 100644 --- a/packages/debian/changelog.in +++ b/packages/debian/changelog.in @@ -1,5 +1,5 @@ ## This is a cheetah template -cloud-init (${version}~bzr${revision}-1) UNRELEASED; urgency=low +cloud-init (${version}~bzr${revision}-1) raring; urgency=low * build diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index a7094ec6..74ed7197 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -81,15 +81,23 @@ class TestAzureDataSource(MockerTestCase): def dsdevs(): return data.get('dsdevs', []) - def invoker(cmd): + def _invoke_agent(cmd): data['agent_invoked'] = cmd - def file_writer(datadir, files): + def _write_files(datadir, files): data['files'] = {} data['datadir'] = datadir for (fname, content) in files.items(): data['files'][fname] = content + def 
_wait_for_files(flist, _maxwait=None, _naplen=None):
+            data['waited'] = flist
+            return []
+
+        def _pubkeys_from_crt_files(flist):
+            data['pubkey_files'] = flist
+            return ["pubkey_from: %s" % f for f in flist]
+
         if data.get('ovfcontent') is not None:
             populate_dir(os.path.join(self.paths.seed_dir, "azure"),
                          {'ovf-env.xml': data['ovfcontent']})
@@ -99,8 +107,11 @@ class TestAzureDataSource(MockerTestCase):
         if data.get('dsdevs'):
             self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)])
 
-        self.apply_patches([(mod, 'invoke_agent', invoker)])
-        self.apply_patches([(mod, 'write_files', file_writer)])
+        self.apply_patches([(mod, 'invoke_agent', _invoke_agent),
+                            (mod, 'write_files', _write_files),
+                            (mod, 'wait_for_files', _wait_for_files),
+                            (mod, 'pubkeys_from_crt_files',
+                             _pubkeys_from_crt_files)])
 
         dsrc = mod.DataSourceAzureNet(
             data.get('sys_cfg', {}), distro=None, paths=self.paths)
@@ -209,7 +220,7 @@ class TestReadAzureOvf(MockerTestCase):
         mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}]
         pubkeys = [(x['fingerprint'], x['path']) for x in mypklist]
         content = construct_valid_ovf_env(pubkeys=pubkeys)
-        (md, ud, cfg) = DataSourceAzure.read_azure_ovf(content)
+        (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content)
         for mypk in mypklist:
             self.assertIn(mypk, cfg['_pubkeys'])
 
-- cgit v1.2.3


From 078f0fc644e640eacad316aeda7ee05eb8957aa3 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 11 Jul 2013 10:14:49 -0400
Subject: upstart_job.py: fix pylint and bad variable in _has_suitable_upstart

---
 cloudinit/handlers/upstart_job.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index b3d60d6e..7a73d1b2 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -85,7 +85,7 @@ def _has_suitable_upstart():
         return False
 
     # expecting 'initctl version' to output something like: init (upstart X.Y)
-    if re.match("upstart 1.[0-7][\)]", version_out):
+    if re.match("upstart 1.[0-7][)]", version_out):
         return False
     if "upstart 0." in version_out:
         return False
@@ -101,8 +101,8 @@ def _has_suitable_upstart():
             return False
 
         try:
+            good = "1.8-0ubuntu1.2"
             util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
-            print "good version"
             return True
         except util.ProcessExecutionError as e:
             if e.exit_code is 1:
-- cgit v1.2.3


From 299af1d4a3e09ad9c961cb641e62a20ab5998640 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 11 Jul 2013 10:15:58 -0400
Subject: test_builtin_handlers.py: fix pylint

---
 tests/unittests/test_builtin_handlers.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index e25a5144..b387f13b 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -1,7 +1,6 @@
 """Tests of the built-in user data handlers."""
 
 import os
-import unittest
 
 from tests.unittests import helpers as test_helpers
-- cgit v1.2.3


From 8c15320283012d218c2165f5dfb38a29d2cf7b53 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 11 Jul 2013 10:16:18 -0400
Subject: DataSourceAzure.py: allow 'customdata' to be the name for userdata

Also, fix a comment and write a log message on how long we waited for
the files to appear.
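
As a rough sketch of the alias handling (illustrative only; decode_userdata
is a hypothetical helper, and the whitespace stripping before base64-decoding
matches the hunk below):

    import base64

    def decode_userdata(name, value):
        # UserData and CustomData decode identically; if both are present
        # the behavior is undefined, as the code comment notes.
        if name.lower() in ("userdata", "customdata"):
            return base64.b64decode(''.join(value.split()))
        return None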
--- cloudinit/sources/DataSourceAzure.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 200bede5..2818408c 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -119,9 +119,13 @@ class DataSourceAzureNet(sources.DataSource): bname = pk['fingerprint'] + ".crt" fp_files += [os.path.join(mycfg['datadir'], bname)] + start = time.time() missing = wait_for_files(wait_for + fp_files) if len(missing): - LOG.warn("Did not find files, but going on: %s" % missing) + LOG.warn("Did not find files, but going on: %s", missing) + else: + LOG.debug("waited %.3f seconds for %d files to appear", + time.time() - start, len(wait_for)) pubkeys = pubkeys_from_crt_files(fp_files) @@ -197,7 +201,8 @@ def find_child(node, filter_func): def load_azure_ovf_pubkeys(sshnode): - # in the future this would return a list of dicts like: + # This parses a 'SSH' node formatted like below, and returns + # an array of dicts. # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', # 'path': 'where/to/go'}] # @@ -293,7 +298,9 @@ def read_azure_ovf(contents): simple = True value = child.childNodes[0].wholeText - if name == "userdata": + # we accept either UserData or CustomData. If both are present + # then behavior is undefined. + if (name == "userdata" or name == "customdata"): ud = base64.b64decode(''.join(value.split())) elif name == "username": username = value -- cgit v1.2.3 From beb7cfbba437440811adb1b40a7882b15888b341 Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Tue, 16 Jul 2013 09:54:32 +0200 Subject: Add SysV init scripts for Debian --- sysvinit/debian/cloud-config | 52 ++++++++++++++++++++++++++++++++++++++ sysvinit/debian/cloud-final | 54 ++++++++++++++++++++++++++++++++++++++++ sysvinit/debian/cloud-init | 52 ++++++++++++++++++++++++++++++++++++++ sysvinit/debian/cloud-init-local | 52 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 210 insertions(+) create mode 100755 sysvinit/debian/cloud-config create mode 100755 sysvinit/debian/cloud-final create mode 100755 sysvinit/debian/cloud-init create mode 100755 sysvinit/debian/cloud-init-local diff --git a/sysvinit/debian/cloud-config b/sysvinit/debian/cloud-config new file mode 100755 index 00000000..57888653 --- /dev/null +++ b/sysvinit/debian/cloud-config @@ -0,0 +1,52 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: cloud-config +# Required-Start: cloud-init cloud-init-local +# Required-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Cloud init modules --mode config +# Description: Cloud configuration initialization +### END INIT INFO + +# Authors: Julien Danjou +# Juerg Haefliger + +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Cloud service" +NAME=cloud-init +DAEMON=/usr/bin/$NAME +DAEMON_ARGS="modules --mode config" +SCRIPTNAME=/etc/init.d/$NAME + +# Exit if the package is not installed +[ -x "$DAEMON" ] || exit 0 + +# Read configuration variable file if it is present +[ -r /etc/default/$NAME ] && . /etc/default/$NAME + +# Define LSB log_* functions. +# Depend on lsb-base (>= 3.2-14) to ensure that this file is present +# and status_of_proc is working. +. /lib/lsb/init-functions + +case "$1" in +start) + log_daemon_msg "Starting $DESC" "$NAME" + $DAEMON ${DAEMON_ARGS} + case "$?" 
in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +stop|restart|force-reload) + echo "Error: argument '$1' not supported" >&2 + exit 3 +;; +*) + echo "Usage: $SCRIPTNAME {start}" >&2 + exit 3 +;; +esac + +: diff --git a/sysvinit/debian/cloud-final b/sysvinit/debian/cloud-final new file mode 100755 index 00000000..46e9b454 --- /dev/null +++ b/sysvinit/debian/cloud-final @@ -0,0 +1,54 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: cloud-final +# Required-Start: $all cloud-config +# Required-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Cloud init modules final jobs +# Description: This runs the cloud configuration initialization "final" jobs +# and can be seen as the traditional "rc.local" time for the cloud. +# It runs after all cloud-config jobs are run +### END INIT INFO + +# Authors: Julien Danjou +# Juerg Haefliger + +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Cloud service" +NAME=cloud-init +DAEMON=/usr/bin/$NAME +DAEMON_ARGS="modules --mode final" +SCRIPTNAME=/etc/init.d/$NAME + +# Exit if the package is not installed +[ -x "$DAEMON" ] || exit 0 + +# Read configuration variable file if it is present +[ -r /etc/default/$NAME ] && . /etc/default/$NAME + +# Define LSB log_* functions. +# Depend on lsb-base (>= 3.2-14) to ensure that this file is present +# and status_of_proc is working. +. /lib/lsb/init-functions + +case "$1" in +start) + log_daemon_msg "Starting $DESC" "$NAME" + $DAEMON ${DAEMON_ARGS} + case "$?" in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +stop|restart|force-reload) + echo "Error: argument '$1' not supported" >&2 + exit 3 +;; +*) + echo "Usage: $SCRIPTNAME {start}" >&2 + exit 3 +;; +esac + +: diff --git a/sysvinit/debian/cloud-init b/sysvinit/debian/cloud-init new file mode 100755 index 00000000..15ffeb2e --- /dev/null +++ b/sysvinit/debian/cloud-init @@ -0,0 +1,52 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: cloud-init +# Required-Start: $local_fs $remote_fs $syslog $network cloud-init-local +# Required-Stop: $remote_fs +# X-Start-Before: sshd +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Cloud init +# Description: Cloud configuration initialization +### END INIT INFO + +# Author: Julien Danjou + +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Cloud service" +NAME=cloud-init +DAEMON=/usr/bin/$NAME +DAEMON_ARGS="init" +SCRIPTNAME=/etc/init.d/$NAME + +# Exit if the package is not installed +[ -x "$DAEMON" ] || exit 0 + +# Read configuration variable file if it is present +[ -r /etc/default/$NAME ] && . /etc/default/$NAME + +# Define LSB log_* functions. +# Depend on lsb-base (>= 3.2-14) to ensure that this file is present +# and status_of_proc is working. +. /lib/lsb/init-functions + +case "$1" in + start) + log_daemon_msg "Starting $DESC" "$NAME" + $DAEMON ${DAEMON_ARGS} + case "$?" in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac + ;; + stop|restart|force-reload) + echo "Error: argument '$1' not supported" >&2 + exit 3 + ;; + *) + echo "Usage: $SCRIPTNAME {start}" >&2 + exit 3 + ;; +esac + +: diff --git a/sysvinit/debian/cloud-init-local b/sysvinit/debian/cloud-init-local new file mode 100755 index 00000000..a1685c1e --- /dev/null +++ b/sysvinit/debian/cloud-init-local @@ -0,0 +1,52 @@ +#! 
/bin/sh
+### BEGIN INIT INFO
+# Provides: cloud-init-local
+# Required-Start: $local_fs $remote_fs
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Cloud init local
+# Description: Cloud configuration initialization
+### END INIT INFO
+
+# Authors: Julien Danjou
+# Juerg Haefliger
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Cloud service"
+NAME=cloud-init
+DAEMON=/usr/bin/$NAME
+DAEMON_ARGS="init --local"
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+case "$1" in
+start)
+    log_daemon_msg "Starting $DESC" "$NAME"
+    $DAEMON ${DAEMON_ARGS}
+    case "$?" in
+        0|1) log_end_msg 0 ;;
+        2) log_end_msg 1 ;;
+    esac
+;;
+stop|restart|force-reload)
+    echo "Error: argument '$1' not supported" >&2
+    exit 3
+;;
+*)
+    echo "Usage: $SCRIPTNAME {start}" >&2
+    exit 3
+;;
+esac
+
+:
-- cgit v1.2.3


From e2e5becb83827e741bfaaeac5edc1c7937718faa Mon Sep 17 00:00:00 2001
From: Ben Howard
Date: Tue, 16 Jul 2013 15:21:26 -0600
Subject: Use the inverse of DisableSshPasswordAuthentication for ssh_pwauth. (LP: 1201969)

---
 cloudinit/sources/DataSourceAzure.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 2818408c..5071ee67 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -313,7 +313,10 @@ def read_azure_ovf(contents):
         elif name == "ssh":
             cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
         elif name == "disablesshpasswordauthentication":
-            cfg['ssh_pwauth'] = util.is_true(value)
+            # The verb 'disablessh..." asks whether to disable password auth.
+            # Since it is disabled by default, "false" indicates that
+            # password authentication should be enabled.
+            cfg['ssh_pwauth'] = util.is_false(value)
         elif simple:
             if name in md_props:
                 md[name] = value
-- cgit v1.2.3


From 8f70bb7e7144f2225b4e9a589d16ae6d15992a3d Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 17 Jul 2013 13:36:32 -0400
Subject: Azure: create /var/lib/waagent with 0700 perms

The walinux agent writes its files with 0644 (default umask) permissions
but expects that they are not globally readable. Since we were creating
the directory for it using the default umask (0755), the files inside
were readable by non-privileged users.

---
 cloudinit/sources/DataSourceAzure.py | 7 +++++--
 tests/unittests/test_datasource/test_azure.py | 4 +++-
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index f1419296..c90d7b07 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -104,7 +104,9 @@ class DataSourceAzureNet(sources.DataSource):
                 if value is not None:
                     mycfg[name] = value
 
-        write_files(mycfg['datadir'], files)
+        # walinux agent writes files world readable, but expects
+        # the directory to be protected.
+ write_files(mycfg['datadir'], files, dirmode=0700) try: invoke_agent(mycfg['cmd']) @@ -171,11 +173,12 @@ def wait_for_files(flist, maxwait=60, naplen=.5): return need -def write_files(datadir, files): +def write_files(datadir, files, dirmode=None): if not datadir: return if not files: files = {} + util.ensure_dir(datadir, dirmode) for (name, content) in files.items(): util.write_file(filename=os.path.join(datadir, name), content=content, mode=0600) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 74ed7197..c79c25d8 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -84,9 +84,10 @@ class TestAzureDataSource(MockerTestCase): def _invoke_agent(cmd): data['agent_invoked'] = cmd - def _write_files(datadir, files): + def _write_files(datadir, files, dirmode): data['files'] = {} data['datadir'] = datadir + data['datadir_mode'] = dirmode for (fname, content) in files.items(): data['files'][fname] = content @@ -129,6 +130,7 @@ class TestAzureDataSource(MockerTestCase): self.assertEqual(dsrc.userdata_raw, "") self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName']) self.assertTrue('ovf-env.xml' in data['files']) + self.assertEqual(0700, data['datadir_mode']) def test_user_cfg_set_agent_command(self): cfg = {'agent_command': "my_command"} -- cgit v1.2.3 From fe1e3197482a25365379be306741d0a943dcdfd5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 17 Jul 2013 15:31:57 -0400 Subject: fix indentation a bit in Makefile --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 29bfe0bd..8cf1659a 100644 --- a/Makefile +++ b/Makefile @@ -28,9 +28,9 @@ test: check_version: @if [ "$(CHANGELOG_VERSION)" != "$(CODE_VERSION)" ]; then \ - echo "Error: ChangeLog version $(CHANGELOG_VERSION)" \ - "not equal to code version $(CODE_VERSION)"; exit 2; \ - else true; fi + echo "Error: ChangeLog version $(CHANGELOG_VERSION)" \ + "not equal to code version $(CODE_VERSION)"; exit 2; \ + else true; fi 2to3: 2to3 $(PY_FILES) -- cgit v1.2.3 From 67162bca0c49d415f92aefa22972fd3ffe179da6 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 17 Jul 2013 15:35:01 -0400 Subject: plain text password of '' or None should not trigger setting --- cloudinit/distros/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index cda2c6af..249e1b19 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -359,8 +359,8 @@ class Distro(object): # Add the user self.add_user(name, **kwargs) - # Set password if plain-text password provided - if 'plain_text_passwd' in kwargs: + # Set password if plain-text password provided and non-empty + if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']: self.set_passwd(name, kwargs['plain_text_passwd']) # Default locking down the account. 'lock_passwd' defaults to True. -- cgit v1.2.3 From 6b7e65e4f57902c25363c78a7e47aa2caa579b7b Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Thu, 18 Jul 2013 15:37:18 -0600 Subject: Added SmartOS datasource and unit tests. 
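The datasource below is unusual in that there is no HTTP metadata service to query: key/value pairs are fetched over the guest's second serial port, as the file header in the diff explains. As a rough sketch of the wire protocol the patch implements (hypothetical standalone code using the pyserial API; the device path, the "GET <key>" request form, and the lone "." terminator are taken from the patch itself):

    import serial  # pyserial, the same dependency the datasource imports

    def query_smartos(key, port='/dev/ttyS1', timeout=60):
        # Guest writes "GET <key>\n"; host answers with a status line
        # ("SUCCESS" or "NOTFOUND <key>"), then the value line by line,
        # then a lone "." marking end-of-response.
        ser = serial.Serial(port, timeout=timeout)
        try:
            ser.write("GET %s\n" % key)
            if 'SUCCESS' not in ser.readline():
                return None
            lines = []
            while True:
                line = ser.readline()
                if line.rstrip() == '.':
                    break
                lines.append(line)
            return ''.join(lines)
        finally:
            ser.close()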
--- cloudinit/settings.py | 1 + cloudinit/sources/DataSourceSmartOS.py | 172 +++++++++++++++++++++ cloudinit/util.py | 18 +++ tests/unittests/test_datasource/test_smartos.py | 191 ++++++++++++++++++++++++ 4 files changed, 382 insertions(+) create mode 100644 cloudinit/sources/DataSourceSmartOS.py create mode 100644 tests/unittests/test_datasource/test_smartos.py diff --git a/cloudinit/settings.py b/cloudinit/settings.py index dc371cd2..9f6badae 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -37,6 +37,7 @@ CFG_BUILTIN = { 'MAAS', 'Ec2', 'CloudStack', + 'SmartOS', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py new file mode 100644 index 00000000..f9b724eb --- /dev/null +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -0,0 +1,172 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2013 Canonical Ltd. +# +# Author: Ben Howard +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# +# Datasource for provisioning on SmartOS. This works on Joyent +# and public/private Clouds using SmartOS. +# +# SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests. +# The meta-data is transmitted via key/value pairs made by +# requests on the console. For example, to get the hostname, you +# would send "GET hostname" on /dev/ttyS1. 
+# + + +import os +import os.path +import serial +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util + + +TTY_LOC = '/dev/ttyS1' +LOG = logging.getLogger(__name__) + + +class DataSourceSmartOS(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.seed_dir = os.path.join(paths.seed_dir, 'sdc') + self.seed = None + self.is_smartdc = None + + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) + + def get_data(self): + md = {} + ud = "" + + if not os.path.exists(TTY_LOC): + LOG.debug("Host does not appear to be on SmartOS") + return False + self.seed = TTY_LOC + + system_uuid, system_type = dmi_data() + if 'smartdc' not in system_type.lower(): + LOG.debug("Host is not on SmartOS") + return False + self.is_smartdc = True + + hostname = query_data("hostname", strip=True) + if not hostname: + hostname = system_uuid + + md['local-hostname'] = hostname + md['instance-id'] = system_uuid + md['public-keys'] = query_data("root_authorized_keys", strip=True) + ud = query_data("user-script") + md['iptables_disable'] = query_data("disable_iptables_flag", + strip=True) + md['motd_sys_info'] = query_data("enable_motd_sys_info", strip=True) + + self.metadata = md + self.userdata_raw = ud + return True + + def get_instance_id(self): + return self.metadata['instance-id'] + + +def get_serial(): + """This is replaced in unit testing, allowing us to replace + serial.Serial with a mocked class""" + return serial.Serial() + + +def query_data(noun, strip=False): + """Makes a request to via the serial console via "GET " + + In the response, the first line is the status, while subsequent lines + are is the value. A blank line with a "." is used to indicate end of + response. + + The timeout value of 60 seconds should never be hit. The value + is taken from SmartOS own provisioning tools. Since we are reading + each line individually up until the single ".", the transfer is + usually very fast (i.e. microseconds) to get the response. 
+ """ + if not noun: + return False + + ser = get_serial() + ser.port = '/dev/ttyS1' + ser.open() + if not ser.isOpen(): + LOG.debug("Serial console is not open") + return False + + ser.write("GET %s\n" % noun.rstrip()) + status = str(ser.readline()).rstrip() + response = [] + eom_found = False + + if 'SUCCESS' not in status: + ser.close() + return None + + while not eom_found: + m = ser.readline() + if m.rstrip() == ".": + eom_found = True + else: + response.append(m) + + ser.close() + if not strip: + return "".join(response) + else: + return "".join(response).rstrip() + + return None + + +def dmi_data(): + sys_uuid, sys_type = None, None + dmidecode_path = util.which('dmidecode') + if not dmidecode_path: + return False + + sys_uuid_cmd = [dmidecode_path, "-s", "system-uuid"] + try: + LOG.debug("Getting hostname from dmidecode") + (sys_uuid, _err) = util.subp(sys_uuid_cmd) + except Exception as e: + util.logexc(LOG, "Failed to get system UUID", e) + + sys_type_cmd = [dmidecode_path, "-s", "system-product-name"] + try: + LOG.debug("Determining hypervisor product name via dmidecode") + (sys_type, _err) = util.subp(sys_type_cmd) + except Exception as e: + util.logexc(LOG, "Failed to get system UUID", e) + + return sys_uuid.lower(), sys_type + + +# Used to match classes to dependencies +datasources = [ + (DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/util.py b/cloudinit/util.py index c45aae06..7163225f 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1743,3 +1743,21 @@ def get_mount_info(path, log=LOG): mountinfo_path = '/proc/%s/mountinfo' % os.getpid() lines = load_file(mountinfo_path).splitlines() return parse_mount_info(path, lines, log) + +def which(program): + # Return path of program for execution if found in path + def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + fpath, fname = os.path.split(program) + if fpath: + if is_exe(program): + return program + else: + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + exe_file = os.path.join(path, program) + if is_exe(exe_file): + return exe_file + + return None diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py new file mode 100644 index 00000000..494f9828 --- /dev/null +++ b/tests/unittests/test_datasource/test_smartos.py @@ -0,0 +1,191 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2013 Canonical Ltd. +# +# Author: Ben Howard +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# +# This is a testcase for the SmartOS datasource. It replicates a serial +# console and acts like the SmartOS console does in order to validate +# return responses. 
+# + +from cloudinit import helpers +from cloudinit.sources import DataSourceSmartOS + +from mocker import MockerTestCase +import uuid + +mock_returns = { + 'hostname': 'test-host', + 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', + 'disable_iptables_flag': False, + 'enable_motd_sys_info': False, + 'system_uuid': str(uuid.uuid4()), + 'smartdc': 'smartdc', + 'userdata': """ +#!/bin/sh +/bin/true +""", +} + + +class MockSerial(object): + """Fake a serial terminal for testing the code that + interfaces with the serial""" + + port = None + + def __init__(self): + self.last = None + self.last = None + self.new = True + self.count = 0 + self.mocked_out = [] + + def open(self): + return True + + def close(self): + return True + + def isOpen(self): + return True + + def write(self, line): + line = line.replace('GET ', '') + self.last = line.rstrip() + + def readline(self): + if self.new: + self.new = False + if self.last in mock_returns: + return 'SUCCESS\n' + else: + return 'NOTFOUND %s\n' % self.last + + if self.last in mock_returns: + if not self.mocked_out: + self.mocked_out = [x for x in self._format_out()] + print self.mocked_out + + if len(self.mocked_out) > self.count: + self.count += 1 + return self.mocked_out[self.count - 1] + + def _format_out(self): + if self.last in mock_returns: + try: + for l in mock_returns[self.last].splitlines(): + yield "%s\n" % l + except: + yield "%s\n" % mock_returns[self.last] + + yield '\n' + yield '.' + + +class TestSmartOSDataSource(MockerTestCase): + def setUp(self): + # makeDir comes from MockerTestCase + self.tmp = self.makeDir() + + # patch cloud_dir, so our 'seed_dir' is guaranteed empty + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + + self.unapply = [] + super(TestSmartOSDataSource, self).setUp() + + def tearDown(self): + apply_patches([i for i in reversed(self.unapply)]) + super(TestSmartOSDataSource, self).tearDown() + + def apply_patches(self, patches): + ret = apply_patches(patches) + self.unapply += ret + + def _get_ds(self): + + def _get_serial(): + return MockSerial() + + def _dmi_data(): + return mock_returns['system_uuid'], 'smartdc' + + data = {'sys_cfg': {}} + mod = DataSourceSmartOS + self.apply_patches([(mod, 'get_serial', _get_serial)]) + self.apply_patches([(mod, 'dmi_data', _dmi_data)]) + dsrc = mod.DataSourceSmartOS( + data.get('sys_cfg', {}), distro=None, paths=self.paths) + return dsrc + + def test_seed(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals('/dev/ttyS1', dsrc.seed) + + def test_issmartdc(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue(dsrc.is_smartdc) + + def test_uuid(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(mock_returns['system_uuid'], + dsrc.metadata['instance-id']) + + def test_root_keys(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(mock_returns['root_authorized_keys'], + dsrc.metadata['public-keys']) + + def test_hostname(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(mock_returns['hostname'], + dsrc.metadata['local-hostname']) + + def test_disable_iptables_flag(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(str(mock_returns['disable_iptables_flag']), + dsrc.metadata['iptables_disable']) + + def test_motd_sys_info(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + 
self.assertTrue(ret) + self.assertEquals(str(mock_returns['enable_motd_sys_info']), + dsrc.metadata['motd_sys_info']) + + +def apply_patches(patches): + ret = [] + for (ref, name, replace) in patches: + if replace is None: + continue + orig = getattr(ref, name) + setattr(ref, name, replace) + ret.append((ref, name, orig)) + return ret -- cgit v1.2.3 From 198206b67f18f0f51bf057d76cee8b3a3526c8eb Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Fri, 19 Jul 2013 09:23:53 +0200 Subject: move redhat sysvinit scripts --- setup.py | 4 +- sysvinit/cloud-config | 121 --------------------------------------- sysvinit/cloud-final | 121 --------------------------------------- sysvinit/cloud-init | 121 --------------------------------------- sysvinit/cloud-init-local | 121 --------------------------------------- sysvinit/redhat/cloud-config | 121 +++++++++++++++++++++++++++++++++++++++ sysvinit/redhat/cloud-final | 121 +++++++++++++++++++++++++++++++++++++++ sysvinit/redhat/cloud-init | 121 +++++++++++++++++++++++++++++++++++++++ sysvinit/redhat/cloud-init-local | 121 +++++++++++++++++++++++++++++++++++++++ 9 files changed, 486 insertions(+), 486 deletions(-) delete mode 100755 sysvinit/cloud-config delete mode 100755 sysvinit/cloud-final delete mode 100755 sysvinit/cloud-init delete mode 100755 sysvinit/cloud-init-local create mode 100755 sysvinit/redhat/cloud-config create mode 100755 sysvinit/redhat/cloud-final create mode 100755 sysvinit/redhat/cloud-init create mode 100755 sysvinit/redhat/cloud-init-local diff --git a/setup.py b/setup.py index 4aa1a47c..8d18b97e 100755 --- a/setup.py +++ b/setup.py @@ -37,8 +37,8 @@ def is_f(p): INITSYS_FILES = { - 'sysvinit': [f for f in glob('sysvinit/*') if is_f(f)], - 'sysvinit_deb': [f for f in glob('sysvinit/*') if is_f(f)], + 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)], + 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], 'systemd': [f for f in glob('systemd/*') if is_f(f)], 'upstart': [f for f in glob('upstart/*') if is_f(f)], } diff --git a/sysvinit/cloud-config b/sysvinit/cloud-config deleted file mode 100755 index ad8ed831..00000000 --- a/sysvinit/cloud-config +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/sh - -# -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joshua Harlow -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# - -# See: http://wiki.debian.org/LSBInitScripts -# See: http://tiny.cc/czvbgw -# See: http://www.novell.com/coolsolutions/feature/15380.html -# Also based on dhcpd in RHEL (for comparison) - -### BEGIN INIT INFO -# Provides: cloud-config -# Required-Start: cloud-init cloud-init-local -# Should-Start: $time -# Required-Stop: -# Should-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: The config cloud-init job -# Description: Start cloud-init and runs the config phase -# and any associated config modules as desired. -### END INIT INFO - -# Return values acc. 
to LSB for all commands but status: -# 0 - success -# 1 - generic or unspecified error -# 2 - invalid or excess argument(s) -# 3 - unimplemented feature (e.g. "reload") -# 4 - user had insufficient privileges -# 5 - program is not installed -# 6 - program is not configured -# 7 - program is not running -# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl) -# -# Note that starting an already running service, stopping -# or restarting a not-running service as well as the restart -# with force-reload (in case signaling is not supported) are -# considered a success. - -RETVAL=0 - -prog="cloud-init" -cloud_init="/usr/bin/cloud-init" -conf="/etc/cloud/cloud.cfg" - -# If there exist sysconfig/default variable override files use it... -[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init -[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init - -start() { - [ -x $cloud_init ] || return 5 - [ -f $conf ] || return 6 - - echo -n $"Starting $prog: " - $cloud_init $CLOUDINITARGS modules --mode config - RETVAL=$? - return $RETVAL -} - -stop() { - echo -n $"Shutting down $prog: " - # No-op - RETVAL=7 - return $RETVAL -} - -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - restart|try-restart|condrestart) - ## Stop the service and regardless of whether it was - ## running or not, start it again. - # - ## Note: try-restart is now part of LSB (as of 1.9). - ## RH has a similar command named condrestart. - start - RETVAL=$? - ;; - reload|force-reload) - # It does not support reload - RETVAL=3 - ;; - status) - echo -n $"Checking for service $prog:" - # Return value is slightly different for the status command: - # 0 - service up and running - # 1 - service dead, but /var/run/ pid file exists - # 2 - service dead, but /var/lock/ lock file exists - # 3 - service not running (unused) - # 4 - service status unknown :-( - # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.) - RETVAL=3 - ;; - *) - echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}" - RETVAL=3 - ;; -esac - -exit $RETVAL diff --git a/sysvinit/cloud-final b/sysvinit/cloud-final deleted file mode 100755 index aeae8903..00000000 --- a/sysvinit/cloud-final +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/sh - -# -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joshua Harlow -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# - -# See: http://wiki.debian.org/LSBInitScripts -# See: http://tiny.cc/czvbgw -# See: http://www.novell.com/coolsolutions/feature/15380.html -# Also based on dhcpd in RHEL (for comparison) - -### BEGIN INIT INFO -# Provides: cloud-final -# Required-Start: $all cloud-config -# Should-Start: $time -# Required-Stop: -# Should-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: The final cloud-init job -# Description: Start cloud-init and runs the final phase -# and any associated final modules as desired. -### END INIT INFO - -# Return values acc. 
to LSB for all commands but status: -# 0 - success -# 1 - generic or unspecified error -# 2 - invalid or excess argument(s) -# 3 - unimplemented feature (e.g. "reload") -# 4 - user had insufficient privileges -# 5 - program is not installed -# 6 - program is not configured -# 7 - program is not running -# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl) -# -# Note that starting an already running service, stopping -# or restarting a not-running service as well as the restart -# with force-reload (in case signaling is not supported) are -# considered a success. - -RETVAL=0 - -prog="cloud-init" -cloud_init="/usr/bin/cloud-init" -conf="/etc/cloud/cloud.cfg" - -# If there exist sysconfig/default variable override files use it... -[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init -[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init - -start() { - [ -x $cloud_init ] || return 5 - [ -f $conf ] || return 6 - - echo -n $"Starting $prog: " - $cloud_init $CLOUDINITARGS modules --mode final - RETVAL=$? - return $RETVAL -} - -stop() { - echo -n $"Shutting down $prog: " - # No-op - RETVAL=7 - return $RETVAL -} - -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - restart|try-restart|condrestart) - ## Stop the service and regardless of whether it was - ## running or not, start it again. - # - ## Note: try-restart is now part of LSB (as of 1.9). - ## RH has a similar command named condrestart. - start - RETVAL=$? - ;; - reload|force-reload) - # It does not support reload - RETVAL=3 - ;; - status) - echo -n $"Checking for service $prog:" - # Return value is slightly different for the status command: - # 0 - service up and running - # 1 - service dead, but /var/run/ pid file exists - # 2 - service dead, but /var/lock/ lock file exists - # 3 - service not running (unused) - # 4 - service status unknown :-( - # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.) - RETVAL=3 - ;; - *) - echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}" - RETVAL=3 - ;; -esac - -exit $RETVAL diff --git a/sysvinit/cloud-init b/sysvinit/cloud-init deleted file mode 100755 index c1c92ad0..00000000 --- a/sysvinit/cloud-init +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/sh - -# -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joshua Harlow -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# - -# See: http://wiki.debian.org/LSBInitScripts -# See: http://tiny.cc/czvbgw -# See: http://www.novell.com/coolsolutions/feature/15380.html -# Also based on dhcpd in RHEL (for comparison) - -### BEGIN INIT INFO -# Provides: cloud-init -# Required-Start: $local_fs $network $named $remote_fs cloud-init-local -# Should-Start: $time -# Required-Stop: -# Should-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: The initial cloud-init job (net and fs contingent) -# Description: Start cloud-init and runs the initialization phase -# and any associated initial modules as desired. -### END INIT INFO - -# Return values acc. 
to LSB for all commands but status: -# 0 - success -# 1 - generic or unspecified error -# 2 - invalid or excess argument(s) -# 3 - unimplemented feature (e.g. "reload") -# 4 - user had insufficient privileges -# 5 - program is not installed -# 6 - program is not configured -# 7 - program is not running -# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl) -# -# Note that starting an already running service, stopping -# or restarting a not-running service as well as the restart -# with force-reload (in case signaling is not supported) are -# considered a success. - -RETVAL=0 - -prog="cloud-init" -cloud_init="/usr/bin/cloud-init" -conf="/etc/cloud/cloud.cfg" - -# If there exist sysconfig/default variable override files use it... -[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init -[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init - -start() { - [ -x $cloud_init ] || return 5 - [ -f $conf ] || return 6 - - echo -n $"Starting $prog: " - $cloud_init $CLOUDINITARGS init - RETVAL=$? - return $RETVAL -} - -stop() { - echo -n $"Shutting down $prog: " - # No-op - RETVAL=7 - return $RETVAL -} - -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - restart|try-restart|condrestart) - ## Stop the service and regardless of whether it was - ## running or not, start it again. - # - ## Note: try-restart is now part of LSB (as of 1.9). - ## RH has a similar command named condrestart. - start - RETVAL=$? - ;; - reload|force-reload) - # It does not support reload - RETVAL=3 - ;; - status) - echo -n $"Checking for service $prog:" - # Return value is slightly different for the status command: - # 0 - service up and running - # 1 - service dead, but /var/run/ pid file exists - # 2 - service dead, but /var/lock/ lock file exists - # 3 - service not running (unused) - # 4 - service status unknown :-( - # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.) - RETVAL=3 - ;; - *) - echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}" - RETVAL=3 - ;; -esac - -exit $RETVAL diff --git a/sysvinit/cloud-init-local b/sysvinit/cloud-init-local deleted file mode 100755 index b53e0db2..00000000 --- a/sysvinit/cloud-init-local +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/sh - -# -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joshua Harlow -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# - -# See: http://wiki.debian.org/LSBInitScripts -# See: http://tiny.cc/czvbgw -# See: http://www.novell.com/coolsolutions/feature/15380.html -# Also based on dhcpd in RHEL (for comparison) - -### BEGIN INIT INFO -# Provides: cloud-init-local -# Required-Start: $local_fs $remote_fs -# Should-Start: $time -# Required-Stop: -# Should-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: The initial cloud-init job (local fs contingent) -# Description: Start cloud-init and runs the initialization phases -# and any associated initial modules as desired. -### END INIT INFO - -# Return values acc. 
to LSB for all commands but status: -# 0 - success -# 1 - generic or unspecified error -# 2 - invalid or excess argument(s) -# 3 - unimplemented feature (e.g. "reload") -# 4 - user had insufficient privileges -# 5 - program is not installed -# 6 - program is not configured -# 7 - program is not running -# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl) -# -# Note that starting an already running service, stopping -# or restarting a not-running service as well as the restart -# with force-reload (in case signaling is not supported) are -# considered a success. - -RETVAL=0 - -prog="cloud-init" -cloud_init="/usr/bin/cloud-init" -conf="/etc/cloud/cloud.cfg" - -# If there exist sysconfig/default variable override files use it... -[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init -[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init - -start() { - [ -x $cloud_init ] || return 5 - [ -f $conf ] || return 6 - - echo -n $"Starting $prog: " - $cloud_init $CLOUDINITARGS init --local - RETVAL=$? - return $RETVAL -} - -stop() { - echo -n $"Shutting down $prog: " - # No-op - RETVAL=7 - return $RETVAL -} - -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - restart|try-restart|condrestart) - ## Stop the service and regardless of whether it was - ## running or not, start it again. - # - ## Note: try-restart is now part of LSB (as of 1.9). - ## RH has a similar command named condrestart. - start - RETVAL=$? - ;; - reload|force-reload) - # It does not support reload - RETVAL=3 - ;; - status) - echo -n $"Checking for service $prog:" - # Return value is slightly different for the status command: - # 0 - service up and running - # 1 - service dead, but /var/run/ pid file exists - # 2 - service dead, but /var/lock/ lock file exists - # 3 - service not running (unused) - # 4 - service status unknown :-( - # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.) - RETVAL=3 - ;; - *) - echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}" - RETVAL=3 - ;; -esac - -exit $RETVAL diff --git a/sysvinit/redhat/cloud-config b/sysvinit/redhat/cloud-config new file mode 100755 index 00000000..ad8ed831 --- /dev/null +++ b/sysvinit/redhat/cloud-config @@ -0,0 +1,121 @@ +#!/bin/sh + +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +# See: http://wiki.debian.org/LSBInitScripts +# See: http://tiny.cc/czvbgw +# See: http://www.novell.com/coolsolutions/feature/15380.html +# Also based on dhcpd in RHEL (for comparison) + +### BEGIN INIT INFO +# Provides: cloud-config +# Required-Start: cloud-init cloud-init-local +# Should-Start: $time +# Required-Stop: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: The config cloud-init job +# Description: Start cloud-init and runs the config phase +# and any associated config modules as desired. +### END INIT INFO + +# Return values acc. 
to LSB for all commands but status: +# 0 - success +# 1 - generic or unspecified error +# 2 - invalid or excess argument(s) +# 3 - unimplemented feature (e.g. "reload") +# 4 - user had insufficient privileges +# 5 - program is not installed +# 6 - program is not configured +# 7 - program is not running +# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl) +# +# Note that starting an already running service, stopping +# or restarting a not-running service as well as the restart +# with force-reload (in case signaling is not supported) are +# considered a success. + +RETVAL=0 + +prog="cloud-init" +cloud_init="/usr/bin/cloud-init" +conf="/etc/cloud/cloud.cfg" + +# If there exist sysconfig/default variable override files use it... +[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init + +start() { + [ -x $cloud_init ] || return 5 + [ -f $conf ] || return 6 + + echo -n $"Starting $prog: " + $cloud_init $CLOUDINITARGS modules --mode config + RETVAL=$? + return $RETVAL +} + +stop() { + echo -n $"Shutting down $prog: " + # No-op + RETVAL=7 + return $RETVAL +} + +case "$1" in + start) + start + RETVAL=$? + ;; + stop) + stop + RETVAL=$? + ;; + restart|try-restart|condrestart) + ## Stop the service and regardless of whether it was + ## running or not, start it again. + # + ## Note: try-restart is now part of LSB (as of 1.9). + ## RH has a similar command named condrestart. + start + RETVAL=$? + ;; + reload|force-reload) + # It does not support reload + RETVAL=3 + ;; + status) + echo -n $"Checking for service $prog:" + # Return value is slightly different for the status command: + # 0 - service up and running + # 1 - service dead, but /var/run/ pid file exists + # 2 - service dead, but /var/lock/ lock file exists + # 3 - service not running (unused) + # 4 - service status unknown :-( + # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.) + RETVAL=3 + ;; + *) + echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}" + RETVAL=3 + ;; +esac + +exit $RETVAL diff --git a/sysvinit/redhat/cloud-final b/sysvinit/redhat/cloud-final new file mode 100755 index 00000000..aeae8903 --- /dev/null +++ b/sysvinit/redhat/cloud-final @@ -0,0 +1,121 @@ +#!/bin/sh + +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +# See: http://wiki.debian.org/LSBInitScripts +# See: http://tiny.cc/czvbgw +# See: http://www.novell.com/coolsolutions/feature/15380.html +# Also based on dhcpd in RHEL (for comparison) + +### BEGIN INIT INFO +# Provides: cloud-final +# Required-Start: $all cloud-config +# Should-Start: $time +# Required-Stop: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: The final cloud-init job +# Description: Start cloud-init and runs the final phase +# and any associated final modules as desired. +### END INIT INFO + +# Return values acc. 
to LSB for all commands but status: +# 0 - success +# 1 - generic or unspecified error +# 2 - invalid or excess argument(s) +# 3 - unimplemented feature (e.g. "reload") +# 4 - user had insufficient privileges +# 5 - program is not installed +# 6 - program is not configured +# 7 - program is not running +# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl) +# +# Note that starting an already running service, stopping +# or restarting a not-running service as well as the restart +# with force-reload (in case signaling is not supported) are +# considered a success. + +RETVAL=0 + +prog="cloud-init" +cloud_init="/usr/bin/cloud-init" +conf="/etc/cloud/cloud.cfg" + +# If there exist sysconfig/default variable override files use it... +[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init + +start() { + [ -x $cloud_init ] || return 5 + [ -f $conf ] || return 6 + + echo -n $"Starting $prog: " + $cloud_init $CLOUDINITARGS modules --mode final + RETVAL=$? + return $RETVAL +} + +stop() { + echo -n $"Shutting down $prog: " + # No-op + RETVAL=7 + return $RETVAL +} + +case "$1" in + start) + start + RETVAL=$? + ;; + stop) + stop + RETVAL=$? + ;; + restart|try-restart|condrestart) + ## Stop the service and regardless of whether it was + ## running or not, start it again. + # + ## Note: try-restart is now part of LSB (as of 1.9). + ## RH has a similar command named condrestart. + start + RETVAL=$? + ;; + reload|force-reload) + # It does not support reload + RETVAL=3 + ;; + status) + echo -n $"Checking for service $prog:" + # Return value is slightly different for the status command: + # 0 - service up and running + # 1 - service dead, but /var/run/ pid file exists + # 2 - service dead, but /var/lock/ lock file exists + # 3 - service not running (unused) + # 4 - service status unknown :-( + # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.) + RETVAL=3 + ;; + *) + echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}" + RETVAL=3 + ;; +esac + +exit $RETVAL diff --git a/sysvinit/redhat/cloud-init b/sysvinit/redhat/cloud-init new file mode 100755 index 00000000..c1c92ad0 --- /dev/null +++ b/sysvinit/redhat/cloud-init @@ -0,0 +1,121 @@ +#!/bin/sh + +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +# See: http://wiki.debian.org/LSBInitScripts +# See: http://tiny.cc/czvbgw +# See: http://www.novell.com/coolsolutions/feature/15380.html +# Also based on dhcpd in RHEL (for comparison) + +### BEGIN INIT INFO +# Provides: cloud-init +# Required-Start: $local_fs $network $named $remote_fs cloud-init-local +# Should-Start: $time +# Required-Stop: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: The initial cloud-init job (net and fs contingent) +# Description: Start cloud-init and runs the initialization phase +# and any associated initial modules as desired. 
+### END INIT INFO + +# Return values acc. to LSB for all commands but status: +# 0 - success +# 1 - generic or unspecified error +# 2 - invalid or excess argument(s) +# 3 - unimplemented feature (e.g. "reload") +# 4 - user had insufficient privileges +# 5 - program is not installed +# 6 - program is not configured +# 7 - program is not running +# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl) +# +# Note that starting an already running service, stopping +# or restarting a not-running service as well as the restart +# with force-reload (in case signaling is not supported) are +# considered a success. + +RETVAL=0 + +prog="cloud-init" +cloud_init="/usr/bin/cloud-init" +conf="/etc/cloud/cloud.cfg" + +# If there exist sysconfig/default variable override files use it... +[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init + +start() { + [ -x $cloud_init ] || return 5 + [ -f $conf ] || return 6 + + echo -n $"Starting $prog: " + $cloud_init $CLOUDINITARGS init + RETVAL=$? + return $RETVAL +} + +stop() { + echo -n $"Shutting down $prog: " + # No-op + RETVAL=7 + return $RETVAL +} + +case "$1" in + start) + start + RETVAL=$? + ;; + stop) + stop + RETVAL=$? + ;; + restart|try-restart|condrestart) + ## Stop the service and regardless of whether it was + ## running or not, start it again. + # + ## Note: try-restart is now part of LSB (as of 1.9). + ## RH has a similar command named condrestart. + start + RETVAL=$? + ;; + reload|force-reload) + # It does not support reload + RETVAL=3 + ;; + status) + echo -n $"Checking for service $prog:" + # Return value is slightly different for the status command: + # 0 - service up and running + # 1 - service dead, but /var/run/ pid file exists + # 2 - service dead, but /var/lock/ lock file exists + # 3 - service not running (unused) + # 4 - service status unknown :-( + # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.) + RETVAL=3 + ;; + *) + echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}" + RETVAL=3 + ;; +esac + +exit $RETVAL diff --git a/sysvinit/redhat/cloud-init-local b/sysvinit/redhat/cloud-init-local new file mode 100755 index 00000000..b53e0db2 --- /dev/null +++ b/sysvinit/redhat/cloud-init-local @@ -0,0 +1,121 @@ +#!/bin/sh + +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +# See: http://wiki.debian.org/LSBInitScripts +# See: http://tiny.cc/czvbgw +# See: http://www.novell.com/coolsolutions/feature/15380.html +# Also based on dhcpd in RHEL (for comparison) + +### BEGIN INIT INFO +# Provides: cloud-init-local +# Required-Start: $local_fs $remote_fs +# Should-Start: $time +# Required-Stop: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: The initial cloud-init job (local fs contingent) +# Description: Start cloud-init and runs the initialization phases +# and any associated initial modules as desired. 
+### END INIT INFO + +# Return values acc. to LSB for all commands but status: +# 0 - success +# 1 - generic or unspecified error +# 2 - invalid or excess argument(s) +# 3 - unimplemented feature (e.g. "reload") +# 4 - user had insufficient privileges +# 5 - program is not installed +# 6 - program is not configured +# 7 - program is not running +# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl) +# +# Note that starting an already running service, stopping +# or restarting a not-running service as well as the restart +# with force-reload (in case signaling is not supported) are +# considered a success. + +RETVAL=0 + +prog="cloud-init" +cloud_init="/usr/bin/cloud-init" +conf="/etc/cloud/cloud.cfg" + +# If there exist sysconfig/default variable override files use it... +[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init + +start() { + [ -x $cloud_init ] || return 5 + [ -f $conf ] || return 6 + + echo -n $"Starting $prog: " + $cloud_init $CLOUDINITARGS init --local + RETVAL=$? + return $RETVAL +} + +stop() { + echo -n $"Shutting down $prog: " + # No-op + RETVAL=7 + return $RETVAL +} + +case "$1" in + start) + start + RETVAL=$? + ;; + stop) + stop + RETVAL=$? + ;; + restart|try-restart|condrestart) + ## Stop the service and regardless of whether it was + ## running or not, start it again. + # + ## Note: try-restart is now part of LSB (as of 1.9). + ## RH has a similar command named condrestart. + start + RETVAL=$? + ;; + reload|force-reload) + # It does not support reload + RETVAL=3 + ;; + status) + echo -n $"Checking for service $prog:" + # Return value is slightly different for the status command: + # 0 - service up and running + # 1 - service dead, but /var/run/ pid file exists + # 2 - service dead, but /var/lock/ lock file exists + # 3 - service not running (unused) + # 4 - service status unknown :-( + # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.) + RETVAL=3 + ;; + *) + echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}" + RETVAL=3 + ;; +esac + +exit $RETVAL -- cgit v1.2.3 From c818ddba06ff7d486a085edae531896156c14e9d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 19 Jul 2013 15:49:35 -0700 Subject: Add the ability to decompress MIME gzip. Instead of being restricted to only gzip compressing the overall mime segment or individual included segments, allow for each mime segment to be gzip compressed. LP: #1203203 --- cloudinit/user_data.py | 41 +++++++++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index df069ff8..23c31fde 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -48,6 +48,18 @@ ARCHIVE_TYPES = ["text/cloud-config-archive"] UNDEF_TYPE = "text/plain" ARCHIVE_UNDEF_TYPE = "text/cloud-config" +# This seems to hit most of the gzip possible content types. 
+DECOMP_TYPES = [ + 'application/gzip', + 'application/gzip-compressed', + 'application/gzipped', + 'application/x-compress', + 'application/x-compressed', + 'application/x-gunzip', + 'application/x-gzip', + 'application/x-gzip-compressed', +] + # Msg header used to track attachments ATTACHMENT_FIELD = 'Number-Attachments' @@ -67,6 +79,13 @@ class UserDataProcessor(object): return accumulating_msg def _process_msg(self, base_msg, append_msg): + + def replace_header(part, key, value): + if key in part: + part.replace_header(key, value) + else: + part[key] = value + for part in base_msg.walk(): if is_skippable(part): continue @@ -75,6 +94,18 @@ class UserDataProcessor(object): ctype_orig = part.get_content_type() payload = part.get_payload(decode=True) + # When the message states it is of a gzipped content type ensure + # that we attempt to decode said payload so that the decompressed + # data can be examined (instead of the compressed data). + if ctype_orig in DECOMP_TYPES: + try: + payload = util.decomp_gzip(payload, quiet=False) + ctype_orig = UNDEF_TYPE + # TODO(harlowja): should we also set the payload to the + # decompressed value?? + except util.DecompressionError: + pass + if not ctype_orig: ctype_orig = UNDEF_TYPE @@ -85,10 +116,7 @@ class UserDataProcessor(object): ctype = ctype_orig if ctype != ctype_orig: - if CONTENT_TYPE in part: - part.replace_header(CONTENT_TYPE, ctype) - else: - part[CONTENT_TYPE] = ctype + replace_header(part, CONTENT_TYPE, ctype) if ctype in INCLUDE_TYPES: self._do_include(payload, append_msg) @@ -100,10 +128,7 @@ class UserDataProcessor(object): # Should this be happening, shouldn't # the part header be modified and not the base? - if CONTENT_TYPE in base_msg: - base_msg.replace_header(CONTENT_TYPE, ctype) - else: - base_msg[CONTENT_TYPE] = ctype + replace_header(base_msg, CONTENT_TYPE, ctype) self._attach_part(append_msg, part) -- cgit v1.2.3 From f51e04ba97a42782e6a0e973488290552346387f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 19 Jul 2013 16:32:48 -0700 Subject: Add the ability to merge with jsonpatch. Jsonpatch is a new RFC standard for merging json-like structures which the cloud-init cloud-config is one such structure. To use this in a limited fashion (to start) add the ability for the cloud-config handler to accept this content-type and use it as an alternate way to merge new cloud-config sections into the accumulated cloud-config. LP: #1200476 --- Requires | 3 +++ cloudinit/handlers/cloud_config.py | 25 ++++++++++++++++++++----- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/Requires b/Requires index de51a4e4..5086230d 100644 --- a/Requires +++ b/Requires @@ -27,3 +27,6 @@ requests # Boto for ec2 boto + +# For patching pieces of cloud-config together +jsonpatch diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index c97ca3e8..8d1ba37f 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -20,6 +20,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
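What the DECOMP_TYPES list above buys is that any individual MIME part of user data may now be gzip-compressed, not just the whole blob. A hypothetical way to build such a part (Python 2 idiom to match the codebase; the handler gunzips the payload and then re-detects the content type of the result):

    import gzip
    import StringIO

    from email import encoders
    from email.mime.base import MIMEBase

    buf = StringIO.StringIO()
    gz = gzip.GzipFile(fileobj=buf, mode='wb')
    gz.write('#cloud-config\nhostname: from-a-gzipped-part\n')
    gz.close()

    part = MIMEBase('application', 'x-gzip')  # one of the DECOMP_TYPES
    part.set_payload(buf.getvalue())
    encoders.encode_base64(part)
    # After util.decomp_gzip() succeeds, the '#cloud-config' header is
    # visible again and the part is handled as ordinary cloud-config.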
+import jsonpatch + from cloudinit import handlers from cloudinit import log as logging from cloudinit import mergers @@ -50,6 +52,9 @@ MERGE_HEADER = 'Merge-Type' # This gets loaded into yaml with final result {'a': 22} DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()') +# See: https://tools.ietf.org/html/rfc6902 +JSON_PATCH_CTYPE = 'application/json-patch+json' + class CloudConfigPartHandler(handlers.Handler): def __init__(self, paths, **_kwargs): @@ -59,9 +64,11 @@ class CloudConfigPartHandler(handlers.Handler): self.file_names = [] def list_types(self): - return [ + ctypes_handled = [ handlers.type_from_starts_with("#cloud-config"), + JSON_PATCH_CTYPE, ] + return ctypes_handled def _write_cloud_config(self): if not self.cloud_fn: @@ -107,13 +114,15 @@ class CloudConfigPartHandler(handlers.Handler): all_mergers = DEF_MERGERS return (payload_yaml, all_mergers) + def _merge_patch(self, payload): + patch = jsonpatch.JsonPatch.from_string(payload) + LOG.debug("Merging by applying json patch %s", patch) + self.cloud_buf = patch.apply(self.cloud_buf, in_place=False) + def _merge_part(self, payload, headers): (payload_yaml, my_mergers) = self._extract_mergers(payload, headers) LOG.debug("Merging by applying %s", my_mergers) merger = mergers.construct(my_mergers) - if self.cloud_buf is None: - # First time through, merge with an empty dict... - self.cloud_buf = {} self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml) def _reset(self): @@ -130,7 +139,13 @@ class CloudConfigPartHandler(handlers.Handler): self._reset() return try: - self._merge_part(payload, headers) + # First time through, merge with an empty dict... + if self.cloud_buf is None or not self.file_names: + self.cloud_buf = {} + if ctype == JSON_PATCH_CTYPE: + self._merge_patch(payload) + else: + self._merge_part(payload, headers) # Ensure filename is ok to store for i in ("\n", "\r", "\t"): filename = filename.replace(i, " ") -- cgit v1.2.3 From 36bbd898e9b8bef508b5d185dc1e52af0f13cfd0 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 05:56:30 -0700 Subject: Add usage of '#json-patch' --- cloudinit/handlers/__init__.py | 1 + cloudinit/handlers/cloud_config.py | 17 ++++++++--------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 497d68c5..297e7451 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -62,6 +62,7 @@ INCLUSION_TYPES_MAP = { '#part-handler': 'text/part-handler', '#cloud-boothook': 'text/cloud-boothook', '#cloud-config-archive': 'text/cloud-config-archive', + '#json-patch': 'application/json-patch+json', } # Sorted longest first diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 8d1ba37f..84653375 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -66,22 +66,21 @@ class CloudConfigPartHandler(handlers.Handler): def list_types(self): ctypes_handled = [ handlers.type_from_starts_with("#cloud-config"), - JSON_PATCH_CTYPE, + handlers.type_from_starts_with("#json-patch"), ] return ctypes_handled def _write_cloud_config(self): - if not self.cloud_fn: + if not self.cloud_fn or not len(self.file_names): return # Capture which files we merged from... file_lines = [] - if self.file_names: - file_lines.append("# from %s files" % (len(self.file_names))) - for fn in self.file_names: - if not fn: - fn = '?' 
- file_lines.append("# %s" % (fn)) - file_lines.append("") + file_lines.append("# from %s files" % (len(self.file_names))) + for fn in self.file_names: + if not fn: + fn = '?' + file_lines.append("# %s" % (fn)) + file_lines.append("") if self.cloud_buf is not None: # Something was actually gathered.... lines = [ -- cgit v1.2.3 From 03d93b4fe94d0cd4de8fb661748207ea251fbb2a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 06:26:30 -0700 Subject: Fix content-type constant. --- cloudinit/handlers/cloud_config.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 84653375..3a4f2150 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -52,9 +52,6 @@ MERGE_HEADER = 'Merge-Type' # This gets loaded into yaml with final result {'a': 22} DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()') -# See: https://tools.ietf.org/html/rfc6902 -JSON_PATCH_CTYPE = 'application/json-patch+json' - class CloudConfigPartHandler(handlers.Handler): def __init__(self, paths, **_kwargs): -- cgit v1.2.3 From 1f73904df31feb22ad8545a3a336ba2e92e367bb Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 07:01:33 -0700 Subject: Fix constant move. --- cloudinit/handlers/cloud_config.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 3a4f2150..99dc71d0 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -52,6 +52,12 @@ MERGE_HEADER = 'Merge-Type' # This gets loaded into yaml with final result {'a': 22} DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()') +# The file header -> content types this module will handle. +CC_TYPES = { + '#json-patch': handlers.type_from_starts_with("#json-patch"), + '#cloud-config': handlers.type_from_starts_with("#cloud-config"), +} + class CloudConfigPartHandler(handlers.Handler): def __init__(self, paths, **_kwargs): @@ -61,23 +67,20 @@ class CloudConfigPartHandler(handlers.Handler): self.file_names = [] def list_types(self): - ctypes_handled = [ - handlers.type_from_starts_with("#cloud-config"), - handlers.type_from_starts_with("#json-patch"), - ] - return ctypes_handled + return list(CC_TYPES.values()) def _write_cloud_config(self): - if not self.cloud_fn or not len(self.file_names): + if not self.cloud_fn: return # Capture which files we merged from... file_lines = [] - file_lines.append("# from %s files" % (len(self.file_names))) - for fn in self.file_names: - if not fn: - fn = '?' - file_lines.append("# %s" % (fn)) - file_lines.append("") + if self.file_names: + file_lines.append("# from %s files" % (len(self.file_names))) + for fn in self.file_names: + if not fn: + fn = '?' + file_lines.append("# %s" % (fn)) + file_lines.append("") if self.cloud_buf is not None: # Something was actually gathered.... lines = [ @@ -138,7 +141,7 @@ class CloudConfigPartHandler(handlers.Handler): # First time through, merge with an empty dict... 
if self.cloud_buf is None or not self.file_names: self.cloud_buf = {} - if ctype == JSON_PATCH_CTYPE: + if ctype == CC_TYPES['#json-patch']: self._merge_patch(payload) else: self._merge_part(payload, headers) -- cgit v1.2.3 From 0be217f4c177a34b5ec46aa3e64fb1bede4ceb33 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 07:34:31 -0700 Subject: Remove json-patch inclusion header if payload contains it. --- cloudinit/handlers/cloud_config.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 99dc71d0..654314c0 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -114,6 +114,12 @@ class CloudConfigPartHandler(handlers.Handler): return (payload_yaml, all_mergers) def _merge_patch(self, payload): + if payload.startswith("#json-patch"): + # JSON doesn't handle comments in this manner, so ensure that + # if we started with this 'type' that we remove it before + # attempting to load it as json (which the jsonpatch library will + # attempt to do). + payload = payload[len("#json-patch"):] patch = jsonpatch.JsonPatch.from_string(payload) LOG.debug("Merging by applying json patch %s", patch) self.cloud_buf = patch.apply(self.cloud_buf, in_place=False) -- cgit v1.2.3 From eae3b6ad499b88b725a52cf07245e4721af380cf Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 07:37:02 -0700 Subject: Ensure we remove the same way we detect. --- cloudinit/handlers/cloud_config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 654314c0..4dcdbe8b 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -114,7 +114,8 @@ class CloudConfigPartHandler(handlers.Handler): return (payload_yaml, all_mergers) def _merge_patch(self, payload): - if payload.startswith("#json-patch"): + payload = payload.lstrip() + if payload.lower().startswith("#json-patch"): # JSON doesn't handle comments in this manner, so ensure that # if we started with this 'type' that we remove it before # attempting to load it as json (which the jsonpatch library will -- cgit v1.2.3 From 79b384341eb756bb2d5a9e5f53fbaa4aa40d8527 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 12:18:44 -0700 Subject: Bump the version. LP: #1203364 --- cloudinit/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/version.py b/cloudinit/version.py index 024d5118..4b29a587 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -20,7 +20,7 @@ from distutils import version as vr def version(): - return vr.StrictVersion("0.7.2") + return vr.StrictVersion("0.7.3") def version_string(): -- cgit v1.2.3 From 4e9a13142f1ee81c905a2cc9401a88f115ec778e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 12:38:25 -0700 Subject: Init and finalize refactor. Instead of previously initializing and not finalizing the handles that completed successfully when a handler initializing or running failed we should attempt to always give said handlers a chance to finalize (even when another handler fails). 
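Before the refactor below, it is worth seeing what the '#json-patch' support from the preceding commits looks like from the user's side: a part whose body starts with '#json-patch' (content type application/json-patch+json) is applied to the accumulated cloud-config with RFC 6902 semantics instead of being merged. A small round-trip using the same jsonpatch calls as the handler (the patch document and values are invented for illustration):

    import jsonpatch

    cloud_buf = {'runcmd': ['echo hello']}
    patch = jsonpatch.JsonPatch.from_string(
        '[{"op": "add", "path": "/runcmd/-", "value": "touch /tmp/done"}]')
    cloud_buf = patch.apply(cloud_buf, in_place=False)
    # cloud_buf is now {'runcmd': ['echo hello', 'touch /tmp/done']}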
--- cloudinit/stages.py | 82 ++++++++++++++++++++++++++++++++++------------------- 1 file changed, 53 insertions(+), 29 deletions(-) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index df49cabb..6893afd9 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -383,36 +383,60 @@ class Init(object): # Form our cloud interface data = self.cloudify() - # Init the handlers first - called = [] - for (_ctype, mod) in c_handlers.iteritems(): - if mod in called: - continue - handlers.call_begin(mod, data, frequency) - called.append(mod) - - # Walk the user data - part_data = { - 'handlers': c_handlers, - # Any new handlers that are encountered get writen here - 'handlerdir': idir, - 'data': data, - # The default frequency if handlers don't have one - 'frequency': frequency, - # This will be used when new handlers are found - # to help write there contents to files with numbered - # names... - 'handlercount': 0, - } - handlers.walk(user_data_msg, handlers.walker_callback, data=part_data) + # This list contains the modules initialized (so that we only finalize + # ones that were actually initialized) + inited_handlers = [] + + def init_handlers(): + # Init the handlers first + called = [] + for (_ctype, mod) in c_handlers.iteritems(): + if mod in called: + # Avoid initing the same module twice (if said module + # is registered to more than one content-type). + continue + handlers.call_begin(mod, data, frequency) + inited_handlers.append(mod) + called.append(mod) + + def walk_handlers(): + # Walk the user data + part_data = { + 'handlers': c_handlers, + # Any new handlers that are encountered get writen here + 'handlerdir': idir, + 'data': data, + # The default frequency if handlers don't have one + 'frequency': frequency, + # This will be used when new handlers are found + # to help write there contents to files with numbered + # names... + 'handlercount': 0, + } + handlers.walk(user_data_msg, handlers.walker_callback, + data=part_data) + + def finalize_handlers(): + # Give callbacks opportunity to finalize + called = [] + for (_ctype, mod) in c_handlers.iteritems(): + if mod in called: + # Avoid finalizing the same module twice (if said module + # is registered to more than one content-type). + continue + if mod not in inited_handlers: + continue + called.append(mod) + try: + handlers.call_end(mod, data, frequency) + except: + util.logexc(LOG, "Failed to finalize handler: %s", mod) - # Give callbacks opportunity to finalize - called = [] - for (_ctype, mod) in c_handlers.iteritems(): - if mod in called: - continue - handlers.call_end(mod, data, frequency) - called.append(mod) + try: + init_handlers() + walk_handlers() + finally: + finalize_handlers() # Perform post-consumption adjustments so that # modules that run during the init stage reflect -- cgit v1.2.3 From a3ef9d24c6c913676d22dd7017a1f1b235d47a45 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 12:43:51 -0700 Subject: Update comments + link to bug. LP: #1203368 --- cloudinit/stages.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 6893afd9..ed995628 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -425,6 +425,8 @@ class Init(object): # is registered to more than one content-type). continue if mod not in inited_handlers: + # Said module was never inited in the first place, so lets + # not attempt to finalize those that never got called. 
continue called.append(mod) try: -- cgit v1.2.3 From 2849c8d3eb44b186e9eaed46080796d56e9529f2 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 13:06:55 -0700 Subject: Also handle custom handlers correctly. LP: #1203368 --- cloudinit/handlers/__init__.py | 9 ++++++--- cloudinit/helpers.py | 2 ++ cloudinit/stages.py | 28 +++++++++++----------------- 3 files changed, 19 insertions(+), 20 deletions(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 497d68c5..93df5b61 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -151,10 +151,12 @@ def walker_handle_handler(pdata, _ctype, _filename, payload): try: mod = fixup_handler(importer.import_module(modname)) call_begin(mod, pdata['data'], frequency) - # Only register and increment - # after the above have worked (so we don't if it - # fails) + # Only register and increment after the above have worked, so we don't + # register if it fails starting. handlers.register(mod) + # Ensure that it gets finalized by marking said module as having been + # initialized correctly. + handlers.markings[mod].append('initialized') pdata['handlercount'] = curcount + 1 except: util.logexc(LOG, "Failed at registering python file: %s (part " @@ -230,6 +232,7 @@ def walk(msg, callback, data): headers['Content-Type'] = ctype callback(data, filename, part.get_payload(decode=True), headers) partnum = partnum + 1 + return partnum def fixup_handler(mod, def_freq=PER_INSTANCE): diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index b91c1290..bd37b8a3 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -22,6 +22,7 @@ from time import time +import collections import contextlib import io import os @@ -281,6 +282,7 @@ class ContentHandlers(object): def __init__(self): self.registered = {} + self.markings = collections.defaultdict(list) def __contains__(self, item): return self.is_registered(item) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index ed995628..43eaca1b 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -383,21 +383,15 @@ class Init(object): # Form our cloud interface data = self.cloudify() - # This list contains the modules initialized (so that we only finalize - # ones that were actually initialized) - inited_handlers = [] - def init_handlers(): # Init the handlers first - called = [] for (_ctype, mod) in c_handlers.iteritems(): - if mod in called: + if 'initialized' in c_handlers.markings[mod]: # Avoid initing the same module twice (if said module # is registered to more than one content-type). continue handlers.call_begin(mod, data, frequency) - inited_handlers.append(mod) - called.append(mod) + c_handlers.markings[mod].append('initialized') def walk_handlers(): # Walk the user data @@ -413,22 +407,22 @@ class Init(object): # names... 'handlercount': 0, } - handlers.walk(user_data_msg, handlers.walker_callback, - data=part_data) + return handlers.walk(user_data_msg, handlers.walker_callback, + data=part_data) def finalize_handlers(): # Give callbacks opportunity to finalize - called = [] for (_ctype, mod) in c_handlers.iteritems(): - if mod in called: - # Avoid finalizing the same module twice (if said module - # is registered to more than one content-type). - continue - if mod not in inited_handlers: + mod_markings = c_handlers.markings[mod] + if 'initialized' not in mod_markings: # Said module was never inited in the first place, so lets # not attempt to finalize those that never got called. 
continue - called.append(mod) + if 'finalized' in mod_markings: + # Avoid finalizing the same module twice (if said module + # is registered to more than one content-type). + continue + c_handlers.markings[mod].append('finalized') try: handlers.call_end(mod, data, frequency) except: -- cgit v1.2.3 From bbfc76fb74595881b25acc1bbbd426314c2390ed Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 13:13:02 -0700 Subject: Remove return not used. --- cloudinit/handlers/__init__.py | 1 - cloudinit/stages.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 93df5b61..f9b90323 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -232,7 +232,6 @@ def walk(msg, callback, data): headers['Content-Type'] = ctype callback(data, filename, part.get_payload(decode=True), headers) partnum = partnum + 1 - return partnum def fixup_handler(mod, def_freq=PER_INSTANCE): diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 43eaca1b..ba974a3e 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -407,8 +407,8 @@ class Init(object): # names... 'handlercount': 0, } - return handlers.walk(user_data_msg, handlers.walker_callback, - data=part_data) + handlers.walk(user_data_msg, handlers.walker_callback, + data=part_data) def finalize_handlers(): # Give callbacks opportunity to finalize -- cgit v1.2.3 From 7880588f804ea035f03eba9335af71f3322dab97 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 14:34:00 -0700 Subject: Ensure we reset the part after decompression. --- cloudinit/user_data.py | 36 +++++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index 23c31fde..97853e51 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -23,8 +23,10 @@ import os import email + from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart +from email.mime.nonmultipart import MIMENonMultipart from email.mime.text import MIMEText from cloudinit import handlers @@ -80,6 +82,10 @@ class UserDataProcessor(object): def _process_msg(self, base_msg, append_msg): + def find_ctype(payload): + ctype = handlers.type_from_starts_with(payload) + return ctype + def replace_header(part, key, value): if key in part: part.replace_header(key, value) @@ -93,6 +99,7 @@ class UserDataProcessor(object): ctype = None ctype_orig = part.get_content_type() payload = part.get_payload(decode=True) + was_compressed = False # When the message states it is of a gzipped content type ensure # that we attempt to decode said payload so that the decompressed @@ -100,21 +107,32 @@ class UserDataProcessor(object): if ctype_orig in DECOMP_TYPES: try: payload = util.decomp_gzip(payload, quiet=False) - ctype_orig = UNDEF_TYPE - # TODO(harlowja): should we also set the payload to the - # decompressed value?? - except util.DecompressionError: - pass + # At this point we don't know what the content-type is + # since we just decompressed it. 
+ ctype_orig = None + was_compressed = True + except util.DecompressionError as e: + LOG.warn("Failed decompressing payload from %s of length" + " %s due to: %s", ctype_orig, len(payload), e) + continue + # Attempt to figure out the payloads content-type if not ctype_orig: ctype_orig = UNDEF_TYPE - if ctype_orig in TYPE_NEEDED: - ctype = handlers.type_from_starts_with(payload) - + ctype = find_ctype(payload) if ctype is None: ctype = ctype_orig + # In the case where the data was compressed, we want to make sure + # that we create a new message that contains the found content + # type with the uncompressed content since later traversals of the + # messages will expect a part not compressed. + if was_compressed: + maintype, subtype = ctype.split("/", 1) + part = MIMENonMultipart(maintype, subtype) + part.set_payload(payload) + if ctype != ctype_orig: replace_header(part, CONTENT_TYPE, ctype) @@ -126,7 +144,7 @@ class UserDataProcessor(object): self._explode_archive(payload, append_msg) continue - # Should this be happening, shouldn't + # TODO(harlowja): Should this be happening, shouldn't # the part header be modified and not the base? replace_header(base_msg, CONTENT_TYPE, ctype) -- cgit v1.2.3 From 64c69053c11385cc43b6c628dbe8a1bf28ccc49c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 14:57:42 -0700 Subject: Keep filename from original part. --- cloudinit/user_data.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index 97853e51..e17bcaee 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -130,8 +130,12 @@ class UserDataProcessor(object): # messages will expect a part not compressed. if was_compressed: maintype, subtype = ctype.split("/", 1) - part = MIMENonMultipart(maintype, subtype) - part.set_payload(payload) + n_part = MIMENonMultipart(maintype, subtype) + n_part.set_payload(payload) + if part.get_filename(): + n_part.add_header('Content-Disposition', 'attachment', + filename=part.get_filename()) + part = n_part if ctype != ctype_orig: replace_header(part, CONTENT_TYPE, ctype) -- cgit v1.2.3 From 432778cf2890c19940f29f47f9efc2cb8e784f43 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 16:34:39 -0700 Subject: Unify filename, header replacement. 
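
The two helpers this adds rely on the email.message deletion idiom:
Message.__delitem__ removes every occurrence of a header and raises
nothing when the header is absent, so delete-then-set always leaves
exactly one value. A standalone sketch (the message and header values
below are illustrative only):

    import email.message

    def _replace_header(msg, key, value):
        del msg[key]      # no-op if key was never set
        msg[key] = value  # now guaranteed to be the only occurrence

    def _set_filename(msg, filename):
        del msg['Content-Disposition']
        msg.add_header('Content-Disposition', 'attachment',
                       filename=str(filename))

    msg = email.message.Message()
    _replace_header(msg, 'Content-Type', 'text/cloud-config')
    _set_filename(msg, 'part-001')
    assert msg.get_filename() == 'part-001'
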
--- cloudinit/user_data.py | 56 ++++++++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index e17bcaee..454f3c06 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -70,6 +70,19 @@ ATTACHMENT_FIELD = 'Number-Attachments' EXAMINE_FOR_LAUNCH_INDEX = ["text/cloud-config"] +def _replace_header(msg, key, value): + del msg[key] + msg[key] = value + + +def _set_filename(msg, filename): + if not filename: + return + del msg['Content-Disposition'] + msg.add_header('Content-Disposition', + 'attachment', filename=str(filename)) + + class UserDataProcessor(object): def __init__(self, paths): self.paths = paths @@ -83,14 +96,7 @@ class UserDataProcessor(object): def _process_msg(self, base_msg, append_msg): def find_ctype(payload): - ctype = handlers.type_from_starts_with(payload) - return ctype - - def replace_header(part, key, value): - if key in part: - part.replace_header(key, value) - else: - part[key] = value + return handlers.type_from_starts_with(payload) for part in base_msg.walk(): if is_skippable(part): @@ -132,13 +138,17 @@ class UserDataProcessor(object): maintype, subtype = ctype.split("/", 1) n_part = MIMENonMultipart(maintype, subtype) n_part.set_payload(payload) - if part.get_filename(): - n_part.add_header('Content-Disposition', 'attachment', - filename=part.get_filename()) + # Copy various headers from the old part to the new one, + # but don't include all the headers since some are not useful + # after decoding and decompression. + _set_filename(n_part, part.get_filename()) + for h in ('Launch-Index',): + if h in part: + _replace_header(n_part, h, str(part[h])) part = n_part if ctype != ctype_orig: - replace_header(part, CONTENT_TYPE, ctype) + _replace_header(part, CONTENT_TYPE, ctype) if ctype in INCLUDE_TYPES: self._do_include(payload, append_msg) @@ -150,7 +160,7 @@ class UserDataProcessor(object): # TODO(harlowja): Should this be happening, shouldn't # the part header be modified and not the base? 
- replace_header(base_msg, CONTENT_TYPE, ctype) + _replace_header(base_msg, CONTENT_TYPE, ctype) self._attach_part(append_msg, part) @@ -185,8 +195,7 @@ class UserDataProcessor(object): def _process_before_attach(self, msg, attached_id): if not msg.get_filename(): - msg.add_header('Content-Disposition', - 'attachment', filename=PART_FN_TPL % (attached_id)) + _set_filename(msg, PART_FN_TPL % (attached_id)) self._attach_launch_index(msg) def _do_include(self, content, append_msg): @@ -264,13 +273,15 @@ class UserDataProcessor(object): msg.set_payload(content) if 'filename' in ent: - msg.add_header('Content-Disposition', - 'attachment', filename=ent['filename']) + _set_filename(msg, ent['filename']) if 'launch-index' in ent: msg.add_header('Launch-Index', str(ent['launch-index'])) for header in list(ent.keys()): - if header in ('content', 'filename', 'type', 'launch-index'): + if header.lower() in ('content', 'filename', 'type', + 'launch-index', 'content-disposition', + ATTACHMENT_FIELD.lower(), + CONTENT_TYPE.lower()): continue msg.add_header(header, ent[header]) @@ -285,13 +296,13 @@ class UserDataProcessor(object): outer_msg[ATTACHMENT_FIELD] = '0' if new_count is not None: - outer_msg.replace_header(ATTACHMENT_FIELD, str(new_count)) + _replace_header(outer_msg, ATTACHMENT_FIELD, str(new_count)) fetched_count = 0 try: fetched_count = int(outer_msg.get(ATTACHMENT_FIELD)) except (ValueError, TypeError): - outer_msg.replace_header(ATTACHMENT_FIELD, str(fetched_count)) + _replace_header(outer_msg, ATTACHMENT_FIELD, str(fetched_count)) return fetched_count def _attach_part(self, outer_msg, part): @@ -323,10 +334,7 @@ def convert_string(raw_data, headers=None): if "mime-version:" in data[0:4096].lower(): msg = email.message_from_string(data) for (key, val) in headers.iteritems(): - if key in msg: - msg.replace_header(key, val) - else: - msg[key] = val + _replace_header(msg, key, val) else: mtype = headers.get(CONTENT_TYPE, NOT_MULTIPART_TYPE) maintype, subtype = mtype.split("/", 1) -- cgit v1.2.3 From 251317563bd36a339e6fa7a08a0fc05b5ee975a4 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 20 Jul 2013 16:40:11 -0700 Subject: Just check the filename existing. --- cloudinit/user_data.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index 454f3c06..d49ea094 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -76,8 +76,6 @@ def _replace_header(msg, key, value): def _set_filename(msg, filename): - if not filename: - return del msg['Content-Disposition'] msg.add_header('Content-Disposition', 'attachment', filename=str(filename)) @@ -141,7 +139,8 @@ class UserDataProcessor(object): # Copy various headers from the old part to the new one, # but don't include all the headers since some are not useful # after decoding and decompression. - _set_filename(n_part, part.get_filename()) + if part.get_filename(): + _set_filename(n_part, part.get_filename()) for h in ('Launch-Index',): if h in part: _replace_header(n_part, h, str(part[h])) -- cgit v1.2.3 From 7022512f3ceb955be2834844f05d4683f78ff276 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 21 Jul 2013 09:26:44 -0700 Subject: Use constants for repeated type strings. 
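
Keeping each marker in a single module-level constant means detection
and stripping can never drift apart, which is the bug class this cleans
up. A small sketch of the idea (strip_marker is illustrative and not
part of this patch):

    BOOTHOOK_PREFIX = "#cloud-boothook"

    def strip_marker(payload):
        # Detect and remove using the same constant.
        if payload.startswith(BOOTHOOK_PREFIX):
            payload = payload[len(BOOTHOOK_PREFIX) + 1:]
        return payload

    assert strip_marker("#cloud-boothook\n#!/bin/sh\n") == "#!/bin/sh\n"
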
--- cloudinit/handlers/boot_hook.py | 13 ++++++------- cloudinit/handlers/cloud_config.py | 5 +++-- cloudinit/handlers/shell_script.py | 3 ++- cloudinit/handlers/upstart_job.py | 3 ++- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py index 11ac4fe5..5e7b6204 100644 --- a/cloudinit/handlers/boot_hook.py +++ b/cloudinit/handlers/boot_hook.py @@ -29,6 +29,7 @@ from cloudinit import util from cloudinit.settings import (PER_ALWAYS) LOG = logging.getLogger(__name__) +BOOTHOOK_PREFIX = "#cloud-boothook" class BootHookPartHandler(handlers.Handler): @@ -41,18 +42,16 @@ class BootHookPartHandler(handlers.Handler): def list_types(self): return [ - handlers.type_from_starts_with("#cloud-boothook"), + handlers.type_from_starts_with(BOOTHOOK_PREFIX), ] def _write_part(self, payload, filename): filename = util.clean_filename(filename) - payload = util.dos2unix(payload) - prefix = "#cloud-boothook" - start = 0 - if payload.startswith(prefix): - start = len(prefix) + 1 filepath = os.path.join(self.boothook_dir, filename) - contents = payload[start:] + contents = util.dos2unix(payload) + if contents.startswith(BOOTHOOK_PREFIX): + real_start = len(BOOTHOOK_PREFIX) + 1 + contents = contents[real_start:] util.write_file(filepath, contents, 0700) return filepath diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index c97ca3e8..730672d7 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -49,6 +49,7 @@ MERGE_HEADER = 'Merge-Type' # # This gets loaded into yaml with final result {'a': 22} DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()') +CLOUD_PREFIX = "#cloud-config" class CloudConfigPartHandler(handlers.Handler): @@ -60,7 +61,7 @@ class CloudConfigPartHandler(handlers.Handler): def list_types(self): return [ - handlers.type_from_starts_with("#cloud-config"), + handlers.type_from_starts_with(CLOUD_PREFIX), ] def _write_cloud_config(self): @@ -78,7 +79,7 @@ class CloudConfigPartHandler(handlers.Handler): if self.cloud_buf is not None: # Something was actually gathered.... lines = [ - "#cloud-config", + CLOUD_PREFIX, '', ] lines.extend(file_lines) diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py index b185c374..62289d98 100644 --- a/cloudinit/handlers/shell_script.py +++ b/cloudinit/handlers/shell_script.py @@ -29,6 +29,7 @@ from cloudinit import util from cloudinit.settings import (PER_ALWAYS) LOG = logging.getLogger(__name__) +SHELL_PREFIX = "#!" 
class ShellScriptPartHandler(handlers.Handler): @@ -38,7 +39,7 @@ class ShellScriptPartHandler(handlers.Handler): def list_types(self): return [ - handlers.type_from_starts_with("#!"), + handlers.type_from_starts_with(SHELL_PREFIX), ] def handle_part(self, _data, ctype, filename, # pylint: disable=W0221 diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 7a73d1b2..bac4cad2 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -31,6 +31,7 @@ from cloudinit import util from cloudinit.settings import (PER_INSTANCE) LOG = logging.getLogger(__name__) +UPSTART_PREFIX = "#upstart-job" class UpstartJobPartHandler(handlers.Handler): @@ -40,7 +41,7 @@ class UpstartJobPartHandler(handlers.Handler): def list_types(self): return [ - handlers.type_from_starts_with("#upstart-job"), + handlers.type_from_starts_with(UPSTART_PREFIX), ] def handle_part(self, _data, ctype, filename, # pylint: disable=W0221 -- cgit v1.2.3 From 27f096a1ab2e60222f85d87c961e388fdefaf92c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 21 Jul 2013 09:34:26 -0700 Subject: Use a util helper to do prefix/suffix removal. --- cloudinit/handlers/boot_hook.py | 8 +++----- cloudinit/util.py | 8 ++++++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py index 5e7b6204..1848ce2c 100644 --- a/cloudinit/handlers/boot_hook.py +++ b/cloudinit/handlers/boot_hook.py @@ -48,11 +48,9 @@ class BootHookPartHandler(handlers.Handler): def _write_part(self, payload, filename): filename = util.clean_filename(filename) filepath = os.path.join(self.boothook_dir, filename) - contents = util.dos2unix(payload) - if contents.startswith(BOOTHOOK_PREFIX): - real_start = len(BOOTHOOK_PREFIX) + 1 - contents = contents[real_start:] - util.write_file(filepath, contents, 0700) + contents = util.strip_prefix_suffix(util.dos2unix(payload), + prefix=BOOTHOOK_PREFIX) + util.write_file(filepath, contents.lstrip(), 0700) return filepath def handle_part(self, _data, ctype, filename, # pylint: disable=W0221 diff --git a/cloudinit/util.py b/cloudinit/util.py index c45aae06..47d71ef4 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1530,6 +1530,14 @@ def shellify(cmdlist, add_header=True): return content +def strip_prefix_suffix(line, prefix=None, suffix=None): + if prefix and line.startswith(prefix): + line = line[len(prefix):] + if suffix and line.endswith(suffix): + line = line[:-len(suffix)] + return line + + def is_container(): """ Checks to see if this code running in a container of some sort -- cgit v1.2.3 From 971c2b2366c6e58921e1d2dd3ba18e597cbc20e8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 21 Jul 2013 10:45:29 -0700 Subject: Just use an initialized array. --- cloudinit/handlers/__init__.py | 5 +---- cloudinit/helpers.py | 7 ++++--- cloudinit/stages.py | 13 ++++--------- 3 files changed, 9 insertions(+), 16 deletions(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index f9b90323..1d450061 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -153,10 +153,7 @@ def walker_handle_handler(pdata, _ctype, _filename, payload): call_begin(mod, pdata['data'], frequency) # Only register and increment after the above have worked, so we don't # register if it fails starting. - handlers.register(mod) - # Ensure that it gets finalized by marking said module as having been - # initialized correctly. 
- handlers.markings[mod].append('initialized') + handlers.register(mod, initialized=True) pdata['handlercount'] = curcount + 1 except: util.logexc(LOG, "Failed at registering python file: %s (part " diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index bd37b8a3..1c46efde 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -22,7 +22,6 @@ from time import time -import collections import contextlib import io import os @@ -282,7 +281,7 @@ class ContentHandlers(object): def __init__(self): self.registered = {} - self.markings = collections.defaultdict(list) + self.initialized = [] def __contains__(self, item): return self.is_registered(item) @@ -293,11 +292,13 @@ class ContentHandlers(object): def is_registered(self, content_type): return content_type in self.registered - def register(self, mod): + def register(self, mod, initialized=False): types = set() for t in mod.list_types(): self.registered[t] = mod types.add(t) + if initialized and mod not in self.initialized: + self.initialized.append(mod) return types def _get_handler(self, content_type): diff --git a/cloudinit/stages.py b/cloudinit/stages.py index ba974a3e..fade1182 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -386,12 +386,12 @@ class Init(object): def init_handlers(): # Init the handlers first for (_ctype, mod) in c_handlers.iteritems(): - if 'initialized' in c_handlers.markings[mod]: + if mod in c_handlers.initialized: # Avoid initing the same module twice (if said module # is registered to more than one content-type). continue handlers.call_begin(mod, data, frequency) - c_handlers.markings[mod].append('initialized') + c_handlers.initialized.append(mod) def walk_handlers(): # Walk the user data @@ -413,16 +413,11 @@ class Init(object): def finalize_handlers(): # Give callbacks opportunity to finalize for (_ctype, mod) in c_handlers.iteritems(): - mod_markings = c_handlers.markings[mod] - if 'initialized' not in mod_markings: + if mod not in c_handlers.initialized: # Said module was never inited in the first place, so lets # not attempt to finalize those that never got called. continue - if 'finalized' in mod_markings: - # Avoid finalizing the same module twice (if said module - # is registered to more than one content-type). - continue - c_handlers.markings[mod].append('finalized') + c_handlers.initialized.remove(mod) try: handlers.call_end(mod, data, frequency) except: -- cgit v1.2.3 From a5dd2146bb98874219eb449ae06f57203099d4d4 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 21 Jul 2013 11:01:55 -0700 Subject: Also make the dir handler registration a simple function. --- cloudinit/stages.py | 57 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 24 deletions(-) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index fade1182..f08589a7 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -344,12 +344,13 @@ class Init(object): cdir = self.paths.get_cpath("handlers") idir = self._get_ipath("handlers") - # Add the path to the plugins dir to the top of our list for import - # instance dir should be read before cloud-dir - if cdir and cdir not in sys.path: - sys.path.insert(0, cdir) - if idir and idir not in sys.path: - sys.path.insert(0, idir) + # Add the path to the plugins dir to the top of our list for importing + # new handlers. 
+ # + # Note(harlowja): instance dir should be read before cloud-dir + for d in [cdir, idir]: + if d and d not in sys.path: + sys.path.insert(0, d) # Ensure datasource fetched before activation (just incase) user_data_msg = self.datasource.get_userdata(True) @@ -357,24 +358,32 @@ class Init(object): # This keeps track of all the active handlers c_handlers = helpers.ContentHandlers() - # Add handlers in cdir - potential_handlers = util.find_modules(cdir) - for (fname, mod_name) in potential_handlers.iteritems(): - try: - mod_locs = importer.find_module(mod_name, [''], - ['list_types', - 'handle_part']) - if not mod_locs: - LOG.warn(("Could not find a valid user-data handler" - " named %s in file %s"), mod_name, fname) - continue - mod = importer.import_module(mod_locs[0]) - mod = handlers.fixup_handler(mod) - types = c_handlers.register(mod) - LOG.debug("Added handler for %s from %s", types, fname) - except: - util.logexc(LOG, "Failed to register handler from %s", fname) - + def register_handlers_in_dir(path): + # Attempts to register any handler modules under the given path. + potential_handlers = util.find_modules(path) + for (fname, mod_name) in potential_handlers.iteritems(): + try: + mod_locs = importer.find_module(mod_name, [''], + ['list_types', + 'handle_part']) + if not mod_locs: + LOG.warn(("Could not find a valid user-data handler" + " named %s in file %s"), mod_name, fname) + continue + mod = importer.import_module(mod_locs[0]) + mod = handlers.fixup_handler(mod) + types = c_handlers.register(mod) + LOG.debug("Added handler for %s from %s", types, fname) + except Exception: + util.logexc(LOG, "Failed to register handler from %s", + fname) + + # Add any handlers in the cloud-dir + register_handlers_in_dir(cdir) + + # Register any other handlers that come from the default set. This + # is done after the cloud-dir handlers so that the cdir modules can + # take over the default user-data handler content-types. def_handlers = self._default_userdata_handlers() applied_def_handlers = c_handlers.register_defaults(def_handlers) if applied_def_handlers: -- cgit v1.2.3 From d655d019fb0a45389d87db39b0ef5001e27e2616 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 21 Jul 2013 11:04:55 -0700 Subject: Ensure what we are searching over is a directory. --- cloudinit/stages.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index f08589a7..3e49e8c5 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -360,6 +360,8 @@ class Init(object): def register_handlers_in_dir(path): # Attempts to register any handler modules under the given path. + if not path or not os.path.isdir(path): + return potential_handlers = util.find_modules(path) for (fname, mod_name) in potential_handlers.iteritems(): try: -- cgit v1.2.3 From 1072010fdde26203bc69b911e4a478953323a6ef Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 23 Jul 2013 11:45:34 -0400 Subject: Fix password setting for rhel5. Changing password via 'chpasswd' command in rhel5 would fail, if input to the 'chpasswd' command doesn't end with '\n'. The fix is just to append a carriage return to the input. --- ChangeLog | 2 ++ cloudinit/config/cc_set_passwords.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index dcc67626..4cdd20ad 100644 --- a/ChangeLog +++ b/ChangeLog @@ -5,6 +5,8 @@ 'initctl reload-configuration' (LP: #1124384). If so, then invoke it. - add Azure datasource. 
- add support for SuSE / SLES [Juerg Haefliger] + - add a trailing carriage return to chpasswd input, which reportedly + caused a problem on rhel5 if missing. 0.7.2: - add a debian watch file - add 'sudo' entry to ubuntu's default user (LP: #1080717) diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index e93c8c6f..56a36906 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -75,7 +75,7 @@ def handle(_name, cfg, cloud, log, args): plist_in.append("%s:%s" % (u, p)) users.append(u) - ch_in = '\n'.join(plist_in) + ch_in = '\n'.join(plist_in) + '\n' try: log.debug("Changing password for %s:", users) util.subp(['chpasswd'], ch_in) -- cgit v1.2.3 From ccbdf8c360e4272055208afb013a17a218c9f097 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 23 Jul 2013 13:10:33 -0400 Subject: alias 'availability_zone' to availability-zone in metadata service. The place this was noticed was in trying to use the 'nova.clouds.archive.ubuntu.com' mirror selection. Because the config-drive-v2 has a metadata entry of 'availability_zone', it didn't get found by the availabilty_zone property in cloudinit/sources/__init__.py LP: #1190431 --- cloudinit/sources/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index d8fbacdd..974c0407 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -135,7 +135,8 @@ class DataSource(object): @property def availability_zone(self): - return self.metadata.get('availability-zone') + return self.metadata.get('availability-zone', + self.metadata.get('availability_zone')) def get_instance_id(self): if not self.metadata or 'instance-id' not in self.metadata: -- cgit v1.2.3 From c67cc904bfddc9ed462f54d85d31236b39b6285f Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 23 Jul 2013 12:36:15 -0600 Subject: Move more functionality into get_serial() --- cloudinit/sources/DataSourceSmartOS.py | 42 ++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index f9b724eb..129020ec 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -35,7 +35,8 @@ from cloudinit import sources from cloudinit import util -TTY_LOC = '/dev/ttyS1' +DEF_TTY_LOC = '/dev/ttyS1' +TTY_LOC = None LOG = logging.getLogger(__name__) @@ -54,6 +55,7 @@ class DataSourceSmartOS(sources.DataSource): md = {} ud = "" + TTY_LOC = self.sys_cfg.get("serial_device", DEF_TTY_LOC) if not os.path.exists(TTY_LOC): LOG.debug("Host does not appear to be on SmartOS") return False @@ -72,11 +74,17 @@ class DataSourceSmartOS(sources.DataSource): md['local-hostname'] = hostname md['instance-id'] = system_uuid md['public-keys'] = query_data("root_authorized_keys", strip=True) - ud = query_data("user-script") + md['user-script'] = query_data("user-script") + md['user-data'] = query_data("user-script") md['iptables_disable'] = query_data("disable_iptables_flag", strip=True) md['motd_sys_info'] = query_data("enable_motd_sys_info", strip=True) + if md['user-data']: + ud = md['user-data'] + else: + ud = md['user-script'] + self.metadata = md self.userdata_raw = ud return True @@ -87,8 +95,22 @@ class DataSourceSmartOS(sources.DataSource): def get_serial(): """This is replaced in unit testing, allowing us to replace - serial.Serial with a mocked class""" - return serial.Serial() + 
serial.Serial with a mocked class + + The timeout value of 60 seconds should never be hit. The value + is taken from SmartOS own provisioning tools. Since we are reading + each line individually up until the single ".", the transfer is + usually very fast (i.e. microseconds) to get the response. + """ + if not TTY_LOC: + raise AttributeError("TTY_LOC value is not set") + + _ret = serial.Serial(TTY_LOC, timeout=60) + if not _ret.isOpen(): + raise SystemError("Unable to open %s" % TTY_LOC) + + return _ret + def query_data(noun, strip=False): @@ -97,22 +119,12 @@ def query_data(noun, strip=False): In the response, the first line is the status, while subsequent lines are is the value. A blank line with a "." is used to indicate end of response. + """ - The timeout value of 60 seconds should never be hit. The value - is taken from SmartOS own provisioning tools. Since we are reading - each line individually up until the single ".", the transfer is - usually very fast (i.e. microseconds) to get the response. - """ if not noun: return False ser = get_serial() - ser.port = '/dev/ttyS1' - ser.open() - if not ser.isOpen(): - LOG.debug("Serial console is not open") - return False - ser.write("GET %s\n" % noun.rstrip()) status = str(ser.readline()).rstrip() response = [] -- cgit v1.2.3 From a4310ee3db0b394dcebd4f6b49d3b25bba37fedf Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 23 Jul 2013 15:17:50 -0400 Subject: on azure datasource, grab use Deployment as the instance-id LP: #1204190 --- cloudinit/sources/DataSourceAzure.py | 44 ++++++++++++++++++++++++++- tests/unittests/test_datasource/test_azure.py | 23 +++++++++++++- 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index c90d7b07..0a5caebe 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -114,7 +114,8 @@ class DataSourceAzureNet(sources.DataSource): # claim the datasource even if the command failed util.logexc(LOG, "agent command '%s' failed.", mycfg['cmd']) - wait_for = [os.path.join(mycfg['datadir'], "SharedConfig.xml")] + shcfgxml = os.path.join(mycfg['datadir'], "SharedConfig.xml") + wait_for = [shcfgxml] fp_files = [] for pk in self.cfg.get('_pubkeys', []): @@ -129,6 +130,14 @@ class DataSourceAzureNet(sources.DataSource): LOG.debug("waited %.3f seconds for %d files to appear", time.time() - start, len(wait_for)) + if shcfgxml in missing: + LOG.warn("SharedConfig.xml missing, using static instance-id") + else: + try: + self.metadata['instance-id'] = iid_from_shared_config(shcfgxml) + except ValueError as e: + LOG.warn("failed to get instance id in %s: %s" % (shcfgxml, e)) + pubkeys = pubkeys_from_crt_files(fp_files) self.metadata['public-keys'] = pubkeys @@ -252,6 +261,20 @@ def load_azure_ovf_pubkeys(sshnode): return found +def single_node_at_path(node, pathlist): + curnode = node + for tok in pathlist: + results = find_child(curnode, lambda n: n.localName == tok) + if len(results) == 0: + raise ValueError("missing %s token in %s" % (tok, str(pathlist))) + if len(results) > 1: + raise ValueError("found %s nodes of type %s looking for %s" % + (len(results), tok, str(pathlist))) + curnode = results[0] + + return curnode + + def read_azure_ovf(contents): try: dom = minidom.parseString(contents) @@ -362,6 +385,25 @@ def load_azure_ds_dir(source_dir): return (md, ud, cfg, {'ovf-env.xml': contents}) +def iid_from_shared_config(path): + with open(path, "rb") as fp: + content = fp.read() + return 
iid_from_shared_config_content(content) + + +def iid_from_shared_config_content(content): + """ + find INSTANCE_ID in: + + + + + """ + dom = minidom.parseString(content) + depnode = single_node_at_path(dom, ["SharedConfig", "Deployment"]) + return depnode.attributes.get('name').value + + class BrokenAzureDataSource(Exception): pass diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index c79c25d8..2e8583f9 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -99,6 +99,10 @@ class TestAzureDataSource(MockerTestCase): data['pubkey_files'] = flist return ["pubkey_from: %s" % f for f in flist] + def _iid_from_shared_config(path): + data['iid_from_shared_cfg'] = path + return 'i-my-azure-id' + if data.get('ovfcontent') is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) @@ -112,7 +116,9 @@ class TestAzureDataSource(MockerTestCase): (mod, 'write_files', _write_files), (mod, 'wait_for_files', _wait_for_files), (mod, 'pubkeys_from_crt_files', - _pubkeys_from_crt_files)]) + _pubkeys_from_crt_files), + (mod, 'iid_from_shared_config', + _iid_from_shared_config), ]) dsrc = mod.DataSourceAzureNet( data.get('sys_cfg', {}), distro=None, paths=self.paths) @@ -131,6 +137,7 @@ class TestAzureDataSource(MockerTestCase): self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName']) self.assertTrue('ovf-env.xml' in data['files']) self.assertEqual(0700, data['datadir_mode']) + self.assertEqual(dsrc.metadata['instance-id'], 'i-my-azure-id') def test_user_cfg_set_agent_command(self): cfg = {'agent_command': "my_command"} @@ -227,6 +234,20 @@ class TestReadAzureOvf(MockerTestCase): self.assertIn(mypk, cfg['_pubkeys']) +class TestReadAzureSharedConfig(MockerTestCase): + def test_valid_content(self): + xml = """ + + + + + + + """ + ret = DataSourceAzure.iid_from_shared_config_content(xml) + self.assertEqual("MY_INSTANCE_ID", ret) + + def apply_patches(patches): ret = [] for (ref, name, replace) in patches: -- cgit v1.2.3 From 4b41f7dc3d37d5bf7397bbc34d8a5e0c56798ac7 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 23 Jul 2013 16:33:46 -0600 Subject: Changed get_serial to be fully parameterized and return the serial initialized. Added a mapping of attributes between cloud-init and smartos. 
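
The serial metadata protocol being parameterized here is line based:
write "GET <noun>\n", read one status line, then read payload lines
until a lone "." terminator. A rough client sketch assuming a
pyserial-style object (device path, timeout and function name are
illustrative):

    import serial

    def query_sketch(noun, device='/dev/ttyS1', timeout=60):
        ser = serial.Serial(device, timeout=timeout)
        try:
            ser.write("GET %s\n" % noun.rstrip())
            status = ser.readline().rstrip()  # first line is the status
            response = []
            while True:
                line = ser.readline()
                if line.rstrip() == '.':      # end-of-response marker
                    break
                response.append(line)
            return status, ''.join(response)
        finally:
            ser.close()
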
--- cloudinit/sources/DataSourceSmartOS.py | 64 ++++++++++++++----------- cloudinit/util.py | 5 +- tests/unittests/test_datasource/test_smartos.py | 10 ++-- 3 files changed, 43 insertions(+), 36 deletions(-) diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 129020ec..d6589f57 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -27,25 +27,37 @@ # -import os -import os.path -import serial from cloudinit import log as logging from cloudinit import sources from cloudinit import util +import os +import os.path +import serial DEF_TTY_LOC = '/dev/ttyS1' -TTY_LOC = None +DEF_TTY_TIMEOUT = 60 LOG = logging.getLogger(__name__) +SMARTOS_ATTRIB_MAP = { + #Cloud-init Key : (SmartOS Key, Strip line endings) + 'local-hostname': ('hostname', True), + 'public-keys': ('root_authorized_keys', True), + 'user-script': ('user-script', False), + 'user-data': ('user-data', False), + 'iptables_disable': ('iptables_disable', True), + 'motd_sys_info': ('motd_sys_info', True), +} + class DataSourceSmartOS(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'sdc') - self.seed = None self.is_smartdc = None + self.seed = self.sys_cfg.get("serial_device", DEF_TTY_LOC) + self.seed_timeout = self.sys_cfg.get("serial_timeout", + DEF_TTY_TIMEOUT) def __str__(self): root = sources.DataSource.__str__(self) @@ -55,30 +67,25 @@ class DataSourceSmartOS(sources.DataSource): md = {} ud = "" - TTY_LOC = self.sys_cfg.get("serial_device", DEF_TTY_LOC) - if not os.path.exists(TTY_LOC): + if not os.path.exists(self.seed): LOG.debug("Host does not appear to be on SmartOS") return False - self.seed = TTY_LOC + self.seed = self.seed system_uuid, system_type = dmi_data() if 'smartdc' not in system_type.lower(): LOG.debug("Host is not on SmartOS") return False self.is_smartdc = True + md['instance-id'] = system_uuid - hostname = query_data("hostname", strip=True) - if not hostname: - hostname = system_uuid + for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems(): + smartos_noun, strip = attribute + md[ci_noun] = query_data(smartos_noun, self.seed, + self.seed_timeout, strip=strip) - md['local-hostname'] = hostname - md['instance-id'] = system_uuid - md['public-keys'] = query_data("root_authorized_keys", strip=True) - md['user-script'] = query_data("user-script") - md['user-data'] = query_data("user-script") - md['iptables_disable'] = query_data("disable_iptables_flag", - strip=True) - md['motd_sys_info'] = query_data("enable_motd_sys_info", strip=True) + if not md['local-hostname']: + md['local-hostname'] = system_uuid if md['user-data']: ud = md['user-data'] @@ -93,7 +100,7 @@ class DataSourceSmartOS(sources.DataSource): return self.metadata['instance-id'] -def get_serial(): +def get_serial(seed_device, seed_timeout): """This is replaced in unit testing, allowing us to replace serial.Serial with a mocked class @@ -102,18 +109,17 @@ def get_serial(): each line individually up until the single ".", the transfer is usually very fast (i.e. microseconds) to get the response. 
""" - if not TTY_LOC: - raise AttributeError("TTY_LOC value is not set") - - _ret = serial.Serial(TTY_LOC, timeout=60) - if not _ret.isOpen(): - raise SystemError("Unable to open %s" % TTY_LOC) + if not seed_device: + raise AttributeError("seed_device value is not set") - return _ret + ser = serial.Serial(seed_device, timeout=seed_timeout) + if not ser.isOpen(): + raise SystemError("Unable to open %s" % seed_device) + return ser -def query_data(noun, strip=False): +def query_data(noun, seed_device, seed_timeout, strip=False): """Makes a request to via the serial console via "GET " In the response, the first line is the status, while subsequent lines @@ -124,7 +130,7 @@ def query_data(noun, strip=False): if not noun: return False - ser = get_serial() + ser = get_serial(seed_device, seed_timeout) ser.write("GET %s\n" % noun.rstrip()) status = str(ser.readline()).rstrip() response = [] diff --git a/cloudinit/util.py b/cloudinit/util.py index 7163225f..a2fbc004 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1744,13 +1744,14 @@ def get_mount_info(path, log=LOG): lines = load_file(mountinfo_path).splitlines() return parse_mount_info(path, lines, log) + def which(program): # Return path of program for execution if found in path def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) - fpath, fname = os.path.split(program) - if fpath: + _fpath, _ = os.path.split(program) + if _fpath: if is_exe(program): return program else: diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 494f9828..6c12f1e2 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -31,8 +31,8 @@ import uuid mock_returns = { 'hostname': 'test-host', 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', - 'disable_iptables_flag': False, - 'enable_motd_sys_info': False, + 'disable_iptables_flag': None, + 'enable_motd_sys_info': None, 'system_uuid': str(uuid.uuid4()), 'smartdc': 'smartdc', 'userdata': """ @@ -118,7 +118,7 @@ class TestSmartOSDataSource(MockerTestCase): def _get_ds(self): - def _get_serial(): + def _get_serial(*_): return MockSerial() def _dmi_data(): @@ -169,14 +169,14 @@ class TestSmartOSDataSource(MockerTestCase): dsrc = self._get_ds() ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(str(mock_returns['disable_iptables_flag']), + self.assertEquals(mock_returns['disable_iptables_flag'], dsrc.metadata['iptables_disable']) def test_motd_sys_info(self): dsrc = self._get_ds() ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(str(mock_returns['enable_motd_sys_info']), + self.assertEquals(mock_returns['enable_motd_sys_info'], dsrc.metadata['motd_sys_info']) -- cgit v1.2.3 From 0be043f1e677f533f64d0191f02b5fe956844157 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 23 Jul 2013 16:44:54 -0600 Subject: Added blurb to documentation about the datasource --- doc/examples/cloud-config-datasources.txt | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index fbabcad9..9f0ac386 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -45,4 +45,16 @@ datasource: Azure: agent_command: [service, walinuxagent, start] - + + SmartOS: + Smart OS provisions via a serial console actings a server. By default, + the second serial console is the device. 
SmartOS also uses a serial + timeout of 60 seconds, although that should never be hit. + + serial device: /dev/ttyS1 + serial timeout: 60 + + To change the defaults, you can define it via a cloud-config by creating + a .cfg file in /etc/cloud/cloud.cfg.d with the following: + serial_device: /dev/ttyS1 + serial_timeout: 30 -- cgit v1.2.3 From 1750c756e0d4a63747ffdcd0040958622e3caf58 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 24 Jul 2013 00:34:58 -0700 Subject: Add test for mime gzipped message segments. --- tests/unittests/test_userdata.py | 52 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 48 insertions(+), 4 deletions(-) diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py index 0ebb0484..dc252b5d 100644 --- a/tests/unittests/test_userdata.py +++ b/tests/unittests/test_userdata.py @@ -2,10 +2,14 @@ import StringIO +import gzip import logging +import mocker import os from email.mime.base import MIMEBase +from email.mime.multipart import MIMEMultipart +from email.mime.application import MIMEApplication from cloudinit import handlers from cloudinit import helpers as c_helpers @@ -118,7 +122,7 @@ p: 1 ci.datasource = FakeDataSource(data) mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) + passthrough=False) mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mocker.replay() @@ -129,6 +133,46 @@ p: 1 "Unhandled non-multipart (text/x-not-multipart) userdata:", log_file.getvalue()) + def test_mime_gzip_compressed(self): + """Tests that individual message gzip encoding works.""" + + def gzip_part(text): + contents = StringIO.StringIO() + f = gzip.GzipFile(fileobj=contents, mode='w') + f.write(str(text)) + f.flush() + f.close() + return MIMEApplication(contents.getvalue(), 'gzip') + + base_content1 = ''' +#cloud-config +a: 2 +''' + + base_content2 = ''' +#cloud-config +b: 3 +c: 4 +''' + + message = MIMEMultipart('test') + message.attach(gzip_part(base_content1)) + message.attach(gzip_part(base_content2)) + ci = stages.Init() + ci.datasource = FakeDataSource(str(message)) + new_root = self.makeDir() + self.patchUtils(new_root) + self.patchOS(new_root) + ci.fetch() + ci.consume_userdata() + contents = util.load_file(ci.paths.get_ipath("cloud_config")) + contents = util.load_yaml(contents) + self.assertTrue(isinstance(contents, dict)) + self.assertEquals(3, len(contents)) + self.assertEquals(2, contents['a']) + self.assertEquals(3, contents['b']) + self.assertEquals(4, contents['c']) + def test_mime_text_plain(self): """Mime message of type text/plain is ignored but shows warning.""" ci = stages.Init() @@ -137,7 +181,7 @@ p: 1 ci.datasource = FakeDataSource(message.as_string()) mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) + passthrough=False) mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) self.mocker.replay() @@ -156,7 +200,7 @@ p: 1 outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) + passthrough=False) mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) mock_write(outpath, script, 0700) self.mocker.replay() @@ -176,7 +220,7 @@ p: 1 outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") mock_write = self.mocker.replace("cloudinit.util.write_file", - passthrough=False) + passthrough=False) mock_write(ci.paths.get_ipath("cloud_config"), "", 0600) mock_write(outpath, script, 0700) self.mocker.replay() -- cgit v1.2.3 From 
c1f3fa86b45ba8b7ca6e0f6971de171ac5fccf15 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 24 Jul 2013 10:26:01 -0400 Subject: shorten comments in example cloud-config --- doc/examples/cloud-config-datasources.txt | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index 9f0ac386..a19353fc 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -47,14 +47,8 @@ datasource: agent_command: [service, walinuxagent, start] SmartOS: - Smart OS provisions via a serial console actings a server. By default, - the second serial console is the device. SmartOS also uses a serial - timeout of 60 seconds, although that should never be hit. - + # Smart OS datasource works over a serial console interacting with + # a server on the other end. By default, the second serial console is the + # device. SmartOS also uses a serial timeout of 60 seconds. serial device: /dev/ttyS1 serial timeout: 60 - - To change the defaults, you can define it via a cloud-config by creating - a .cfg file in /etc/cloud/cloud.cfg.d with the following: - serial_device: /dev/ttyS1 - serial_timeout: 30 -- cgit v1.2.3 From 0891f6611d1c264220a6f71306802db1e70651fc Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 24 Jul 2013 10:44:21 -0400 Subject: DataSourceSmartOS: fix issue if dmidecode is not present --- cloudinit/sources/DataSourceSmartOS.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index d6589f57..1ce20c10 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -72,7 +72,12 @@ class DataSourceSmartOS(sources.DataSource): return False self.seed = self.seed - system_uuid, system_type = dmi_data() + dmi_info = dmi_data() + if dmi_info is False: + LOG.debug("No dmidata utility found") + return False + + system_uuid, system_type = dmi_info if 'smartdc' not in system_type.lower(): LOG.debug("Host is not on SmartOS") return False -- cgit v1.2.3 From 243df010c49de52be0ca9159e15378bb335b1163 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 24 Jul 2013 11:04:56 -0400 Subject: change 'json-patch' to 'cloud-config-jsonp' --- cloudinit/handlers/__init__.py | 2 +- cloudinit/handlers/cloud_config.py | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 4c7c9295..2ddc75f4 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -62,7 +62,7 @@ INCLUSION_TYPES_MAP = { '#part-handler': 'text/part-handler', '#cloud-boothook': 'text/cloud-boothook', '#cloud-config-archive': 'text/cloud-config-archive', - '#json-patch': 'application/json-patch+json', + '#cloud-config-jsonp': 'text/cloud-config-jsonp', } # Sorted longest first diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 0f080e66..8bbc904d 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -54,8 +54,9 @@ DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()') CLOUD_PREFIX = "#cloud-config" # The file header -> content types this module will handle. 
+CC_JSONP_PRE = "#cloud-config-jsonp" CC_TYPES = { - '#json-patch': handlers.type_from_starts_with("#json-patch"), + CC_JSONP_PRE: handlers.type_from_starts_with(CC_JSONP_PRE), '#cloud-config': handlers.type_from_starts_with("#cloud-config"), } @@ -116,12 +117,12 @@ class CloudConfigPartHandler(handlers.Handler): def _merge_patch(self, payload): payload = payload.lstrip() - if payload.lower().startswith("#json-patch"): + if payload.lower().startswith(CC_JSONP_PRE): # JSON doesn't handle comments in this manner, so ensure that # if we started with this 'type' that we remove it before # attempting to load it as json (which the jsonpatch library will # attempt to do). - payload = payload[len("#json-patch"):] + payload = payload[CC_JSONP_PRE:] patch = jsonpatch.JsonPatch.from_string(payload) LOG.debug("Merging by applying json patch %s", patch) self.cloud_buf = patch.apply(self.cloud_buf, in_place=False) @@ -149,7 +150,7 @@ class CloudConfigPartHandler(handlers.Handler): # First time through, merge with an empty dict... if self.cloud_buf is None or not self.file_names: self.cloud_buf = {} - if ctype == CC_TYPES['#json-patch']: + if ctype == CC_TYPES[CC_JSONP_PRE]: self._merge_patch(payload) else: self._merge_part(payload, headers) -- cgit v1.2.3 From 97b19f3b1992e56ef8e1a055fbe64a19d1eacfbf Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 24 Jul 2013 11:07:55 -0400 Subject: rename CC_JSONP_PRE again (JSONP_PREFIX) and use CLOUD_PREFIX --- cloudinit/handlers/cloud_config.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 8bbc904d..7edae13d 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -52,12 +52,12 @@ MERGE_HEADER = 'Merge-Type' # This gets loaded into yaml with final result {'a': 22} DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()') CLOUD_PREFIX = "#cloud-config" +JSONP_PREFIX = "#cloud-config-jsonp" # The file header -> content types this module will handle. -CC_JSONP_PRE = "#cloud-config-jsonp" CC_TYPES = { - CC_JSONP_PRE: handlers.type_from_starts_with(CC_JSONP_PRE), - '#cloud-config': handlers.type_from_starts_with("#cloud-config"), + JSONP_PREFIX: handlers.type_from_starts_with(JSONP_PREFIX), + CLOUD_PREFIX: handlers.type_from_starts_with(CLOUD_PREFIX), } @@ -117,12 +117,12 @@ class CloudConfigPartHandler(handlers.Handler): def _merge_patch(self, payload): payload = payload.lstrip() - if payload.lower().startswith(CC_JSONP_PRE): + if payload.lower().startswith(JSONP_PREFIX): # JSON doesn't handle comments in this manner, so ensure that # if we started with this 'type' that we remove it before # attempting to load it as json (which the jsonpatch library will # attempt to do). - payload = payload[CC_JSONP_PRE:] + payload = payload[JSONP_PREFIX:] patch = jsonpatch.JsonPatch.from_string(payload) LOG.debug("Merging by applying json patch %s", patch) self.cloud_buf = patch.apply(self.cloud_buf, in_place=False) @@ -150,7 +150,7 @@ class CloudConfigPartHandler(handlers.Handler): # First time through, merge with an empty dict... 
if self.cloud_buf is None or not self.file_names: self.cloud_buf = {} - if ctype == CC_TYPES[CC_JSONP_PRE]: + if ctype == CC_TYPES[JSONP_PREFIX]: self._merge_patch(payload) else: self._merge_part(payload, headers) -- cgit v1.2.3 From 851906b6acde33fddcbdd1d41f366d6652d1e84d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 24 Jul 2013 08:39:03 -0700 Subject: Fix small prefix bug + jsonp tests. Fix the wrong usage of the prefix removal array action by just using the new util function that does these actions correctly. Add in a couple of unit tests to verify the jsonp merging and usage works as expected. --- cloudinit/handlers/cloud_config.py | 11 ++++--- tests/unittests/test_userdata.py | 59 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 7edae13d..34a73115 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -116,13 +116,12 @@ class CloudConfigPartHandler(handlers.Handler): return (payload_yaml, all_mergers) def _merge_patch(self, payload): + # JSON doesn't handle comments in this manner, so ensure that + # if we started with this 'type' that we remove it before + # attempting to load it as json (which the jsonpatch library will + # attempt to do). payload = payload.lstrip() - if payload.lower().startswith(JSONP_PREFIX): - # JSON doesn't handle comments in this manner, so ensure that - # if we started with this 'type' that we remove it before - # attempting to load it as json (which the jsonpatch library will - # attempt to do). - payload = payload[JSONP_PREFIX:] + payload = util.strip_prefix_suffix(payload, prefix=JSONP_PREFIX) patch = jsonpatch.JsonPatch.from_string(payload) LOG.debug("Merging by applying json patch %s", patch) self.cloud_buf = patch.apply(self.cloud_buf, in_place=False) diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py index 0ebb0484..5cd50f4f 100644 --- a/tests/unittests/test_userdata.py +++ b/tests/unittests/test_userdata.py @@ -6,6 +6,7 @@ import logging import os from email.mime.base import MIMEBase +from email.mime.multipart import MIMEMultipart from cloudinit import handlers from cloudinit import helpers as c_helpers @@ -50,6 +51,64 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): self._log.addHandler(self._log_handler) return log_file + def test_simple_jsonp(self): + blob = ''' +#cloud-config-jsonp +[ + { "op": "add", "path": "/baz", "value": "qux" }, + { "op": "add", "path": "/bar", "value": "qux2" } +] +''' + + ci = stages.Init() + ci.datasource = FakeDataSource(blob) + new_root = self.makeDir() + self.patchUtils(new_root) + self.patchOS(new_root) + ci.fetch() + ci.consume_userdata() + cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) + cc = util.load_yaml(cc_contents) + self.assertEquals(2, len(cc)) + self.assertEquals('qux', cc['baz']) + self.assertEquals('qux2', cc['bar']) + + def test_mixed_cloud_config(self): + blob_cc = ''' +#cloud-config +a: b +c: d +''' + message_cc = MIMEBase("text", "cloud-config") + message_cc.set_payload(blob_cc) + + blob_jp = ''' +#cloud-config-jsonp +[ + { "op": "replace", "path": "/a", "value": "c" }, + { "op": "remove", "path": "/c" } +] +''' + + message_jp = MIMEBase('text', "cloud-config-jsonp") + message_jp.set_payload(blob_jp) + + message = MIMEMultipart() + message.attach(message_cc) + message.attach(message_jp) + + ci = stages.Init() + ci.datasource = FakeDataSource(str(message)) + 
new_root = self.makeDir() + self.patchUtils(new_root) + self.patchOS(new_root) + ci.fetch() + ci.consume_userdata() + cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) + cc = util.load_yaml(cc_contents) + self.assertEquals(1, len(cc)) + self.assertEquals('c', cc['a']) + def test_merging_cloud_config(self): blob = ''' #cloud-config -- cgit v1.2.3 From ca24df789f7bccdc968727dad2a6a83f5a6d0520 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 24 Jul 2013 13:31:16 -0400 Subject: tests: fix pep8 issue Move long lines out of the test_util.py file and into tests/data. no pep8 or pylint errors now. --- tests/data/mountinfo_precise_ext4.txt | 24 ++++++++++++++++ tests/data/mountinfo_raring_btrfs.txt | 13 +++++++++ tests/unittests/test_util.py | 52 +++++------------------------------ 3 files changed, 44 insertions(+), 45 deletions(-) create mode 100644 tests/data/mountinfo_precise_ext4.txt create mode 100644 tests/data/mountinfo_raring_btrfs.txt diff --git a/tests/data/mountinfo_precise_ext4.txt b/tests/data/mountinfo_precise_ext4.txt new file mode 100644 index 00000000..a7a1db67 --- /dev/null +++ b/tests/data/mountinfo_precise_ext4.txt @@ -0,0 +1,24 @@ +15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=16422216k,nr_inodes=4105554,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=6572812k,mode=755 +20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs cgroup rw,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +25 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +26 19 0:19 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 19 0:21 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +24 21 0:18 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset +29 21 0:22 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +30 21 0:23 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +31 21 0:24 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +32 21 0:25 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +33 21 0:26 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +34 21 0:27 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +35 21 0:28 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +36 20 9:0 / /boot rw,relatime - ext4 /dev/md0 rw,data=ordered +37 16 0:29 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +39 28 0:30 / /run/user/foobar/gvfs rw,nosuid,nodev,relatime - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 diff --git a/tests/data/mountinfo_raring_btrfs.txt b/tests/data/mountinfo_raring_btrfs.txt new file mode 100644 index 00000000..c5795636 --- /dev/null +++ b/tests/data/mountinfo_raring_btrfs.txt @@ -0,0 +1,13 @@ +15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=865556k,nr_inodes=216389,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts 
devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=348196k,mode=755 +20 1 0:16 /@ / rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache +21 15 0:19 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +22 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +23 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +24 19 0:20 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +25 19 0:21 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +26 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +27 20 0:16 /@home /home rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 5853cb0f..87415cb5 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -6,6 +6,7 @@ import yaml from mocker import MockerTestCase from unittest import TestCase +from tests.unittests import helpers from cloudinit import importer from cloudinit import util @@ -250,50 +251,10 @@ class TestLoadYaml(TestCase): myobj) -class TestMountinfoParsing(TestCase): - precise_ext4_mountinfo = \ -"""15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=16422216k,nr_inodes=4105554,mode=755 -18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=6572812k,mode=755 -20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered -21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs cgroup rw,mode=755 -22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw -23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw -25 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw -26 19 0:19 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k -27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw -28 19 0:21 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 -24 21 0:18 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset -29 21 0:22 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu -30 21 0:23 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct -31 21 0:24 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory -32 21 0:25 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices -33 21 0:26 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer -34 21 0:27 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio -35 21 0:28 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event -36 20 9:0 / /boot rw,relatime - ext4 /dev/md0 rw,data=ordered -37 16 0:29 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw -39 28 0:30 / /run/user/foobar/gvfs rw,nosuid,nodev,relatime - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000""" - - raring_btrfs_mountinfo = \ -"""15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=865556k,nr_inodes=216389,mode=755 -18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=348196k,mode=755 -20 1 0:16 /@ / rw,relatime - btrfs 
/dev/vda1 rw,compress=lzo,space_cache -21 15 0:19 / /sys/fs/fuse/connections rw,relatime - fusectl none rw -22 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw -23 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw -24 19 0:20 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k -25 19 0:21 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw -26 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 -27 20 0:16 /@home /home rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache""" - +class TestMountinfoParsing(helpers.ResourceUsingTestCase): def test_invalid_mountinfo(self): - line = "20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered" + line = ("20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root" + "rw,errors=remount-ro,data=ordered") elements = line.split() for i in range(len(elements) + 1): lines = [' '.join(elements[0:i])] @@ -304,7 +265,8 @@ class TestMountinfoParsing(TestCase): self.assertEqual(expected, util.parse_mount_info('/', lines)) def test_precise_ext4_root(self): - lines = TestMountinfoParsing.precise_ext4_mountinfo.splitlines() + + lines = self.readResource('mountinfo_precise_ext4.txt').splitlines() expected = ('/dev/mapper/vg0-root', 'ext4', '/') self.assertEqual(expected, util.parse_mount_info('/', lines)) @@ -326,7 +288,7 @@ class TestMountinfoParsing(TestCase): self.assertEqual(expected, util.parse_mount_info('/run/lock', lines)) def test_raring_btrfs_root(self): - lines = TestMountinfoParsing.raring_btrfs_mountinfo.splitlines() + lines = self.readResource('mountinfo_raring_btrfs.txt').splitlines() expected = ('/dev/vda1', 'btrfs', '/') self.assertEqual(expected, util.parse_mount_info('/', lines)) -- cgit v1.2.3 From 8f3bfd9db16bfc6cd2acfb068b9ffe14ea1570d3 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 24 Jul 2013 15:08:40 -0400 Subject: update dependencies for jsonpatch --- packages/bddeb | 1 + packages/brpm | 2 ++ 2 files changed, 3 insertions(+) diff --git a/packages/bddeb b/packages/bddeb index 00bc717e..7ae07a80 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -36,6 +36,7 @@ PKG_MP = { 'prettytable': 'python-prettytable', 'pyyaml': 'python-yaml', 'requests': 'python-requests', + 'jsonpatch': 'python-json-patch', } DEBUILD_ARGS = ["-us", "-S", "-uc", "-d"] diff --git a/packages/brpm b/packages/brpm index 14faea4f..91a0a0ec 100755 --- a/packages/brpm +++ b/packages/brpm @@ -39,6 +39,7 @@ PKG_MP = { 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', + 'jsonpatch': 'python-jsonpatch', 'oauth': 'python-oauth', 'prettytable': 'python-prettytable', 'pyyaml': 'PyYAML', @@ -49,6 +50,7 @@ PKG_MP = { 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', + 'jsonpatch': 'python-jsonpatch', 'oauth': 'python-oauth', 'prettytable': 'python-prettytable', 'pyyaml': 'python-yaml', -- cgit v1.2.3 From 696bcc1f0acc67646872cd6ce1b90375ca0ae068 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 24 Jul 2013 22:12:33 -0700 Subject: Remove duplicate timezone file finding and error raising. 
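
The shape of the consolidation, as a minimal standalone sketch (the subclass
name here is illustrative; the helper, attribute, and exception follow the
diff below)::

    import os

    class Distro(object):
        # shared default; a distro subclass may override this location
        tz_zone_dir = "/usr/share/zoneinfo"

        def _find_tz_file(self, tz):
            # single copy of the lookup and validation that debian,
            # rhel and sles previously each carried themselves
            tz_file = os.path.join(self.tz_zone_dir, str(tz))
            if not os.path.isfile(tz_file):
                raise IOError("Invalid timezone %s, no file found at %s"
                              % (tz, tz_file))
            return tz_file

    class DebianLikeDistro(Distro):  # illustrative subclass name
        def set_timezone(self, tz):
            tz_file = self._find_tz_file(tz)
            # ... only the distro-specific handling of tz_file remains ...
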
--- cloudinit/distros/__init__.py | 9 +++++++++ cloudinit/distros/debian.py | 8 +------- cloudinit/distros/rhel.py | 8 +------- cloudinit/distros/sles.py | 8 +------- 4 files changed, 12 insertions(+), 21 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 249e1b19..74e95797 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -47,9 +47,11 @@ LOG = logging.getLogger(__name__) class Distro(object): __metaclass__ = abc.ABCMeta + hosts_fn = "/etc/hosts" ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users" hostname_conf_fn = "/etc/hostname" + tz_zone_dir = "/usr/share/zoneinfo" def __init__(self, name, cfg, paths): self._paths = paths @@ -66,6 +68,13 @@ class Distro(object): # to write this blob out in a distro format raise NotImplementedError() + def _find_tz_file(self, tz): + tz_file = os.path.join(self.tz_zone_dir, str(tz)) + if not os.path.isfile(tz_file): + raise IOError(("Invalid timezone %s," + " no file found at %s") % (tz, tz_file)) + return tz_file + def get_option(self, opt_name, default=None): return self._cfg.get(opt_name, default) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 0811eefd..8fe49cbe 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -44,7 +44,6 @@ class Distro(distros.Distro): network_conf_fn = "/etc/network/interfaces" tz_conf_fn = "/etc/timezone" tz_local_fn = "/etc/localtime" - tz_zone_dir = "/usr/share/zoneinfo" def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -130,12 +129,7 @@ class Distro(distros.Distro): return "127.0.1.1" def set_timezone(self, tz): - # TODO(harlowja): move this code into - # the parent distro... - tz_file = os.path.join(self.tz_zone_dir, str(tz)) - if not os.path.isfile(tz_file): - raise RuntimeError(("Invalid timezone %s," - " no file found at %s") % (tz, tz_file)) + tz_file = self._find_tz_file(tz) # Note: "" provides trailing newline during join tz_lines = [ util.make_header(), diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index a022ca60..ece1a5ff 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -51,7 +51,6 @@ class Distro(distros.Distro): network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s' resolve_conf_fn = "/etc/resolv.conf" tz_local_fn = "/etc/localtime" - tz_zone_dir = "/usr/share/zoneinfo" def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -164,12 +163,7 @@ class Distro(distros.Distro): return distros.Distro._bring_up_interfaces(self, device_names) def set_timezone(self, tz): - # TODO(harlowja): move this code into - # the parent distro... - tz_file = os.path.join(self.tz_zone_dir, str(tz)) - if not os.path.isfile(tz_file): - raise RuntimeError(("Invalid timezone %s," - " no file found at %s") % (tz, tz_file)) + tz_file = self._find_tz_file(tz) if self._dist_uses_systemd(): # Currently, timedatectl complains if invoked during startup # so for compatibility, create the link manually. 
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py index 904e931a..92a1c307 100644 --- a/cloudinit/distros/sles.py +++ b/cloudinit/distros/sles.py @@ -42,7 +42,6 @@ class Distro(distros.Distro): network_script_tpl = '/etc/sysconfig/network/ifcfg-%s' resolve_conf_fn = '/etc/resolv.conf' tz_local_fn = '/etc/localtime' - tz_zone_dir = '/usr/share/zoneinfo' def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -151,12 +150,7 @@ class Distro(distros.Distro): return distros.Distro._bring_up_interfaces(self, device_names) def set_timezone(self, tz): - # TODO(harlowja): move this code into - # the parent distro... - tz_file = os.path.join(self.tz_zone_dir, str(tz)) - if not os.path.isfile(tz_file): - raise RuntimeError(("Invalid timezone %s," - " no file found at %s") % (tz, tz_file)) + tz_file = self._find_tz_file(tz) # Adjust the sysconfig clock zone setting clock_cfg = { 'TIMEZONE': str(tz), -- cgit v1.2.3 From 3d10b8d080a874be022f9e25063ba77f0293c5e8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 25 Jul 2013 14:37:10 -0400 Subject: azure: support bouncing interfaces to publish new hostname See the added doc/sources/azure/README.rst for why this is necessary. Essentially, we now are doing the following in the get_data() method of azure datasource to publish this NewHostname: hostname NewHostName ifdown eth0; ifup eth0 LP: #1202758 --- cloudinit/sources/DataSourceAzure.py | 114 +++++++++++++++++----- doc/examples/cloud-config-datasources.txt | 5 + doc/sources/azure/README.rst | 134 ++++++++++++++++++++++++++ tests/unittests/test_datasource/test_azure.py | 77 ++++++++++++++- 4 files changed, 300 insertions(+), 30 deletions(-) create mode 100644 doc/sources/azure/README.rst diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 0a5caebe..30b06fef 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -31,9 +31,20 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] -BUILTIN_DS_CONFIG = {'datasource': {DS_NAME: { - 'agent_command': AGENT_START, - 'data_dir': "/var/lib/waagent"}}} +BOUNCE_COMMAND = ("i=$interface; x=0; ifdown $i || x=$?; " + "ifup $i || x=$?; exit $x") +BUILTIN_DS_CONFIG = { + 'agent_command': AGENT_START, + 'data_dir': "/var/lib/waagent", + 'set_hostname': True, + 'hostname_bounce': { + 'interface': 'eth0', + 'policy': True, + 'command': BOUNCE_COMMAND, + 'hostname_command': 'hostname', + } +} +DS_CFG_PATH = ['datasource', DS_NAME] class DataSourceAzureNet(sources.DataSource): @@ -42,19 +53,19 @@ class DataSourceAzureNet(sources.DataSource): self.seed_dir = os.path.join(paths.seed_dir, 'azure') self.cfg = {} self.seed = None + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, DS_CFG_PATH), + BUILTIN_DS_CONFIG]) def __str__(self): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) def get_data(self): - ddir_cfgpath = ['datasource', DS_NAME, 'data_dir'] # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. 
So, in order to successfully reboot we # need to look in the datadir and consider that valid - ddir = util.get_cfg_by_path(self.sys_cfg, ddir_cfgpath) - if ddir is None: - ddir = util.get_cfg_by_path(BUILTIN_DS_CONFIG, ddir_cfgpath) + ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] candidates.extend(list_possible_azure_ds_devs()) @@ -91,36 +102,40 @@ class DataSourceAzureNet(sources.DataSource): return False if found == ddir: - LOG.debug("using cached datasource in %s", ddir) - - fields = [('cmd', ['datasource', DS_NAME, 'agent_command']), - ('datadir', ddir_cfgpath)] - mycfg = {} - for cfg in (self.cfg, self.sys_cfg, BUILTIN_DS_CONFIG): - for name, path in fields: - if name in mycfg: - continue - value = util.get_cfg_by_path(cfg, keyp=path) - if value is not None: - mycfg[name] = value + LOG.debug("using files cached in %s", ddir) + + # now update ds_cfg to reflect contents pass in config + usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) + self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg]) + mycfg = self.ds_cfg # walinux agent writes files world readable, but expects # the directory to be protected. - write_files(mycfg['datadir'], files, dirmode=0700) + write_files(mycfg['data_dir'], files, dirmode=0700) + + # handle the hostname 'publishing' + try: + handle_set_hostname(mycfg.get('set_hostname'), + self.metadata.get('local-hostname'), + mycfg['hostname_bounce']) + except Exception as e: + LOG.warn("Failed publishing hostname: %s" % e) + util.logexc(LOG, "handling set_hostname failed") try: - invoke_agent(mycfg['cmd']) + invoke_agent(mycfg['agent_command']) except util.ProcessExecutionError: # claim the datasource even if the command failed - util.logexc(LOG, "agent command '%s' failed.", mycfg['cmd']) + util.logexc(LOG, "agent command '%s' failed.", + mycfg['agent_command']) - shcfgxml = os.path.join(mycfg['datadir'], "SharedConfig.xml") + shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml") wait_for = [shcfgxml] fp_files = [] for pk in self.cfg.get('_pubkeys', []): bname = pk['fingerprint'] + ".crt" - fp_files += [os.path.join(mycfg['datadir'], bname)] + fp_files += [os.path.join(mycfg['data_dir'], bname)] start = time.time() missing = wait_for_files(wait_for + fp_files) @@ -148,6 +163,43 @@ class DataSourceAzureNet(sources.DataSource): return self.cfg +def handle_set_hostname(enabled, hostname, cfg): + if not util.is_true(enabled): + return + + if not hostname: + LOG.warn("set_hostname was true but no local-hostname") + return + + apply_hostname_bounce(hostname=hostname, policy=cfg['policy'], + interface=cfg['interface'], + command=cfg['command'], + hostname_command=cfg['hostname_command']) + + +def apply_hostname_bounce(hostname, policy, interface, command, + hostname_command="hostname"): + # set the hostname to 'hostname' if it is not already set to that. 
+    # then, if policy is not off, bounce the interface using command
+    prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
+
+    util.subp([hostname_command, hostname])
+
+    if util.is_false(policy):
+        return
+
+    if prev_hostname == hostname and policy != "force":
+        return
+
+    env = os.environ.copy()
+    env['interface'] = interface
+
+    if command == "builtin":
+        command = BOUNCE_COMMAND
+
+    util.subp(command, shell=(not isinstance(command, list)), capture=True)
+
+
 def crtfile_to_pubkey(fname):
     pipeline = ('openssl x509 -noout -pubkey < "$0" |'
                 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
@@ -319,15 +371,21 @@ def read_azure_ovf(contents):
         name = child.localName.lower()
 
         simple = False
+        value = ""
         if (len(child.childNodes) == 1 and
             child.childNodes[0].nodeType == dom.TEXT_NODE):
             simple = True
             value = child.childNodes[0].wholeText
 
+        attrs = {k: v for k, v in child.attributes.items()}
+
         # we accept either UserData or CustomData.  If both are present
         # then behavior is undefined.
         if (name == "userdata" or name == "customdata"):
-            ud = base64.b64decode(''.join(value.split()))
+            if attrs.get('encoding') in (None, "base64"):
+                ud = base64.b64decode(''.join(value.split()))
+            else:
+                ud = value
         elif name == "username":
             username = value
         elif name == "userpassword":
@@ -335,7 +393,11 @@ def read_azure_ovf(contents):
         elif name == "hostname":
             md['local-hostname'] = value
         elif name == "dscfg":
-            cfg['datasource'] = {DS_NAME: util.load_yaml(value, default={})}
+            if attrs.get('encoding') in (None, "base64"):
+                dscfg = base64.b64decode(''.join(value.split()))
+            else:
+                dscfg = value
+            cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
         elif name == "ssh":
            cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
         elif name == "disablesshpasswordauthentication":
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index a19353fc..6544448e 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -45,6 +45,10 @@ datasource:
 
   Azure:
     agent_command: [service, walinuxagent, start]
+    set_hostname: True
+    hostname_bounce:
+      interface: eth0
+      policy: on # [can be 'on', 'off' or 'force']
 
   SmartOS:
     # Smart OS datasource works over a serial console interacting with
diff --git a/doc/sources/azure/README.rst b/doc/sources/azure/README.rst
new file mode 100644
index 00000000..8239d1fa
--- /dev/null
+++ b/doc/sources/azure/README.rst
@@ -0,0 +1,134 @@
+================
+Azure Datasource
+================
+
+This datasource finds metadata and user-data from the Azure cloud platform.
+
+Azure Platform
+--------------
+The azure cloud-platform provides initial data to an instance via an attached
+CD formatted in UDF.  That CD contains an 'ovf-env.xml' file that provides
+some information.  Additional information is obtained via interaction with
+the "endpoint".  The ip address of the endpoint is advertised to the instance
+inside of dhcp option 245.  On ubuntu, that can be seen in
+/var/lib/dhcp/dhclient.eth0.leases as a colon delimited hex value (example:
+``option unknown-245 64:41:60:82;`` is 100.65.96.130)
+
+walinuxagent
+------------
+In order to operate correctly, cloud-init needs walinuxagent to provide much
+of the interaction with azure.
In addition to "provisioning" code, walinux +does the following on the agent is a long running daemon that handles the +following things: +- generate a x509 certificate and send that to the endpoint + +waagent.conf config +~~~~~~~~~~~~~~~~~~~ +in order to use waagent.conf with cloud-init, the following settings are recommended. Other values can be changed or set to the defaults. + + :: + + # disabling provisioning turns off all 'Provisioning.*' function + Provisioning.Enabled=n + # this is currently not handled by cloud-init, so let walinuxagent do it. + ResourceDisk.Format=y + ResourceDisk.MountPoint=/mnt + + +Userdata +-------- +Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init +expects that user-data will be provided as base64 encoded value inside the +text child of a element named ``UserData`` or ``CustomData`` which is a direct +child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``) +If both ``UserData`` and ``CustomData`` are provided behavior is undefined on +which will be selected. + +In the example below, user-data provided is 'this is my userdata', and the +datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``. +That agent command will take affect as if it were specified in system config. + +Example: + +.. code:: + + + 1.0 + + LinuxProvisioningConfiguration + myHost + myuser + + dGhpcyBpcyBteSB1c2VyZGF0YQ=== + eyJhZ2VudF9jb21tYW5kIjogWyJzdGFydCIsICJ3YWxpbnV4YWdlbnQiXX0= + true + + + + 6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7 + this-value-unused + + + + + + +Configuration +------------- +Configuration for the datasource can be read from the system config's or set +via the `dscfg` entry in the `LinuxProvisioningConfigurationSet`. Content in +dscfg node is expected to be base64 encoded yaml content, and it will be +merged into the 'datasource: Azure' entry. + +The '``hostname_bounce: command``' entry can be either the literal string +'builtin' or a command to execute. The command will be invoked after the +hostname is set, and will have the 'interface' in its environment. If +``set_hostname`` is not true, then ``hostname_bounce`` will be ignored. + +An example might be: + command: ["sh", "-c", "killall dhclient; dhclient $interface"] + +.. code:: + + datasource: + agent_command + Azure: + agent_command: [service, walinuxagent, start] + set_hostname: True + hostname_bounce: + # the name of the interface to bounce + interface: eth0 + # policy can be 'on', 'off' or 'force' + policy: on + # the method 'bounce' command. + command: "builtin" + hostname_command: "hostname" + } + +hostname +-------- +When the user launches an instance, they provide a hostname for that instance. +The hostname is provided to the instance in the ovf-env.xml file as +``HostName``. + +Whatever value the instance provides in its dhcp request will resolve in the +domain returned in the 'search' request. + +The interesting issue is that a generic image will already have a hostname +configured. The ubuntu cloud images have 'ubuntu' as the hostname of the +system, and the initial dhcp request on eth0 is not guaranteed to occur after +the datasource code has been run. So, on first boot, that initial value will +be sent in the dhcp request and *that* value will resolve. + +In order to make the ``HostName`` provided in the ovf-env.xml resolve, a +dhcp request must be made with the new value. 
+Walinuxagent (in its current version) handles this by polling the state of
+the hostname and bouncing ('``ifdown eth0; ifup eth0``') the network
+interface if it sees that a change has been made.
+
+cloud-init handles this by setting the hostname in the DataSource's
+'get_data' method via '``hostname $HostName``', and then bouncing the
+interface.  This behavior can be configured or disabled in the datasource
+config.  See 'Configuration' above.
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 2e8583f9..c944cb13 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -26,8 +26,15 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
     xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
     <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
     """
-    for key, val in data.items():
-        content += "<%s>%s</%s>\n" % (key, val, key)
+    for key, dval in data.items():
+        if isinstance(dval, dict):
+            val = dval.get('text')
+            attrs = ' ' + ' '.join(["%s='%s'" % (k, v)
+                                    for k, v in dval.items() if k != 'text'])
+        else:
+            val = dval
+            attrs = ""
+        content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
 
     if userdata:
         content += "<UserData>%s</UserData>\n" % (base64.b64encode(userdata))
@@ -103,6 +110,9 @@
             data['iid_from_shared_cfg'] = path
             return 'i-my-azure-id'
 
+        def _apply_hostname_bounce(**kwargs):
+            data['apply_hostname_bounce'] = kwargs
+
         if data.get('ovfcontent') is not None:
             populate_dir(os.path.join(self.paths.seed_dir, "azure"),
                          {'ovf-env.xml': data['ovfcontent']})
@@ -118,7 +128,9 @@
                             (mod, 'pubkeys_from_crt_files',
                              _pubkeys_from_crt_files),
                             (mod, 'iid_from_shared_config',
-                             _iid_from_shared_config), ])
+                             _iid_from_shared_config),
+                            (mod, 'apply_hostname_bounce',
+                             _apply_hostname_bounce), ])
 
         dsrc = mod.DataSourceAzureNet(
             data.get('sys_cfg', {}), distro=None, paths=self.paths)
@@ -139,11 +151,26 @@
         self.assertEqual(0700, data['datadir_mode'])
         self.assertEqual(dsrc.metadata['instance-id'], 'i-my-azure-id')
 
+    def test_user_cfg_set_agent_command_plain(self):
+        # set dscfg in via plaintext
+        cfg = {'agent_command': "my_command"}
+        odata = {'HostName': "myhost", 'UserName': "myuser",
+                 'dscfg': {'text': yaml.dump(cfg), 'encoding': 'plain'}}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(data['agent_invoked'], cfg['agent_command'])
+
     def test_user_cfg_set_agent_command(self):
+        # set dscfg in via base64 encoded yaml
         cfg = {'agent_command': "my_command"}
         odata = {'HostName': "myhost", 'UserName': "myuser",
-                 'dscfg': yaml.dump(cfg)}
+                 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)),
+                           'encoding': 'base64'}}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+        print data
 
         dsrc = self._get_ds(data)
         ret = dsrc.get_data()
@@ -218,6 +245,48 @@
         for mypk in mypklist:
             self.assertIn(mypk, dsrc.cfg['_pubkeys'])
 
+    def test_disabled_bounce(self):
+        pass
+
+    def test_apply_bounce_call_1(self):
+        # hostname needs to get through to apply_hostname_bounce
+        mydata = "FOOBAR"
+        odata = {'HostName': 'my-random-hostname'}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        self._get_ds(data).get_data()
+        self.assertIn('hostname', data['apply_hostname_bounce'])
+        self.assertEqual(data['apply_hostname_bounce']['hostname'],
+                         odata['HostName'])
+
+    def
test_apply_bounce_call_configurable(self): + # hostname_bounce should be configurable in datasource cfg + cfg = {'hostname_bounce': {'interface': 'eth1', 'policy': 'off', + 'command': 'my-bounce-command', + 'hostname_command': 'my-hostname-command'}} + odata = {'HostName': "xhost", + 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), + 'encoding': 'base64'}} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + self._get_ds(data).get_data() + + for k in cfg['hostname_bounce']: + self.assertIn(k, data['apply_hostname_bounce']) + + for k, v in cfg['hostname_bounce'].items(): + self.assertEqual(data['apply_hostname_bounce'][k], v) + + def test_set_hostname_disabled(self): + # config specifying set_hostname off should not bounce + cfg = {'set_hostname': False} + odata = {'HostName': "xhost", + 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), + 'encoding': 'base64'}} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + self._get_ds(data).get_data() + + self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A") + class TestReadAzureOvf(MockerTestCase): def test_invalid_xml_raises_non_azure_ds(self): -- cgit v1.2.3 From 4d9668dac5fed8f713f3b4300fdb574f399c14ee Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 25 Jul 2013 15:27:44 -0400 Subject: minor azure cleanup --- cloudinit/sources/DataSourceAzure.py | 2 +- tests/unittests/test_datasource/test_azure.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 30b06fef..d4863429 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -54,7 +54,7 @@ class DataSourceAzureNet(sources.DataSource): self.cfg = {} self.seed = None self.ds_cfg = util.mergemanydict([ - util.get_cfg_by_path(sys_cfg, DS_CFG_PATH), + util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]) def __str__(self): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index c944cb13..4cd3f213 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -170,7 +170,6 @@ class TestAzureDataSource(MockerTestCase): 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), 'encoding': 'base64'}} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - print data dsrc = self._get_ds(data) ret = dsrc.get_data() -- cgit v1.2.3 From 068009432909ae54b42e2cb56ec6557b04c677b0 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 25 Jul 2013 16:34:19 -0400 Subject: pull in fixes for debian bug 712985 --- sysvinit/debian/cloud-config | 12 ++++++++++++ sysvinit/debian/cloud-final | 12 ++++++++++++ sysvinit/debian/cloud-init | 14 +++++++++++++- sysvinit/debian/cloud-init-local | 11 +++++++++++ 4 files changed, 48 insertions(+), 1 deletion(-) mode change 100755 => 100644 sysvinit/debian/cloud-config mode change 100755 => 100644 sysvinit/debian/cloud-final mode change 100755 => 100644 sysvinit/debian/cloud-init-local diff --git a/sysvinit/debian/cloud-config b/sysvinit/debian/cloud-config old mode 100755 new mode 100644 index 57888653..53322748 --- a/sysvinit/debian/cloud-config +++ b/sysvinit/debian/cloud-config @@ -11,6 +11,7 @@ # Authors: Julien Danjou # Juerg Haefliger +# Thomas Goirand PATH=/sbin:/usr/sbin:/bin:/usr/bin DESC="Cloud service" @@ -30,6 +31,17 @@ SCRIPTNAME=/etc/init.d/$NAME # and status_of_proc is working. . 
/lib/lsb/init-functions
 
+if init_is_upstart; then
+   case "$1" in
+   stop)
+       exit 0
+       ;;
+   *)
+       exit 1
+       ;;
+   esac
+fi
+
 case "$1" in
 start)
 	log_daemon_msg "Starting $DESC" "$NAME"
diff --git a/sysvinit/debian/cloud-final b/sysvinit/debian/cloud-final
old mode 100755
new mode 100644
index 46e9b454..55afc8b0
--- a/sysvinit/debian/cloud-final
+++ b/sysvinit/debian/cloud-final
@@ -13,6 +13,7 @@
 # Authors: Julien Danjou
 #          Juerg Haefliger
+#          Thomas Goirand
 
 PATH=/sbin:/usr/sbin:/bin:/usr/bin
 DESC="Cloud service"
@@ -32,6 +33,17 @@ SCRIPTNAME=/etc/init.d/$NAME
 # and status_of_proc is working.
 . /lib/lsb/init-functions
 
+if init_is_upstart; then
+   case "$1" in
+   stop)
+       exit 0
+       ;;
+   *)
+       exit 1
+       ;;
+   esac
+fi
+
 case "$1" in
 start)
 	log_daemon_msg "Starting $DESC" "$NAME"
diff --git a/sysvinit/debian/cloud-init b/sysvinit/debian/cloud-init
index 15ffeb2e..48fa0423 100755
--- a/sysvinit/debian/cloud-init
+++ b/sysvinit/debian/cloud-init
@@ -10,7 +10,8 @@
 # Description:       Cloud configuration initialization
 ### END INIT INFO
 
-# Author: Julien Danjou
+# Authors: Julien Danjou
+#          Thomas Goirand
 
 PATH=/sbin:/usr/sbin:/bin:/usr/bin
 DESC="Cloud service"
@@ -30,6 +31,17 @@ SCRIPTNAME=/etc/init.d/$NAME
 # and status_of_proc is working.
 . /lib/lsb/init-functions
 
+if init_is_upstart; then
+   case "$1" in
+   stop)
+       exit 0
+       ;;
+   *)
+       exit 1
+       ;;
+   esac
+fi
+
 case "$1" in
 start)
 	log_daemon_msg "Starting $DESC" "$NAME"
diff --git a/sysvinit/debian/cloud-init-local b/sysvinit/debian/cloud-init-local
old mode 100755
new mode 100644
index a1685c1e..802ee8e9
--- a/sysvinit/debian/cloud-init-local
+++ b/sysvinit/debian/cloud-init-local
@@ -30,6 +30,17 @@ SCRIPTNAME=/etc/init.d/$NAME
 # and status_of_proc is working.
 . /lib/lsb/init-functions
 
+if init_is_upstart; then
+   case "$1" in
+   stop)
+       exit 0
+       ;;
+   *)
+       exit 1
+       ;;
+   esac
+fi
+
 case "$1" in
 start)
 	log_daemon_msg "Starting $DESC" "$NAME"
-- 
cgit v1.2.3 

From 60dfcb852c3dafdcf3563cc3e6c631dfbc2bafd8 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 25 Jul 2013 17:01:57 -0400
Subject: add --init-system to ./packages/bddeb

this way you can now do:
  ./packages/bddeb --init-system=sysvinit_deb

---
 packages/bddeb | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/packages/bddeb b/packages/bddeb
index 00bc717e..7bf59fcd 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -94,12 +94,20 @@ def main():
                         default=False,
                         action='store_true')
 
+    parser.add_argument("--init-system", dest="init_system",
+                        help=("build deb with INIT_SYSTEM=xxx"
+                              " (default: %(default)s)"),
+                        default=os.environ.get("INIT_SYSTEM", "upstart"))
+
+
     for ent in DEBUILD_ARGS:
         parser.add_argument(ent, dest="debuild_args", action='append_const',
                             const=ent,
                             help=("pass through '%s' to debuild" % ent))
 
     args = parser.parse_args()
 
+    os.environ['INIT_SYSTEM'] = args.init_system
+
     capture = True
     if args.verbose:
         capture = False
-- 
cgit v1.2.3 

From 9da084bda820d0ab8500ea6ee13a014f46d3bfab Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 26 Jul 2013 14:05:52 -0400
Subject: azure: fix bouncing of interface

the environment that was set up to include 'interface' was not actually
being passed on to 'subp', so when the command ran it wasn't available.
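
A minimal sketch of the failure mode, using the standard library's
subprocess directly in place of util.subp::

    import os
    import subprocess

    env = os.environ.copy()
    env['interface'] = 'eth0'

    # broken: env is built but never handed to the child,
    # so the shell sees an empty $interface
    subprocess.call(['sh', '-c', 'echo bouncing "$interface"'])

    # fixed: pass env through, and $interface is set in the child
    subprocess.call(['sh', '-c', 'echo bouncing "$interface"'], env=env)
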
--- cloudinit/sources/DataSourceAzure.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d4863429..9503b045 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -31,8 +31,9 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] -BOUNCE_COMMAND = ("i=$interface; x=0; ifdown $i || x=$?; " - "ifup $i || x=$?; exit $x") +BOUNCE_COMMAND = ['sh', '-xc', + "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"] + BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START, 'data_dir': "/var/lib/waagent", @@ -185,19 +186,29 @@ def apply_hostname_bounce(hostname, policy, interface, command, util.subp([hostname_command, hostname]) + msg = ("phostname=%s hostname=%s policy=%s interface=%s" % + (prev_hostname, hostname, policy, interface)) + if util.is_false(policy): + LOG.debug("pubhname: policy false, skipping [%s]", msg) return if prev_hostname == hostname and policy != "force": + LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg) return env = os.environ.copy() env['interface'] = interface + env['hostname'] = hostname + env['old_hostname'] = prev_hostname if command == "builtin": command = BOUNCE_COMMAND - util.subp(command, shell=(not isinstance(command, list)), capture=True) + LOG.debug("pubhname: publishing hostname [%s]", msg) + shell = not isinstance(command, (list, tuple)) + (output, err) = util.subp(command, shell=shell, capture=True, env=env) + LOG.debug("output: %s. err: %s", output, err) def crtfile_to_pubkey(fname): -- cgit v1.2.3 From de1e9a7e8e23d4b986c85365a79b9cf026a85ed1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 27 Jul 2013 14:20:12 -0700 Subject: Add the pyserial dependency. LP: #1205720 --- Requires | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Requires b/Requires index 5086230d..f19c9691 100644 --- a/Requires +++ b/Requires @@ -10,6 +10,10 @@ PrettyTable # datasource is removed, this is no longer needed oauth +# This one is currently used only by the SmartOS datasource. If that +# datasource is removed, this is no longer needed +pyserial + # This is only needed for places where we need to support configs in a manner # that the built-in config parser is not sufficent (ie # when we need to preserve comments, or do not have a top-level -- cgit v1.2.3 From e9f1190993fc5e66cd7bcc47016a5d7335fe8763 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 29 Jul 2013 12:02:03 -0400 Subject: DataSourceAzure: do not capture output of bounce command As shown in comments of bug 1202758 and filing of ntp bug 1206164, waiting for the output of this command causes us to wait for ntpdate to fully finish. Ideally I think we'd disable ntpdate running on this run, but that is not trivially possible. 
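
A minimal sketch of why capturing blocks, using plain subprocess (a
backgrounded sleep stands in for ntpdate holding the pipe's write end open)::

    import subprocess

    cmd = ['sh', '-c', 'sleep 5 & echo done']

    # capturing: communicate() reads until EOF, and EOF only arrives once
    # the backgrounded sleep (which inherited the write end of the pipe)
    # exits -- roughly five seconds later
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    proc.communicate()

    # not capturing: returns as soon as the shell itself exits
    subprocess.call(cmd)
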
--- cloudinit/sources/DataSourceAzure.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 9503b045..2f28702e 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -206,9 +206,11 @@ def apply_hostname_bounce(hostname, policy, interface, command, command = BOUNCE_COMMAND LOG.debug("pubhname: publishing hostname [%s]", msg) + start = time.time() shell = not isinstance(command, (list, tuple)) - (output, err) = util.subp(command, shell=shell, capture=True, env=env) - LOG.debug("output: %s. err: %s", output, err) + # capture=False, see comments in bug 1202758 and bug 1206164. + (output, err) = util.subp(command, shell=shell, capture=False, env=env) + LOG.debug("publishing hostname took %.3f seconds", time.time() - start) def crtfile_to_pubkey(fname): -- cgit v1.2.3 From 88b2a8ef062398499a2c14d309c132a081cac26b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 29 Jul 2013 13:21:57 -0400 Subject: add 'pyserial' to bddeb and bdrpm --- packages/bddeb | 3 ++- packages/brpm | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/bddeb b/packages/bddeb index 15d424f2..30559870 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -32,11 +32,12 @@ PKG_MP = { 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', + 'jsonpatch': 'python-json-patch', 'oauth': 'python-oauth', 'prettytable': 'python-prettytable', + 'pyserial': 'python-serial', 'pyyaml': 'python-yaml', 'requests': 'python-requests', - 'jsonpatch': 'python-json-patch', } DEBUILD_ARGS = ["-us", "-S", "-uc", "-d"] diff --git a/packages/brpm b/packages/brpm index 91a0a0ec..8c90a0ab 100755 --- a/packages/brpm +++ b/packages/brpm @@ -42,6 +42,7 @@ PKG_MP = { 'jsonpatch': 'python-jsonpatch', 'oauth': 'python-oauth', 'prettytable': 'python-prettytable', + 'pyserial': 'pyserial', 'pyyaml': 'PyYAML', 'requests': 'python-requests', }, @@ -53,6 +54,7 @@ PKG_MP = { 'jsonpatch': 'python-jsonpatch', 'oauth': 'python-oauth', 'prettytable': 'python-prettytable', + 'pyserial': 'python-pyserial', 'pyyaml': 'python-yaml', 'requests': 'python-requests', } -- cgit v1.2.3 From 66490ebb92af59d148f79aae42a2eddc1ecedb7e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 30 Jul 2013 14:23:10 -0400 Subject: add util.log_time helper The reason for this is that more and more things I was wanting to be able to see how long they took. This puts that time logic into a single place. It also supports (by default) reading from /proc/uptime as the timing mechanism. While that is almost certainly slower than time.time(), it does give millisecond granularity and is not affected by 'ntpdate' having run in between the two events. --- ChangeLog | 3 +++ bin/cloud-init | 5 ++++- cloudinit/config/cc_growpart.py | 3 ++- cloudinit/config/cc_resizefs.py | 11 +++++------ cloudinit/sources/DataSourceAzure.py | 14 ++++++-------- cloudinit/util.py | 35 +++++++++++++++++++++++++++++++++++ 6 files changed, 55 insertions(+), 16 deletions(-) diff --git a/ChangeLog b/ChangeLog index a255d24a..68d03376 100644 --- a/ChangeLog +++ b/ChangeLog @@ -11,6 +11,9 @@ - always finalize handlers even if processing failed (LP: #1203368) - support merging into cloud-config via jsonp. (LP: #1200476) - add datasource 'SmartOS' for Joyent Cloud. Adds a dependency on serial. + - add 'log_time' helper to util for timing how long things take + which also reads from uptime. 
uptime is useful as clock may change during + boot due to ntp. 0.7.2: - add a debian watch file - add 'sudo' entry to ubuntu's default user (LP: #1080717) diff --git a/bin/cloud-init b/bin/cloud-init index c5a5b949..bd9ddc04 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -502,7 +502,10 @@ def main(): signal_handler.attach_handlers() (name, functor) = args.action - return functor(name, args) + + return util.log_time(logfunc=LOG.debug, + msg="cloud-init mode '%s'" % name, uptime=True, + func=functor, args=(name, args)) if __name__ == '__main__': diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 4f8c8f80..ba6c58af 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -264,7 +264,8 @@ def handle(_name, cfg, _cloud, log, _args): raise e return - resized = resize_devices(resizer, devices) + resized = util.log_time(logfunc=log.debug, msg="resize_devices", + func=resize_devices, args=(resizer, devices)) for (entry, action, msg) in resized: if action == RESIZE.CHANGED: log.info("'%s' resized: %s" % (entry, msg)) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index b4ee16b2..56040fdd 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -21,7 +21,6 @@ import errno import os import stat -import time from cloudinit.settings import PER_ALWAYS from cloudinit import util @@ -120,9 +119,12 @@ def handle(name, cfg, _cloud, log, args): if resize_root == NOBLOCK: # Fork to a child that will run # the resize command - util.fork_cb(do_resize, resize_cmd, log) + util.fork_cb( + util.log_time(logfunc=log.debug, msg="backgrounded Resizing", + func=do_resize, args=(resize_cmd, log))) else: - do_resize(resize_cmd, log) + util.log_time(logfunc=log.debug, msg="Resizing", + func=do_resize, args=(resize_cmd, log)) action = 'Resized' if resize_root == NOBLOCK: @@ -132,13 +134,10 @@ def handle(name, cfg, _cloud, log, args): def do_resize(resize_cmd, log): - start = time.time() try: util.subp(resize_cmd) except util.ProcessExecutionError: util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd) raise - tot_time = time.time() - start - log.debug("Resizing took %.3f seconds", tot_time) # TODO(harlowja): Should we add a fsck check after this to make # sure we didn't corrupt anything? diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 2f28702e..f906b8fa 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -138,13 +138,11 @@ class DataSourceAzureNet(sources.DataSource): bname = pk['fingerprint'] + ".crt" fp_files += [os.path.join(mycfg['data_dir'], bname)] - start = time.time() - missing = wait_for_files(wait_for + fp_files) + missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", + func=wait_for_files, + args=(wait_for + fp_files,)) if len(missing): LOG.warn("Did not find files, but going on: %s", missing) - else: - LOG.debug("waited %.3f seconds for %d files to appear", - time.time() - start, len(wait_for)) if shcfgxml in missing: LOG.warn("SharedConfig.xml missing, using static instance-id") @@ -206,11 +204,11 @@ def apply_hostname_bounce(hostname, policy, interface, command, command = BOUNCE_COMMAND LOG.debug("pubhname: publishing hostname [%s]", msg) - start = time.time() shell = not isinstance(command, (list, tuple)) # capture=False, see comments in bug 1202758 and bug 1206164. 
- (output, err) = util.subp(command, shell=shell, capture=False, env=env) - LOG.debug("publishing hostname took %.3f seconds", time.time() - start) + util.log_time(logfunc=LOG.debug, msg="publishing hostname", + func=util.subp, kwargs={'command': command, 'shell': shell, + 'capture': False, 'env': env}) def crtfile_to_pubkey(fname): diff --git a/cloudinit/util.py b/cloudinit/util.py index 8542fe27..b0eb56e6 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1770,3 +1770,38 @@ def which(program): return exe_file return None + + +def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=True): + if args is None: + args = [] + if kwargs is None: + kwargs = {} + + start = time.time() + + ustart = None + if get_uptime: + try: + ustart = float(uptime()) + except ValueError: + pass + + try: + ret = func(*args, **kwargs) + finally: + delta = time.time() - start + if ustart is not None: + try: + udelta = float(uptime()) - ustart + except ValueError: + udelta = "N/A" + + tmsg = " took %0.3f seconds" % delta + if get_uptime: + tmsg += "(%0.2f)" % udelta + try: + logfunc(msg + tmsg) + except: + pass + return ret -- cgit v1.2.3 From 0ca150b08433fbc57e10d599a46e300142c955c5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 30 Jul 2013 14:28:09 -0400 Subject: set reading /proc/uptime to false by default. reading /proc/uptime is going to be slower, and no reason to do it on most things. Better to only do it when you suspect maybe a need for it. --- bin/cloud-init | 5 ++--- cloudinit/sources/DataSourceAzure.py | 5 +++-- cloudinit/util.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bin/cloud-init b/bin/cloud-init index bd9ddc04..b4f9fd07 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -503,9 +503,8 @@ def main(): (name, functor) = args.action - return util.log_time(logfunc=LOG.debug, - msg="cloud-init mode '%s'" % name, uptime=True, - func=functor, args=(name, args)) + return util.log_time(logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, + get_uptime=True, func=functor, args=(name, args)) if __name__ == '__main__': diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index f906b8fa..1a74de21 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -207,8 +207,9 @@ def apply_hostname_bounce(hostname, policy, interface, command, shell = not isinstance(command, (list, tuple)) # capture=False, see comments in bug 1202758 and bug 1206164. util.log_time(logfunc=LOG.debug, msg="publishing hostname", - func=util.subp, kwargs={'command': command, 'shell': shell, - 'capture': False, 'env': env}) + get_uptime=True, func=util.subp, + kwargs={'command': command, 'shell': shell, 'capture': False, + 'env': env}) def crtfile_to_pubkey(fname): diff --git a/cloudinit/util.py b/cloudinit/util.py index b0eb56e6..4a74ba57 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1772,7 +1772,7 @@ def which(program): return None -def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=True): +def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False): if args is None: args = [] if kwargs is None: -- cgit v1.2.3 From 8c4d88a630025b6fe9f90957343a94105768533f Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 30 Jul 2013 17:00:33 -0600 Subject: Added base64 support to SmartOS datasource. Added documentation on SmartOS datasource. 
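
The decode rule reduced to a standalone sketch; ``maybe_b64decode`` is a
hypothetical helper name, but the exclusion list and the TypeError fallback
mirror SMARTOS_NO_BASE64 and query_data in the diff below::

    import base64

    NO_BASE64 = ['root_authorized_keys', 'motd_sys_info', 'iptables_disable']

    def maybe_b64decode(key, value, all_base64=False, b64_keys=()):
        # never decode the excluded keys
        if key in NO_BASE64:
            return value
        if all_base64 or key in b64_keys:
            try:
                return base64.b64decode(value)
            except TypeError:
                # not valid base64 (python 2); fall back to the raw text
                return value
        return value
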
--- cloudinit/sources/DataSourceSmartOS.py | 60 +++++++++++++++++++--- doc/sources/smartos/README.rst | 66 ++++++++++++++++++++++++ tests/unittests/test_datasource/test_smartos.py | 67 +++++++++++++++++++++---- 3 files changed, 176 insertions(+), 17 deletions(-) create mode 100644 doc/sources/smartos/README.rst diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 1ce20c10..e0bb871c 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -27,6 +27,7 @@ # +import base64 from cloudinit import log as logging from cloudinit import sources from cloudinit import util @@ -49,6 +50,10 @@ SMARTOS_ATTRIB_MAP = { 'motd_sys_info': ('motd_sys_info', True), } +# These are values which will never be base64 encoded. +SMARTOS_NO_BASE64 = ['root_authorized_keys', 'motd_sys_info', + 'iptables_disable'] + class DataSourceSmartOS(sources.DataSource): def __init__(self, sys_cfg, distro, paths): @@ -56,6 +61,10 @@ class DataSourceSmartOS(sources.DataSource): self.seed_dir = os.path.join(paths.seed_dir, 'sdc') self.is_smartdc = None self.seed = self.sys_cfg.get("serial_device", DEF_TTY_LOC) + self.all_base64 = self.sys_cfg.get("decode_base64", False) + self.base_64_encoded = [] + self.smartos_no_base64 = self.sys_cfg.get("no_base64_decode", + SMARTOS_NO_BASE64) self.seed_timeout = self.sys_cfg.get("serial_timeout", DEF_TTY_TIMEOUT) @@ -84,17 +93,41 @@ class DataSourceSmartOS(sources.DataSource): self.is_smartdc = True md['instance-id'] = system_uuid + self.base_64_encoded = query_data('base_64_enocded', + self.seed, + self.seed_timeout, + strip=True) + if self.base_64_encoded: + self.base_64_encoded = str(self.base_64_encoded).split(',') + else: + self.base_64_encoded = [] + + if not self.all_base64: + self.all_base64 = util.is_true(query_data('meta_encoded_base64', + self.seed, + self.seed_timeout, + strip=True)) + for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems(): smartos_noun, strip = attribute + + b64encoded = False + if self.all_base64 and \ + (smartos_noun not in self.smartos_no_base64 and \ + ci_noun not in self.smartos_no_base64): + b64encoded = True + md[ci_noun] = query_data(smartos_noun, self.seed, - self.seed_timeout, strip=strip) + self.seed_timeout, strip=strip, + b64encoded=b64encoded) if not md['local-hostname']: md['local-hostname'] = system_uuid + ud = None if md['user-data']: ud = md['user-data'] - else: + elif md['user-script']: ud = md['user-script'] self.metadata = md @@ -124,12 +157,17 @@ def get_serial(seed_device, seed_timeout): return ser -def query_data(noun, seed_device, seed_timeout, strip=False): +def query_data(noun, seed_device, seed_timeout, strip=False, b64encoded=False): """Makes a request to via the serial console via "GET " In the response, the first line is the status, while subsequent lines are is the value. A blank line with a "." is used to indicate end of response. + + If the response is expected to be base64 encoded, then set b64encoded + to true. Unfortantely, there is no way to know if something is 100% + encoded, so this method relies on being told if the data is base64 or + not. 
""" if not noun: @@ -153,12 +191,22 @@ def query_data(noun, seed_device, seed_timeout, strip=False): response.append(m) ser.close() + + resp = None if not strip: - return "".join(response) + resp = "".join(response) + elif b64encoded: + resp = "".join(response).rstrip() else: - return "".join(response).rstrip() + resp = "".join(response).rstrip() + + if b64encoded: + try: + return base64.b64decode(resp) + except TypeError: + return resp - return None + return resp def dmi_data(): diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst new file mode 100644 index 00000000..96310857 --- /dev/null +++ b/doc/sources/smartos/README.rst @@ -0,0 +1,66 @@ +================== +SmartOS Datasource +================== + +This datasource finds metadata and user-data from the SmartOS virtualization +platform (i.e. Joyent). + +SmartOS Platform +---------------- +The SmartOS virtualization platform meta-data to the instance via the second +serial console. On Linux, this is /dev/ttyS1. The data is a provided via a +simple protocol, where something queries for the userdata, where the console +responds with the status and if "SUCCESS" returns until a single ".\n". + +The format is lossy. As such, new versions of the SmartOS tooling will include +support for base64 encoded data. + +Userdata +-------- + +In SmartOS parlance, user-data is a actually meta-data. This userdata can be +provided a key-value pairs. + +Cloud-init supports reading the traditional meta-data fields supported by the +SmartOS tools. These are: + * root_authorized_keys + * hostname + * enable_motd_sys_info + * iptables_disable + +Note: At this time iptables_disable and enable_motd_sys_info are read but + are not actioned. + +user-script +----------- + +SmartOS traditionally supports sending over a user-script for execution at the +rc.local level. Cloud-init supports running user-scripts as if they were +cloud-init user-data. In this sense, anything with a shell interpetter +directive will run + +user-data and user-script +------------------------- + +In the event that a user defines the meta-data key of "user-data" it will +always supercede any user-script data. This is for consistency. + +base64 +------ + +In order to provide a lossy format, all data except for: + * root_authorized_keys + * enable_motd_sys_info + * iptables_disable + +This means that user-script and user-data as well as other values can be +base64 encoded to provide a lossy format. Since Cloud-init can only guess +as to whether or not something is truly base64 encoded, the following +meta-data keys are hints as to whether or not to base64 decode something: + * decode_base64: Except for excluded keys, attempt to base64 decode + the values. If the value fails to decode properly, it will be + returned in its text + * base_64_encoded: A comma deliminated list of which values are base64 + encoded. + * no_base64_decode: This is a configuration setting (i.e. /etc/cloud/cloud.cfg.d) + that sets which values should not be base64 decoded. diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 6c12f1e2..ae621433 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -22,6 +22,7 @@ # return responses. 
# +import base64 from cloudinit import helpers from cloudinit.sources import DataSourceSmartOS @@ -35,7 +36,7 @@ mock_returns = { 'enable_motd_sys_info': None, 'system_uuid': str(uuid.uuid4()), 'smartdc': 'smartdc', - 'userdata': """ + 'user-data': """ #!/bin/sh /bin/true """, @@ -48,12 +49,14 @@ class MockSerial(object): port = None - def __init__(self): + def __init__(self, b64encode=False): self.last = None self.last = None self.new = True self.count = 0 self.mocked_out = [] + self.b64encode = b64encode + self.b64excluded = DataSourceSmartOS.SMARTOS_NO_BASE64 def open(self): return True @@ -87,11 +90,17 @@ class MockSerial(object): def _format_out(self): if self.last in mock_returns: - try: - for l in mock_returns[self.last].splitlines(): - yield "%s\n" % l - except: - yield "%s\n" % mock_returns[self.last] + _mret = mock_returns[self.last] + if self.b64encode and \ + self.last not in self.b64excluded: + yield base64.b64encode(_mret) + + else: + try: + for l in _mret.splitlines(): + yield "%s\n" % l.rstrip() + except: + yield "%s\n" % _mret.rstrip() yield '\n' yield '.' @@ -116,16 +125,19 @@ class TestSmartOSDataSource(MockerTestCase): ret = apply_patches(patches) self.unapply += ret - def _get_ds(self): + def _get_ds(self, b64encode=False, sys_cfg=None): + mod = DataSourceSmartOS def _get_serial(*_): - return MockSerial() + return MockSerial(b64encode=b64encode) def _dmi_data(): return mock_returns['system_uuid'], 'smartdc' - data = {'sys_cfg': {}} - mod = DataSourceSmartOS + if not sys_cfg: + sys_cfg = {} + + data = {'sys_cfg': sys_cfg} self.apply_patches([(mod, 'get_serial', _get_serial)]) self.apply_patches([(mod, 'dmi_data', _dmi_data)]) dsrc = mod.DataSourceSmartOS( @@ -158,6 +170,13 @@ class TestSmartOSDataSource(MockerTestCase): self.assertEquals(mock_returns['root_authorized_keys'], dsrc.metadata['public-keys']) + def test_hostname_b64(self): + dsrc = self._get_ds(b64encode=True) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(base64.b64encode(mock_returns['hostname']), + dsrc.metadata['local-hostname']) + def test_hostname(self): dsrc = self._get_ds() ret = dsrc.get_data() @@ -165,6 +184,32 @@ class TestSmartOSDataSource(MockerTestCase): self.assertEquals(mock_returns['hostname'], dsrc.metadata['local-hostname']) + def test_base64(self): + """This tests to make sure that SmartOS system key/value pairs + are not interpetted as being base64 encoded, while making + sure that the others are when 'decode_base64' is set""" + dsrc = self._get_ds(sys_cfg={'decode_base64': True}, + b64encode=True) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(mock_returns['hostname'], + dsrc.metadata['local-hostname']) + self.assertEquals("%s" % mock_returns['user-data'], + dsrc.userdata_raw) + self.assertEquals(mock_returns['root_authorized_keys'], + dsrc.metadata['public-keys']) + self.assertEquals(mock_returns['disable_iptables_flag'], + dsrc.metadata['iptables_disable']) + self.assertEquals(mock_returns['enable_motd_sys_info'], + dsrc.metadata['motd_sys_info']) + + def test_userdata(self): + dsrc = self._get_ds() + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals("%s\n" % mock_returns['user-data'], + dsrc.userdata_raw) + def test_disable_iptables_flag(self): dsrc = self._get_ds() ret = dsrc.get_data() -- cgit v1.2.3 From 1e84d5dad7dcd21a521d0977963450424ec35c9c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 6 Aug 2013 07:15:36 +0100 Subject: initially add support for apt_ftp_proxy and apt_https_proxy. tests to come. 
LP: #1057195 --- cloudinit/config/cc_apt_configure.py | 41 ++++++++++++++++++++++++------------ doc/examples/cloud-config-TODO.txt | 20 ------------------ doc/examples/cloud-config.txt | 3 +++ 3 files changed, 31 insertions(+), 33 deletions(-) delete mode 100644 doc/examples/cloud-config-TODO.txt diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 3ce3b351..0c26050a 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -21,13 +21,17 @@ import glob import os +from cloudinit import log as logging from cloudinit import templater from cloudinit import util +LOG = logging.getLogger(__name__) + distros = ['ubuntu', 'debian'] PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" -PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" +APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config" +APT_PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" # A temporary shell program to get a given gpg key # from a given keyserver @@ -67,18 +71,10 @@ def handle(name, cfg, cloud, log, _args): "security": "security.ubuntu.com/ubuntu"}) rename_apt_lists(old_mirrors, mirrors) - # Set up any apt proxy - proxy = cfg.get("apt_proxy", None) - proxy_filename = PROXY_FN - if proxy: - try: - # See man 'apt.conf' - contents = PROXY_TPL % (proxy) - util.write_file(proxy_filename, contents) - except Exception as e: - util.logexc(log, "Failed to write proxy to %s", proxy_filename) - elif os.path.isfile(proxy_filename): - util.del_file(proxy_filename) + try: + apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN) + except Exception as e: + log.warn("failed to proxy or apt config info: %s", e) # Process 'apt_sources' if 'apt_sources' in cfg: @@ -256,3 +252,22 @@ def find_apt_mirror_info(cloud, cfg): mirror_info.update({'primary': mirror}) return mirror_info + + +def apply_apt_config(cfg, proxy_fname, config_fname): + # Set up any apt proxy + cfgs = (('apt_proxy', 'Acquire::HTTP::Proxy "%s";'), + ('apt_http_proxy', 'Acquire::HTTP::Proxy "%s";'), + ('apt_ftp_proxy', 'Acquire::FTP::Proxy "%s";'), + ('apt_https_proxy', 'Acquire::HTTPS::Proxy "%s";')) + + proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)] + if len(proxies): + util.write_file(proxy_fname, '\n'.join(proxies) + '\n') + elif os.path.isfile(proxy_fname): + util.del_file(proxy_fname) + + if cfg.get('apt_config', None): + util.write_file(config_fname, cfg.get('apt_config')) + elif os.path.isfile(config_fname): + util.del_file(config_fname) diff --git a/doc/examples/cloud-config-TODO.txt b/doc/examples/cloud-config-TODO.txt deleted file mode 100644 index c7ed54ab..00000000 --- a/doc/examples/cloud-config-TODO.txt +++ /dev/null @@ -1,20 +0,0 @@ -# Add apt configuration files -# Add an apt.conf.d/ file with the relevant content -# -# See apt.conf man page for more information. 
-# -# Defaults: -# + filename: 00-boot-conf -# -apt_conf: - - # Creates an apt proxy configuration in /etc/apt/apt.conf.d/01-proxy - - filename: "01-proxy" - content: | - Acquire::http::Proxy "http://proxy.example.org:3142/ubuntu"; - - # Add the following line to /etc/apt/apt.conf.d/00-boot-conf - # (run debconf at a critical priority) - - content: | - DPkg::Pre-Install-Pkgs:: "/usr/sbin/dpkg-preconfigure --apt -p critical|| true"; - diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 24b4b36c..bcfd7917 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -53,6 +53,9 @@ apt_mirror_search: apt_mirror_search_dns: False # apt_proxy (configure Acquire::HTTP::Proxy) +# 'apt_http_proxy' is an alias for 'apt_proxy'. +# Also, available are 'apt_ftp_proxy' and 'apt_https_proxy'. +# These affect Acquire::FTP::Proxy and Acquire::HTTPS::Proxy respectively apt_proxy: http://my.apt.proxy:3128 # apt_pipelining (configure Acquire::http::Pipeline-Depth) -- cgit v1.2.3 From b4c0bde830fbe56a0d59a68f4ed97a2b6cbdece3 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 6 Aug 2013 07:18:45 +0100 Subject: remove unused LOG --- cloudinit/config/cc_apt_configure.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 0c26050a..73733f3d 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -25,8 +25,6 @@ from cloudinit import log as logging from cloudinit import templater from cloudinit import util -LOG = logging.getLogger(__name__) - distros = ['ubuntu', 'debian'] PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" -- cgit v1.2.3 From 9bdb05a72710cf20f86176c88ece537d5bb11370 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 6 Aug 2013 11:23:00 +0100 Subject: add tests for apply_apt_config --- .../test_handler/test_handler_apt_configure.py | 109 +++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 tests/unittests/test_handler/test_handler_apt_configure.py diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py new file mode 100644 index 00000000..07267dbd --- /dev/null +++ b/tests/unittests/test_handler/test_handler_apt_configure.py @@ -0,0 +1,109 @@ +from mocker import MockerTestCase + +from cloudinit import cloud +from cloudinit import util + +from cloudinit.config import cc_apt_configure + +import errno +import logging +import os +import re + + +class TestAptProxyConfig(MockerTestCase): + def setUp(self): + super(TestAptProxyConfig, self).setUp() + self.tmp = self.makeDir() + self.pfile = os.path.join(self.tmp, "proxy.cfg") + self.cfile = os.path.join(self.tmp, "config.cfg") + + def _search_apt_config(self, contents, ptype, value): + print( + r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value), + contents, "flags=re.IGNORECASE") + return(re.search( + r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value), + contents, flags=re.IGNORECASE)) + + def test_apt_proxy_written(self): + cfg = {'apt_proxy': 'myproxy'} + cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) + + self.assertTrue(os.path.isfile(self.pfile)) + self.assertFalse(os.path.isfile(self.cfile)) + + contents = str(util.read_file_or_url(self.pfile)) + self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) + + def test_apt_http_proxy_written(self): + cfg = {'apt_http_proxy': 'myproxy'} + cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) + 
+ self.assertTrue(os.path.isfile(self.pfile)) + self.assertFalse(os.path.isfile(self.cfile)) + + contents = str(util.read_file_or_url(self.pfile)) + self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) + + def test_apt_all_proxy_written(self): + cfg = {'apt_http_proxy': 'myproxy_http_proxy', + 'apt_https_proxy': 'myproxy_https_proxy', + 'apt_ftp_proxy': 'myproxy_ftp_proxy'} + + values = {'http': cfg['apt_http_proxy'], + 'https': cfg['apt_https_proxy'], + 'ftp': cfg['apt_ftp_proxy'], + } + + cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) + + self.assertTrue(os.path.isfile(self.pfile)) + self.assertFalse(os.path.isfile(self.cfile)) + + contents = str(util.read_file_or_url(self.pfile)) + + for ptype, pval in values.iteritems(): + self.assertTrue(self._search_apt_config(contents, ptype, pval)) + + def test_proxy_deleted(self): + util.write_file(self.cfile, "content doesnt matter") + cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile) + self.assertFalse(os.path.isfile(self.pfile)) + self.assertFalse(os.path.isfile(self.cfile)) + + def test_proxy_replaced(self): + util.write_file(self.cfile, "content doesnt matter") + cc_apt_configure.apply_apt_config({'apt_proxy': "foo"}, + self.pfile, self.cfile) + self.assertTrue(os.path.isfile(self.pfile)) + contents = str(util.read_file_or_url(self.pfile)) + self.assertTrue(self._search_apt_config(contents, "http", "foo")) + + def test_config_written(self): + payload = 'this is my apt config' + cfg = {'apt_config': payload} + + cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) + + self.assertTrue(os.path.isfile(self.cfile)) + self.assertFalse(os.path.isfile(self.pfile)) + + self.assertEqual(str(util.read_file_or_url(self.cfile)), payload) + + def test_config_replaced(self): + util.write_file(self.pfile, "content doesnt matter") + cc_apt_configure.apply_apt_config({'apt_config': "foo"}, + self.pfile, self.cfile) + self.assertTrue(os.path.isfile(self.cfile)) + self.assertEqual(str(util.read_file_or_url(self.cfile)), "foo") + + def test_config_deleted(self): + # if no 'apt_config' is provided, delete any previously written file + util.write_file(self.pfile, "content doesnt matter") + cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile) + self.assertFalse(os.path.isfile(self.pfile)) + self.assertFalse(os.path.isfile(self.cfile)) + + +# vi: ts=4 expandtab -- cgit v1.2.3 From 58cfc129165d10dc00c7dc2bdd1422a43f270443 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 6 Aug 2013 11:31:49 +0100 Subject: remove unused import --- cloudinit/config/cc_apt_configure.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 73733f3d..5a407016 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -21,7 +21,6 @@ import glob import os -from cloudinit import log as logging from cloudinit import templater from cloudinit import util -- cgit v1.2.3 From 13566b6833e9bca2a5c1bbe9ad7639089f170c4a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 6 Aug 2013 11:32:04 +0100 Subject: remove unused imports --- tests/unittests/test_handler/test_handler_apt_configure.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py index 07267dbd..22c0db80 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure.py +++ b/tests/unittests/test_handler/test_handler_apt_configure.py @@ 
-5,8 +5,6 @@ from cloudinit import util from cloudinit.config import cc_apt_configure -import errno -import logging import os import re -- cgit v1.2.3 From 2e4d57b867b71831270655956d06a8e14793a8f3 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 6 Aug 2013 11:36:30 +0100 Subject: fix pep8 and pylint warnings --- cloudinit/distros/rhel.py | 2 -- cloudinit/distros/sles.py | 2 -- tests/unittests/test_datasource/test_azure.py | 1 - 3 files changed, 5 deletions(-) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index ece1a5ff..30195384 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -20,8 +20,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import os - from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py index 92a1c307..f2ac4efc 100644 --- a/cloudinit/distros/sles.py +++ b/cloudinit/distros/sles.py @@ -18,8 +18,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import os - from cloudinit import distros from cloudinit.distros.parsers.hostname import HostnameConf diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 4cd3f213..06f8a5d2 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -249,7 +249,6 @@ class TestAzureDataSource(MockerTestCase): def test_apply_bounce_call_1(self): # hostname needs to get through to apply_hostname_bounce - mydata = "FOOBAR" odata = {'HostName': 'my-random-hostname'} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} -- cgit v1.2.3 From cc0b78533dd1f863901119c420f5779a66396d27 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Aug 2013 21:31:54 -0400 Subject: cc_growpart: prefer growpart over parted 'resizepart' the resizepart code was not functional. We will re-favor it later under bug 1212492. For now, we'll just favor the 'growpart' resizer. Both will be found in Ubuntu cloud images. 
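For reference, the argument-order fix amounts to the following (a sketch
mirroring the hunk below; names are those of ResizeParted.resize in
cc_growpart.py):

    from cloudinit import util

    def resize_with_parted(diskdev, partnum):
        # device comes first, then the subcommand:
        #   parted <disk> resizepart <partnum>
        util.subp(["parted", diskdev, "resizepart", partnum])
        # the old, non-functional argument order was:
        #   util.subp(["parted", "resizepart", diskdev, partnum])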
LP: #1212444 --- cloudinit/config/cc_growpart.py | 6 ++++-- tests/unittests/test_handler/test_handler_growpart.py | 3 +++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index ba6c58af..2d54aabf 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -96,7 +96,7 @@ class ResizeParted(object): def resize(self, diskdev, partnum, partdev): before = get_size(partdev) try: - util.subp(["parted", "resizepart", diskdev, partnum]) + util.subp(["parted", diskdev, "resizepart", partnum]) except util.ProcessExecutionError as e: raise ResizeFailedException(e) @@ -272,4 +272,6 @@ def handle(_name, cfg, _cloud, log, _args): else: log.debug("'%s' %s: %s" % (entry, action, msg)) -RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart)) +# LP: 1212444 FIXME re-order and favor ResizeParted +#RESIZERS = (('growpart', ResizeGrowPart),) +RESIZERS = (('growpart', ResizeGrowPart), ('parted', ResizeParted)) diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index b1b872b0..c0497e08 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -9,6 +9,7 @@ import errno import logging import os import re +import unittest # growpart: # mode: auto # off, on, auto, 'growpart', 'parted' @@ -121,6 +122,7 @@ class TestConfig(MockerTestCase): # Order must be correct self.mocker.order() + @unittest.skip("until LP: #1212444 fixed") def test_no_resizers_auto_is_fine(self): subp = self.mocker.replace(util.subp, passthrough=False) subp(['parted', '--help'], env={'LANG': 'C'}) @@ -142,6 +144,7 @@ class TestConfig(MockerTestCase): self.assertRaises(ValueError, self.handle, self.name, config, self.cloud_init, self.log, self.args) + @unittest.skip("until LP: #1212444 fixed") def test_mode_auto_prefers_parted(self): subp = self.mocker.replace(util.subp, passthrough=False) subp(['parted', '--help'], env={'LANG': 'C'}) -- cgit v1.2.3 From e668da729a0f9cd5d93d909a9b44d74cf6925dd5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 15 Aug 2013 13:16:01 -0400 Subject: do not set 'password', but set 'passwd' to crypt'd value 'password' was the wrong key. It should have been setting the default user's "plain_text_password". Instead of doing that, though, we're encrypting the value and putting it in 'passwd', which will then be passed on to useradd. The key value in doing this is that the plain text password will not be stored in obj.pkl. (admittedly it is still in plain text in the ovf-env.xml file). --- cloudinit/sources/DataSourceAzure.py | 7 ++++++- tests/unittests/test_datasource/test_azure.py | 11 ++++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 1a74de21..7ec622bf 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -17,6 +17,7 @@ # along with this program. If not, see . 
import base64 +import crypt import os import os.path import time @@ -424,7 +425,7 @@ def read_azure_ovf(contents): if username: defuser['name'] = username if password: - defuser['password'] = password + defuser['passwd'] = encrypt_pass(password) defuser['lock_passwd'] = False if defuser: @@ -436,6 +437,10 @@ def read_azure_ovf(contents): return (md, ud, cfg) +def encrypt_pass(password, salt_id="$6$"): + return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) + + def list_possible_azure_ds_devs(): # return a sorted list of devices that might have a azure datasource devlist = [] diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 06f8a5d2..1ca6a79d 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -2,6 +2,7 @@ from cloudinit import helpers from cloudinit.sources import DataSourceAzure from tests.unittests.helpers import populate_dir +import crypt import base64 from mocker import MockerTestCase import os @@ -207,11 +208,15 @@ class TestAzureDataSource(MockerTestCase): self.assertTrue('default_user' in dsrc.cfg['system_info']) defuser = dsrc.cfg['system_info']['default_user'] - # default user shoudl be updated for password and username - # and should not be locked. + # default user should be updated username and should not be locked. self.assertEqual(defuser['name'], odata['UserName']) - self.assertEqual(defuser['password'], odata['UserPassword']) self.assertFalse(defuser['lock_passwd']) + # passwd is crypt formated string $id$salt$encrypted + # encrypting plaintext with salt value of everything up to final '$' + # should equal that after the '$' + pos = defuser['passwd'].rfind("$") + 1 + self.assertEqual(defuser['passwd'], + crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos])) def test_userdata_found(self): mydata = "FOOBAR" -- cgit v1.2.3 From e23861e5a193377023e55da6234e23acd63a521a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 15 Aug 2013 13:21:40 -0400 Subject: pylint fix --- tests/unittests/test_handler/test_handler_apt_configure.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py index 22c0db80..203dd2aa 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure.py +++ b/tests/unittests/test_handler/test_handler_apt_configure.py @@ -1,6 +1,5 @@ from mocker import MockerTestCase -from cloudinit import cloud from cloudinit import util from cloudinit.config import cc_apt_configure -- cgit v1.2.3 From e683ab2baa3e67614edcd409122bd1aec99737e0 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 20 Aug 2013 09:56:25 -0600 Subject: Fixed no_base64_decode settings --- cloudinit/sources/DataSourceSmartOS.py | 26 ++++++++++++++++++++----- doc/examples/cloud-config-datasources.txt | 2 +- doc/sources/smartos/README.rst | 12 ++++++------ tests/unittests/test_datasource/test_smartos.py | 8 ++++++++ 4 files changed, 36 insertions(+), 12 deletions(-) diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index e0bb871c..1cf9e4f0 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -35,7 +35,7 @@ import os import os.path import serial - +DS_NAME = 'SmartOS' DEF_TTY_LOC = '/dev/ttyS1' DEF_TTY_TIMEOUT = 60 LOG = logging.getLogger(__name__) @@ -60,13 +60,14 @@ class DataSourceSmartOS(sources.DataSource): sources.DataSource.__init__(self, sys_cfg, 
                                            distro, paths)
         self.seed_dir = os.path.join(paths.seed_dir, 'sdc')
         self.is_smartdc = None
+        self.base_64_encoded = []
         self.seed = self.sys_cfg.get("serial_device", DEF_TTY_LOC)
         self.all_base64 = self.sys_cfg.get("decode_base64", False)
-        self.base_64_encoded = []
-        self.smartos_no_base64 = self.sys_cfg.get("no_base64_decode",
-                                                  SMARTOS_NO_BASE64)
         self.seed_timeout = self.sys_cfg.get("serial_timeout",
                                              DEF_TTY_TIMEOUT)
+        self.smartos_no_base64 = SMARTOS_NO_BASE64
+        if 'no_base64_decode' in self.ds_cfg:
+            self.smartos_no_base64 = self.ds_cfg['no_base64_decode']
 
     def __str__(self):
         root = sources.DataSource.__str__(self)
@@ -137,10 +138,25 @@ class DataSourceSmartOS(sources.DataSource):
     def get_instance_id(self):
         return self.metadata['instance-id']
 
+    def not_b64_var(self, var):
+        """Return true if value is read as b64."""
+        if var in self.smartos_no_base64 or \
+                not self.all_base64:
+            return True
+        return False
+
+    def is_b64_var(self, var):
+        """Return true if value is read as b64."""
+        if self.all_base64 or (
+                var not in self.smartos_no_base64 and
+                var in self.base_64_encoded):
+            return True
+        return False
+
 
 def get_serial(seed_device, seed_timeout):
     """This is replaced in unit testing, allowing us to replace
-        serial.Serial with a mocked class
+        serial.Serial with a mocked class.
 
     The timeout value of 60 seconds should never be hit. The value
     is taken from SmartOS own provisioning tools. Since we are reading
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index 6544448e..6ec0d57e 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -55,5 +55,5 @@ datasource:
   # Smart OS datasource works over a serial console interacting with
   # a server on the other end. By default, the second serial console is the
   # device. SmartOS also uses a serial timeout of 60 seconds.
-  serial device: /dev/ttyS1
+  serial_device: /dev/ttyS1
   serial timeout: 60
diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst
index 96310857..ba90e7af 100644
--- a/doc/sources/smartos/README.rst
+++ b/doc/sources/smartos/README.rst
@@ -12,8 +12,7 @@
 serial console. On Linux, this is /dev/ttyS1. The data is provided via a
 simple protocol: a client queries for a value, and the console responds
 with the status and, if "SUCCESS", returns data until a single ".\n".
 
-The format is lossy. As such, new versions of the SmartOS tooling will include
-support for base64 encoded data.
+New versions of the SmartOS tooling will include support for base64 encoded data.
 
 Userdata
 --------
@@ -48,15 +47,16 @@ always supersede any user-script data. This is for consistency.
 
 base64
 ------
 
-In order to provide a lossy format, all data except for:
+The following are excempt from base64 encoding, owing to the fact that they
+are provided by SmartOS:
   * root_authorized_keys
   * enable_motd_sys_info
   * iptables_disable
 
 This means that user-script and user-data as well as other values can be
-base64 encoded to provide a lossy format. Since Cloud-init can only guess
-as to whether or not something is truly base64 encoded, the following
-meta-data keys are hints as to whether or not to base64 decode something:
+base64 encoded. Since Cloud-init can only guess as to whether or not something
+is truly base64 encoded, the following meta-data keys are hints as to whether
+or not to base64 decode something:
   * decode_base64: Except for excluded keys, attempt to base64 decode
     the values. If the value fails to decode properly, it will be
     returned in its text form
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index ae621433..b9b3a479 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -36,6 +36,7 @@ mock_returns = {
     'enable_motd_sys_info': None,
     'system_uuid': str(uuid.uuid4()),
     'smartdc': 'smartdc',
+    'test-var1': 'some data',
     'user-data': """
 #!/bin/sh
 /bin/true
@@ -156,6 +157,13 @@ class TestSmartOSDataSource(MockerTestCase):
         self.assertTrue(ret)
         self.assertTrue(dsrc.is_smartdc)
 
+    def test_no_base64(self):
+        sys_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
+        dsrc = self._get_ds(sys_cfg=sys_cfg)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertTrue(dsrc.not_b64_var('test-var'))
+
     def test_uuid(self):
         dsrc = self._get_ds()
         ret = dsrc.get_data()
-- cgit v1.2.3

From 7af11ba50c8311ceb545b830716c78929079a0cd Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Tue, 20 Aug 2013 16:00:09 -0400
Subject: fix bad arguments to subp inside of apply_hostname_bounce

This simply invokes subp correctly through util.log_time. The argument
to subp is named 'args', not 'command'.

LP: #1214541
---
 cloudinit/sources/DataSourceAzure.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 7ec622bf..66d7728b 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -209,7 +209,7 @@ def apply_hostname_bounce(hostname, policy, interface, command,
     # capture=False, see comments in bug 1202758 and bug 1206164.
     util.log_time(logfunc=LOG.debug, msg="publishing hostname",
                   get_uptime=True, func=util.subp,
-                  kwargs={'command': command, 'shell': shell, 'capture': False,
+                  kwargs={'args': command, 'shell': shell, 'capture': False,
                           'env': env})
-- cgit v1.2.3

From 0a667454289a788a6f406e66d78a34c7ec914daa Mon Sep 17 00:00:00 2001
From: Ben Howard
Date: Fri, 23 Aug 2013 09:10:30 -0600
Subject: Fixed some typos. Change decode_base64 from sys_cfg to ds_cfg

---
 cloudinit/sources/DataSourceSmartOS.py | 5 ++++-
 doc/sources/smartos/README.rst         | 6 +++---
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 1cf9e4f0..45f03a7e 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -62,9 +62,12 @@ class DataSourceSmartOS(sources.DataSource):
         self.is_smartdc = None
         self.base_64_encoded = []
         self.seed = self.sys_cfg.get("serial_device", DEF_TTY_LOC)
-        self.all_base64 = self.sys_cfg.get("decode_base64", False)
         self.seed_timeout = self.sys_cfg.get("serial_timeout",
                                              DEF_TTY_TIMEOUT)
+        self.all_base64 = False
+        if 'decode_base64' in self.ds_cfg:
+            self.all_base64 = self.ds_cfg['decode_base64']
+
         self.smartos_no_base64 = SMARTOS_NO_BASE64
         if 'no_base64_decode' in self.ds_cfg:
             self.smartos_no_base64 = self.ds_cfg['no_base64_decode']
diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst
index ba90e7af..8f72fa0f 100644
--- a/doc/sources/smartos/README.rst
+++ b/doc/sources/smartos/README.rst
@@ -35,7 +35,7 @@ user-script
 
 SmartOS traditionally supports sending over a user-script for execution at the
 rc.local level. Cloud-init supports running user-scripts as if they were
-cloud-init user-data. In this sense, anything with a shell interpetter
+cloud-init user-data. In this sense, anything with a shell interpreter
 directive will run.
 
 user-data and user-script
@@ -47,7 +47,7 @@ always supersede any user-script data. This is for consistency.
 
 base64
 ------
 
-The following are excempt from base64 encoding, owing to the fact that they
+The following are exempt from base64 encoding, owing to the fact that they
 are provided by SmartOS:
   * root_authorized_keys
   * enable_motd_sys_info
@@ -63,4 +63,4 @@ or not to base64 decode something:
   * base_64_encoded: A comma delimited list of which values are base64
     encoded.
   * no_base64_decode: This is a configuration setting (i.e. /etc/cloud/cloud.cfg.d)
-    that sets which values should not be base64 decoded.
+    that sets which values should not be base64 decoded.
-- cgit v1.2.3

From 10c8ec1e5c1b16572a38afd08ee794d28c450054 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Sat, 24 Aug 2013 00:07:35 -0400
Subject: changes to behavior on specifying keys.

The most likely end user operation (or at least a valid one) for
base64 encoding would be to encode the user-data, but leave all other
values as plaintext. In order to facilitate that, the user can simply
add:
  b64-user-data=true
to indicate that user-data is base64 encoded.

Other changes here are to change the cloud-config and metadata keynames
that are used.
  base64_all = boolean(True)
  base64_keys = [list, of, keys]

Fixed up tests to accommodate.
---
 cloudinit/sources/DataSourceSmartOS.py          |  94 ++++++---------
 doc/examples/cloud-config-datasources.txt       |  10 +-
 doc/sources/smartos/README.rst                  |  16 ++-
 tests/unittests/test_datasource/test_smartos.py | 153 ++++++++++++++----------
 4 files changed, 149 insertions(+), 124 deletions(-)

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 45f03a7e..d348d20b 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -35,7 +35,6 @@ import os
 import os.path
 import serial
 
-DS_NAME = 'SmartOS'
 DEF_TTY_LOC = '/dev/ttyS1'
 DEF_TTY_TIMEOUT = 60
 LOG = logging.getLogger(__name__)
@@ -51,6 +50,7 @@ SMARTOS_ATTRIB_MAP = {
 }
 
 # These are values which will never be base64 encoded.
+# They come from the cloud platform, not user
 SMARTOS_NO_BASE64 = ['root_authorized_keys', 'motd_sys_info',
                      'iptables_disable']
 
@@ -60,17 +60,13 @@ class DataSourceSmartOS(sources.DataSource):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.seed_dir = os.path.join(paths.seed_dir, 'sdc')
         self.is_smartdc = None
-        self.base_64_encoded = []
-        self.seed = self.sys_cfg.get("serial_device", DEF_TTY_LOC)
-        self.seed_timeout = self.sys_cfg.get("serial_timeout",
-                                             DEF_TTY_TIMEOUT)
-        self.all_base64 = False
-        if 'decode_base64' in self.ds_cfg:
-            self.all_base64 = self.ds_cfg['decode_base64']
-
-        self.smartos_no_base64 = SMARTOS_NO_BASE64
-        if 'no_base64_decode' in self.ds_cfg:
-            self.smartos_no_base64 = self.ds_cfg['no_base64_decode']
+
+        self.seed = self.ds_cfg.get("serial_device", DEF_TTY_LOC)
+        self.seed_timeout = self.ds_cfg.get("serial_timeout", DEF_TTY_TIMEOUT)
+        self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode',
+                                                 SMARTOS_NO_BASE64)
+        self.b64_keys = self.ds_cfg.get('base64_keys', [])
+        self.b64_all = self.ds_cfg.get('base64_all', False)
 
     def __str__(self):
         root = sources.DataSource.__str__(self)
@@ -92,38 +88,22 @@ class DataSourceSmartOS(sources.DataSource):
 
         system_uuid, system_type = dmi_info
         if 'smartdc' not in system_type.lower():
-            LOG.debug("Host is not on SmartOS")
+            LOG.debug("Host is not on SmartOS. 
system_type=%s", system_type) return False self.is_smartdc = True md['instance-id'] = system_uuid - self.base_64_encoded = query_data('base_64_enocded', - self.seed, - self.seed_timeout, - strip=True) - if self.base_64_encoded: - self.base_64_encoded = str(self.base_64_encoded).split(',') - else: - self.base_64_encoded = [] + b64_keys = self.query('base64_keys', strip=True, b64=False) + if b64_keys is not None: + self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] - if not self.all_base64: - self.all_base64 = util.is_true(query_data('meta_encoded_base64', - self.seed, - self.seed_timeout, - strip=True)) + b64_all = self.query('base64_all', strip=True, b64=False) + if b64_all is not None: + self.b64_all = util.is_true(b64_all) for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems(): smartos_noun, strip = attribute - - b64encoded = False - if self.all_base64 and \ - (smartos_noun not in self.smartos_no_base64 and \ - ci_noun not in self.smartos_no_base64): - b64encoded = True - - md[ci_noun] = query_data(smartos_noun, self.seed, - self.seed_timeout, strip=strip, - b64encoded=b64encoded) + md[ci_noun] = self.query(smartos_noun, strip=strip) if not md['local-hostname']: md['local-hostname'] = system_uuid @@ -141,20 +121,16 @@ class DataSourceSmartOS(sources.DataSource): def get_instance_id(self): return self.metadata['instance-id'] - def not_b64_var(self, var): - """Return true if value is read as b64.""" - if var in self.smartos_no_base64 or \ - not self.all_base64: - return True - return False + def query(self, noun, strip=False, default=None, b64=None): + if b64 is None: + if noun in self.smartos_no_base64: + b64 = False + elif self.b64_all or noun in self.b64_keys: + b64 = True - def is_b64_var(self, var): - """Return true if value is read as b64.""" - if self.all_base64 or ( - var not in self.smartos_no_base64 and - var in self.base_64_encoded): - return True - return False + return query_data(noun=noun, strip=strip, seed_device=self.seed, + seed_timeout=self.seed_timeout, default=default, + b64=b64) def get_serial(seed_device, seed_timeout): @@ -176,7 +152,8 @@ def get_serial(seed_device, seed_timeout): return ser -def query_data(noun, seed_device, seed_timeout, strip=False, b64encoded=False): +def query_data(noun, seed_device, seed_timeout, strip=False, default=None, + b64=None): """Makes a request to via the serial console via "GET " In the response, the first line is the status, while subsequent lines @@ -200,7 +177,7 @@ def query_data(noun, seed_device, seed_timeout, strip=False, b64encoded=False): if 'SUCCESS' not in status: ser.close() - return None + return default while not eom_found: m = ser.readline() @@ -211,18 +188,23 @@ def query_data(noun, seed_device, seed_timeout, strip=False, b64encoded=False): ser.close() + if b64 is None: + b64 = query_data('b64-%s' % noun, seed_device=seed_device, + seed_timeout=seed_timeout, b64=False, + default=False, strip=True) + b64 = util.is_true(b64) + resp = None - if not strip: - resp = "".join(response) - elif b64encoded: + if b64 or strip: resp = "".join(response).rstrip() else: - resp = "".join(response).rstrip() + resp = "".join(response) - if b64encoded: + if b64: try: return base64.b64decode(resp) except TypeError: + LOG.warn("Failed base64 decoding key '%s'", noun) return resp return resp diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index 6ec0d57e..65a3cdf5 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -56,4 
+56,12 @@ datasource:
   # a server on the other end. By default, the second serial console is the
   # device. SmartOS also uses a serial timeout of 60 seconds.
   serial_device: /dev/ttyS1
-  serial timeout: 60
+  serial_timeout: 60
+
+  # a list of keys that will not be base64 decoded even if base64_all
+  no_base64_decode: ['root_authorized_keys', 'motd_sys_info',
+                     'iptables_disable']
+  # a plaintext, comma delimited list of keys whose values are b64 encoded
+  base64_keys: []
+  # a boolean indicating that all keys not in 'no_base64_decode' are encoded
+  base64_all: False
diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst
index 8f72fa0f..fd4e496d 100644
--- a/doc/sources/smartos/README.rst
+++ b/doc/sources/smartos/README.rst
@@ -53,14 +53,20 @@ are provided by SmartOS:
   * enable_motd_sys_info
   * iptables_disable
 
+This list can be changed through system config of variable 'no_base64_decode'.
+
 This means that user-script and user-data as well as other values can be
 base64 encoded. Since Cloud-init can only guess as to whether or not something
 is truly base64 encoded, the following meta-data keys are hints as to whether
 or not to base64 decode something:
-  * decode_base64: Except for excluded keys, attempt to base64 decode
+  * base64_all: Except for excluded keys, attempt to base64 decode
     the values. If the value fails to decode properly, it will be
     returned in its text form
-  * base_64_encoded: A comma delimited list of which values are base64
-    encoded.
-  * no_base64_decode: This is a configuration setting (i.e. /etc/cloud/cloud.cfg.d)
-    that sets which values should not be base64 decoded.
+  * base64_keys: A comma delimited list of which keys are base64 encoded.
+  * b64-<key>:
+    for any key, if there exists an entry in the metadata for 'b64-<key>',
+    then 'b64-<key>' is expected to be a plaintext boolean indicating whether
+    or not its value is encoded.
+  * no_base64_decode: This is a configuration setting
+    (i.e. /etc/cloud/cloud.cfg.d) that sets which values should not be
+    base64 decoded.
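Condensed, the decode decision that the new query()/query_data() pair
implements resolves in the order sketched below (an illustrative summary,
not the code itself; the function name is invented):

    def should_b64_decode(noun, no_base64_decode, b64_all, b64_keys, hint):
        # 1. keys in no_base64_decode are never decoded
        if noun in no_base64_decode:
            return False
        # 2. base64_all, or membership in base64_keys, forces decoding
        if b64_all or noun in b64_keys:
            return True
        # 3. otherwise fall back to the per-key 'b64-<noun>' metadata hint
        return str(hint).lower() in ('true', '1', 'on', 'yes')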
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index b9b3a479..f53715b0 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -29,20 +29,17 @@ from cloudinit.sources import DataSourceSmartOS from mocker import MockerTestCase import uuid -mock_returns = { +MOCK_RETURNS = { 'hostname': 'test-host', 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', 'disable_iptables_flag': None, 'enable_motd_sys_info': None, - 'system_uuid': str(uuid.uuid4()), - 'smartdc': 'smartdc', 'test-var1': 'some data', - 'user-data': """ -#!/bin/sh -/bin/true -""", + 'user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), } +DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc') + class MockSerial(object): """Fake a serial terminal for testing the code that @@ -50,14 +47,13 @@ class MockSerial(object): port = None - def __init__(self, b64encode=False): + def __init__(self, mockdata): self.last = None self.last = None self.new = True self.count = 0 self.mocked_out = [] - self.b64encode = b64encode - self.b64excluded = DataSourceSmartOS.SMARTOS_NO_BASE64 + self.mockdata = mockdata def open(self): return True @@ -75,12 +71,12 @@ class MockSerial(object): def readline(self): if self.new: self.new = False - if self.last in mock_returns: + if self.last in self.mockdata: return 'SUCCESS\n' else: return 'NOTFOUND %s\n' % self.last - if self.last in mock_returns: + if self.last in self.mockdata: if not self.mocked_out: self.mocked_out = [x for x in self._format_out()] print self.mocked_out @@ -90,21 +86,16 @@ class MockSerial(object): return self.mocked_out[self.count - 1] def _format_out(self): - if self.last in mock_returns: - _mret = mock_returns[self.last] - if self.b64encode and \ - self.last not in self.b64excluded: - yield base64.b64encode(_mret) - - else: - try: - for l in _mret.splitlines(): - yield "%s\n" % l.rstrip() - except: - yield "%s\n" % _mret.rstrip() + if self.last in self.mockdata: + _mret = self.mockdata[self.last] + try: + for l in _mret.splitlines(): + yield "%s\n" % l.rstrip() + except: + yield "%s\n" % _mret.rstrip() - yield '\n' yield '.' 
+ yield '\n' class TestSmartOSDataSource(MockerTestCase): @@ -126,26 +117,36 @@ class TestSmartOSDataSource(MockerTestCase): ret = apply_patches(patches) self.unapply += ret - def _get_ds(self, b64encode=False, sys_cfg=None): + def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None): mod = DataSourceSmartOS + if mockdata is None: + mockdata = MOCK_RETURNS + + if dmi_data is None: + dmi_data = DMI_DATA_RETURN + def _get_serial(*_): - return MockSerial(b64encode=b64encode) + return MockSerial(mockdata) def _dmi_data(): - return mock_returns['system_uuid'], 'smartdc' + return dmi_data - if not sys_cfg: + if sys_cfg is None: sys_cfg = {} - data = {'sys_cfg': sys_cfg} + if ds_cfg is not None: + sys_cfg['datasource'] = sys_cfg.get('datasource', {}) + sys_cfg['datasource']['SmartOS'] = ds_cfg + self.apply_patches([(mod, 'get_serial', _get_serial)]) self.apply_patches([(mod, 'dmi_data', _dmi_data)]) - dsrc = mod.DataSourceSmartOS( - data.get('sys_cfg', {}), distro=None, paths=self.paths) + dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None, + paths=self.paths) return dsrc def test_seed(self): + # default seed should be /dev/ttyS1 dsrc = self._get_ds() ret = dsrc.get_data() self.assertTrue(ret) @@ -158,78 +159,106 @@ class TestSmartOSDataSource(MockerTestCase): self.assertTrue(dsrc.is_smartdc) def test_no_base64(self): - sys_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} - dsrc = self._get_ds(sys_cfg=sys_cfg) + ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} + dsrc = self._get_ds(ds_cfg=ds_cfg) ret = dsrc.get_data() self.assertTrue(ret) - self.assertTrue(dsrc.not_b64_var('test-var')) def test_uuid(self): - dsrc = self._get_ds() + dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(mock_returns['system_uuid'], - dsrc.metadata['instance-id']) + self.assertEquals(DMI_DATA_RETURN[0], dsrc.metadata['instance-id']) def test_root_keys(self): - dsrc = self._get_ds() + dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(mock_returns['root_authorized_keys'], + self.assertEquals(MOCK_RETURNS['root_authorized_keys'], dsrc.metadata['public-keys']) def test_hostname_b64(self): - dsrc = self._get_ds(b64encode=True) + dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(base64.b64encode(mock_returns['hostname']), + self.assertEquals(MOCK_RETURNS['hostname'], dsrc.metadata['local-hostname']) def test_hostname(self): - dsrc = self._get_ds() + dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(mock_returns['hostname'], + self.assertEquals(MOCK_RETURNS['hostname'], dsrc.metadata['local-hostname']) - def test_base64(self): - """This tests to make sure that SmartOS system key/value pairs - are not interpetted as being base64 encoded, while making - sure that the others are when 'decode_base64' is set""" - dsrc = self._get_ds(sys_cfg={'decode_base64': True}, - b64encode=True) + def test_base64_all(self): + # metadata provided base64_all of true + my_returns = MOCK_RETURNS.copy() + my_returns['base64_all'] = "true" + for k in ('hostname', 'user-data'): + my_returns[k] = base64.b64encode(my_returns[k]) + + dsrc = self._get_ds(mockdata=my_returns) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(mock_returns['hostname'], + self.assertEquals(MOCK_RETURNS['hostname'], dsrc.metadata['local-hostname']) - self.assertEquals("%s" % mock_returns['user-data'], + 
self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) - self.assertEquals(mock_returns['root_authorized_keys'], + self.assertEquals(MOCK_RETURNS['root_authorized_keys'], dsrc.metadata['public-keys']) - self.assertEquals(mock_returns['disable_iptables_flag'], + self.assertEquals(MOCK_RETURNS['disable_iptables_flag'], dsrc.metadata['iptables_disable']) - self.assertEquals(mock_returns['enable_motd_sys_info'], + self.assertEquals(MOCK_RETURNS['enable_motd_sys_info'], dsrc.metadata['motd_sys_info']) + def test_b64_userdata(self): + my_returns = MOCK_RETURNS.copy() + my_returns['b64-user-data'] = "true" + my_returns['b64-hostname'] = "true" + for k in ('hostname', 'user-data'): + my_returns[k] = base64.b64encode(my_returns[k]) + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) + self.assertEquals(MOCK_RETURNS['root_authorized_keys'], + dsrc.metadata['public-keys']) + + def test_b64_keys(self): + my_returns = MOCK_RETURNS.copy() + my_returns['base64_keys'] = 'hostname,ignored' + for k in ('hostname',): + my_returns[k] = base64.b64encode(my_returns[k]) + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) + def test_userdata(self): - dsrc = self._get_ds() + dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals("%s\n" % mock_returns['user-data'], - dsrc.userdata_raw) + self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) def test_disable_iptables_flag(self): - dsrc = self._get_ds() + dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(mock_returns['disable_iptables_flag'], + self.assertEquals(MOCK_RETURNS['disable_iptables_flag'], dsrc.metadata['iptables_disable']) def test_motd_sys_info(self): - dsrc = self._get_ds() + dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(mock_returns['enable_motd_sys_info'], + self.assertEquals(MOCK_RETURNS['enable_motd_sys_info'], dsrc.metadata['motd_sys_info']) -- cgit v1.2.3