From acdc3c45a3deefaf599627eee889d11299525c4c Mon Sep 17 00:00:00 2001
From: Andy Grimm
Date: Fri, 14 Sep 2012 11:50:11 -0400
Subject: Fix hostname derived from IP. (LP: 1050962)

---
 cloudinit/sources/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 3f611d44..6f126091 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -173,7 +173,7 @@ class DataSource(object):
         # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
         lhost = self.metadata['local-hostname']
         if util.is_ipv4(lhost):
-            toks = "ip-%s" % lhost.replace(".", "-")
+            toks = [ "ip-%s" % lhost.replace(".", "-") ]
         else:
             toks = lhost.split(".")
--
cgit v1.2.3
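
The one-character change above is easy to misread: callers later iterate over toks and join the pieces back together, so a bare string would be consumed character by character instead of as one name component. A minimal standalone sketch of the intended behavior (util.is_ipv4 is approximated here with a naive digit check):

    def hostname_toks(lhost):
        # An IPv4 'local-hostname' such as '10.0.0.5' must become the
        # single token 'ip-10-0-0-5'; anything else splits on dots.
        parts = lhost.split(".")
        if len(parts) == 4 and all(p.isdigit() for p in parts):
            return ["ip-%s" % lhost.replace(".", "-")]
        return parts

    assert hostname_toks("10.0.0.5") == ["ip-10-0-0-5"]
    assert hostname_toks("node1.example.com") == ["node1", "example", "com"]
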
From e8a10a41d22876d555084def823817337d9c2a80 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 20 Sep 2012 17:56:22 -0700
Subject: Use only util methods for reading/loading/appending/peeking at
 files

It is likely that we will soon add a new way of adjusting the root of
files read; it is also useful for debugging to track what is being
read/written in a central fashion.

---
 cloudinit/distros/__init__.py              |  3 +--
 cloudinit/sources/DataSourceAltCloud.py    | 19 ++++++++-----------
 cloudinit/sources/DataSourceConfigDrive.py | 20 +++++++++-----------
 cloudinit/sources/DataSourceMAAS.py        |  6 ++----
 cloudinit/sources/DataSourceOVF.py         |  5 ++---
 cloudinit/util.py                          | 10 ++++++++++
 6 files changed, 32 insertions(+), 31 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 3e9d934d..f6aa8d99 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -343,8 +343,7 @@ class Distro(object):
         else:
             try:
-                with open(sudo_file, 'a') as f:
-                    f.write(content)
+                util.append_file(sudo_file, content)
             except IOError as e:
                 util.logexc(LOG, "Failed to write %s" % sudo_file, e)
                 raise e

diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 69c376a5..d7e1204f 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -73,13 +73,11 @@ def read_user_data_callback(mount_dir):
     # First try deltacloud_user_data_file. On failure try user_data_file.
     try:
-        with open(deltacloud_user_data_file, 'r') as user_data_f:
-            user_data = user_data_f.read().strip()
-    except:
+        user_data = util.load_file(deltacloud_user_data_file).strip()
+    except IOError:
         try:
-            with open(user_data_file, 'r') as user_data_f:
-                user_data = user_data_f.read().strip()
-        except:
+            user_data = util.load_file(user_data_file).strip()
+        except IOError:
             util.logexc(LOG, ('Failed accessing user data file.'))
             return None
@@ -157,11 +155,10 @@ class DataSourceAltCloud(sources.DataSource):

         if os.path.exists(CLOUD_INFO_FILE):
             try:
-                cloud_info = open(CLOUD_INFO_FILE)
-                cloud_type = cloud_info.read().strip().upper()
-                cloud_info.close()
-            except:
-                util.logexc(LOG, 'Unable to access cloud info file.')
+                cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper()
+            except IOError:
+                util.logexc(LOG, 'Unable to access cloud info file at %s.',
+                            CLOUD_INFO_FILE)
                 return False
         else:
             cloud_type = self.get_cloud_type()

diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index b8154367..b477560c 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -227,19 +227,19 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
         found = False
         if os.path.isfile(fpath):
             try:
-                with open(fpath) as fp:
-                    data = fp.read()
-            except Exception as exc:
-                raise BrokenConfigDriveDir("failed to read: %s" % fpath)
+                data = util.load_file(fpath)
+            except IOError:
+                raise BrokenConfigDriveDir("Failed to read: %s" % fpath)
             found = True
         elif required:
-            raise NonConfigDriveDir("missing mandatory %s" % fpath)
+            raise NonConfigDriveDir("Missing mandatory path: %s" % fpath)

         if found and process:
             try:
                 data = process(data)
             except Exception as exc:
-                raise BrokenConfigDriveDir("failed to process: %s" % fpath)
+                raise BrokenConfigDriveDir(("Failed to process "
+                                            "path: %s") % fpath)

         if found:
             results[name] = data
@@ -255,8 +255,7 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
         # do not use os.path.join here, as content_path starts with /
         cpath = os.path.sep.join((source_dir, "openstack",
                                   "./%s" % item['content_path']))
-        with open(cpath) as fp:
-            return(fp.read())
+        return util.load_file(cpath)

     files = {}
     try:
@@ -270,7 +269,7 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
             if item:
                 results['network_config'] = read_content_path(item)
     except Exception as exc:
-        raise BrokenConfigDriveDir("failed to read file %s: %s" % (item, exc))
+        raise BrokenConfigDriveDir("Failed to read file %s: %s" % (item, exc))

     # to openstack, user can specify meta ('nova boot --meta=key=value') and
     # those will appear under metadata['meta'].
@@ -385,8 +384,7 @@ def get_previous_iid(paths):
     # hasn't declared itself found.
     fname = os.path.join(paths.get_cpath('data'), 'instance-id')
     try:
-        with open(fname) as fp:
-            return fp.read()
+        return util.load_file(fname)
     except IOError:
         return None

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index c568d365..d166e9e3 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -301,9 +301,7 @@ if __name__ == "__main__":
                  'token_secret': args.tsec, 'consumer_secret': args.csec}

         if args.config:
-            import yaml
-            with open(args.config) as fp:
-                cfg = yaml.safe_load(fp)
+            cfg = util.read_conf(args.config)
             if 'datasource' in cfg:
                 cfg = cfg['datasource']['MAAS']
             for key in creds.keys():
@@ -312,7 +310,7 @@ if __name__ == "__main__":

         def geturl(url, headers_cb):
             req = urllib2.Request(url, data=None, headers=headers_cb(url))
-            return(urllib2.urlopen(req).read())
+            return (urllib2.urlopen(req).read())

         def printurl(url, headers_cb):
             print "== %s ==\n%s\n" % (url, geturl(url, headers_cb))

diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 771e64eb..e90150c6 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -204,9 +204,8 @@ def transport_iso9660(require_iso=True):

         try:
             # See if we can read anything at all...??
-            with open(fullp, 'rb') as fp:
-                fp.read(512)
-        except:
+            util.peek_file(fullp, 512)
+        except IOError:
             continue

         try:

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 33da73eb..18000301 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -952,6 +952,12 @@ def find_devs_with(criteria=None, oformat='device',
     return entries


+def peek_file(fname, max_bytes):
+    LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
+    with open(fname, 'rb') as ifh:
+        return ifh.read(max_bytes)
+
+
 def load_file(fname, read_cb=None, quiet=False):
     LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
     ofh = StringIO()
@@ -1281,6 +1287,10 @@ def uptime():
     return uptime_str


+def append_file(path, content):
+    write_file(path, content, omode="ab", mode=None)
+
+
 def ensure_file(path, mode=0644):
     write_file(path, content='', omode="ab", mode=mode)
--
cgit v1.2.3
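
The two helpers introduced in util.py above are thin wrappers, and the datasources now call them instead of open(); a rough usage sketch (the paths are illustrative, only the peek_file/append_file signatures come from the diff):

    from cloudinit import util

    # Sniff the first 512 bytes of a device without reading the whole
    # thing, as DataSourceOVF now does when probing for an ISO9660 image.
    header = util.peek_file("/dev/sr0", 512)

    # Append through the central write path (write_file with omode="ab"),
    # as the sudoers handling in distros/__init__.py now does.
    util.append_file("/etc/sudoers", "#includedir /etc/sudoers.d\n")
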
From cde52cc8449d82d5bdce2fbb73516bee144e293c Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Mon, 24 Sep 2012 16:54:51 -0400
Subject: fix make pep8

---
 cloudinit/distros/fedora.py   | 2 +-
 cloudinit/patcher.py          | 1 +
 cloudinit/sources/__init__.py | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py
index 9f76a116..e7092dd8 100644
--- a/cloudinit/distros/fedora.py
+++ b/cloudinit/distros/fedora.py
@@ -28,5 +28,5 @@ LOG = logging.getLogger(__name__)

 class Distro(rhel.Distro):
-    distro_name = 'fedora'
+    distro_name = 'fedora'
     default_user = 'ec2-user'

diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py
index 8921a79a..fa140f04 100644
--- a/cloudinit/patcher.py
+++ b/cloudinit/patcher.py
@@ -39,6 +39,7 @@ def _patch_logging():
     # sys.stderr using a fallback logger
     fallback_handler = QuietStreamHandler(sys.stderr)
     fallback_handler.setFormatter(logging.Formatter(FALL_FORMAT))
+
     def handleError(self, record):
         try:
             fallback_handler.handle(record)

diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 6f126091..04083d0c 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -173,7 +173,7 @@ class DataSource(object):
         # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
         lhost = self.metadata['local-hostname']
         if util.is_ipv4(lhost):
-            toks = [ "ip-%s" % lhost.replace(".", "-") ]
+            toks = ["ip-%s" % lhost.replace(".", "-")]
         else:
             toks = lhost.split(".")
--
cgit v1.2.3

From 70cc7536f45a8d7052617ad88e2816291db0a309 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Mon, 24 Sep 2012 17:13:38 -0400
Subject: DataSourceMAAS: if an oauth request fails due to a 403, try
 updating the local time

In the event of a 403 (Unauthorized) in oauth, try to set an
'oauth_clockskew' variable. In future headers, use a timestamp created
from 'time.time() + self.oauth_clockskew'. The idea here is that if the
local time is bad (or even if the server time is bad) we will essentially
use something that should be similar to the remote clock.

This fixes LP: #978127.

LP: #978127
---
 cloudinit/sources/DataSourceMAAS.py | 43 +++++++++++++++++++++++++++++++++----
 cloudinit/url_helper.py             | 11 ++++++++--
 2 files changed, 48 insertions(+), 6 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index c568d365..581e9a4b 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -18,6 +18,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

+from email.utils import parsedate
 import errno
 import oauth.oauth as oauth
 import os
@@ -46,6 +47,7 @@ class DataSourceMAAS(sources.DataSource):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.base_url = None
         self.seed_dir = os.path.join(paths.seed_dir, 'maas')
+        self.oauth_clockskew = None

     def __str__(self):
         return "%s [%s]" % (util.obj_name(self), self.base_url)
@@ -95,11 +97,17 @@ class DataSourceMAAS(sources.DataSource):
             return {}

         consumer_secret = mcfg.get('consumer_secret', "")
+
+        timestamp = None
+        if self.oauth_clockskew:
+            timestamp = int(time.time()) + self.oauth_clockskew
+
         return oauth_headers(url=url,
                              consumer_key=mcfg['consumer_key'],
                              token_key=mcfg['token_key'],
                              token_secret=mcfg['token_secret'],
-                             consumer_secret=consumer_secret)
+                             consumer_secret=consumer_secret,
+                             timestamp=timestamp)

     def wait_for_metadata_service(self, url):
         mcfg = self.ds_cfg
@@ -124,7 +132,7 @@ class DataSourceMAAS(sources.DataSource):
         check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
         urls = [check_url]
         url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
-                                 timeout=timeout, status_cb=LOG.warn,
+                                 timeout=timeout,
+                                 exception_cb=self._except_cb,
                                  headers_cb=self.md_headers)

         if url:
@@ -135,6 +143,26 @@ class DataSourceMAAS(sources.DataSource):

         return bool(url)

+    def _except_cb(self, msg, exception):
+        if not (isinstance(exception, urllib2.HTTPError) and
+                exception.code == 403):
+            return
+        if 'date' not in exception.headers:
+            LOG.warn("date field not in 403 headers")
+            return
+
+        date = exception.headers['date']
+
+        try:
+            ret_time = time.mktime(parsedate(date))
+        except:
+            LOG.warn("failed to convert datetime '%s'")
+            return
+
+        self.oauth_clockskew = int(ret_time - time.time())
+        LOG.warn("set oauth clockskew to %d" % self.oauth_clockskew)
+        return
+

 def read_maas_seed_dir(seed_d):
     """
@@ -229,13 +257,20 @@ def check_seed_contents(content, seed):
     return (userdata, md)


-def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret):
+def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
+                  timestamp=None):
     consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
     token = oauth.OAuthToken(token_key, token_secret)
+
+    if timestamp is None:
+        ts = int(time.time())
+    else:
+        ts = timestamp
+
     params = {
         'oauth_version': "1.0",
         'oauth_nonce': oauth.generate_nonce(),
-        'oauth_timestamp': int(time.time()),
+        'oauth_timestamp': ts,
         'oauth_token': token.key,
         'oauth_consumer_key': consumer.key,
     }

diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 732d6aec..f3e3fd7e 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -136,7 +136,8 @@ def readurl(url, data=None, timeout=None,

 def wait_for_url(urls, max_wait=None, timeout=None,
-                 status_cb=None, headers_cb=None, sleep_time=1):
+                 status_cb=None, headers_cb=None, sleep_time=1,
+                 exception_cb=None):
     """
     urls: a list of urls to try
     max_wait: roughly the maximum time to wait before giving up
@@ -146,6 +147,8 @@ def wait_for_url(urls, max_wait=None, timeout=None,
     status_cb: call method with string message when a url is not available
     headers_cb: call method with single argument of url to get headers
                 for request.
+    exception_cb: call method with 2 arguments 'msg' (per status_cb) and
+                  'exception', the exception that occurred.

     the idea of this routine is to wait for the EC2 metdata service to
     come up. On both Eucalyptus and EC2 we have seen the case where
@@ -164,7 +167,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
     """
     start_time = time.time()

-    def log_status_cb(msg):
+    def log_status_cb(msg, exc=None):
         LOG.debug(msg)

     if status_cb is None:
@@ -196,8 +199,10 @@ def wait_for_url(urls, max_wait=None, timeout=None,
                 resp = readurl(url, headers=headers, timeout=timeout)
                 if not resp.contents:
                     reason = "empty response [%s]" % (resp.code)
+                    e = ValueError(reason)
                 elif not resp.ok():
                     reason = "bad status code [%s]" % (resp.code)
+                    e = ValueError(reason)
                 else:
                     return url
             except urllib2.HTTPError as e:
@@ -214,6 +219,8 @@ def wait_for_url(urls, max_wait=None, timeout=None,
                               time_taken, max_wait, reason)
                 status_cb(status_msg)
+                if exception_cb:
+                    exception_cb(msg=status_msg, exception=e)

             if timeup(max_wait, start_time):
                 break
--
cgit v1.2.3
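
The skew computation in _except_cb reduces to a few lines: parse the server's HTTP Date header and diff it against the local clock. A standalone sketch (the header value is a made-up example):

    from email.utils import parsedate
    import time

    def clockskew(date_header):
        # Seconds to add to our clock so future oauth timestamps
        # roughly agree with the server's clock.
        server_time = time.mktime(parsedate(date_header))
        return int(server_time - time.time())

    skew = clockskew("Mon, 24 Sep 2012 21:13:38 GMT")
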
From d285a0463b6d16487eb5859373ccfd27eaec8b90 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 28 Sep 2012 16:54:22 -0400
Subject: make DataSourceMAAS 'main()' use load_yaml

---
 cloudinit/sources/DataSourceMAAS.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 581e9a4b..c172150b 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -338,7 +338,7 @@ if __name__ == "__main__":
         if args.config:
             import yaml
             with open(args.config) as fp:
-                cfg = yaml.safe_load(fp)
+                cfg = util.load_yaml(fp.read())
             if 'datasource' in cfg:
                 cfg = cfg['datasource']['MAAS']
             for key in creds.keys():
--
cgit v1.2.3

From a28d7fe46cf8e3277a13c35c5dd0185f65ab1d0c Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Sun, 30 Sep 2012 09:20:59 -0400
Subject: [pylint]: remove unused import

---
 cloudinit/sources/DataSourceMAAS.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index c172150b..ec52d775 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -336,7 +336,6 @@ if __name__ == "__main__":
                  'token_secret': args.tsec, 'consumer_secret': args.csec}

         if args.config:
-            import yaml
             with open(args.config) as fp:
                 cfg = util.load_yaml(fp.read())
             if 'datasource' in cfg:
--
cgit v1.2.3

From f8b23b39bdf8753986df9ecf5948ffd8e8fdee74 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Mon, 1 Oct 2012 11:50:48 -0400
Subject: fix oauth time skew: the actual implementation was returning 401,
 not 403

This fixes (tested) bug 978127. The server was actually returning a 401,
not a 403; as such, the earlier fix was insufficient. This will now accept
either of those 2 error codes. I've also tested it by changing the clock
in the cloud-init upstart job with a stanza like the one below, and
verifying that we do see the problem and that it then resolves itself:

 pre-start script
    offset="10 minutes ago"
    past=$(date -R --date "$offset")
    date --set "$past" &&
       echo ===== "set date to $past [$offset]" ===== ||
       echo ===== "failed to set date to $past [$offset]" ====
 end script

LP: #978127
---
 ChangeLog                           | 4 ++++
 cloudinit/sources/DataSourceMAAS.py | 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/ChangeLog b/ChangeLog
index c5dcd418..cbfba6d0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,8 @@
 0.7.0:
+ - add a 'exception_cb' argument to 'wait_for_url'. If provided, this
+   method will be called back with the exception received and the message.
+ - utilize the 'exception_cb' above to modify the oauth timestamp in
+   DataSourceMAAS requests if a 401 or 403 is received. (LP: #978127)
 - catch signals and exit rather than stack tracing
 - if logging fails, enable a fallback logger by patching the logging module
 - do not 'start networking' in cloud-init-nonet, but add

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index ec52d775..e187aec9 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -145,10 +145,10 @@ class DataSourceMAAS(sources.DataSource):

     def _except_cb(self, msg, exception):
         if not (isinstance(exception, urllib2.HTTPError) and
-                exception.code == 403):
+                (exception.code == 403 or exception.code == 401)):
             return
         if 'date' not in exception.headers:
-            LOG.warn("date field not in 403 headers")
+            LOG.warn("date field not in %d headers" % exception.code)
             return

         date = exception.headers['date']
--
cgit v1.2.3
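
With the three commits above in place, a caller of wait_for_url can observe each failed attempt as it happens; a hedged sketch of the callback contract (the metadata URL is illustrative):

    from cloudinit import url_helper as uhelp

    def on_exception(msg, exception):
        # Invoked once per failed attempt with the status message and the
        # exception (e.g. a urllib2.HTTPError carrying .code and .headers).
        print("attempt failed: %s (%r)" % (msg, exception))

    url = uhelp.wait_for_url(urls=["http://169.254.169.254/latest/meta-data/"],
                             max_wait=120, timeout=50,
                             exception_cb=on_exception)
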
From ab1b27294ce852a5d67b230971a1c28c99940e50 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 4 Oct 2012 17:32:26 -0700
Subject: Ensure that for config drive we map 'hostname' to 'local-hostname'
 so that the modules work correctly with the cfgdrive style of data.

---
 cloudinit/sources/DataSourceConfigDrive.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index b8154367..b1cf942e 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -85,6 +85,16 @@ class DataSourceConfigDrive(sources.DataSource):
         md = results['metadata']
         md = util.mergedict(md, DEFAULT_METADATA)

+        # Perform some metadata 'fixups'
+        #
+        # OpenStack uses the 'hostname' key
+        # while most of cloud-init uses the metadata
+        # 'local-hostname' key instead so if it doesn't
+        # exist we need to make sure its copied over.
+        for (tgt, src) in [('local-hostname', 'hostname')]:
+            if tgt not in md and src in md:
+                md[tgt] = md[src]
+
         user_dsmode = results.get('dsmode', None)
         if user_dsmode not in VALID_DSMODES + (None,):
             LOG.warn("user specified invalid mode: %s" % user_dsmode)
--
cgit v1.2.3
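
The fixup is a plain key copy that never overwrites an existing value; applied to a typical OpenStack meta_data.json payload it behaves like this:

    md = {'hostname': 'server-460'}
    for (tgt, src) in [('local-hostname', 'hostname')]:
        if tgt not in md and src in md:
            md[tgt] = md[src]
    # md['local-hostname'] is now 'server-460'; a pre-existing
    # 'local-hostname' would have been left untouched.
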
From ffa19cbd80ba10453f0ef448c1c10dbcbf5be504 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 5 Oct 2012 13:38:54 -0700
Subject: Ensure that the config drive datasource attempts to translate the
 device name to an actual device, using logic that will try the ec2
 metadata (if available) or will try using 'blkid' to find a corresponding
 label.

LP: #1062540
---
 cloudinit/sources/DataSourceConfigDrive.py | 58 ++++++++++++++++++++++++++++++
 cloudinit/sources/DataSourceEc2.py         | 16 ---------
 cloudinit/sources/__init__.py              | 17 +++++++++
 3 files changed, 75 insertions(+), 16 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index b1cf942e..495eee82 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -48,6 +48,7 @@ class DataSourceConfigDrive(sources.DataSource):
         self.dsmode = 'local'
         self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
         self.version = None
+        self.ec2_metadata = None

     def __str__(self):
         mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode,
@@ -55,6 +56,62 @@ class DataSourceConfigDrive(sources.DataSource):
         mstr += "[source=%s]" % (self.source)
         return mstr

+    def _ec2_name_to_device(self, name):
+        if not self.ec2_metadata:
+            return None
+        bdm = self.ec2_metadata.get('block-device-mapping', {})
+        for (ent_name, device) in bdm.items():
+            if name == ent_name:
+                return device
+        return None
+
+    def _os_name_to_device(self, name):
+        device = None
+        try:
+            dev_entries = util.find_devs_with('LABEL=%s' % (name))
+            if dev_entries:
+                device = dev_entries[0]
+        except util.ProcessExecutionError:
+            pass
+        return device
+
+    def device_name_to_device(self, name):
+        # Translate a 'name' to a 'physical' device
+        if not name:
+            return None
+        # Try the ec2 mapping first
+        names = [name]
+        if name == 'root':
+            names.insert(0, 'ami')
+        if name == 'ami':
+            names.append('root')
+        device = None
+        for n in names:
+            device = self._ec2_name_to_device(n)
+            if device:
+                break
+        # Try the openstack way second
+        if not device:
+            for n in names:
+                device = self._os_name_to_device(n)
+                if device:
+                    break
+        # Ok give up...
+        if not device:
+            return None
+        # Ensure translated ok
+        if not device.startswith("/"):
+            device = "/dev/%s" % device
+        if os.path.exists(device):
+            return device
+        # Durn, try adjusting the mapping
+        remapped = self._remap_device(os.path.basename(device))
+        if remapped:
+            LOG.debug("Remapped device name %s => %s", device, remapped)
+            return remapped
+        # Really give up now
+        return None
+
     def get_data(self):
         found = None
         md = {}
@@ -143,6 +200,7 @@ class DataSourceConfigDrive(sources.DataSource):

         self.source = found
         self.metadata = md
+        self.ec2_metadata = results.get('ec2-metadata')
         self.userdata_raw = results.get('userdata')
         self.version = results['cfgdrive_ver']

diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index c7ad6d54..3686fa10 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -151,22 +151,6 @@ class DataSourceEc2(sources.DataSource):
             self.metadata_address = url2base.get(url)
         return bool(url)

-    def _remap_device(self, short_name):
-        # LP: #611137
-        # the metadata service may believe that devices are named 'sda'
-        # when the kernel named them 'vda' or 'xvda'
-        # we want to return the correct value for what will actually
-        # exist in this instance
-        mappings = {"sd": ("vd", "xvd")}
-        for (nfrom, tlist) in mappings.iteritems():
-            if not short_name.startswith(nfrom):
-                continue
-            for nto in tlist:
-                cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
-                if os.path.exists(cand):
-                    return cand
-        return None
-
     def device_name_to_device(self, name):
         # Consult metadata service, that has
         #  ephemeral0: sdb

diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 04083d0c..b22369a8 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -23,6 +23,7 @@
 from email.mime.multipart import MIMEMultipart

 import abc
+import os

 from cloudinit import importer
 from cloudinit import log as logging
@@ -128,6 +129,22 @@ class DataSource(object):

         return keys

+    def _remap_device(self, short_name):
+        # LP: #611137
+        # the metadata service may believe that devices are named 'sda'
+        # when the kernel named them 'vda' or 'xvda'
+        # we want to return the correct value for what will actually
+        # exist in this instance
+        mappings = {"sd": ("vd", "xvd")}
+        for (nfrom, tlist) in mappings.iteritems():
+            if not short_name.startswith(nfrom):
+                continue
+            for nto in tlist:
+                cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
+                if os.path.exists(cand):
+                    return cand
+        return None
+
     def device_name_to_device(self, _name):
         # translate a 'name' to a device
         # the primary function at this point is on ec2
--
cgit v1.2.3
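
The lookup order above treats 'root' and 'ami' as aliases for the same disk before either backend is consulted; the candidate list is built like this (extracted for illustration):

    def candidate_names(name):
        # ec2-style metadata may know the root disk as 'ami', openstack
        # as 'root', so try both, preferring the ec2 spelling first.
        names = [name]
        if name == 'root':
            names.insert(0, 'ami')
        if name == 'ami':
            names.append('root')
        return names

    assert candidate_names('root') == ['ami', 'root']
    assert candidate_names('ephemeral0') == ['ephemeral0']
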
From 5b4fa81016f487fb6e041cef5a3b4ac0bd0863c5 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 5 Oct 2012 14:50:22 -0700
Subject: Add tests to show that the assigned bug is fixed. Also fix the
 extraction of the metadata key name, since it actually uses 'dashes'
 instead of being a single word.

---
 cloudinit/sources/DataSourceConfigDrive.py         |   2 +-
 .../unittests/test_datasource/test_configdrive.py  | 147 +++++++++++++++++++--
 2 files changed, 140 insertions(+), 9 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 495eee82..eebe44ec 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -285,7 +285,7 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
         ('metadata',
          "openstack/%s/meta_data.json" % version, True, json.loads),
         ('userdata', "openstack/%s/user_data" % version, False, None),
-        ('ec2-metadata', "ec2/latest/metadata.json", False, json.loads),
+        ('ec2-metadata', "ec2/latest/meta-data.json", False, json.loads),
     )

     results = {'userdata': None}

diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 55573114..99936d92 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -4,10 +4,15 @@ import os
 import os.path
 import shutil
 import tempfile
-from unittest import TestCase
+
+import mocker
+from mocker import MockerTestCase

 from cloudinit.sources import DataSourceConfigDrive as ds
+from cloudinit import settings
 from cloudinit import util
+from cloudinit import helpers
+

 PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'

@@ -60,17 +65,143 @@ CFG_DRIVE_FILES_V2 = {
   'openstack/latest/user_data': USER_DATA}


-class TestConfigDriveDataSource(TestCase):
+class TestConfigDriveDataSource(MockerTestCase):

     def setUp(self):
         super(TestConfigDriveDataSource, self).setUp()
-        self.tmp = tempfile.mkdtemp()
+        self.tmp = self.makeDir()

-    def tearDown(self):
-        try:
-            shutil.rmtree(self.tmp)
-        except OSError:
-            pass
+    def test_ec2_metadata(self):
+        populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+        cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+                                          None,
+                                          helpers.Paths({}))
+        found = ds.read_config_drive_dir(self.tmp)
+        self.assertTrue('ec2-metadata' in found)
+        ec2_md = found['ec2-metadata']
+        self.assertEqual(EC2_META, ec2_md)
+
+    def test_dev_os_remap(self):
+        populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+        cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+                                          None,
+                                          helpers.Paths({}))
+        found = ds.read_config_drive_dir(self.tmp)
+        cfg_ds.metadata = found['metadata']
+        name_tests = {
+            'ami': '/dev/vda1',
+            'root': '/dev/vda1',
+            'ephemeral0': '/dev/vda2',
+            'swap': '/dev/vda3',
+        }
+        for name, dev_name in name_tests.items():
+            my_mock = mocker.Mocker()
+            find_mock = my_mock.replace(util.find_devs_with,
+                                        spec=False, passthrough=False)
+            provided_name = dev_name[len('/dev/'):]
+            provided_name = "s" + provided_name[1:]
+            find_mock(mocker.ARGS)
+            my_mock.result([provided_name])
+            exists_mock = my_mock.replace(os.path.exists,
+                                          spec=False, passthrough=False)
+            exists_mock(mocker.ARGS)
+            my_mock.result(False)
+            exists_mock(mocker.ARGS)
+            my_mock.result(True)
+            my_mock.replay()
+            device = cfg_ds.device_name_to_device(name)
+            my_mock.restore()
+            self.assertEquals(dev_name, device)
+
+    def test_dev_os_map(self):
+        populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+        cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+                                          None,
+                                          helpers.Paths({}))
+        found = ds.read_config_drive_dir(self.tmp)
+        os_md = found['metadata']
+        cfg_ds.metadata = os_md
+        name_tests = {
+            'ami': '/dev/vda1',
+            'root': '/dev/vda1',
+            'ephemeral0': '/dev/vda2',
+            'swap': '/dev/vda3',
+        }
+        for name, dev_name in name_tests.items():
+            my_mock = mocker.Mocker()
+            find_mock = my_mock.replace(util.find_devs_with,
+                                        spec=False, passthrough=False)
+            find_mock(mocker.ARGS)
+            my_mock.result([dev_name])
+            exists_mock = my_mock.replace(os.path.exists,
+                                          spec=False, passthrough=False)
+            exists_mock(mocker.ARGS)
+            my_mock.result(True)
+            my_mock.replay()
+            device = cfg_ds.device_name_to_device(name)
+            my_mock.restore()
+            self.assertEquals(dev_name, device)
+
+    def test_dev_ec2_remap(self):
+        populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+        cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+                                          None,
+                                          helpers.Paths({}))
+        found = ds.read_config_drive_dir(self.tmp)
+        ec2_md = found['ec2-metadata']
+        os_md = found['metadata']
+        cfg_ds.ec2_metadata = ec2_md
+        cfg_ds.metadata = os_md
+        name_tests = {
+            'ami': '/dev/vda1',
+            'root': '/dev/vda1',
+            'ephemeral0': '/dev/vda2',
+            'swap': '/dev/vda3',
+            None: None,
+            'bob': None,
+            'root2k': None,
+        }
+        for name, dev_name in name_tests.items():
+            my_mock = mocker.Mocker()
+            exists_mock = my_mock.replace(os.path.exists,
+                                          spec=False, passthrough=False)
+            exists_mock(mocker.ARGS)
+            my_mock.result(False)
+            exists_mock(mocker.ARGS)
+            my_mock.result(True)
+            my_mock.replay()
+            device = cfg_ds.device_name_to_device(name)
+            self.assertEquals(dev_name, device)
+            my_mock.restore()
+
+    def test_dev_ec2_map(self):
+        populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+        cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+                                          None,
+                                          helpers.Paths({}))
+        found = ds.read_config_drive_dir(self.tmp)
+        exists_mock = self.mocker.replace(os.path.exists,
+                                          spec=False, passthrough=False)
+        exists_mock(mocker.ARGS)
+        self.mocker.count(0, None)
+        self.mocker.result(True)
+        self.mocker.replay()
+        ec2_md = found['ec2-metadata']
+        os_md = found['metadata']
+        cfg_ds.ec2_metadata = ec2_md
+        cfg_ds.metadata = os_md
+        name_tests = {
+            'ami': '/dev/sda1',
+            'root': '/dev/sda1',
+            'ephemeral0': '/dev/sda2',
+            'swap': '/dev/sda3',
+            None: None,
+            'bob': None,
+            'root2k': None,
+        }
+        for name, dev_name in name_tests.items():
+            device = cfg_ds.device_name_to_device(name)
+            self.assertEquals(dev_name, device)

     def test_dir_valid(self):
         """Verify a dir is read as such."""
--
cgit v1.2.3
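
The tests above drive device_name_to_device through an ec2 'block-device-mapping'; the EC2_META fixture itself is not visible in this excerpt, but a hypothetical mapping of the shape the tests imply would look like:

    EC2_META = {
        'block-device-mapping': {
            'ami': 'sda1',
            'root': '/dev/sda1',
            'ephemeral0': 'sda2',
            'swap': 'sda3',
        },
    }

    # With such a mapping, device_name_to_device('ephemeral0') resolves
    # 'sda2'; if /dev/sda2 does not exist, _remap_device then tries
    # /dev/vda2 and /dev/xvda2 before giving up.
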
From f510d8f5762f3c9d27afcc57f63d7614ec6c05cd Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 5 Oct 2012 15:43:54 -0700
Subject: Add checks around the device names that are found, to ensure that
 even if they are found they are also valid, before they are assumed to be
 the correct device name.

---
 cloudinit/sources/DataSourceConfigDrive.py | 36 ++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 12 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index eebe44ec..4af2e5ae 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -68,13 +68,30 @@ class DataSourceConfigDrive(sources.DataSource):
     def _os_name_to_device(self, name):
         device = None
         try:
-            dev_entries = util.find_devs_with('LABEL=%s' % (name))
+            criteria = 'LABEL=%s' % (name)
+            if name in ['swap']:
+                criteria = 'TYPE=%s' % (name)
+            dev_entries = util.find_devs_with(criteria)
             if dev_entries:
                 device = dev_entries[0]
         except util.ProcessExecutionError:
             pass
         return device

+    def _validate_device_name(self, device):
+        if not device:
+            return None
+        if not device.startswith("/"):
+            device = "/dev/%s" % device
+        if os.path.exists(device):
+            return device
+        # Durn, try adjusting the mapping
+        remapped = self._remap_device(os.path.basename(device))
+        if remapped:
+            LOG.debug("Remapped device name %s => %s", device, remapped)
+            return remapped
+        return None
+
     def device_name_to_device(self, name):
         # Translate a 'name' to a 'physical' device
         if not name:
@@ -86,31 +103,26 @@ class DataSourceConfigDrive(sources.DataSource):
         if name == 'ami':
             names.append('root')
         device = None
+        LOG.debug("Using ec2 metadata lookup to find device %s", names)
         for n in names:
             device = self._ec2_name_to_device(n)
+            device = self._validate_device_name(device)
             if device:
                 break
         # Try the openstack way second
         if not device:
+            LOG.debug("Using os lookup to find device %s", names)
             for n in names:
                 device = self._os_name_to_device(n)
+                device = self._validate_device_name(device)
                 if device:
                     break
         # Ok give up...
         if not device:
             return None
-        # Ensure translated ok
-        if not device.startswith("/"):
-            device = "/dev/%s" % device
-        if os.path.exists(device):
+        else:
+            LOG.debug("Using cfg drive lookup mapped to device %s", device)
             return device
-        # Durn, try adjusting the mapping
-        remapped = self._remap_device(os.path.basename(device))
-        if remapped:
-            LOG.debug("Remapped device name %s => %s", device, remapped)
-            return remapped
-        # Really give up now
-        return None
--
cgit v1.2.3
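
The swap special-casing exists because swap areas carry a blkid TYPE but usually no LABEL; the resulting lookup is equivalent to (device result illustrative):

    from cloudinit import util

    # A filesystem named 'ephemeral0' is found by label...
    devs = util.find_devs_with("LABEL=ephemeral0")
    # ...but 'swap' must be found by type, yielding e.g. ['/dev/vda3'].
    devs = util.find_devs_with("TYPE=swap")
    device = devs[0] if devs else None
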
From 758e152721891c707573757fe7a7ff410ec446e2 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Wed, 24 Oct 2012 20:31:19 -0700
Subject: Handle the case where newer versions of boto are used that lazily
 load the metadata from the ec2 metadata service.

1. Add an ec2_utils module that checks which version of boto is being
   used and, under the right versions, expands the lazily-loaded metadata
   dictionary.
2. Use this new ec2_utils module in the cloudstack and ec2 datasources as
   their entry points into boto.

---
 cloudinit/ec2_utils.py                    | 65 +++++++++++++++++++++++++++++++
 cloudinit/sources/DataSourceCloudStack.py | 11 +++---
 cloudinit/sources/DataSourceEc2.py        | 15 ++++---
 3 files changed, 79 insertions(+), 12 deletions(-)
 create mode 100644 cloudinit/ec2_utils.py

(limited to 'cloudinit/sources')

diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
new file mode 100644
index 00000000..76699102
--- /dev/null
+++ b/cloudinit/ec2_utils.py
@@ -0,0 +1,65 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import pkg_resources
+from pkg_resources import parse_version
+
+import cloudinit.util as util
+import cloudinit.url_helper as uh
+
+import boto.utils as boto_utils
+
+
+BOTO_LAZY = False
+try:
+    _boto_lib = pkg_resources.get_distribution('boto')
+    if _boto_lib.parsed_version > parse_version("2.5.2"):
+        BOTO_LAZY = True
+except pkg_resources.DistributionNotFound:
+    pass
+
+
+# Versions of boto >= 2.6.0 try to lazily load
+# the metadata backing, which doesn't work so well
+# in cloud-init especially since the metadata is
+# serialized and actions are performed where the
+# metadata server may be blocked (thus the datasource
+# will start failing) resulting in url exceptions
+# when fields that do exist (or would have existed)
+# do not exist due to the blocking that occurred.
+def _unlazy_dict(mp):
+    if not isinstance(mp, (dict)):
+        return mp
+    if not BOTO_LAZY:
+        return mp
+    for (k, v) in mp.items():
+        _unlazy_dict(v)
+
+
+def get_instance_userdata(api_version, metadata_address):
+    ud = boto_utils.get_instance_userdata(api_version, None, metadata_address)
+    if not ud:
+        ud = ''
+    return ud
+
+
+def get_instance_metadata(api_version, metadata_address):
+    metadata = boto_utils.get_instance_metadata(api_version, metadata_address)
+    if not isinstance(metadata, (dict)):
+        metadata = {}
+    return _unlazy_dict(metadata)

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index f7ffa7cb..78cf24d7 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -26,8 +26,7 @@ from struct import pack
 import os
 import time

-import boto.utils as boto_utils
-
+from cloudinit import ec2_utils as ec2
 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import url_helper as uhelp
@@ -116,10 +115,10 @@ class DataSourceCloudStack(sources.DataSource):
         if not self.wait_for_metadata_service():
             return False
         start_time = time.time()
-        self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
-            None, self.metadata_address)
-        self.metadata = boto_utils.get_instance_metadata(self.api_ver,
-            self.metadata_address)
+        self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
+            self.metadata_address)
+        self.metadata = ec2.get_instance_metadata(self.api_ver,
+            self.metadata_address)
         LOG.debug("Crawl of metadata service took %s seconds",
                   int(time.time() - start_time))
         return True

diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 3686fa10..3da7b54e 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -23,8 +23,7 @@
 import os
 import time

-import boto.utils as boto_utils
-
+from cloudinit import ec2_utils as ec2
 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import url_helper as uhelp
@@ -53,6 +52,10 @@ class DataSourceEc2(sources.DataSource):
     def __str__(self):
         return util.obj_name(self)

+    def __getstate__(self):
+        # Versions of boto
+        pass
+
     def get_data(self):
         seed_ret = {}
         if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
@@ -65,10 +68,10 @@ class DataSourceEc2(sources.DataSource):
         if not self.wait_for_metadata_service():
             return False
         start_time = time.time()
-        self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
-            None, self.metadata_address)
-        self.metadata = boto_utils.get_instance_metadata(self.api_ver,
-            self.metadata_address)
+        self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
+            self.metadata_address)
+        self.metadata = ec2.get_instance_metadata(self.api_ver,
+            self.metadata_address)
         LOG.debug("Crawl of metadata service took %s seconds",
                   int(time.time() - start_time))
         return True
--
cgit v1.2.3

From 8ffc2c8f791b7694a121ec30dac7437c6e8fdb9b Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Wed, 24 Oct 2012 20:35:30 -0700
Subject: Remove a function that shouldn't have shown up.

---
 cloudinit/sources/DataSourceEc2.py | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 3da7b54e..0fc79b32 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -52,10 +52,6 @@ class DataSourceEc2(sources.DataSource):
     def __str__(self):
         return util.obj_name(self)

-    def __getstate__(self):
-        # Versions of boto
-        pass
-
     def get_data(self):
         seed_ret = {}
         if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
--
cgit v1.2.3
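
After these two commits, the datasources' only entry point into boto is the pair of wrappers in ec2_utils; used roughly as follows (the API version and address mirror the ec2 datasource defaults and are assumed here):

    from cloudinit import ec2_utils as ec2

    api_ver = '2009-04-04'
    metadata_address = 'http://169.254.169.254'

    # Both wrappers normalize boto's return values: userdata becomes ''
    # rather than None, and metadata is coerced to a plain dict whose
    # lazy values are walked while the metadata service is reachable.
    userdata = ec2.get_instance_userdata(api_ver, metadata_address)
    metadata = ec2.get_instance_metadata(api_ver, metadata_address)
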
From aa8b51a48a30e3a3c863ca0ddb8bc4667026d57a Mon Sep 17 00:00:00 2001
From: harlowja
Date: Sat, 27 Oct 2012 19:25:48 -0700
Subject: Helpful cleanups.

1. Remove the usage of the path.join function, now that all code should
   be going through the util file methods (and they can be mocked out as
   needed).
2. Adjust all occurrences of the above join function to either not use it
   or replace it with the standard os.path.join (which can also be mocked
   out as needed).
3. Stop pylint from complaining about the tests folder 'helpers.py' not
   being found.
4. Add a pylintrc file that is used instead of the options hidden in the
   'run_pylint' tool.

---
 Makefile                                           |   8 +--
 cloudinit/config/cc_apt_pipelining.py              |  12 ++--
 cloudinit/config/cc_apt_update_upgrade.py          |  13 ++--
 cloudinit/config/cc_ca_certs.py                    |  24 ++++----
 cloudinit/config/cc_chef.py                        |  30 +++++-----
 cloudinit/config/cc_landscape.py                   |  14 ++---
 cloudinit/config/cc_mcollective.py                 |  22 +++----
 cloudinit/config/cc_mounts.py                      |   9 ++-
 cloudinit/config/cc_phone_home.py                  |   4 +-
 cloudinit/config/cc_puppet.py                      |  70 ++++++++++------------
 cloudinit/config/cc_resizefs.py                    |   5 +-
 cloudinit/config/cc_rsyslog.py                     |   3 +-
 cloudinit/config/cc_runcmd.py                      |   2 +-
 cloudinit/config/cc_salt_minion.py                 |   6 +-
 cloudinit/config/cc_set_passwords.py               |   6 +-
 cloudinit/config/cc_ssh.py                         |  16 +++--
 cloudinit/config/cc_ssh_authkey_fingerprints.py    |   7 +--
 cloudinit/config/cc_update_etc_hosts.py            |   3 +-
 cloudinit/distros/__init__.py                      |   8 +--
 cloudinit/distros/debian.py                        |  26 +++-----
 cloudinit/helpers.py                               |  29 +--------
 cloudinit/sources/__init__.py                      |   2 -
 cloudinit/ssh_util.py                              |  26 ++++----
 pylintrc                                           |  19 ++++++
 tests/__init__.py                                  |   0
 tests/unittests/__init__.py                        |   0
 tests/unittests/test_datasource/__init__.py        |   0
 tests/unittests/test_distros/__init__.py           |   0
 tests/unittests/test_filters/__init__.py           |   0
 tests/unittests/test_filters/test_launch_index.py  |  10 +---
 tests/unittests/test_handler/__init__.py           |   0
 .../test_handler/test_handler_ca_certs.py          |  18 +++---
 tests/unittests/test_runs/__init__.py              |   0
 tests/unittests/test_runs/test_simple_run.py       |  10 +---
 tools/run-pylint                                   |  19 ++----
 35 files changed, 170 insertions(+), 251 deletions(-)
 create mode 100644 pylintrc
 create mode 100644 tests/__init__.py
 create mode 100644 tests/unittests/__init__.py
 create mode 100644 tests/unittests/test_datasource/__init__.py
 create mode 100644 tests/unittests/test_distros/__init__.py
 create mode 100644 tests/unittests/test_filters/__init__.py
 create mode 100644 tests/unittests/test_handler/__init__.py
 create mode 100644 tests/unittests/test_runs/__init__.py

(limited to 'cloudinit/sources')

diff --git a/Makefile b/Makefile
index 49324ca0..8f5646b7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,20 +1,20 @@
 CWD=$(shell pwd)
-PY_FILES=$(shell find cloudinit bin tests tools -name "*.py")
+PY_FILES=$(shell find cloudinit bin tests tools -type f -name "*.py")
 PY_FILES+="bin/cloud-init"

 all: test

 pep8:
-	$(CWD)/tools/run-pep8 $(PY_FILES)
+	@$(CWD)/tools/run-pep8 $(PY_FILES)

 pylint:
-	$(CWD)/tools/run-pylint $(PY_FILES)
+	@$(CWD)/tools/run-pylint $(PY_FILES)

 pyflakes:
 	pyflakes $(PY_FILES)

 test:
-	nosetests $(noseopts) tests/unittests/
+	@nosetests $(noseopts) tests/

 2to3:
 	2to3 $(PY_FILES)

diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 02056ee0..e5629175 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -34,26 +34,24 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
 # on TCP connections - otherwise data corruption will occur.

-def handle(_name, cfg, cloud, log, _args):
+def handle(_name, cfg, _cloud, log, _args):
     apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
     apt_pipe_value_s = str(apt_pipe_value).lower().strip()

     if apt_pipe_value_s == "false":
-        write_apt_snippet(cloud, "0", log, DEFAULT_FILE)
+        write_apt_snippet("0", log, DEFAULT_FILE)
     elif apt_pipe_value_s in ("none", "unchanged", "os"):
         return
     elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]:
-        write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE)
+        write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
     else:
         log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)


-def write_apt_snippet(cloud, setting, log, f_name):
+def write_apt_snippet(setting, log, f_name):
     """Writes f_name with apt pipeline depth 'setting'."""

     file_contents = APT_PIPE_TPL % (setting)
-
-    util.write_file(cloud.paths.join(False, f_name), file_contents)
-
+    util.write_file(f_name, file_contents)
     log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)

diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py
index 356bb98d..59c34b59 100644
--- a/cloudinit/config/cc_apt_update_upgrade.py
+++ b/cloudinit/config/cc_apt_update_upgrade.py
@@ -78,8 +78,7 @@ def handle(name, cfg, cloud, log, _args):
         try:
             # See man 'apt.conf'
             contents = PROXY_TPL % (proxy)
-            util.write_file(cloud.paths.join(False, proxy_filename),
-                            contents)
+            util.write_file(proxy_filename, contents)
         except Exception as e:
             util.logexc(log, "Failed to write proxy to %s", proxy_filename)
     elif os.path.isfile(proxy_filename):
@@ -90,7 +89,7 @@ def handle(name, cfg, cloud, log, _args):
         params = mirrors
         params['RELEASE'] = release
         params['MIRROR'] = mirror
-        errors = add_sources(cloud, cfg['apt_sources'], params)
+        errors = add_sources(cfg['apt_sources'], params)
         for e in errors:
             log.warn("Source Error: %s", ':'.join(e))

@@ -196,11 +195,10 @@ def generate_sources_list(codename, mirrors, cloud, log):
     params = {'codename': codename}
     for k in mirrors:
         params[k] = mirrors[k]
-    out_fn = cloud.paths.join(False, '/etc/apt/sources.list')
-    templater.render_to_file(template_fn, out_fn, params)
+    templater.render_to_file(template_fn, '/etc/apt/sources.list', params)


-def add_sources(cloud, srclist, template_params=None):
+def add_sources(srclist, template_params=None):
     """
     add entries in /etc/apt/sources.list.d for each abbreviated
     sources.list entry in 'srclist'. When rendering template, also
@@ -250,8 +248,7 @@ def add_sources(srclist, template_params=None):

         try:
             contents = "%s\n" % (source)
-            util.write_file(cloud.paths.join(False, ent['filename']),
-                            contents, omode="ab")
+            util.write_file(ent['filename'], contents, omode="ab")
         except:
             errorlist.append([source,
                               "failed write to file %s" % ent['filename']])

diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index dc046bda..20f24357 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -22,6 +22,7 @@ CA_CERT_PATH = "/usr/share/ca-certificates/"
 CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
 CA_CERT_CONFIG = "/etc/ca-certificates.conf"
 CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
+CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)

 distros = ['ubuntu', 'debian']

@@ -33,7 +34,7 @@ def update_ca_certs():
     util.subp(["update-ca-certificates"], capture=False)


-def add_ca_certs(paths, certs):
+def add_ca_certs(certs):
     """
     Adds certificates to the system. To actually apply the new certificates
     you must also call L{update_ca_certs}.
@@ -43,27 +44,24 @@ def add_ca_certs(certs):
     if certs:
         # First ensure they are strings...
         cert_file_contents = "\n".join([str(c) for c in certs])
-        cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
-        cert_file_fullpath = paths.join(False, cert_file_fullpath)
-        util.write_file(cert_file_fullpath, cert_file_contents, mode=0644)
+        util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644)
         # Append cert filename to CA_CERT_CONFIG file.
-        util.write_file(paths.join(False, CA_CERT_CONFIG),
-                        "\n%s" % CA_CERT_FILENAME, omode="ab")
+        util.write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="ab")


-def remove_default_ca_certs(paths):
+def remove_default_ca_certs():
     """
     Removes all default trusted CA certificates from the system. To actually
     apply the change you must also call L{update_ca_certs}.
     """
-    util.delete_dir_contents(paths.join(False, CA_CERT_PATH))
-    util.delete_dir_contents(paths.join(False, CA_CERT_SYSTEM_PATH))
-    util.write_file(paths.join(False, CA_CERT_CONFIG), "", mode=0644)
+    util.delete_dir_contents(CA_CERT_PATH)
+    util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
+    util.write_file(CA_CERT_CONFIG, "", mode=0644)

     debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
     util.subp(('debconf-set-selections', '-'), debconf_sel)


-def handle(name, cfg, cloud, log, _args):
+def handle(name, cfg, _cloud, log, _args):
     """
     Call to handle ca-cert sections in cloud-config file.

@@ -85,14 +83,14 @@ def handle(name, cfg, _cloud, log, _args):
     # default trusted CA certs first.
     if ca_cert_cfg.get("remove-defaults", False):
         log.debug("Removing default certificates")
-        remove_default_ca_certs(cloud.paths)
+        remove_default_ca_certs()

     # If we are given any new trusted CA certs to add, add them.
     if "trusted" in ca_cert_cfg:
         trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
         if trusted_certs:
             log.debug("Adding %d certificates" % len(trusted_certs))
-            add_ca_certs(cloud.paths, trusted_certs)
+            add_ca_certs(trusted_certs)

     # Update the system with the new cert configuration.
     log.debug("Updating certificates")

diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 6f568261..7a3d6a31 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -26,6 +26,15 @@ from cloudinit import util

 RUBY_VERSION_DEFAULT = "1.8"

+CHEF_DIRS = [
+    '/etc/chef',
+    '/var/log/chef',
+    '/var/lib/chef',
+    '/var/cache/chef',
+    '/var/backups/chef',
+    '/var/run/chef',
+]
+

 def handle(name, cfg, cloud, log, _args):

@@ -37,24 +46,15 @@ def handle(name, cfg, cloud, log, _args):
     chef_cfg = cfg['chef']

     # Ensure the chef directories we use exist
-    c_dirs = [
-        '/etc/chef',
-        '/var/log/chef',
-        '/var/lib/chef',
-        '/var/cache/chef',
-        '/var/backups/chef',
-        '/var/run/chef',
-    ]
-    for d in c_dirs:
-        util.ensure_dir(cloud.paths.join(False, d))
+    for d in CHEF_DIRS:
+        util.ensure_dir(d)

     # Set the validation key based on the presence of either 'validation_key'
     # or 'validation_cert'. In the case where both exist, 'validation_key'
     # takes precedence
     for key in ('validation_key', 'validation_cert'):
         if key in chef_cfg and chef_cfg[key]:
-            v_fn = cloud.paths.join(False, '/etc/chef/validation.pem')
-            util.write_file(v_fn, chef_cfg[key])
+            util.write_file('/etc/chef/validation.pem', chef_cfg[key])
             break

     # Create the chef config from template
@@ -68,8 +68,7 @@ def handle(name, cfg, cloud, log, _args):
                                                   '_default'),
             'validation_name': chef_cfg['validation_name']
         }
-        out_fn = cloud.paths.join(False, '/etc/chef/client.rb')
-        templater.render_to_file(template_fn, out_fn, params)
+        templater.render_to_file(template_fn, '/etc/chef/client.rb', params)
     else:
         log.warn("No template found, not rendering to /etc/chef/client.rb")

@@ -81,8 +80,7 @@ def handle(name, cfg, cloud, log, _args):
         initial_attributes = chef_cfg['initial_attributes']
         for k in list(initial_attributes.keys()):
             initial_json[k] = initial_attributes[k]
-        firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json')
-        util.write_file(firstboot_fn, json.dumps(initial_json))
+        util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json))

     # If chef is not installed, we install chef based on 'install_type'
     if not os.path.isfile('/usr/bin/chef-client'):

diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 56ab0ce3..02610dd0 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -66,22 +66,16 @@ def handle(_name, cfg, cloud, log, _args):

     merge_data = [
         LSC_BUILTIN_CFG,
-        cloud.paths.join(True, LSC_CLIENT_CFG_FILE),
+        LSC_CLIENT_CFG_FILE,
         ls_cloudcfg,
     ]
     merged = merge_together(merge_data)
-
-    lsc_client_fn = cloud.paths.join(False, LSC_CLIENT_CFG_FILE)
-    lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn))
-    if not os.path.isdir(lsc_dir):
-        util.ensure_dir(lsc_dir)
-
     contents = StringIO()
     merged.write(contents)
-    contents.flush()
-    util.write_file(lsc_client_fn, contents.getvalue())
-    log.debug("Wrote landscape config file to %s", lsc_client_fn)
+
+    util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
+    util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
+    log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)

     util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
     util.subp(["service", "landscape-client", "restart"])

diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 2acdbc6f..b670390d 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -29,6 +29,7 @@ from cloudinit import util

 PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
 PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
+SERVER_CFG = '/etc/mcollective/server.cfg'


 def handle(name, cfg, cloud, log, _args):
@@ -48,26 +49,23 @@ def handle(name, cfg, cloud, log, _args):
     if 'conf' in mcollective_cfg:
         # Read server.cfg values from the
         # original file in order to be able to mix the rest up
-        server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg')
-        mcollective_config = ConfigObj(server_cfg_fn)
+        mcollective_config = ConfigObj(SERVER_CFG)
         # See: http://tiny.cc/jh9agw
         for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
             if cfg_name == 'public-cert':
-                pubcert_fn = cloud.paths.join(True, PUBCERT_FILE)
-                util.write_file(pubcert_fn, cfg, mode=0644)
-                mcollective_config['plugin.ssl_server_public'] = pubcert_fn
+                util.write_file(PUBCERT_FILE, cfg, mode=0644)
+                mcollective_config['plugin.ssl_server_public'] = PUBCERT_FILE
                 mcollective_config['securityprovider'] = 'ssl'
             elif cfg_name == 'private-cert':
-                pricert_fn = cloud.paths.join(True, PRICERT_FILE)
-                util.write_file(pricert_fn, cfg, mode=0600)
-                mcollective_config['plugin.ssl_server_private'] = pricert_fn
+                util.write_file(PRICERT_FILE, cfg, mode=0600)
+                mcollective_config['plugin.ssl_server_private'] = PRICERT_FILE
                 mcollective_config['securityprovider'] = 'ssl'
             else:
                 if isinstance(cfg, (basestring, str)):
                     # Just set it in the 'main' section
                     mcollective_config[cfg_name] = cfg
                 elif isinstance(cfg, (dict)):
-                    # Iterate throug the config items, create a section
+                    # Iterate through the config items, create a section
                     # if it is needed and then add/or create items as needed
                     if cfg_name not in mcollective_config.sections:
                         mcollective_config[cfg_name] = {}
@@ -78,14 +76,12 @@ def handle(name, cfg, cloud, log, _args):
                     mcollective_config[cfg_name] = str(cfg)
         # We got all our config as wanted we'll rename
         # the previous server.cfg and create our new one
-        old_fn = cloud.paths.join(False, '/etc/mcollective/server.cfg.old')
-        util.rename(server_cfg_fn, old_fn)
+        util.rename(SERVER_CFG, "%s.old" % (SERVER_CFG))
         # Now we got the whole file, write to disk...
         contents = StringIO()
         mcollective_config.write(contents)
         contents = contents.getvalue()
-        server_cfg_rw = cloud.paths.join(False, '/etc/mcollective/server.cfg')
-        util.write_file(server_cfg_rw, contents, mode=0644)
+        util.write_file(SERVER_CFG, contents, mode=0644)

     # Start mcollective
     util.subp(['service', 'mcollective', 'start'], capture=False)

diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 14c965bb..cb772c86 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -28,6 +28,7 @@ from cloudinit import util
 SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$"
 SHORTNAME = re.compile(SHORTNAME_FILTER)
 WS = re.compile("[%s]+" % (whitespace))
+FSTAB_PATH = "/etc/fstab"


 def is_mdname(name):
@@ -167,8 +168,7 @@ def handle(_name, cfg, cloud, log, _args):
         cc_lines.append('\t'.join(line))

     fstab_lines = []
-    fstab = util.load_file(cloud.paths.join(True, "/etc/fstab"))
-    for line in fstab.splitlines():
+    for line in util.load_file(FSTAB_PATH).splitlines():
         try:
             toks = WS.split(line)
             if toks[3].find(comment) != -1:
@@ -179,7 +179,7 @@ def handle(_name, cfg, cloud, log, _args):

     fstab_lines.extend(cc_lines)
     contents = "%s\n" % ('\n'.join(fstab_lines))
-    util.write_file(cloud.paths.join(False, "/etc/fstab"), contents)
+    util.write_file(FSTAB_PATH, contents)

     if needswap:
         try:
@@ -188,9 +188,8 @@ def handle(_name, cfg, cloud, log, _args):
             util.logexc(log, "Activating swap via 'swapon -a' failed")

     for d in dirs:
-        real_dir = cloud.paths.join(False, d)
         try:
-            util.ensure_dir(real_dir)
+            util.ensure_dir(d)
         except:
             util.logexc(log, "Failed to make '%s' config-mount", d)

diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index ae1349eb..886487f8 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -84,10 +84,10 @@ def handle(name, cfg, cloud, log, args):

     for (n, path) in pubkeys.iteritems():
         try:
-            all_keys[n] = util.load_file(cloud.paths.join(True, path))
+            all_keys[n] = util.load_file(path)
         except:
             util.logexc(log, ("%s: failed to open, can not"
-                              " phone home that data"), path)
+                              " phone home that data!"), path)

     submit_keys = {}
     for k in post_list:

diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 74ee18e1..8fe3af57 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -21,12 +21,32 @@
 from StringIO import StringIO

 import os
-import pwd
 import socket

 from cloudinit import helpers
 from cloudinit import util

+PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
+PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/'
+PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
+PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem'
+
+
+def _autostart_puppet(log):
+    # Set puppet to automatically start
+    if os.path.exists('/etc/default/puppet'):
+        util.subp(['sed', '-i',
+                   '-e', 's/^START=.*/START=yes/',
+                   '/etc/default/puppet'], capture=False)
+    elif os.path.exists('/bin/systemctl'):
+        util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
+                  capture=False)
+    elif os.path.exists('/sbin/chkconfig'):
+        util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
+    else:
+        log.warn(("Sorry we do not know how to enable"
+                  " puppet services on this system"))
+

 def handle(name, cfg, cloud, log, _args):
     # If there isn't a puppet key in the configuration don't do anything
@@ -43,8 +63,7 @@ def handle(name, cfg, cloud, log, _args):
     # ... and then update the puppet configuration
     if 'conf' in puppet_cfg:
         # Add all sections from the conf object to puppet.conf
-        puppet_conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf')
-        contents = util.load_file(puppet_conf_fn)
+        contents = util.load_file(PUPPET_CONF_PATH)
         # Create object for reading puppet.conf values
         puppet_config = helpers.DefaultingConfigParser()
         # Read puppet.conf values from original file in order to be able to
@@ -53,28 +72,19 @@ def handle(name, cfg, cloud, log, _args):
         cleaned_lines = [i.lstrip() for i in contents.splitlines()]
         cleaned_contents = '\n'.join(cleaned_lines)
         puppet_config.readfp(StringIO(cleaned_contents),
-                             filename=puppet_conf_fn)
+                             filename=PUPPET_CONF_PATH)
         for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
             # Cert configuration is a special case
             # Dump the puppet master ca certificate in the correct place
             if cfg_name == 'ca_cert':
                 # Puppet ssl sub-directory isn't created yet
                 # Create it with the proper permissions and ownership
-                pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl')
-                util.ensure_dir(pp_ssl_dir, 0771)
-                util.chownbyid(pp_ssl_dir,
-                               pwd.getpwnam('puppet').pw_uid, 0)
-                pp_ssl_certs = cloud.paths.join(False,
-                                                '/var/lib/puppet/ssl/certs/')
-                util.ensure_dir(pp_ssl_certs)
-                util.chownbyid(pp_ssl_certs,
-                               pwd.getpwnam('puppet').pw_uid, 0)
-                pp_ssl_ca_certs = cloud.paths.join(False,
-                                                   ('/var/lib/puppet/'
-                                                    'ssl/certs/ca.pem'))
-                util.write_file(pp_ssl_ca_certs, cfg)
-                util.chownbyid(pp_ssl_ca_certs,
-                               pwd.getpwnam('puppet').pw_uid, 0)
+                util.ensure_dir(PUPPET_SSL_DIR, 0771)
+                util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
+                util.ensure_dir(PUPPET_SSL_CERT_DIR)
+                util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
+                util.write_file(PUPPET_SSL_CERT_PATH, str(cfg))
+                util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
             else:
                 # Iterate throug the config items, we'll use ConfigParser.set
                 # to overwrite or create new items as needed
@@ -90,25 +100,11 @@ def handle(name, cfg, cloud, log, _args):
                     puppet_config.set(cfg_name, o, v)
         # We got all our config as wanted we'll rename
         # the previous puppet.conf and create our new one
-        conf_old_fn = cloud.paths.join(False,
-                                       '/etc/puppet/puppet.conf.old')
-        util.rename(puppet_conf_fn, conf_old_fn)
-        puppet_conf_rw = cloud.paths.join(False, '/etc/puppet/puppet.conf')
-        util.write_file(puppet_conf_rw, puppet_config.stringify())
+        util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH))
+        util.write_file(PUPPET_CONF_PATH, puppet_config.stringify())

-    # Set puppet to automatically start
-    if os.path.exists('/etc/default/puppet'):
-        util.subp(['sed', '-i',
-                   '-e', 's/^START=.*/START=yes/',
-                   '/etc/default/puppet'], capture=False)
-    elif os.path.exists('/bin/systemctl'):
-        util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
-                  capture=False)
-    elif os.path.exists('/sbin/chkconfig'):
-        util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
-    else:
-        log.warn(("Sorry we do not know how to enable"
-                  " puppet services on this system"))
+    # Set it up so it autostarts
+    _autostart_puppet(log)

     # Start puppetd
     util.subp(['service', 'puppet', 'start'], capture=False)

diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index e7f27944..b958f332 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -62,7 +62,7 @@ def get_fs_type(st_dev, path, log):
         raise


-def handle(name, cfg, cloud, log, args):
+def handle(name, cfg, _cloud, log, args):
     if len(args) != 0:
         resize_root = args[0]
     else:
@@ -74,11 +74,10 @@ def handle(name, cfg, _cloud, log, args):
     # TODO(harlowja) is the directory ok to be used??
     resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
-    resize_root_d = cloud.paths.join(False, resize_root_d)
     util.ensure_dir(resize_root_d)

     # TODO(harlowja): allow what is to be resized to be configurable??
-    resize_what = cloud.paths.join(False, "/")
+    resize_what = "/"
     with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.",
                                     dir=resize_root_d, delete=True) as tfh:
         devpth = tfh.name

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 78327526..0c2c6880 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -71,8 +71,7 @@ def handle(name, cfg, cloud, log, _args):

         try:
             contents = "%s\n" % (content)
-            util.write_file(cloud.paths.join(False, filename),
-                            contents, omode=omode)
+            util.write_file(filename, contents, omode=omode)
         except Exception:
             util.logexc(log, "Failed to write to %s", filename)

diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 65064cfb..598c3a3e 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -33,6 +33,6 @@ def handle(name, cfg, cloud, log, _args):
     cmd = cfg["runcmd"]
     try:
         content = util.shellify(cmd)
-        util.write_file(cloud.paths.join(False, out_fn), content, 0700)
+        util.write_file(out_fn, content, 0700)
     except:
         util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)

diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 8a1440d9..f3eede18 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -34,8 +34,7 @@ def handle(name, cfg, cloud, log, _args):
     cloud.distro.install_packages(["salt-minion"])

     # Ensure we can configure files at the right dir
-    config_dir = cloud.paths.join(False, salt_cfg.get("config_dir",
-                                                      '/etc/salt'))
+    config_dir = salt_cfg.get("config_dir", '/etc/salt')
     util.ensure_dir(config_dir)

     # ... and then update the salt configuration
@@ -47,8 +46,7 @@ def handle(name, cfg, cloud, log, _args):

     # ... copy the key pair if specified
     if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
-        pki_dir = cloud.paths.join(False, salt_cfg.get('pki_dir',
-                                                       '/etc/salt/pki'))
+        pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
         with util.umask(077):
             util.ensure_dir(pki_dir)
             pub_name = os.path.join(pki_dir, 'minion.pub')

diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 26c558ad..c6bf62fd 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -114,8 +114,7 @@ def handle(_name, cfg, cloud, log, args):
         replaced_auth = False

         # See: man sshd_config
-        conf_fn = cloud.paths.join(True, ssh_util.DEF_SSHD_CFG)
-        old_lines = ssh_util.parse_ssh_config(conf_fn)
+        old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
         new_lines = []
         i = 0
         for (i, line) in enumerate(old_lines):
@@ -134,8 +133,7 @@ def handle(_name, cfg, cloud, log, args):
                                             pw_auth))

         lines = [str(e) for e in new_lines]
-        ssh_rw_fn = cloud.paths.join(False, ssh_util.DEF_SSHD_CFG)
-        util.write_file(ssh_rw_fn, "\n".join(lines))
+        util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))

         try:
             cmd = ['service']

diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 32e48c30..b623d476 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -59,7 +59,7 @@ def handle(_name, cfg, cloud, log, _args):

     # remove the static keys from the pristine image
     if cfg.get("ssh_deletekeys", True):
-        key_pth = cloud.paths.join(False, "/etc/ssh/", "ssh_host_*key*")
+        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
         for f in glob.glob(key_pth):
             try:
                 util.del_file(f)
@@ -72,8 +72,7 @@ def handle(_name, cfg, cloud, log, _args):
             if key in KEY_2_FILE:
                 tgt_fn = KEY_2_FILE[key][0]
                 tgt_perms = KEY_2_FILE[key][1]
-                util.write_file(cloud.paths.join(False, tgt_fn),
-                                val, tgt_perms)
+                util.write_file(tgt_fn, val, tgt_perms)

         for (priv, pub) in PRIV_2_PUB.iteritems():
             if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']:
@@ -94,7 +93,7 @@ def handle(_name, cfg, cloud, log, _args):
                                            'ssh_genkeytypes',
                                            GENERATE_KEY_NAMES)
         for keytype in genkeys:
-            keyfile = cloud.paths.join(False, KEY_FILE_TPL % (keytype))
+            keyfile = KEY_FILE_TPL % (keytype)
             util.ensure_dir(os.path.dirname(keyfile))
             if not os.path.exists(keyfile):
                 cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
@@ -118,17 +117,16 @@ def handle(_name, cfg, cloud, log, _args):
             cfgkeys = cfg["ssh_authorized_keys"]
             keys.extend(cfgkeys)

-        apply_credentials(keys, user, cloud.paths,
-                          disable_root, disable_root_opts)
+        apply_credentials(keys, user, disable_root, disable_root_opts)
     except:
         util.logexc(log, "Applying ssh credentials failed!")


-def apply_credentials(keys, user, paths, disable_root, disable_root_opts):
+def apply_credentials(keys, user, disable_root, disable_root_opts):

     keys = set(keys)
     if user:
-        ssh_util.setup_user_keys(keys, user, '', paths)
+        ssh_util.setup_user_keys(keys, user, '')

     if disable_root:
         if not user:
@@ -137,4 +135,4 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
     else:
         key_prefix = ''

-    ssh_util.setup_user_keys(keys, 'root', key_prefix, paths)
+    ssh_util.setup_user_keys(keys, 'root', key_prefix)

diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 8c9a8806..c38bcea2 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -97,9 +97,8 @@ def handle(name, cfg, cloud, log, _args):
                   "logging of ssh fingerprints
disabled"), name) hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") - extract_func = ssh_util.extract_authorized_keys (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) for (user_name, _cfg) in users.items(): - (auth_key_fn, auth_key_entries) = extract_func(user_name, cloud.paths) - _pprint_key_entries(user_name, auth_key_fn, - auth_key_entries, hash_meth) + (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name) + _pprint_key_entries(user_name, key_fn, + key_entries, hash_meth) diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 4d75000f..96103615 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -42,8 +42,7 @@ def handle(name, cfg, cloud, log, _args): raise RuntimeError(("No hosts template could be" " found for distro %s") % (cloud.distro.name)) - out_fn = cloud.paths.join(False, '/etc/hosts') - templater.render_to_file(tpl_fn_name, out_fn, + templater.render_to_file(tpl_fn_name, '/etc/hosts', {'hostname': hostname, 'fqdn': fqdn}) elif manage_hosts == "localhost": diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 2fbb0e9b..869540d2 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -122,8 +122,7 @@ class Distro(object): new_etchosts = StringIO() need_write = False need_change = True - hosts_ro_fn = self._paths.join(True, "/etc/hosts") - for line in util.load_file(hosts_ro_fn).splitlines(): + for line in util.load_file("/etc/hosts").splitlines(): if line.strip().startswith(header): continue if not line.strip() or line.strip().startswith("#"): @@ -147,8 +146,7 @@ class Distro(object): need_write = True if need_write: contents = new_etchosts.getvalue() - util.write_file(self._paths.join(False, "/etc/hosts"), - contents, mode=0644) + util.write_file("/etc/hosts", contents, mode=0644) def _bring_up_interface(self, device_name): cmd = ['ifup', device_name] @@ -262,7 +260,7 @@ class Distro(object): # Import SSH keys if 'ssh_authorized_keys' in kwargs: keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, None, self._paths) + ssh_util.setup_user_keys(keys, name, key_prefix=None) return True diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 88f4e978..cc7e53a0 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -43,7 +43,7 @@ class Distro(distros.Distro): def apply_locale(self, locale, out_fn=None): if not out_fn: - out_fn = self._paths.join(False, '/etc/default/locale') + out_fn = '/etc/default/locale' util.subp(['locale-gen', locale], capture=False) util.subp(['update-locale', locale], capture=False) lines = ["# Created by cloud-init", 'LANG="%s"' % (locale), ""] @@ -54,8 +54,7 @@ class Distro(distros.Distro): self.package_command('install', pkglist) def _write_network(self, settings): - net_fn = self._paths.join(False, "/etc/network/interfaces") - util.write_file(net_fn, settings) + util.write_file("/etc/network/interfaces", settings) return ['all'] def _bring_up_interfaces(self, device_names): @@ -69,12 +68,9 @@ class Distro(distros.Distro): return distros.Distro._bring_up_interfaces(self, device_names) def set_hostname(self, hostname): - out_fn = self._paths.join(False, "/etc/hostname") - self._write_hostname(hostname, out_fn) - if out_fn == '/etc/hostname': - # Only do this if we are running in non-adjusted root mode - LOG.debug("Setting hostname to %s", hostname) - util.subp(['hostname', hostname]) + 
self._write_hostname(hostname, "/etc/hostname") + LOG.debug("Setting hostname to %s", hostname) + util.subp(['hostname', hostname]) def _write_hostname(self, hostname, out_fn): # "" gives trailing newline. @@ -82,16 +78,14 @@ class Distro(distros.Distro): def update_hostname(self, hostname, prev_fn): hostname_prev = self._read_hostname(prev_fn) - read_fn = self._paths.join(True, "/etc/hostname") - hostname_in_etc = self._read_hostname(read_fn) + hostname_in_etc = self._read_hostname("/etc/hostname") update_files = [] if not hostname_prev or hostname_prev != hostname: update_files.append(prev_fn) if (not hostname_in_etc or (hostname_in_etc == hostname_prev and hostname_in_etc != hostname)): - write_fn = self._paths.join(False, "/etc/hostname") - update_files.append(write_fn) + update_files.append("/etc/hostname") for fn in update_files: try: self._write_hostname(hostname, fn) @@ -103,7 +97,6 @@ class Distro(distros.Distro): LOG.debug(("%s differs from /etc/hostname." " Assuming user maintained hostname."), prev_fn) if "/etc/hostname" in update_files: - # Only do this if we are running in non-adjusted root mode LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) @@ -130,9 +123,8 @@ class Distro(distros.Distro): " no file found at %s") % (tz, tz_file)) # "" provides trailing newline during join tz_lines = ["# Created by cloud-init", str(tz), ""] - tz_fn = self._paths.join(False, "/etc/timezone") - util.write_file(tz_fn, "\n".join(tz_lines)) - util.copy(tz_file, self._paths.join(False, "/etc/localtime")) + util.write_file("/etc/timezone", "\n".join(tz_lines)) + util.copy(tz_file, "/etc/localtime") def package_command(self, command, args=None): e = os.environ.copy() diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index a4b20208..985ce3e5 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -302,14 +302,10 @@ class Paths(object): def __init__(self, path_cfgs, ds=None): self.cfgs = path_cfgs # Populate all the initial paths - self.cloud_dir = self.join(False, - path_cfgs.get('cloud_dir', - '/var/lib/cloud')) + self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud') self.instance_link = os.path.join(self.cloud_dir, 'instance') self.boot_finished = os.path.join(self.instance_link, "boot-finished") self.upstart_conf_d = path_cfgs.get('upstart_dir') - if self.upstart_conf_d: - self.upstart_conf_d = self.join(False, self.upstart_conf_d) self.seed_dir = os.path.join(self.cloud_dir, 'seed') # This one isn't joined, since it should just be read-only template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/') @@ -328,29 +324,6 @@ class Paths(object): # Set when a datasource becomes active self.datasource = ds - # joins the paths but also appends a read - # or write root if available - def join(self, read_only, *paths): - if read_only: - root = self.cfgs.get('read_root') - else: - root = self.cfgs.get('write_root') - if not paths: - return root - if len(paths) > 1: - joined = os.path.join(*paths) - else: - joined = paths[0] - if root: - pre_joined = joined - # Need to remove any starting '/' since this - # will confuse os.path.join - joined = joined.lstrip("/") - joined = os.path.join(root, joined) - LOG.debug("Translated %s to adjusted path %s (read-only=%s)", - pre_joined, joined, read_only) - return joined - # get_ipath_cur: get the current instance path for an item def get_ipath_cur(self, name=None): ipath = self.instance_link diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index b22369a8..745627d0 100644 --- 
a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -20,8 +20,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -from email.mime.multipart import MIMEMultipart - import abc import os diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 88a11a1a..dd6b742f 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -212,17 +212,15 @@ def update_authorized_keys(old_entries, keys): return '\n'.join(lines) -def users_ssh_info(username, paths): +def users_ssh_info(username): pw_ent = pwd.getpwnam(username) - if not pw_ent: + if not pw_ent or not pw_ent.pw_dir: raise RuntimeError("Unable to get ssh info for user %r" % (username)) - ssh_dir = paths.join(False, os.path.join(pw_ent.pw_dir, '.ssh')) - return (ssh_dir, pw_ent) + return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent) -def extract_authorized_keys(username, paths): - (ssh_dir, pw_ent) = users_ssh_info(username, paths) - sshd_conf_fn = paths.join(True, DEF_SSHD_CFG) +def extract_authorized_keys(username): + (ssh_dir, pw_ent) = users_ssh_info(username) auth_key_fn = None with util.SeLinuxGuard(ssh_dir, recursive=True): try: @@ -231,7 +229,7 @@ def extract_authorized_keys(username, paths): # The following tokens are defined: %% is replaced by a literal # '%', %h is replaced by the home directory of the user being # authenticated and %u is replaced by the username of that user. - ssh_cfg = parse_ssh_config_map(sshd_conf_fn) + ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG) auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip() if not auth_key_fn: auth_key_fn = "%h/.ssh/authorized_keys" @@ -240,7 +238,6 @@ def extract_authorized_keys(username, paths): auth_key_fn = auth_key_fn.replace("%%", '%') if not auth_key_fn.startswith('/'): auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn) - auth_key_fn = paths.join(False, auth_key_fn) except (IOError, OSError): # Give up and use a default key filename auth_key_fn = os.path.join(ssh_dir, 'authorized_keys') @@ -248,14 +245,13 @@ def extract_authorized_keys(username, paths): " in ssh config" " from %r, using 'AuthorizedKeysFile' file" " %r instead"), - sshd_conf_fn, auth_key_fn) - auth_key_entries = parse_authorized_keys(auth_key_fn) - return (auth_key_fn, auth_key_entries) + DEF_SSHD_CFG, auth_key_fn) + return (auth_key_fn, parse_authorized_keys(auth_key_fn)) -def setup_user_keys(keys, username, key_prefix, paths): +def setup_user_keys(keys, username, key_prefix): # Make sure the users .ssh dir is setup accordingly - (ssh_dir, pwent) = users_ssh_info(username, paths) + (ssh_dir, pwent) = users_ssh_info(username) if not os.path.isdir(ssh_dir): util.ensure_dir(ssh_dir, mode=0700) util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid) @@ -267,7 +263,7 @@ def setup_user_keys(keys, username, key_prefix, paths): key_entries.append(parser.parse(str(k), def_opt=key_prefix)) # Extract the old and make the new - (auth_key_fn, auth_key_entries) = extract_authorized_keys(username, paths) + (auth_key_fn, auth_key_entries) = extract_authorized_keys(username) with util.SeLinuxGuard(ssh_dir, recursive=True): content = update_authorized_keys(auth_key_entries, key_entries) util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700) diff --git a/pylintrc b/pylintrc new file mode 100644 index 00000000..ee886510 --- /dev/null +++ b/pylintrc @@ -0,0 +1,19 @@ +[General] +init-hook='import sys; sys.path.append("tests/")' + +[MESSAGES CONTROL] +# See: http://pylint-messages.wikidot.com/all-codes +# W0142: *args and **kwargs are
fine. +# W0511: TODOs in code comments are fine. +# W0702: No exception type(s) specified +# W0703: Catch "Exception" +# C0103: Invalid name +# C0111: Missing docstring +disable=W0142,W0511,W0702,W0703,C0103,C0111 + +[REPORTS] +reports=no +include-ids=yes + +[FORMAT] +max-line-length=79 diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_datasource/__init__.py b/tests/unittests/test_datasource/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_distros/__init__.py b/tests/unittests/test_distros/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_filters/__init__.py b/tests/unittests/test_filters/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py index 1e9b9053..773bb312 100644 --- a/tests/unittests/test_filters/test_launch_index.py +++ b/tests/unittests/test_filters/test_launch_index.py @@ -1,14 +1,6 @@ import copy -import os -import sys -top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py") -top_dir = os.path.abspath(top_dir) -if os.path.exists(top_dir): - sys.path.insert(0, os.path.dirname(top_dir)) - - -import helpers +from tests.unittests import helpers import itertools diff --git a/tests/unittests/test_handler/__init__.py b/tests/unittests/test_handler/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index d3df5c50..d73c9fa9 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -77,7 +77,7 @@ class TestConfig(MockerTestCase): """Test that a single cert gets passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1"]}} - self.mock_add(self.paths, ["CERT1"]) + self.mock_add(["CERT1"]) self.mock_update() self.mocker.replay() @@ -87,7 +87,7 @@ class TestConfig(MockerTestCase): """Test that multiple certs get passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}} - self.mock_add(self.paths, ["CERT1", "CERT2"]) + self.mock_add(["CERT1", "CERT2"]) self.mock_update() self.mocker.replay() @@ -97,7 +97,7 @@ class TestConfig(MockerTestCase): """Test remove_defaults works as expected.""" config = {"ca-certs": {"remove-defaults": True}} - self.mock_remove(self.paths) + self.mock_remove() self.mock_update() self.mocker.replay() @@ -116,8 +116,8 @@ class TestConfig(MockerTestCase): """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}} - self.mock_remove(self.paths) - self.mock_add(self.paths, ["CERT1"]) + self.mock_remove() + self.mock_add(["CERT1"]) self.mock_update() self.mocker.replay() @@ -136,7 +136,7 @@ class TestAddCaCerts(MockerTestCase): """Test that no certificate are written if not provided.""" self.mocker.replace(util.write_file, passthrough=False) self.mocker.replay() - cc_ca_certs.add_ca_certs(self.paths, []) + cc_ca_certs.add_ca_certs([]) def test_single_cert(self): """Test adding a single certificate to the trusted CAs.""" @@ -149,7 +149,7 @@ class TestAddCaCerts(MockerTestCase): "\ncloud-init-ca-certs.crt", omode="ab") self.mocker.replay() - 
cc_ca_certs.add_ca_certs(self.paths, [cert]) + cc_ca_certs.add_ca_certs([cert]) def test_multiple_certs(self): """Test adding multiple certificates to the trusted CAs.""" @@ -163,7 +163,7 @@ class TestAddCaCerts(MockerTestCase): "\ncloud-init-ca-certs.crt", omode="ab") self.mocker.replay() - cc_ca_certs.add_ca_certs(self.paths, certs) + cc_ca_certs.add_ca_certs(certs) class TestUpdateCaCerts(MockerTestCase): @@ -198,4 +198,4 @@ class TestRemoveDefaultCaCerts(MockerTestCase): "ca-certificates ca-certificates/trust_new_crts select no") self.mocker.replay() - cc_ca_certs.remove_default_ca_certs(self.paths) + cc_ca_certs.remove_default_ca_certs() diff --git a/tests/unittests/test_runs/__init__.py b/tests/unittests/test_runs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 1e852e1e..22d6cf2c 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -1,14 +1,6 @@ import os -import sys -# Allow running this test individually -top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py") -top_dir = os.path.abspath(top_dir) -if os.path.exists(top_dir): - sys.path.insert(0, os.path.dirname(top_dir)) - - -import helpers +from tests.unittests import helpers from cloudinit.settings import (PER_INSTANCE) from cloudinit import stages diff --git a/tools/run-pylint b/tools/run-pylint index 7ef44ac5..b74efda9 100755 --- a/tools/run-pylint +++ b/tools/run-pylint @@ -6,23 +6,16 @@ else files=( "$@" ); fi +RC_FILE="pylintrc" +if [ ! -f $RC_FILE ]; then + RC_FILE="../pylintrc" +fi + cmd=( pylint - --reports=n - --include-ids=y - --max-line-length=79 - + --rcfile=$RC_FILE --disable=R --disable=I - - --disable=W0142 # Used * or ** magic - --disable=W0511 # TODO/FIXME note - --disable=W0702 # No exception type(s) specified - --disable=W0703 # Catch "Exception" - - --disable=C0103 # Invalid name - --disable=C0111 # Missing docstring - "${files[@]}" ) -- cgit v1.2.3 From 1e6fc277a1c8d695c37741cc31f5ddab3d5b5600 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 16:08:17 -0500 Subject: remove dead code from DataSourceEc2 --- cloudinit/sources/DataSourceEc2.py | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 3686fa10..cff50669 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -86,9 +86,6 @@ class DataSourceEc2(sources.DataSource): def get_instance_id(self): return self.metadata['instance-id'] - def get_availability_zone(self): - return self.metadata['placement']['availability-zone'] - def _get_url_settings(self): mcfg = self.ds_cfg if not mcfg: @@ -198,19 +195,6 @@ class DataSourceEc2(sources.DataSource): return None return ofound - def is_vpc(self): - # See: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/615545 - # Detect that the machine was launched in a VPC. - # But I did notice that when in a VPC, meta-data - # does not have public-ipv4 and public-hostname - # listed as a possibility. 
- ph = "public-hostname" - p4 = "public-ipv4" - if ((ph not in self.metadata or self.metadata[ph] == "") and - (p4 not in self.metadata or self.metadata[p4] == "")): - return True - return False - @property def availability_zone(self): try: -- cgit v1.2.3 From 3248ac9bbb2008e88a3bd9c030ba0fcbc14b7fce Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 10 Nov 2012 22:32:49 -0500 Subject: whitespace / indentation cleanups These changes were pulled out of the previous merge (cc_yum_add_repo) as they were unrelated there. Re-applying them here. --- cloudinit/distros/__init__.py | 38 +++++++++++++--------------- cloudinit/sources/DataSourceAltCloud.py | 2 +- cloudinit/util.py | 3 +-- tests/unittests/test_runs/test_simple_run.py | 6 +++-- tools/hacking.py | 4 +-- 5 files changed, 26 insertions(+), 27 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index d2cb0a8b..8a98e334 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -187,23 +187,23 @@ class Distro(object): # inputs. If something goes wrong, we can end up with a system # that nobody can login to. adduser_opts = { - "gecos": '--comment', - "homedir": '--home', - "primary_group": '--gid', - "groups": '--groups', - "passwd": '--password', - "shell": '--shell', - "expiredate": '--expiredate', - "inactive": '--inactive', - "selinux_user": '--selinux-user', - } + "gecos": '--comment', + "homedir": '--home', + "primary_group": '--gid', + "groups": '--groups', + "passwd": '--password', + "shell": '--shell', + "expiredate": '--expiredate', + "inactive": '--inactive', + "selinux_user": '--selinux-user', + } adduser_opts_flags = { - "no_user_group": '--no-user-group', - "system": '--system', - "no_log_init": '--no-log-init', - "no_create_home": "-M", - } + "no_user_group": '--no-user-group', + "system": '--system', + "no_log_init": '--no-log-init', + "no_create_home": "-M", + } # Now check the value and create the command for option in kwargs: @@ -320,11 +320,9 @@ class Distro(object): raise e util.ensure_dir(path, 0750) - def write_sudo_rules(self, - user, - rules, - sudo_file="/etc/sudoers.d/90-cloud-init-users", - ): + def write_sudo_rules(self, user, rules, sudo_file=None): + if not sudo_file: + sudo_file = "/etc/sudoers.d/90-cloud-init-users" content_header = "# user rules for %s" % user content = "%s\n%s %s\n\n" % (content_header, user, rules) diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index d7e1204f..9812bdcb 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -47,7 +47,7 @@ META_DATA_NOT_SUPPORTED = { 'instance-id': 455, 'local-hostname': 'localhost', 'placement': {}, - } +} def read_user_data_callback(mount_dir): diff --git a/cloudinit/util.py b/cloudinit/util.py index 7890a3d6..4f5b15ee 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1193,8 +1193,7 @@ def yaml_dumps(obj): indent=4, explicit_start=True, explicit_end=True, - default_flow_style=False, - ) + default_flow_style=False) return formatted diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 22d6cf2c..60ef812a 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -37,11 +37,13 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): self.replicateTestRoot('simple_ubuntu', new_root) cfg = { 'datasource_list': ['None'], - 'write_files': [{ + 'write_files': 
[ + { 'path': '/etc/blah.ini', 'content': 'blah', 'permissions': 0755, - }], + }, + ], 'cloud_init_modules': ['write-files'], } cloud_cfg = util.yaml_dumps(cfg) diff --git a/tools/hacking.py b/tools/hacking.py index 11163df3..26a07c53 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -66,8 +66,8 @@ def cloud_import_alphabetical(physical_line, line_number, lines): # handle import x # use .lower since capitalization shouldn't dictate order split_line = import_normalize(physical_line.strip()).lower().split() - split_previous = import_normalize(lines[line_number - 2] - ).strip().lower().split() + split_previous = import_normalize(lines[line_number - 2]) + split_previous = split_previous.strip().lower().split() # with or without "as y" length = [2, 4] if (len(split_line) in length and len(split_previous) in length and -- cgit v1.2.3 From 71ba36704132ff8597dfc0e45b34e0c4424e239f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sun, 11 Nov 2012 21:49:10 -0500 Subject: config-drive-v2: populate metadata['public-keys'] from 'public_keys' other datasources populate 'public-keys' rather than 'public_keys' and there is a more complete handler in the base DataSource. So, to take advantage of that, have DataSourceConfigDrive copy public_keys to public-keys, and remove the 'get_public_ssh_keys' from the DataSourcEConfigDrive. LP: #1077700 --- cloudinit/sources/DataSourceConfigDrive.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 9729cfb9..dbbedce1 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -218,11 +218,6 @@ class DataSourceConfigDrive(sources.DataSource): return True - def get_public_ssh_keys(self): - if not 'public-keys' in self.metadata: - return [] - return self.metadata['public-keys'] - class DataSourceConfigDriveNet(DataSourceConfigDrive): def __init__(self, sys_cfg, distro, paths): @@ -331,6 +326,13 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"): except KeyError: raise BrokenConfigDriveDir("No uuid entry in metadata") + # other datasources (and config-drive-v1) populate metadata['public-keys'] + # where as with config-drive-v2, that would be 'public_keys'. 
So, just + # copy the field if it is present + if ('public_keys' in results['metadata'] and not + 'public-keys' in results['metadata']): + results['public-keys'] = results['public_keys'] + def read_content_path(item): # do not use os.path.join here, as content_path starts with / cpath = os.path.sep.join((source_dir, "openstack", -- cgit v1.2.3 From 2fabf3951d79ba67455a00895b5357fccf28f4f3 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 11:57:14 -0500 Subject: REVERT revno 714: config-drive-v2: populate metadata['public-keys'] from 'public_keys' --- cloudinit/sources/DataSourceConfigDrive.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index dbbedce1..9729cfb9 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -218,6 +218,11 @@ class DataSourceConfigDrive(sources.DataSource): return True + def get_public_ssh_keys(self): + if not 'public-keys' in self.metadata: + return [] + return self.metadata['public-keys'] + class DataSourceConfigDriveNet(DataSourceConfigDrive): def __init__(self, sys_cfg, distro, paths): @@ -326,13 +331,6 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"): except KeyError: raise BrokenConfigDriveDir("No uuid entry in metadata") - # other datasources (and config-drive-v1) populate metadata['public-keys'] - # where as with config-drive-v2, that would be 'public_keys'. So, just - # copy the field if it is present - if ('public_keys' in results['metadata'] and not - 'public-keys' in results['metadata']): - results['public-keys'] = results['public_keys'] - def read_content_path(item): # do not use os.path.join here, as content_path starts with / cpath = os.path.sep.join((source_dir, "openstack", -- cgit v1.2.3 From 8730e143ec07372107d794abe9f4857ead6d4718 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 12:23:44 -0500 Subject: pep8 and pylint fixups --- cloudinit/ec2_utils.py | 9 +++------ cloudinit/sources/DataSourceCloudStack.py | 2 +- cloudinit/sources/DataSourceEc2.py | 2 +- 3 files changed, 5 insertions(+), 8 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index a278ef04..32bf3968 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -17,10 +17,7 @@ # along with this program. If not, see . 
import pkg_resources -from pkg_resources import parse_version - -import cloudinit.util as util -import cloudinit.url_helper as uh +from pkg_resources import parse_version as pver import boto.utils as boto_utils @@ -36,7 +33,7 @@ import boto.utils as boto_utils BOTO_LAZY = False try: _boto_lib = pkg_resources.get_distribution('boto') - if _boto_lib.parsed_version > parse_version("2.5.2"): + if _boto_lib.parsed_version > pver("2.5.2"): # pylint: disable=E1103 BOTO_LAZY = True except pkg_resources.DistributionNotFound: pass @@ -47,7 +44,7 @@ def _unlazy_dict(mp): return mp if not BOTO_LAZY: return mp - for (k, v) in mp.items(): + for (_k, v) in mp.items(): _unlazy_dict(v) return mp diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 78cf24d7..076dba5a 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -116,7 +116,7 @@ class DataSourceCloudStack(sources.DataSource): return False start_time = time.time() self.userdata_raw = ec2.get_instance_userdata(self.api_ver, - self.metadata_address) + self.metadata_address) self.metadata = ec2.get_instance_metadata(self.api_ver, self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 6f51dfae..2db53446 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -65,7 +65,7 @@ class DataSourceEc2(sources.DataSource): return False start_time = time.time() self.userdata_raw = ec2.get_instance_userdata(self.api_ver, - self.metadata_address) + self.metadata_address) self.metadata = ec2.get_instance_metadata(self.api_ver, self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", -- cgit v1.2.3 From 7ba753720cd95bfca61c82445cf9c7882fe5d6f1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 12:26:49 -0500 Subject: config-drive-v2: support public keys This does a couple things: * separates out the 'normalize_public_keys' from the DataSource's get_public_ssh_keys * uses that from config-drive datasource * supports config drive v1 or v2 public-keys * adds a test. LP: #1077700 --- ChangeLog | 1 + cloudinit/sources/DataSourceConfigDrive.py | 7 +-- cloudinit/sources/__init__.py | 56 ++++++++++++---------- .../unittests/test_datasource/test_configdrive.py | 26 ++++++++++ 4 files changed, 61 insertions(+), 29 deletions(-) (limited to 'cloudinit/sources') diff --git a/ChangeLog b/ChangeLog index de1bcbff..a68e196e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -26,6 +26,7 @@ - work around the lazy loading of get_instance_metadata in boto >= 2.6.0 by fully walking the dictionary. (LP: #1068801) Added dependency on distribute's python-pkg-resources + - fix public key importing with config-drive-v2 datasource (LP: #1077700) 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. 
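Before the code hunks of the public-keys patch, note what the new helper does: the normalize_pubkey_data() function added to cloudinit/sources/__init__.py in the diff that follows accepts the string, list/set, or dict shapes that metadata services produce and always returns a flat list of key strings. A rough usage sketch of that behaviour follows; the key strings are fabricated placeholders, not values from any real metadata service.

# Illustrative only: exercises the normalize_pubkey_data() helper that the
# following hunk adds to cloudinit/sources/__init__.py; the key strings are
# fabricated placeholders.
from cloudinit import sources

# A newline-separated string becomes one key per line.
assert sources.normalize_pubkey_data("ssh-rsa AAA a@b\nssh-rsa BBB c@d") == [
    "ssh-rsa AAA a@b", "ssh-rsa BBB c@d"]

# The dict shape is flattened, and the empty trailing entry some services
# leave behind is dropped (lp:506332).
assert sources.normalize_pubkey_data({'mykey': ["ssh-rsa AAA a@b", ""]}) == [
    "ssh-rsa AAA a@b"]

# Missing metadata yields an empty list rather than an error.
assert sources.normalize_pubkey_data(None) == []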
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 9729cfb9..c7826851 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -219,9 +219,10 @@ class DataSourceConfigDrive(sources.DataSource): return True def get_public_ssh_keys(self): - if not 'public-keys' in self.metadata: - return [] - return self.metadata['public-keys'] + name = "public_keys" + if self.version == 1: + name = "public-keys" + return sources.normalize_pubkey_data(self.metadata.get(name)) class DataSourceConfigDriveNet(DataSourceConfigDrive): diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 745627d0..96baff90 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -100,32 +100,7 @@ class DataSource(object): return {} def get_public_ssh_keys(self): - keys = [] - - if not self.metadata or 'public-keys' not in self.metadata: - return keys - - if isinstance(self.metadata['public-keys'], (basestring, str)): - return str(self.metadata['public-keys']).splitlines() - - if isinstance(self.metadata['public-keys'], (list, set)): - return list(self.metadata['public-keys']) - - if isinstance(self.metadata['public-keys'], (dict)): - for (_keyname, klist) in self.metadata['public-keys'].iteritems(): - # lp:506332 uec metadata service responds with - # data that makes boto populate a string for 'klist' rather - # than a list. - if isinstance(klist, (str, basestring)): - klist = [klist] - if isinstance(klist, (list, set)): - for pkey in klist: - # There is an empty string at - # the end of the keylist, trim it - if pkey: - keys.append(pkey) - - return keys + return normalize_pubkey_data(self.metadata.get('public-keys')) def _remap_device(self, short_name): # LP: #611137 @@ -208,6 +183,35 @@ class DataSource(object): availability_zone=self.availability_zone) +def normalize_pubkey_data(pubkey_data): + keys = [] + + if not pubkey_data: + return keys + + if isinstance(pubkey_data, (basestring, str)): + return str(pubkey_data).splitlines() + + if isinstance(pubkey_data, (list, set)): + return list(pubkey_data) + + if isinstance(pubkey_data, (dict)): + for (_keyname, klist) in pubkey_data.iteritems(): + # lp:506332 uec metadata service responds with + # data that makes boto populate a string for 'klist' rather + # than a list. 
+ if isinstance(klist, (str, basestring)): + klist = [klist] + if isinstance(klist, (list, set)): + for pkey in klist: + # There is an empty string at + # the end of the keylist, trim it + if pkey: + keys.append(pkey) + + return keys + + def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list): ds_list = list_sources(cfg_list, ds_deps, pkg_list) ds_names = [util.obj_name(f) for f in ds_list] diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 00379e03..aa5b98ed 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -288,6 +288,32 @@ class TestConfigDriveDataSource(MockerTestCase): finally: util.find_devs_with = orig_find_devs_with + def test_pubkeys_v2(self): + """Verify that public-keys work in config-drive-v2.""" + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + myds = cfg_ds_from_dir(self.tmp) + self.assertEqual(myds.get_public_ssh_keys(), + [OSTACK_META['public_keys']['mykey']]) + + +def cfg_ds_from_dir(seed_d): + found = ds.read_config_drive_dir(seed_d) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None, + helpers.Paths({})) + populate_ds_from_read_config(cfg_ds, seed_d, found) + return cfg_ds + + +def populate_ds_from_read_config(cfg_ds, source, results): + """Patch the DataSourceConfigDrive from the results of + read_config_drive_dir hopefully in line with what it would have + if cfg_ds.get_data had been successfully called""" + cfg_ds.source = source + cfg_ds.metadata = results.get('metadata') + cfg_ds.ec2_metadata = results.get('ec2-metadata') + cfg_ds.userdata_raw = results.get('userdata') + cfg_ds.version = results.get('cfgdrive_ver') + def populate_dir(seed_dir, files): for (name, content) in files.iteritems(): -- cgit v1.2.3 From 8abbeae7ce15a6fb7a08adc697205d614f868a98 Mon Sep 17 00:00:00 2001 From: Gerard Dethier Date: Wed, 19 Dec 2012 09:27:33 -0500 Subject: DataSourceCloudStack: use virtual router rather than default route In CloudStack's documentation, it is stated that meta/user-data can be retrieved from CloudStack's Virtual Router [1]. However, cloud-init retrieves this information from the default gateway. The VR and the default gateway may be the same machine (i.e. have the same address) in some cases, but that is not always true (actually, in my case, it is not). This change searches the lease files in /var/lib/dhclient to pick out the dhcp-server-identifier. It admittedly does make this specific to dhclient. -- [1] http://incubator.apache.org/cloudstack/docs/en-US/Apache_CloudStack/4.0.0-incubating/html/Admin_Guide/user-data-and-meta-data.html LP: #1089989 --- cloudinit/sources/DataSourceCloudStack.py | 51 ++++++++++++++++++------------- 1 file changed, 30 insertions(+), 21 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 076dba5a..82e1e130 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -3,10 +3,12 @@ # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012 Cosmin Luta # Copyright (C) 2012 Yahoo! Inc.
+# Copyright (C) 2012 Gerard Dethier # # Author: Cosmin Luta # Author: Scott Moser # Author: Joshua Harlow +# Author: Gerard Dethier # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -20,9 +22,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -from socket import inet_ntoa -from struct import pack - import os import time @@ -40,24 +39,12 @@ class DataSourceCloudStack(sources.DataSource): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'cs') # Cloudstack has its metadata/userdata URLs located at - # http://<default-gateway-ip>/latest/ + # http://<virtual-router-ip>/latest/ self.api_ver = 'latest' - gw_addr = self.get_default_gateway() - if not gw_addr: - raise RuntimeError("No default gateway found!") - self.metadata_address = "http://%s/" % (gw_addr) - - def get_default_gateway(self): - """Returns the default gateway ip address in the dotted format.""" - lines = util.load_file("/proc/net/route").splitlines() - for line in lines: - items = line.split("\t") - if items[1] == "00000000": - # Found the default route, get the gateway - gw = inet_ntoa(pack("<L", int(items[2], 16))) - LOG.debug("Found default route, gateway is %s", gw) - return gw - return None + vr_addr = get_vr_address() + if not vr_addr: + raise RuntimeError("No virtual router found!") + self.metadata_address = "http://%s/" % (vr_addr) def __str__(self): return util.obj_name(self) @@ -102,6 +89,29 @@ class DataSourceCloudStack(sources.DataSource): return self.metadata['availability-zone'] +def get_vr_address(): + # Get the address of the virtual router, which in CloudStack is the + # machine that handed out the dhcp lease; search the dhclient lease + # files for the dhcp-server-identifier it sent + dhclient_d = "/var/lib/dhclient" + addresses = set() + for file_name in os.listdir(dhclient_d): + if not file_name.endswith(".leases"): + continue + lease_file = os.path.join(dhclient_d, file_name) + for line in util.load_file(lease_file).splitlines(): + if "dhcp-server-identifier" in line: + words = line.strip(" ;\r\n").split(" ") + if len(words) > 2: + dhcp = words[2] + LOG.debug("Found DHCP identifier %s", dhcp) + addresses.add(dhcp) + if len(addresses) != 1: + # No unique virtual router found + return None + return addresses.pop() + + # Used to match classes to dependencies datasources = [ (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), -- cgit v1.2.3
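The core of the virtual-router discovery in the final patch is the per-line lease parsing. Below is a minimal, self-contained sketch of that extraction, under the assumption (stated in the commit message) that dhclient lease entries carry an "option dhcp-server-identifier <addr>;" line; the sample lease text and the helper name are invented for illustration.

# Standalone sketch of the dhcp-server-identifier extraction performed by
# the patch above; the sample lease content and helper name are invented.
SAMPLE_LEASE = """\
lease {
  interface "eth0";
  option dhcp-server-identifier 10.1.1.1;
}
"""

def find_server_identifiers(lease_text):
    addresses = set()
    for line in lease_text.splitlines():
        if "dhcp-server-identifier" in line:
            # '  option dhcp-server-identifier 10.1.1.1;' strips and splits
            # into ['option', 'dhcp-server-identifier', '10.1.1.1']
            words = line.strip(" ;\r\n").split(" ")
            if len(words) > 2:
                addresses.add(words[2])
    return addresses

assert find_server_identifiers(SAMPLE_LEASE) == set(["10.1.1.1"])

Collecting the identifiers into a set and refusing to answer unless exactly one address is found, as the patch does, guards against stale leases from other interfaces silently pointing the datasource at the wrong router.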