-rw-r--r--  cloudinit/sources/DataSourceMAAS.py              226
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py           143
-rw-r--r--  cloudinit/sources/DataSourceOVF.py               227
-rw-r--r--  cloudinit/sources/__init__.py                     12
-rw-r--r--  cloudinit/transforms/__init__.py                 190
-rw-r--r--  cloudinit/transforms/cc_apt_pipelining.py          9
-rw-r--r--  cloudinit/transforms/cc_apt_update_upgrade.py    116
-rw-r--r--  cloudinit/transforms/cc_bootcmd.py                50
-rw-r--r--  cloudinit/transforms/cc_byobu.py                  22
-rw-r--r--  cloudinit/transforms/cc_ca_certs.py               25
-rw-r--r--  cloudinit/transforms/cc_chef.py                  101
-rw-r--r--  cloudinit/transforms/cc_disable_ec2_metadata.py   14
-rw-r--r--  cloudinit/transforms/cc_final_message.py          63
-rw-r--r--  cloudinit/transforms/cc_foo.py                    35
-rw-r--r--  cloudinit/transforms/cc_grub_dpkg.py              19
-rw-r--r--  cloudinit/transforms/cc_keys_to_console.py        14
-rw-r--r--  cloudinit/transforms/cc_landscape.py              43
-rw-r--r--  cloudinit/transforms/cc_locale.py                 36
-rw-r--r--  cloudinit/transforms/cc_mcollective.py            80
-rw-r--r--  cloudinit/transforms/cc_mounts.py                 84
-rw-r--r--  cloudinit/transforms/cc_phone_home.py             53
-rw-r--r--  cloudinit/transforms/cc_puppet.py                 94
-rw-r--r--  cloudinit/transforms/cc_resizefs.py              142
-rw-r--r--  cloudinit/transforms/cc_rightscale_userdata.py    62
-rw-r--r--  cloudinit/transforms/cc_rsyslog.py                52
-rw-r--r--  cloudinit/transforms/cc_runcmd.py                 15
-rw-r--r--  cloudinit/transforms/cc_salt_minion.py            49
-rw-r--r--  cloudinit/transforms/cc_scripts_per_boot.py       20
-rw-r--r--  cloudinit/transforms/cc_scripts_per_instance.py   20
-rw-r--r--  cloudinit/transforms/cc_scripts_per_once.py       20
-rw-r--r--  cloudinit/transforms/cc_scripts_user.py           19
-rw-r--r--  cloudinit/transforms/cc_set_hostname.py           23
-rw-r--r--  cloudinit/transforms/cc_set_passwords.py         108
-rw-r--r--  cloudinit/transforms/cc_ssh.py                    93
-rw-r--r--  cloudinit/transforms/cc_ssh_import_id.py          25
-rw-r--r--  cloudinit/transforms/cc_timezone.py               41
-rw-r--r--  cloudinit/transforms/cc_update_etc_hosts.py       82
-rw-r--r--  cloudinit/transforms/cc_update_hostname.py        80
38 files changed, 1113 insertions(+), 1394 deletions(-)
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 61a0038f..27196265 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -1,8 +1,10 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -16,22 +18,22 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.DataSource as DataSource
-
-from cloudinit import seeddir as base_seeddir
-from cloudinit import log
-import cloudinit.util as util
+import os
import errno
import oauth.oauth as oauth
-import os.path
-import urllib2
import time
+import urllib2
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import url_helper as uhelp
+from cloudinit import util
+LOG = logging.getLogger(__name__)
MD_VERSION = "2012-03-01"
-class DataSourceMAAS(DataSource.DataSource):
+class DataSourceMAAS(sources.DataSource):
"""
DataSourceMAAS reads instance information from MAAS.
Given a config metadata_url, and oauth tokens, it expects to find
@@ -40,61 +42,64 @@ class DataSourceMAAS(DataSource.DataSource):
user-data
hostname
"""
- seeddir = base_seeddir + '/maas'
- baseurl = None
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.base_url = None
+ self.seed_dir = os.path.join(paths.seed_dir, 'maas')
def __str__(self):
- return("DataSourceMAAS[%s]" % self.baseurl)
+ return "%s[%s]" % (util.obj_name(self), self.base_url)
def get_data(self):
mcfg = self.ds_cfg
try:
- (userdata, metadata) = read_maas_seed_dir(self.seeddir)
+ (userdata, metadata) = read_maas_seed_dir(self.seed_dir)
self.userdata_raw = userdata
self.metadata = metadata
- self.baseurl = self.seeddir
+ self.base_url = self.seed_dir
return True
except MAASSeedDirNone:
pass
except MAASSeedDirMalformed as exc:
- log.warn("%s was malformed: %s\n" % (self.seeddir, exc))
+ LOG.warn("%s was malformed: %s" % (self.seed_dir, exc))
raise
- try:
- # if there is no metadata_url, then we're not configured
- url = mcfg.get('metadata_url', None)
- if url == None:
- return False
+ # If there is no metadata_url, then we're not configured
+ url = mcfg.get('metadata_url', None)
+ if not url:
+ return False
+ try:
if not self.wait_for_metadata_service(url):
return False
- self.baseurl = url
+ self.base_url = url
- (userdata, metadata) = read_maas_seed_url(self.baseurl,
- self.md_headers)
+ (userdata, metadata) = read_maas_seed_url(self.base_url,
+ self.md_headers)
self.userdata_raw = userdata
self.metadata = metadata
return True
except Exception:
- util.logexc(log)
+ util.logexc(LOG, "Failed fetching metadata from url %s", url)
return False
def md_headers(self, url):
mcfg = self.ds_cfg
- # if we are missing token_key, token_secret or consumer_key
+ # If we are missing token_key, token_secret or consumer_key
# then just do non-authed requests
for required in ('token_key', 'token_secret', 'consumer_key'):
if required not in mcfg:
- return({})
+ return {}
consumer_secret = mcfg.get('consumer_secret', "")
-
- return(oauth_headers(url=url, consumer_key=mcfg['consumer_key'],
- token_key=mcfg['token_key'], token_secret=mcfg['token_secret'],
- consumer_secret=consumer_secret))
+ return oauth_headers(url=url,
+ consumer_key=mcfg['consumer_key'],
+ token_key=mcfg['token_key'],
+ token_secret=mcfg['token_secret'],
+ consumer_secret=consumer_secret)
def wait_for_metadata_service(self, url):
mcfg = self.ds_cfg
@@ -103,32 +108,31 @@ class DataSourceMAAS(DataSource.DataSource):
try:
max_wait = int(mcfg.get("max_wait", max_wait))
except Exception:
- util.logexc(log)
- log.warn("Failed to get max wait. using %s" % max_wait)
+ util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
if max_wait == 0:
return False
timeout = 50
try:
- timeout = int(mcfg.get("timeout", timeout))
+ if "timeout" in mcfg:
+ timeout = int(mcfg.get("timeout", timeout))
except Exception:
- util.logexc(log)
- log.warn("Failed to get timeout, using %s" % timeout)
+ LOG.warn("Failed to get timeout, using %s" % timeout)
starttime = time.time()
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
url = util.wait_for_url(urls=[check_url], max_wait=max_wait,
- timeout=timeout, status_cb=log.warn,
- headers_cb=self.md_headers)
+ timeout=timeout, status_cb=LOG.warn,
+ headers_cb=self.md_headers)
if url:
- log.debug("Using metadata source: '%s'" % url)
+ LOG.info("Using metadata source: '%s'", url)
else:
- log.critical("giving up on md after %i seconds\n" %
- int(time.time() - starttime))
+ LOG.critical("Giving up on md from %s after %i seconds",
+ urls, int(time.time() - starttime))
- return (bool(url))
+ return bool(url)
def read_maas_seed_dir(seed_d):
@@ -139,22 +143,19 @@ def read_maas_seed_dir(seed_d):
* local-hostname
* user-data
"""
- files = ('local-hostname', 'instance-id', 'user-data', 'public-keys')
- md = {}
-
if not os.path.isdir(seed_d):
raise MAASSeedDirNone("%s: not a directory")
+ files = ('local-hostname', 'instance-id', 'user-data', 'public-keys')
+ md = {}
for fname in files:
try:
- with open(os.path.join(seed_d, fname)) as fp:
- md[fname] = fp.read()
- fp.close()
+ md[fname] = util.load_file(os.path.join(seed_d, fname))
except IOError as e:
if e.errno != errno.ENOENT:
raise
- return(check_seed_contents(md, seed_d))
+ return check_seed_contents(md, seed_d)
def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
@@ -169,29 +170,26 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
* <seed_url>/<version>/meta-data/local-hostname
* <seed_url>/<version>/user-data
"""
- files = ('meta-data/local-hostname',
- 'meta-data/instance-id',
- 'meta-data/public-keys',
- 'user-data')
-
base_url = "%s/%s" % (seed_url, version)
+ files = {
+ 'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'),
+ 'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'),
+ 'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
+ 'user-data': "%s/%s" % (base_url, 'user-data'),
+ }
md = {}
- for fname in files:
- url = "%s/%s" % (base_url, fname)
+ for (name, url) in files.iteritems():
if header_cb:
headers = header_cb(url)
else:
headers = {}
-
try:
- req = urllib2.Request(url, data=None, headers=headers)
- resp = urllib2.urlopen(req, timeout=timeout)
- md[os.path.basename(fname)] = resp.read()
+ (resp, sc) = uhelp.readurl(url, headers=headers, timeout=timeout)
+ md[name] = resp
except urllib2.HTTPError as e:
if e.code != 404:
raise
-
- return(check_seed_contents(md, seed_url))
+ return check_seed_contents(md, seed_url)
def check_seed_contents(content, seed):
@@ -201,11 +199,10 @@ def check_seed_contents(content, seed):
Raise MAASSeedDirMalformed or MAASSeedDirNone
"""
md_required = ('instance-id', 'local-hostname')
- found = content.keys()
-
if len(content) == 0:
raise MAASSeedDirNone("%s: no data files found" % seed)
+ found = content.keys()
missing = [k for k in md_required if k not in found]
if len(missing):
raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
@@ -217,7 +214,7 @@ def check_seed_contents(content, seed):
continue
md[key] = val
- return(userdata, md)
+ return (userdata, md)
def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret):
@@ -232,8 +229,8 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret):
}
req = oauth.OAuthRequest(http_url=url, parameters=params)
req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(),
- consumer, token)
- return(req.to_header())
+ consumer, token)
+ return req.to_header()
class MAASSeedDirNone(Exception):
@@ -244,102 +241,11 @@ class MAASSeedDirMalformed(Exception):
pass
+# Used to match classes to dependencies
datasources = [
- (DataSourceMAAS, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+ (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
-
-# return a list of data sources that match this set of dependencies
+# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
- return(DataSource.list_from_depends(depends, datasources))
-
-
-if __name__ == "__main__":
- def main():
- """
- Call with single argument of directory or http or https url.
- If url is given additional arguments are allowed, which will be
- interpreted as consumer_key, token_key, token_secret, consumer_secret
- """
- import argparse
- import pprint
-
- parser = argparse.ArgumentParser(description='Interact with MAAS DS')
- parser.add_argument("--config", metavar="file",
- help="specify DS config file", default=None)
- parser.add_argument("--ckey", metavar="key",
- help="the consumer key to auth with", default=None)
- parser.add_argument("--tkey", metavar="key",
- help="the token key to auth with", default=None)
- parser.add_argument("--csec", metavar="secret",
- help="the consumer secret (likely '')", default="")
- parser.add_argument("--tsec", metavar="secret",
- help="the token secret to auth with", default=None)
- parser.add_argument("--apiver", metavar="version",
- help="the apiver to use ("" can be used)", default=MD_VERSION)
-
- subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
- subcmds.add_parser('crawl', help="crawl the datasource")
- subcmds.add_parser('get', help="do a single GET of provided url")
- subcmds.add_parser('check-seed', help="read andn verify seed at url")
-
- parser.add_argument("url", help="the data source to query")
-
- args = parser.parse_args()
-
- creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
- 'token_secret': args.tsec, 'consumer_secret': args.csec}
-
- if args.config:
- import yaml
- with open(args.config) as fp:
- cfg = yaml.load(fp)
- if 'datasource' in cfg:
- cfg = cfg['datasource']['MAAS']
- for key in creds.keys():
- if key in cfg and creds[key] == None:
- creds[key] = cfg[key]
-
- def geturl(url, headers_cb):
- req = urllib2.Request(url, data=None, headers=headers_cb(url))
- return(urllib2.urlopen(req).read())
-
- def printurl(url, headers_cb):
- print "== %s ==\n%s\n" % (url, geturl(url, headers_cb))
-
- def crawl(url, headers_cb=None):
- if url.endswith("/"):
- for line in geturl(url, headers_cb).splitlines():
- if line.endswith("/"):
- crawl("%s%s" % (url, line), headers_cb)
- else:
- printurl("%s%s" % (url, line), headers_cb)
- else:
- printurl(url, headers_cb)
-
- def my_headers(url):
- headers = {}
- if creds.get('consumer_key', None) != None:
- headers = oauth_headers(url, **creds)
- return headers
-
- if args.subcmd == "check-seed":
- if args.url.startswith("http"):
- (userdata, metadata) = read_maas_seed_url(args.url,
- header_cb=my_headers, version=args.apiver)
- else:
- (userdata, metadata) = read_maas_seed_url(args.url)
- print "=== userdata ==="
- print userdata
- print "=== metadata ==="
- pprint.pprint(metadata)
-
- elif args.subcmd == "get":
- printurl(args.url, my_headers)
-
- elif args.subcmd == "crawl":
- if not args.url.endswith("/"):
- args.url = "%s/" % args.url
- crawl(args.url, my_headers)
-
- main()
+ return sources.list_from_depends(depends, datasources)
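
Note: the datasources list and get_datasource_list() pairing that closes this file (and each of the datasource modules below) is how the loader selects classes by dependency set. The following is a minimal, self-contained sketch of that matching, using a simplified stand-in for sources.list_from_depends() and placeholder DEP_* values; it is for illustration only, not the real helper.

    # Simplified stand-in for sources.list_from_depends(): pick the datasource
    # classes whose declared dependency tuple matches the dependencies available.
    DEP_FILESYSTEM = "FILESYSTEM"
    DEP_NETWORK = "NETWORK"

    class DataSourceMAAS(object):  # stand-in class, not the real datasource
        pass

    datasources = [
        (DataSourceMAAS, (DEP_FILESYSTEM, DEP_NETWORK)),
    ]

    def list_from_depends(depends, dslist):
        depset = set(depends)
        return [cls for (cls, deps) in dslist if set(deps) == depset]

    print(list_from_depends([DEP_FILESYSTEM, DEP_NETWORK], datasources))
    # -> [<class '__main__.DataSourceMAAS'>]
    print(list_from_depends([DEP_FILESYSTEM], datasources))
    # -> []
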
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index e8c56b8f..84d0f99d 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -2,9 +2,11 @@
#
# Copyright (C) 2009-2010 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -18,33 +20,34 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.DataSource as DataSource
-
-from cloudinit import seeddir as base_seeddir
-from cloudinit import log
-import cloudinit.util as util
import errno
-import subprocess
+import os
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
-class DataSourceNoCloud(DataSource.DataSource):
- metadata = None
- userdata = None
- userdata_raw = None
- supported_seed_starts = ("/", "file://")
- dsmode = "local"
- seed = None
- cmdline_id = "ds=nocloud"
- seeddir = base_seeddir + '/nocloud'
+class DataSourceNoCloud(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.dsmode = 'local'
+ self.seed = None
+ self.cmdline_id = "ds=nocloud"
+ self.seed_dir = os.path.join(paths.seed_dir, 'nocloud')
+ self.supported_seed_starts = ("/", "file://")
def __str__(self):
- mstr = "DataSourceNoCloud"
- mstr = mstr + " [seed=%s]" % self.seed
- return(mstr)
+ mstr = "%s [seed=%s][dsmode=%s]" % (util.obj_name(self),
+ self.seed, self.dsmode)
+ return mstr
def get_data(self):
defaults = {
- "instance-id": "nocloud", "dsmode": self.dsmode
+ "instance-id": "nocloud",
+ "dsmode": self.dsmode,
}
found = []
@@ -52,24 +55,24 @@ class DataSourceNoCloud(DataSource.DataSource):
ud = ""
try:
- # parse the kernel command line, getting data passed in
+ # Parse the kernel command line, getting data passed in
if parse_cmdline_data(self.cmdline_id, md):
found.append("cmdline")
except:
- util.logexc(log)
+ util.logexc(LOG, "Unable to parse command line data")
return False
- # check to see if the seeddir has data.
+ # Check to see if the seed dir has data.
seedret = {}
- if util.read_optional_seed(seedret, base=self.seeddir + "/"):
+ if util.read_optional_seed(seedret, base=self.seed_dir + "/"):
md = util.mergedict(md, seedret['meta-data'])
ud = seedret['user-data']
- found.append(self.seeddir)
- log.debug("using seeded cache data in %s" % self.seeddir)
+ found.append(self.seed_dir)
+ LOG.debug("Using seeded cache data from %s", self.seed_dir)
- # if the datasource config had a 'seedfrom' entry, then that takes
+ # If the datasource config had a 'seedfrom' entry, then that takes
# precedence over a 'seedfrom' that was found in a filesystem
- # but not over external medi
+ # but not over external media
if 'seedfrom' in self.ds_cfg and self.ds_cfg['seedfrom']:
found.append("ds_config")
md["seedfrom"] = self.ds_cfg['seedfrom']
@@ -83,35 +86,36 @@ class DataSourceNoCloud(DataSource.DataSource):
for dev in devlist:
try:
- (newmd, newud) = util.mount_callback_umount(dev,
- util.read_seeded)
+ LOG.debug("Attempting to use data from %s", dev)
+
+ (newmd, newud) = util.mount_cb(dev, util.read_seeded)
md = util.mergedict(newmd, md)
ud = newud
- # for seed from a device, the default mode is 'net'.
+ # For seed from a device, the default mode is 'net'.
# that is more likely to be what is desired.
# If they want dsmode of local, then they must
# specify that.
if 'dsmode' not in md:
md['dsmode'] = "net"
- log.debug("using data from %s" % dev)
+ LOG.debug("Using data from %s", dev)
found.append(dev)
break
- except OSError, e:
+ except OSError as e:
if e.errno != errno.ENOENT:
raise
- except util.mountFailedError:
- log.warn("Failed to mount %s when looking for seed" % dev)
+ except util.MountFailedError:
+ util.logexc(LOG, "Failed to mount %s when looking for seed", dev)
- # there was no indication on kernel cmdline or data
+ # There was no indication on kernel cmdline or data
# in the seeddir suggesting this handler should be used.
if len(found) == 0:
return False
seeded_interfaces = None
- # the special argument "seedfrom" indicates we should
+ # The special argument "seedfrom" indicates we should
# attempt to seed the userdata / metadata from its value
# its primarily value is in allowing the user to type less
# on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
@@ -123,57 +127,46 @@ class DataSourceNoCloud(DataSource.DataSource):
seedfound = proto
break
if not seedfound:
- log.debug("seed from %s not supported by %s" %
- (seedfrom, self.__class__))
+ LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
if 'network-interfaces' in md:
seeded_interfaces = self.dsmode
- # this could throw errors, but the user told us to do it
+ # This could throw errors, but the user told us to do it
# so if errors are raised, let them raise
(md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
- log.debug("using seeded cache data from %s" % seedfrom)
+ LOG.debug("Using seeded cache data from %s", seedfrom)
- # values in the command line override those from the seed
+ # Values in the command line override those from the seed
md = util.mergedict(md, md_seed)
found.append(seedfrom)
+ # Now that we have exhausted any other places merge in the defaults
md = util.mergedict(md, defaults)
- # update the network-interfaces if metadata had 'network-interfaces'
+ # Update the network-interfaces if metadata had 'network-interfaces'
# entry and this is the local datasource, or 'seedfrom' was used
# and the source of the seed was self.dsmode
# ('local' for NoCloud, 'net' for NoCloudNet')
if ('network-interfaces' in md and
(self.dsmode in ("local", seeded_interfaces))):
- log.info("updating network interfaces from nocloud")
-
- util.write_file("/etc/network/interfaces",
- md['network-interfaces'])
- try:
- (out, err) = util.subp(['ifup', '--all'])
- if len(out) or len(err):
- log.warn("ifup --all had stderr: %s" % err)
-
- except subprocess.CalledProcessError as exc:
- log.warn("ifup --all failed: %s" % (exc.output[1]))
-
- self.seed = ",".join(found)
- self.metadata = md
- self.userdata_raw = ud
-
+ LOG.info("Updating network interfaces from %s", self)
+ self.distro.apply_network(md['network-interfaces'])
+
if md['dsmode'] == self.dsmode:
+ self.seed = ",".join(found)
+ self.metadata = md
+ self.userdata_raw = ud
return True
- log.debug("%s: not claiming datasource, dsmode=%s" %
- (self, md['dsmode']))
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
return False
-# returns true or false indicating if cmdline indicated
+# Returns true or false indicating if cmdline indicated
# that this module should be used
-# example cmdline:
+# Example cmdline:
# root=LABEL=uec-rootfs ro ds=nocloud
def parse_cmdline_data(ds_id, fill, cmdline=None):
if cmdline is None:
@@ -210,23 +203,25 @@ def parse_cmdline_data(ds_id, fill, cmdline=None):
k = s2l[k]
fill[k] = v
- return(True)
+ return True
class DataSourceNoCloudNet(DataSourceNoCloud):
- cmdline_id = "ds=nocloud-net"
- supported_seed_starts = ("http://", "https://", "ftp://")
- seeddir = base_seeddir + '/nocloud-net'
- dsmode = "net"
+ def __init__(self, sys_cfg, distro, paths):
+ DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
+ self.cmdline_id = "ds=nocloud-net"
+ self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.seed_dir = os.path.join(paths.seed_dir, 'nocloud-net')
+ self.dsmode = "net"
-datasources = (
- (DataSourceNoCloud, (DataSource.DEP_FILESYSTEM, )),
- (DataSourceNoCloudNet,
- (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
-)
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
+ (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
-# return a list of data sources that match this set of dependencies
+# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
- return(DataSource.list_from_depends(depends, datasources))
+ return sources.list_from_depends(depends, datasources)
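
Note: parse_cmdline_data(), referenced above, consumes "ds=nocloud;key=value;..." arguments from the kernel command line, with "s" and "h" as shorthands for seedfrom and local-hostname. The following is a hypothetical, simplified parser written only to illustrate that idea; it is not the real function.

    def parse_nocloud_args(cmdline, ds_id="ds=nocloud"):
        shorthand = {"s": "seedfrom", "h": "local-hostname"}
        md = {}
        for tok in cmdline.split():
            if tok != ds_id and not tok.startswith(ds_id + ";"):
                continue
            # Everything after the first ';' is a list of key=value pairs.
            for kv in tok.split(";")[1:]:
                if "=" not in kv:
                    continue
                (k, v) = kv.split("=", 1)
                md[shorthand.get(k, k)] = v
            return md
        return None

    print(parse_nocloud_args(
        "root=LABEL=uec-rootfs ro ds=nocloud;s=http://bit.ly/abcdefg"))
    # -> {'seedfrom': 'http://bit.ly/abcdefg'}
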
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index a0b1b518..bb0f46c2 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -2,9 +2,11 @@
#
# Copyright (C) 2011 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -18,33 +20,30 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.DataSource as DataSource
-
-from cloudinit import seeddir as base_seeddir
-from cloudinit import log
-import cloudinit.util as util
-import os.path
-import os
from xml.dom import minidom
import base64
+import os
import re
import tempfile
-import subprocess
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
-class DataSourceOVF(DataSource.DataSource):
- seed = None
- seeddir = base_seeddir + '/ovf'
- environment = None
- cfg = {}
- userdata_raw = None
- metadata = None
- supported_seed_starts = ("/", "file://")
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceOVF(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed = None
+ self.seed_dir = os.path.join(paths.seed_dir, 'ovf')
+ self.environment = None
+ self.cfg = {}
+ self.supported_seed_starts = ("/", "file://")
def __str__(self):
- mstr = "DataSourceOVF"
- mstr = mstr + " [seed=%s]" % self.seed
- return(mstr)
+ return "%s [seed=%s]" % (util.obj_name(self), self.seed)
def get_data(self):
found = []
@@ -55,13 +54,12 @@ class DataSourceOVF(DataSource.DataSource):
"instance-id": "iid-dsovf"
}
- (seedfile, contents) = get_ovf_env(base_seeddir)
+ (seedfile, contents) = get_ovf_env(self.paths.seed_dir)
if seedfile:
- # found a seed dir
- seed = "%s/%s" % (base_seeddir, seedfile)
+ # Found a seed dir
+ seed = os.path.join(self.paths.seed_dir, seedfile)
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
-
found.append(seed)
else:
np = {'iso': transport_iso9660,
@@ -71,7 +69,6 @@ class DataSourceOVF(DataSource.DataSource):
(contents, _dev, _fname) = transfunc()
if contents:
break
-
if contents:
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
@@ -89,17 +86,19 @@ class DataSourceOVF(DataSource.DataSource):
seedfound = proto
break
if not seedfound:
- log.debug("seed from %s not supported by %s" %
- (seedfrom, self.__class__))
+ LOG.debug("Seed from %s not supported by %s",
+ seedfrom, self)
return False
(md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
- log.debug("using seeded cache data from %s" % seedfrom)
+ LOG.debug("Using seeded cache data from %s", seedfrom)
md = util.mergedict(md, md_seed)
found.append(seedfrom)
+ # Now that we have exhausted any other places merge in the defaults
md = util.mergedict(md, defaults)
+
self.seed = ",".join(found)
self.metadata = md
self.userdata_raw = ud
@@ -108,31 +107,37 @@ class DataSourceOVF(DataSource.DataSource):
def get_public_ssh_keys(self):
if not 'public-keys' in self.metadata:
- return([])
- return([self.metadata['public-keys'], ])
+ return []
+ pks = self.metadata['public-keys']
+ if isinstance(pks, (list)):
+ return pks
+ else:
+ return [pks]
- # the data sources' config_obj is a cloud-config formated
+ # The data sources' config_obj is a cloud-config formatted
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
def get_config_obj(self):
- return(self.cfg)
+ return self.cfg
class DataSourceOVFNet(DataSourceOVF):
- seeddir = base_seeddir + '/ovf-net'
- supported_seed_starts = ("http://", "https://", "ftp://")
+ def __init__(self, sys_cfg, distro, paths):
+ DataSourceOVF.__init__(self, sys_cfg, distro, paths)
+ self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
+ self.supported_seed_starts = ("http://", "https://", "ftp://")
-# this will return a dict with some content
-# meta-data, user-data
+# This will return a dict with some content
+# meta-data, user-data, some config
def read_ovf_environment(contents):
- props = getProperties(contents)
+ props = get_properties(contents)
md = {}
cfg = {}
ud = ""
- cfg_props = ['password', ]
+ cfg_props = ['password']
md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
- for prop, val in props.iteritems():
+ for (prop, val) in props.iteritems():
if prop == 'hostname':
prop = "local-hostname"
if prop in md_props:
@@ -144,23 +149,25 @@ def read_ovf_environment(contents):
ud = base64.decodestring(val)
except:
ud = val
- return(md, ud, cfg)
+ return (md, ud, cfg)
-# returns tuple of filename (in 'dirname', and the contents of the file)
+# Returns tuple of filename (in 'dirname', and the contents of the file)
# on "not found", returns 'None' for filename and False for contents
def get_ovf_env(dirname):
env_names = ("ovf-env.xml", "ovf_env.xml", "OVF_ENV.XML", "OVF-ENV.XML")
for fname in env_names:
- if os.path.isfile("%s/%s" % (dirname, fname)):
- fp = open("%s/%s" % (dirname, fname))
- contents = fp.read()
- fp.close()
- return(fname, contents)
- return(None, False)
+ full_fn = os.path.join(dirname, fname)
+ if os.path.isfile(full_fn):
+ try:
+ contents = util.load_file(full_fn)
+ return (fname, contents)
+ except:
+ util.logexc(LOG, "Failed loading ovf file %s", full_fn)
+ return (None, False)
-# transport functions take no input and return
+# Transport functions take no input and return
# a 3 tuple of content, path, filename
def transport_iso9660(require_iso=True):
@@ -173,79 +180,45 @@ def transport_iso9660(require_iso=True):
devname_regex = os.environ.get(envname, default_regex)
cdmatch = re.compile(devname_regex)
- # go through mounts to see if it was already mounted
- fp = open("/proc/mounts")
- mounts = fp.readlines()
- fp.close()
-
- mounted = {}
- for mpline in mounts:
- (dev, mp, fstype, _opts, _freq, _passno) = mpline.split()
- mounted[dev] = (dev, fstype, mp, False)
- mp = mp.replace("\\040", " ")
+ # Go through mounts to see if it was already mounted
+ mounts = util.mounts()
+ for (dev, info) in mounts.iteritems():
+ fstype = info['fstype']
if fstype != "iso9660" and require_iso:
continue
-
if cdmatch.match(dev[5:]) == None: # take off '/dev/'
continue
-
+ mp = info['mountpoint']
(fname, contents) = get_ovf_env(mp)
if contents is not False:
- return(contents, dev, fname)
-
- tmpd = None
- dvnull = None
+ return (contents, dev, fname)
devs = os.listdir("/dev/")
devs.sort()
-
for dev in devs:
- fullp = "/dev/%s" % dev
+ fullp = os.path.join("/dev/", dev)
- if fullp in mounted or not cdmatch.match(dev) or os.path.isdir(fullp):
+ if (fullp in mounts or
+ not cdmatch.match(dev) or os.path.isdir(fullp)):
continue
- fp = None
try:
- fp = open(fullp, "rb")
- fp.read(512)
- fp.close()
+ # See if we can read anything at all...??
+ with open(fullp, 'rb') as fp:
+ fp.read(512)
except:
- if fp:
- fp.close()
continue
- if tmpd is None:
- tmpd = tempfile.mkdtemp()
- if dvnull is None:
- try:
- dvnull = open("/dev/null")
- except:
- pass
-
- cmd = ["mount", "-o", "ro", fullp, tmpd]
- if require_iso:
- cmd.extend(('-t', 'iso9660'))
-
- rc = subprocess.call(cmd, stderr=dvnull, stdout=dvnull, stdin=dvnull)
- if rc:
+ try:
+ (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype="iso9660")
+ except util.MountFailedError:
+ util.logexc(LOG, "Failed mounting %s", fullp)
continue
- (fname, contents) = get_ovf_env(tmpd)
-
- subprocess.call(["umount", tmpd])
-
if contents is not False:
- os.rmdir(tmpd)
- return(contents, fullp, fname)
-
- if tmpd:
- os.rmdir(tmpd)
-
- if dvnull:
- dvnull.close()
+ return (contents, fullp, fname)
- return(False, None, None)
+ return (False, None, None)
def transport_vmware_guestd():
@@ -259,74 +232,60 @@ def transport_vmware_guestd():
# # would need to error check here and see why this failed
# # to know if log/error should be raised
# return(False, None, None)
- return(False, None, None)
+ return (False, None, None)
-def findChild(node, filter_func):
+def find_child(node, filter_func):
ret = []
if not node.hasChildNodes():
return ret
for child in node.childNodes:
if filter_func(child):
ret.append(child)
- return(ret)
+ return ret
-def getProperties(environString):
- dom = minidom.parseString(environString)
+def get_properties(contents):
+
+ dom = minidom.parseString(contents)
if dom.documentElement.localName != "Environment":
- raise Exception("No Environment Node")
+ raise XmlError("No Environment Node")
if not dom.documentElement.hasChildNodes():
- raise Exception("No Child Nodes")
+ raise XmlError("No Child Nodes")
envNsURI = "http://schemas.dmtf.org/ovf/environment/1"
# could also check here that elem.namespaceURI ==
# "http://schemas.dmtf.org/ovf/environment/1"
- propSections = findChild(dom.documentElement,
+ propSections = find_child(dom.documentElement,
lambda n: n.localName == "PropertySection")
if len(propSections) == 0:
- raise Exception("No 'PropertySection's")
+ raise XmlError("No 'PropertySection's")
props = {}
- propElems = findChild(propSections[0], lambda n: n.localName == "Property")
+ propElems = find_child(propSections[0], lambda n: n.localName == "Property")
for elem in propElems:
key = elem.attributes.getNamedItemNS(envNsURI, "key").value
val = elem.attributes.getNamedItemNS(envNsURI, "value").value
props[key] = val
- return(props)
+ return props
+
+
+class XmlError(Exception):
+ pass
+# Used to match classes to dependencies
datasources = (
- (DataSourceOVF, (DataSource.DEP_FILESYSTEM, )),
- (DataSourceOVFNet,
- (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+ (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
)
-# return a list of data sources that match this set of dependencies
+# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
- return(DataSource.list_from_depends(depends, datasources))
-
-
-if __name__ == "__main__":
- def main():
- import sys
- envStr = open(sys.argv[1]).read()
- props = getProperties(envStr)
- import pprint
- pprint.pprint(props)
-
- md, ud, cfg = read_ovf_environment(envStr)
- print "=== md ==="
- pprint.pprint(md)
- print "=== ud ==="
- pprint.pprint(ud)
- print "=== cfg ==="
- pprint.pprint(cfg)
-
- main()
+ return sources.list_from_depends(depends, datasources)
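
Note: get_properties() above walks an OVF environment document with minidom and reads the oe:key/oe:value attributes out of its PropertySection. The following self-contained illustration shows the same approach on a made-up sample document (the XML is not from the source tree); it uses getElementsByTagNameNS rather than the module's find_child() helper.

    from xml.dom import minidom

    SAMPLE = """<?xml version="1.0"?>
    <Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
                 xmlns:oe="http://schemas.dmtf.org/ovf/environment/1">
      <PropertySection>
        <Property oe:key="hostname" oe:value="ovf-host"/>
        <Property oe:key="seedfrom" oe:value="http://example.invalid/seed/"/>
      </PropertySection>
    </Environment>"""

    ENV_NS = "http://schemas.dmtf.org/ovf/environment/1"

    dom = minidom.parseString(SAMPLE)
    props = {}
    for section in dom.documentElement.getElementsByTagNameNS(ENV_NS,
                                                               "PropertySection"):
        for elem in section.getElementsByTagNameNS(ENV_NS, "Property"):
            key = elem.attributes.getNamedItemNS(ENV_NS, "key").value
            val = elem.attributes.getNamedItemNS(ENV_NS, "value").value
            props[key] = val

    print(props)
    # -> {'hostname': 'ovf-host', 'seedfrom': 'http://example.invalid/seed/'}
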
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index dfd1fff3..08669f5d 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -39,10 +39,6 @@ class DataSourceNotFoundException(Exception):
class DataSource(object):
def __init__(self, sys_cfg, distro, paths):
- name = util.obj_name(self)
- if name.startswith(DS_PREFIX):
- name = name[DS_PREFIX:]
- self.cfgname = name
self.sys_cfg = sys_cfg
self.distro = distro
self.paths = paths
@@ -50,8 +46,11 @@ class DataSource(object):
self.userdata = None
self.metadata = None
self.userdata_raw = None
+ name = util.obj_name(self)
+ if name.startswith(DS_PREFIX):
+ name = name[len(DS_PREFIX):]
self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
- ("datasource", self.cfgname), {})
+ ("datasource", name), {})
def get_userdata(self):
if self.userdata is None:
@@ -112,6 +111,7 @@ class DataSource(object):
def get_instance_id(self):
if not self.metadata or 'instance-id' not in self.metadata:
+ # Return a magic not really instance id string
return "iid-datasource"
return str(self.metadata['instance-id'])
@@ -166,7 +166,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
if s.get_data():
return (s, ds)
except Exception as e:
- LOG.exception("Getting data from %s failed due to %s", ds, e)
+ util.logexc(LOG, "Getting data from %s failed", ds)
msg = "Did not find any data source, searched classes: %s" % (ds_names)
raise DataSourceNotFoundException(msg)
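
Note: the reordering in DataSource.__init__ above only moves the DS_PREFIX stripping after the base attributes are set; the effect is unchanged: the class name, minus its DataSource prefix, selects the per-datasource sub-dict of the system config. A small sketch of that lookup, with get_cfg_by_path written inline as a stand-in for the cloudinit.util helper:

    DS_PREFIX = "DataSource"

    def get_cfg_by_path(cfg, path, default=None):
        # Stand-in for util.get_cfg_by_path(): walk nested dicts by key path.
        cur = cfg
        for key in path:
            if not isinstance(cur, dict) or key not in cur:
                return default
            cur = cur[key]
        return cur

    sys_cfg = {"datasource": {"MAAS": {"metadata_url": "http://169.254.169.254/MAAS"}}}
    name = "DataSourceMAAS"
    if name.startswith(DS_PREFIX):
        name = name[len(DS_PREFIX):]
    print(get_cfg_by_path(sys_cfg, ("datasource", name), {}))
    # -> {'metadata_url': 'http://169.254.169.254/MAAS'}
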
diff --git a/cloudinit/transforms/__init__.py b/cloudinit/transforms/__init__.py
index 5d70ac43..8275b375 100644
--- a/cloudinit/transforms/__init__.py
+++ b/cloudinit/transforms/__init__.py
@@ -19,183 +19,12 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
-import os
-import subprocess
-import sys
-import time
-import traceback
-
-import yaml
-
-from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE)
+from cloudinit.settings import (PER_INSTANCE, FREQUENCIES)
from cloudinit import log as logging
-from cloudinit import util
LOG = logging.getLogger(__name__)
-DEF_HANDLER_VERSION = 1
-DEF_FREQ = PER_INSTANCE
-
-
-# reads a cloudconfig module list, returns
-# a 2 dimensional array suitable to pass to run_cc_modules
-def read_cc_modules(cfg, name):
- if name not in cfg:
- return([])
- module_list = []
- # create 'module_list', an array of arrays
- # where array[0] = config
- # array[1] = freq
- # array[2:] = arguemnts
- for item in cfg[name]:
- if isinstance(item, str):
- module_list.append((item,))
- elif isinstance(item, list):
- module_list.append(item)
- else:
- raise TypeError("failed to read '%s' item in config")
- return(module_list)
-
-
-def run_cc_modules(cc, module_list, log):
- failures = []
- for cfg_mod in module_list:
- name = cfg_mod[0]
- freq = None
- run_args = []
- if len(cfg_mod) > 1:
- freq = cfg_mod[1]
- if len(cfg_mod) > 2:
- run_args = cfg_mod[2:]
-
- try:
- log.debug("handling %s with freq=%s and args=%s" %
- (name, freq, run_args))
- cc.handle(name, run_args, freq=freq)
- except:
- log.warn(traceback.format_exc())
- log.error("config handling of %s, %s, %s failed\n" %
- (name, freq, run_args))
- failures.append(name)
-
- return(failures)
-
-
-# always returns well formated values
-# cfg is expected to have an entry 'output' in it, which is a dictionary
-# that includes entries for 'init', 'config', 'final' or 'all'
-# init: /var/log/cloud.out
-# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
-# final:
-# output: "| logger -p"
-# error: "> /dev/null"
-# this returns the specific 'mode' entry, cleanly formatted, with value
-# None if if none is given
-def get_output_cfg(cfg, mode="init"):
- ret = [None, None]
- if not 'output' in cfg:
- return ret
-
- outcfg = cfg['output']
- if mode in outcfg:
- modecfg = outcfg[mode]
- else:
- if 'all' not in outcfg:
- return ret
- # if there is a 'all' item in the output list
- # then it applies to all users of this (init, config, final)
- modecfg = outcfg['all']
-
- # if value is a string, it specifies stdout and stderr
- if isinstance(modecfg, str):
- ret = [modecfg, modecfg]
-
- # if its a list, then we expect (stdout, stderr)
- if isinstance(modecfg, list):
- if len(modecfg) > 0:
- ret[0] = modecfg[0]
- if len(modecfg) > 1:
- ret[1] = modecfg[1]
-
- # if it is a dictionary, expect 'out' and 'error'
- # items, which indicate out and error
- if isinstance(modecfg, dict):
- if 'output' in modecfg:
- ret[0] = modecfg['output']
- if 'error' in modecfg:
- ret[1] = modecfg['error']
-
- # if err's entry == "&1", then make it same as stdout
- # as in shell syntax of "echo foo >/dev/null 2>&1"
- if ret[1] == "&1":
- ret[1] = ret[0]
-
- swlist = [">>", ">", "|"]
- for i in range(len(ret)):
- if not ret[i]:
- continue
- val = ret[i].lstrip()
- found = False
- for s in swlist:
- if val.startswith(s):
- val = "%s %s" % (s, val[len(s):].strip())
- found = True
- break
- if not found:
- # default behavior is append
- val = "%s %s" % (">>", val.strip())
- ret[i] = val
-
- return(ret)
-
-
-# redirect_output(outfmt, errfmt, orig_out, orig_err)
-# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
-# fmt can be:
-# > FILEPATH
-# >> FILEPATH
-# | program [ arg1 [ arg2 [ ... ] ] ]
-#
-# with a '|', arguments are passed to shell, so one level of
-# shell escape is required.
-def redirect_output(outfmt, errfmt, o_out=sys.stdout, o_err=sys.stderr):
- if outfmt:
- (mode, arg) = outfmt.split(" ", 1)
- if mode == ">" or mode == ">>":
- owith = "ab"
- if mode == ">":
- owith = "wb"
- new_fp = open(arg, owith)
- elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin
- else:
- raise TypeError("invalid type for outfmt: %s" % outfmt)
-
- if o_out:
- os.dup2(new_fp.fileno(), o_out.fileno())
- if errfmt == outfmt:
- os.dup2(new_fp.fileno(), o_err.fileno())
- return
-
- if errfmt:
- (mode, arg) = errfmt.split(" ", 1)
- if mode == ">" or mode == ">>":
- owith = "ab"
- if mode == ">":
- owith = "wb"
- new_fp = open(arg, owith)
- elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin
- else:
- raise TypeError("invalid type for outfmt: %s" % outfmt)
-
- if o_err:
- os.dup2(new_fp.fileno(), o_err.fileno())
- return
-
def form_module_name(name):
canon_name = name.replace("-", "_")
@@ -209,13 +38,18 @@ def form_module_name(name):
return canon_name
-def fixup_module(mod):
- freq = getattr(mod, "frequency", None)
- if not freq:
- setattr(mod, 'frequency', PER_INSTANCE)
- handler = getattr(mod, "handle", None)
- if not handler:
+def fixup_module(mod, def_freq=PER_INSTANCE):
+ if not hasattr(mod, 'frequency'):
+ setattr(mod, 'frequency', def_freq)
+ else:
+ freq = mod.frequency
+ if freq and freq not in FREQUENCIES:
+ LOG.warn("Module %s has an unknown frequency %s", mod, freq)
+ if not hasattr(mod, 'handle'):
def empty_handle(_name, _cfg, _cloud, _log, _args):
pass
setattr(mod, 'handle', empty_handle)
+ # Used only for warning if possibly running on a not checked distro...
+ if not hasattr(mod, 'distros'):
+ setattr(mod, 'distros', None)
return mod
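
Note: the effect of fixup_module() is that every transform module ends up with 'frequency', 'handle' and 'distros' attributes, so the runner never needs hasattr checks. The following stand-alone sketch shows that behaviour; the FREQUENCIES values are placeholders and print replaces the LOG.warn call.

    PER_INSTANCE = "once-per-instance"
    FREQUENCIES = [PER_INSTANCE, "always", "once"]

    def fixup_module(mod, def_freq=PER_INSTANCE):
        if not hasattr(mod, 'frequency'):
            setattr(mod, 'frequency', def_freq)
        elif mod.frequency not in FREQUENCIES:
            print("Module %s has an unknown frequency %s" % (mod, mod.frequency))
        if not hasattr(mod, 'handle'):
            def empty_handle(_name, _cfg, _cloud, _log, _args):
                pass
            setattr(mod, 'handle', empty_handle)
        if not hasattr(mod, 'distros'):
            setattr(mod, 'distros', None)
        return mod

    class FakeTransform(object):  # a transform module that defines nothing
        pass

    mod = fixup_module(FakeTransform())
    print("%s %s %s" % (mod.frequency, mod.distros, callable(mod.handle)))
    # -> once-per-instance None True
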
diff --git a/cloudinit/transforms/cc_apt_pipelining.py b/cloudinit/transforms/cc_apt_pipelining.py
index 0286a9ae..69027b0c 100644
--- a/cloudinit/transforms/cc_apt_pipelining.py
+++ b/cloudinit/transforms/cc_apt_pipelining.py
@@ -16,10 +16,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_instance
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+distros = ['ubuntu', 'debian']
-frequency = per_instance
default_file = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
diff --git a/cloudinit/transforms/cc_apt_update_upgrade.py b/cloudinit/transforms/cc_apt_update_upgrade.py
index a7049bce..c4a543ed 100644
--- a/cloudinit/transforms/cc_apt_update_upgrade.py
+++ b/cloudinit/transforms/cc_apt_update_upgrade.py
@@ -18,12 +18,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import subprocess
-import traceback
-import os
import glob
-import cloudinit.CloudConfig as cc
+import os
+
+from cloudinit import templater
+from cloudinit import util
+
+distros = ['ubuntu', 'debian']
def handle(_name, cfg, cloud, log, _args):
@@ -34,13 +35,13 @@ def handle(_name, cfg, cloud, log, _args):
mirror = find_apt_mirror(cloud, cfg)
- log.debug("selected mirror at: %s" % mirror)
+ log.debug("Selected mirror at: %s" % mirror)
- if not util.get_cfg_option_bool(cfg, \
- 'apt_preserve_sources_list', False):
- generate_sources_list(release, mirror)
- old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', \
- "archive.ubuntu.com/ubuntu")
+ if not util.get_cfg_option_bool(cfg,
+ 'apt_preserve_sources_list', False):
+ generate_sources_list(release, mirror, cloud, log)
+ old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror',
+ "archive.ubuntu.com/ubuntu")
rename_apt_lists(old_mir, mirror)
# set up proxy
@@ -49,19 +50,18 @@ def handle(_name, cfg, cloud, log, _args):
if proxy:
try:
contents = "Acquire::HTTP::Proxy \"%s\";\n"
- with open(proxy_filename, "w") as fp:
- fp.write(contents % proxy)
+ util.write_file(proxy_filename, contents % (proxy))
except Exception as e:
- log.warn("Failed to write proxy to %s" % proxy_filename)
+ util.logexc(log, "Failed to write proxy to %s", proxy_filename)
elif os.path.isfile(proxy_filename):
- os.unlink(proxy_filename)
+ util.del_file(proxy_filename)
# process 'apt_sources'
if 'apt_sources' in cfg:
errors = add_sources(cfg['apt_sources'],
{'MIRROR': mirror, 'RELEASE': release})
for e in errors:
- log.warn("Source Error: %s\n" % ':'.join(e))
+ log.warn("Source Error: %s", ':'.join(e))
dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
if dconf_sel:
@@ -69,41 +69,35 @@ def handle(_name, cfg, cloud, log, _args):
try:
util.subp(('debconf-set-selections', '-'), dconf_sel)
except:
- log.error("Failed to run debconf-set-selections")
- log.debug(traceback.format_exc())
+ util.logexc(log, "Failed to run debconf-set-selections")
pkglist = util.get_cfg_option_list_or_str(cfg, 'packages', [])
errors = []
if update or len(pkglist) or upgrade:
try:
- cc.update_package_sources()
- except subprocess.CalledProcessError as e:
- log.warn("apt-get update failed")
- log.debug(traceback.format_exc())
+ cloud.distro.update_package_sources()
+ except Exception as e:
+ util.logexc(log, "Package update failed")
errors.append(e)
if upgrade:
try:
- cc.apt_get("upgrade")
- except subprocess.CalledProcessError as e:
- log.warn("apt upgrade failed")
- log.debug(traceback.format_exc())
+ cloud.distro.package_command("upgrade")
+ except Exception as e:
+ util.logexc(log, "Package upgrade failed")
errors.append(e)
if len(pkglist):
try:
- cc.install_packages(pkglist)
- except subprocess.CalledProcessError as e:
- log.warn("Failed to install packages: %s " % pkglist)
- log.debug(traceback.format_exc())
+ cloud.distro.install_packages(pkglist)
+ except Exception as e:
+ util.logexc(log, "Failed to install packages: %s ", pkglist)
errors.append(e)
if len(errors):
raise errors[0]
- return(True)
-
def mirror2lists_fileprefix(mirror):
string = mirror
@@ -120,37 +114,40 @@ def mirror2lists_fileprefix(mirror):
def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"):
oprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(omirror))
nprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(new_mirror))
- if(oprefix == nprefix):
+ if oprefix == nprefix:
return
olen = len(oprefix)
for filename in glob.glob("%s_*" % oprefix):
- os.rename(filename, "%s%s" % (nprefix, filename[olen:]))
+ util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
def get_release():
- stdout, _stderr = subprocess.Popen(['lsb_release', '-cs'],
- stdout=subprocess.PIPE).communicate()
- return(str(stdout).strip())
+ (stdout, _stderr) = util.subp(['lsb_release', '-cs'])
+ return stdout.strip()
-def generate_sources_list(codename, mirror):
- util.render_to_file('sources.list', '/etc/apt/sources.list', \
- {'mirror': mirror, 'codename': codename})
+def generate_sources_list(codename, mirror, cloud, log):
+ template_fn = cloud.get_template_filename('sources.list')
+ if template_fn:
+ params = {'mirror': mirror, 'codename': codename}
+ templater.render_to_file(template_fn, '/etc/apt/sources.list', params)
+ else:
+ log.warn("No template found, not rendering /etc/apt/sources.list")
-def add_sources(srclist, searchList=None):
+def add_sources(srclist, template_params=None):
"""
add entries in /etc/apt/sources.list.d for each abbreviated
sources.list entry in 'srclist'. When rendering template, also
include the values in dictionary searchList
"""
- if searchList is None:
- searchList = {}
- elst = []
+ if template_params is None:
+ template_params = {}
+ errorlist = []
for ent in srclist:
if 'source' not in ent:
- elst.append(["", "missing source"])
+ errorlist.append(["", "missing source"])
continue
source = ent['source']
@@ -158,17 +155,17 @@ def add_sources(srclist, searchList=None):
try:
util.subp(["add-apt-repository", source])
except:
- elst.append([source, "add-apt-repository failed"])
+ errorlist.append([source, "add-apt-repository failed"])
continue
- source = util.render_string(source, searchList)
+ source = templater.render_string(source, template_params)
if 'filename' not in ent:
ent['filename'] = 'cloud_config_sources.list'
if not ent['filename'].startswith("/"):
- ent['filename'] = "%s/%s" % \
- ("/etc/apt/sources.list.d/", ent['filename'])
+ ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
+ ent['filename'])
if ('keyid' in ent and 'key' not in ent):
ks = "keyserver.ubuntu.com"
@@ -177,32 +174,26 @@ def add_sources(srclist, searchList=None):
try:
ent['key'] = util.getkeybyid(ent['keyid'], ks)
except:
- elst.append([source, "failed to get key from %s" % ks])
+ errorlist.append([source, "failed to get key from %s" % ks])
continue
if 'key' in ent:
try:
util.subp(('apt-key', 'add', '-'), ent['key'])
except:
- elst.append([source, "failed add key"])
+ errorlist.append([source, "failed add key"])
try:
util.write_file(ent['filename'], source + "\n", omode="ab")
except:
- elst.append([source, "failed write to file %s" % ent['filename']])
+ errorlist.append([source, "failed write to file %s" % ent['filename']])
- return(elst)
+ return errorlist
def find_apt_mirror(cloud, cfg):
""" find an apt_mirror given the cloud and cfg provided """
- # TODO: distro and defaults should be configurable
- distro = "ubuntu"
- defaults = {
- 'ubuntu': "http://archive.ubuntu.com/ubuntu",
- 'debian': "http://archive.debian.org/debian",
- }
mirror = None
cfg_mirror = cfg.get("apt_mirror", None)
@@ -211,14 +202,13 @@ def find_apt_mirror(cloud, cfg):
elif "apt_mirror_search" in cfg:
mirror = util.search_for_mirror(cfg['apt_mirror_search'])
else:
- if cloud:
- mirror = cloud.get_mirror()
+ mirror = cloud.get_local_mirror()
mydom = ""
doms = []
- if not mirror and cloud:
+ if not mirror:
# if we have a fqdn, then search its domain portion first
(_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
mydom = ".".join(fqdn.split(".")[1:])
@@ -236,6 +226,6 @@ def find_apt_mirror(cloud, cfg):
mirror = util.search_for_mirror(mirror_list)
if not mirror:
- mirror = defaults[distro]
+ mirror = cloud.distro.get_package_mirror()
return mirror
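
Note: for each 'source' entry, add_sources() substitutes MIRROR and RELEASE into the template and writes the result under /etc/apt/sources.list.d/. The real code goes through cloudinit.templater.render_string(); string.Template is used below only as a stand-in, and the entry is a made-up example.

    import os
    from string import Template

    template_params = {'MIRROR': 'http://archive.ubuntu.com/ubuntu',
                       'RELEASE': 'precise'}
    ent = {'source': 'deb $MIRROR $RELEASE multiverse',
           'filename': 'cloud_config_sources.list'}

    source = Template(ent['source']).safe_substitute(template_params)
    filename = ent['filename']
    if not filename.startswith("/"):
        filename = os.path.join("/etc/apt/sources.list.d/", filename)

    print(filename)  # /etc/apt/sources.list.d/cloud_config_sources.list
    print(source)    # deb http://archive.ubuntu.com/ubuntu precise multiverse
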
diff --git a/cloudinit/transforms/cc_bootcmd.py b/cloudinit/transforms/cc_bootcmd.py
index f584da02..a2efad32 100644
--- a/cloudinit/transforms/cc_bootcmd.py
+++ b/cloudinit/transforms/cc_bootcmd.py
@@ -17,32 +17,36 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import subprocess
-import tempfile
+
import os
-from cloudinit.CloudConfig import per_always
-frequency = per_always
+import tempfile
+
+from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
+frequency = PER_ALWAYS
+
+
+def handle(name, cfg, cloud, log, _args):
-def handle(_name, cfg, cloud, log, _args):
if "bootcmd" not in cfg:
+ log.debug("Skipping module named %s, no 'bootcomd' key in configuration", name)
return
- try:
- content = util.shellify(cfg["bootcmd"])
- tmpf = tempfile.TemporaryFile()
- tmpf.write(content)
- tmpf.seek(0)
- except:
- log.warn("failed to shellify bootcmd")
- raise
-
- try:
- env = os.environ.copy()
- env['INSTANCE_ID'] = cloud.get_instance_id()
- subprocess.check_call(['/bin/sh'], env=env, stdin=tmpf)
- tmpf.close()
- except:
- log.warn("failed to run commands from bootcmd")
- raise
+ with tempfile.NamedTemporaryFile(suffix=".sh") as tmpf:
+ try:
+ content = util.shellify(cfg["bootcmd"])
+ tmpf.write(content)
+ tmpf.flush()
+ except:
+ log.warn("Failed to shellify bootcmd")
+ raise
+
+ try:
+ env = os.environ.copy()
+ env['INSTANCE_ID'] = cloud.get_instance_id()
+ cmd = ['/bin/sh', tmpf.name]
+ util.subp(cmd, env=env, capture=False)
+ except:
+ log.warn("Failed to run commands from bootcmd")
+ raise
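
Note: the rewritten bootcmd handler turns the command list into a shell script in a NamedTemporaryFile and runs it with /bin/sh and INSTANCE_ID in the environment. The sketch below follows that pattern using only the standard library; util.shellify()/util.subp() are replaced by plain-stdlib equivalents and the command list is an example, not from the source.

    import os
    import subprocess
    import tempfile

    bootcmd = ["echo hello from bootcmd", ["date", "-u"]]

    lines = ["#!/bin/sh"]
    for cmd in bootcmd:
        if isinstance(cmd, list):
            lines.append(" ".join("'%s'" % c for c in cmd))
        else:
            lines.append(str(cmd))
    content = "\n".join(lines) + "\n"

    with tempfile.NamedTemporaryFile(mode="w", suffix=".sh") as tmpf:
        tmpf.write(content)
        tmpf.flush()
        env = os.environ.copy()
        env['INSTANCE_ID'] = "iid-example"  # normally cloud.get_instance_id()
        subprocess.call(['/bin/sh', tmpf.name], env=env)
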
diff --git a/cloudinit/transforms/cc_byobu.py b/cloudinit/transforms/cc_byobu.py
index e821b261..38586174 100644
--- a/cloudinit/transforms/cc_byobu.py
+++ b/cloudinit/transforms/cc_byobu.py
@@ -18,18 +18,19 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import subprocess
-import traceback
+from cloudinit import util
+distros = ['ubuntu', 'debian']
-def handle(_name, cfg, _cloud, log, args):
+
+def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
value = args[0]
else:
value = util.get_cfg_option_str(cfg, "byobu_by_default", "")
if not value:
+ log.debug("Skipping module named %s, no 'byobu' values found", name)
return
if value == "user" or value == "system":
@@ -38,7 +39,7 @@ def handle(_name, cfg, _cloud, log, args):
valid = ("enable-user", "enable-system", "enable",
"disable-user", "disable-system", "disable")
if not value in valid:
- log.warn("Unknown value %s for byobu_by_default" % value)
+ log.warn("Unknown value %s for byobu_by_default", value)
mod_user = value.endswith("-user")
mod_sys = value.endswith("-system")
@@ -65,13 +66,6 @@ def handle(_name, cfg, _cloud, log, args):
cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
- log.debug("setting byobu to %s" % value)
+ log.debug("Setting byobu to %s", value)
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError as e:
- log.debug(traceback.format_exc(e))
- raise Exception("Cmd returned %s: %s" % (e.returncode, cmd))
- except OSError as e:
- log.debug(traceback.format_exc(e))
- raise Exception("Cmd failed to execute: %s" % (cmd))
+ util.subp(cmd)
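
Note: the byobu handler interprets byobu_by_default by suffix: "-user" and "-system" select the scope, and a bare "user"/"system" is normalized to "enable-...". The stand-alone sketch below mirrors only the checks visible in the hunk above (the shell commands the module assembles are not reproduced), and prints instead of logging.

    def byobu_scope(value):
        if value in ("user", "system"):
            value = "enable-%s" % value
        valid = ("enable-user", "enable-system", "enable",
                 "disable-user", "disable-system", "disable")
        if value not in valid:
            print("Unknown value %s for byobu_by_default" % value)
        return (value.startswith("enable"),
                value.endswith("-user"),
                value.endswith("-system"))

    print(byobu_scope("user"))            # (True, True, False)
    print(byobu_scope("disable-system"))  # (False, False, True)
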
diff --git a/cloudinit/transforms/cc_ca_certs.py b/cloudinit/transforms/cc_ca_certs.py
index 3af6238a..8ca9a200 100644
--- a/cloudinit/transforms/cc_ca_certs.py
+++ b/cloudinit/transforms/cc_ca_certs.py
@@ -13,10 +13,10 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
import os
-from subprocess import check_call
-from cloudinit.util import (write_file, get_cfg_option_list_or_str,
- delete_dir_contents, subp)
+
+from cloudinit import util
CA_CERT_PATH = "/usr/share/ca-certificates/"
CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
@@ -28,7 +28,7 @@ def update_ca_certs():
"""
Updates the CA certificate cache on the current machine.
"""
- check_call(["update-ca-certificates"])
+ util.subp(["update-ca-certificates"])
def add_ca_certs(certs):
@@ -41,9 +41,9 @@ def add_ca_certs(certs):
if certs:
cert_file_contents = "\n".join(certs)
cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
- write_file(cert_file_fullpath, cert_file_contents, mode=0644)
+ util.write_file(cert_file_fullpath, cert_file_contents, mode=0644)
# Append cert filename to CA_CERT_CONFIG file.
- write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="a")
+ util.write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="ab")
def remove_default_ca_certs():
@@ -51,14 +51,14 @@ def remove_default_ca_certs():
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
"""
- delete_dir_contents(CA_CERT_PATH)
- delete_dir_contents(CA_CERT_SYSTEM_PATH)
- write_file(CA_CERT_CONFIG, "", mode=0644)
+ util.delete_dir_contents(CA_CERT_PATH)
+ util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
+ util.write_file(CA_CERT_CONFIG, "", mode=0644)
debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
- subp(('debconf-set-selections', '-'), debconf_sel)
+ util.subp(('debconf-set-selections', '-'), debconf_sel)
-def handle(_name, cfg, _cloud, log, _args):
+def handle(name, cfg, _cloud, log, _args):
"""
Call to handle ca-cert sections in cloud-config file.
@@ -70,6 +70,7 @@ def handle(_name, cfg, _cloud, log, _args):
"""
# If there isn't a ca-certs section in the configuration don't do anything
if "ca-certs" not in cfg:
+ log.debug("Skipping module named %s, no 'ca-certs' key in configuration", name)
return
ca_cert_cfg = cfg['ca-certs']
@@ -81,7 +82,7 @@ def handle(_name, cfg, _cloud, log, _args):
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
- trusted_certs = get_cfg_option_list_or_str(ca_cert_cfg, "trusted")
+ trusted_certs = util.get_cfg_option_list_or_str(ca_cert_cfg, "trusted")
if trusted_certs:
log.debug("adding %d certificates" % len(trusted_certs))
add_ca_certs(trusted_certs)
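
Note: add_ca_certs() concatenates the 'trusted' PEM blobs into one file under /usr/share/ca-certificates/, appends that filename to /etc/ca-certificates.conf, and update_ca_certs() then runs update-ca-certificates. The sketch below shows the same sequence with the module's path constants; the dry_run guard and the sample certificate text are additions for illustration.

    import os
    import subprocess

    CA_CERT_PATH = "/usr/share/ca-certificates/"
    CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
    CA_CERT_CONFIG = "/etc/ca-certificates.conf"

    def add_ca_certs(certs, dry_run=True):
        if not certs:
            return
        contents = "\n".join(certs)
        full_path = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
        if dry_run:
            print("would write %d bytes to %s" % (len(contents), full_path))
            print("would append %r to %s" % (CA_CERT_FILENAME, CA_CERT_CONFIG))
            print("would run: update-ca-certificates")
            return
        with open(full_path, "w") as fp:
            fp.write(contents)
        with open(CA_CERT_CONFIG, "a") as fp:
            fp.write("\n%s" % CA_CERT_FILENAME)
        subprocess.check_call(["update-ca-certificates"])

    add_ca_certs(["-----BEGIN CERTIFICATE-----\n...example...\n"
                  "-----END CERTIFICATE-----"])
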
diff --git a/cloudinit/transforms/cc_chef.py b/cloudinit/transforms/cc_chef.py
index 941e04fe..12c2f539 100644
--- a/cloudinit/transforms/cc_chef.py
+++ b/cloudinit/transforms/cc_chef.py
@@ -18,53 +18,59 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import os
-import subprocess
import json
-import cloudinit.CloudConfig as cc
-import cloudinit.util as util
+import os
+
+from cloudinit import templater
+from cloudinit import util
ruby_version_default = "1.8"
-def handle(_name, cfg, cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
+
# If there isn't a chef key in the configuration don't do anything
if 'chef' not in cfg:
+ log.debug("Skipping module named %s, no 'chef' key in configuration", name)
return
chef_cfg = cfg['chef']
# ensure the chef directories we use exist
- mkdirs(['/etc/chef', '/var/log/chef', '/var/lib/chef',
- '/var/cache/chef', '/var/backups/chef', '/var/run/chef'])
+ util.ensure_dirs(['/etc/chef', '/var/log/chef', '/var/lib/chef',
+ '/var/cache/chef', '/var/backups/chef', '/var/run/chef'])
# set the validation key based on the presence of either 'validation_key'
# or 'validation_cert'. In the case where both exist, 'validation_key'
# takes precedence
for key in ('validation_key', 'validation_cert'):
if key in chef_cfg and chef_cfg[key]:
- with open('/etc/chef/validation.pem', 'w') as validation_key_fh:
- validation_key_fh.write(chef_cfg[key])
+ util.write_file('/etc/chef/validation.pem', chef_cfg[key])
break
# create the chef config from template
- util.render_to_file('chef_client.rb', '/etc/chef/client.rb',
- {'server_url': chef_cfg['server_url'],
- 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
- cloud.datasource.get_instance_id()),
- 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
- '_default'),
- 'validation_name': chef_cfg['validation_name']})
+ template_fn = cloud.get_template_filename('chef_client.rb')
+ if template_fn:
+ params = {
+ 'server_url': chef_cfg['server_url'],
+ 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
+ cloud.datasource.get_instance_id()),
+ 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
+ '_default'),
+ 'validation_name': chef_cfg['validation_name']
+ }
+ templater.render_to_file(template_fn, '/etc/chef/client.rb', params)
+ else:
+ log.warn("No template found, not rendering to /etc/chef/client.rb")
# set the firstboot json
- with open('/etc/chef/firstboot.json', 'w') as firstboot_json_fh:
- initial_json = {}
- if 'run_list' in chef_cfg:
- initial_json['run_list'] = chef_cfg['run_list']
- if 'initial_attributes' in chef_cfg:
- initial_attributes = chef_cfg['initial_attributes']
- for k in initial_attributes.keys():
- initial_json[k] = initial_attributes[k]
- firstboot_json_fh.write(json.dumps(initial_json))
+ initial_json = {}
+ if 'run_list' in chef_cfg:
+ initial_json['run_list'] = chef_cfg['run_list']
+ if 'initial_attributes' in chef_cfg:
+ initial_attributes = chef_cfg['initial_attributes']
+ for k in initial_attributes.keys():
+ initial_json[k] = initial_attributes[k]
+ util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json))
# If chef is not installed, we install chef based on 'install_type'
if not os.path.isfile('/usr/bin/chef-client'):
@@ -75,14 +81,15 @@ def handle(_name, cfg, cloud, log, _args):
chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
ruby_version_default)
- install_chef_from_gems(ruby_version, chef_version)
+            install_chef_from_gems(ruby_version, chef_version, cloud.distro)
# and finally, run chef-client
- log.debug('running chef-client')
- subprocess.check_call(['/usr/bin/chef-client', '-d', '-i', '1800',
- '-s', '20'])
- else:
+ log.debug('Running chef-client')
+ util.subp(['/usr/bin/chef-client', '-d', '-i', '1800', '-s', '20'])
+ elif install_type == 'packages':
# this will install and run the chef-client from packages
- cc.install_packages(('chef',))
+ cloud.distro.install_packages(('chef',))
+ else:
+ log.warn("Unknown chef install type %s", install_type)
def get_ruby_packages(version):
@@ -90,30 +97,20 @@ def get_ruby_packages(version):
pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]
if version == "1.8":
pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))
- return(pkgs)
+ return pkgs
-def install_chef_from_gems(ruby_version, chef_version=None):
- cc.install_packages(get_ruby_packages(ruby_version))
+def install_chef_from_gems(ruby_version, chef_version, distro):
+ distro.install_packages(get_ruby_packages(ruby_version))
if not os.path.exists('/usr/bin/gem'):
- os.symlink('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
+ util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
if not os.path.exists('/usr/bin/ruby'):
- os.symlink('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
+ util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
if chef_version:
- subprocess.check_call(['/usr/bin/gem', 'install', 'chef',
- '-v %s' % chef_version, '--no-ri',
- '--no-rdoc', '--bindir', '/usr/bin', '-q'])
+ util.subp(['/usr/bin/gem', 'install', 'chef',
+ '-v %s' % chef_version, '--no-ri',
+ '--no-rdoc', '--bindir', '/usr/bin', '-q'])
else:
- subprocess.check_call(['/usr/bin/gem', 'install', 'chef',
- '--no-ri', '--no-rdoc', '--bindir',
- '/usr/bin', '-q'])
-
-
-def ensure_dir(d):
- if not os.path.exists(d):
- os.makedirs(d)
-
-
-def mkdirs(dirs):
- for d in dirs:
- ensure_dir(d)
+ util.subp(['/usr/bin/gem', 'install', 'chef',
+ '--no-ri', '--no-rdoc', '--bindir',
+ '/usr/bin', '-q'])
diff --git a/cloudinit/transforms/cc_disable_ec2_metadata.py b/cloudinit/transforms/cc_disable_ec2_metadata.py
index 6b31ea8e..4d2a7f55 100644
--- a/cloudinit/transforms/cc_disable_ec2_metadata.py
+++ b/cloudinit/transforms/cc_disable_ec2_metadata.py
@@ -17,14 +17,16 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import subprocess
-from cloudinit.CloudConfig import per_always
-frequency = per_always
+from cloudinit import util
+
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+
+reject_cmd = ['route', 'add', '-host', '169.254.169.254', 'reject']
def handle(_name, cfg, _cloud, _log, _args):
if util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False):
- fwall = "route add -host 169.254.169.254 reject"
- subprocess.call(fwall.split(' '))
+ util.subp(reject_cmd)
diff --git a/cloudinit/transforms/cc_final_message.py b/cloudinit/transforms/cc_final_message.py
index abb4ca32..dc4ae34c 100644
--- a/cloudinit/transforms/cc_final_message.py
+++ b/cloudinit/transforms/cc_final_message.py
@@ -18,41 +18,54 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit.CloudConfig import per_always
import sys
-from cloudinit import util, boot_finished
-import time
-frequency = per_always
+from cloudinit import templater
+from cloudinit import util
+from cloudinit import version
-final_message = "cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds"
+from cloudinit.settings import PER_ALWAYS
+frequency = PER_ALWAYS
-def handle(_name, cfg, _cloud, log, args):
+final_message_def = ("Cloud-init v. {{version}} finished at {{timestamp}}."
+ " Up {{uptime}} seconds.")
+
+
+def handle(name, cfg, cloud, log, args):
+
+ msg_in = None
if len(args) != 0:
msg_in = args[0]
else:
- msg_in = util.get_cfg_option_str(cfg, "final_message", final_message)
+ msg_in = util.get_cfg_option_str(cfg, "final_message")
+
+ if not msg_in:
+ template_fn = cloud.get_template_filename('final_message')
+ if template_fn:
+ msg_in = util.load_file(template_fn)
- try:
- uptimef = open("/proc/uptime")
- uptime = uptimef.read().split(" ")[0]
- uptimef.close()
- except IOError as e:
- log.warn("unable to open /proc/uptime\n")
- uptime = "na"
+ if not msg_in:
+ msg_in = final_message_def
+ uptime = util.uptime()
+ ts = util.time_rfc2822()
+ cver = version.version_string()
try:
- ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
- except:
- ts = "na"
-
- try:
- subs = {'UPTIME': uptime, 'TIMESTAMP': ts}
- sys.stdout.write("%s\n" % util.render_string(msg_in, subs))
+ subs = {
+ 'uptime': uptime,
+ 'timestamp': ts,
+ 'version': cver,
+ }
+ # Use stdout, stderr or the logger??
+ content = templater.render_string(msg_in, subs)
+ sys.stderr.write("%s\n" % (content))
except Exception as e:
- log.warn("failed to render string to stdout: %s" % e)
+ util.logexc(log, "Failed to render final message template")
- fp = open(boot_finished, "wb")
- fp.write(uptime + "\n")
- fp.close()
+ boot_fin_fn = cloud.paths.boot_finished
+ try:
+ contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
+ util.write_file(boot_fin_fn, contents)
+ except:
+ util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
diff --git a/cloudinit/transforms/cc_foo.py b/cloudinit/transforms/cc_foo.py
index 35ec3fa7..8007f981 100644
--- a/cloudinit/transforms/cc_foo.py
+++ b/cloudinit/transforms/cc_foo.py
@@ -18,12 +18,35 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#import cloudinit
-#import cloudinit.util as util
-from cloudinit.CloudConfig import per_instance
+from cloudinit.settings import PER_INSTANCE
-frequency = per_instance
+# Modules are expected to have the following attributes.
+# 1. A required 'handle' method which takes the following params.
+#    a) The name will not be this file's name, but instead
+#    the name specified in configuration (which is the name
+#    which will be used to find this module).
+#    b) A configuration object that is the result of the merging
+#    of cloud config configuration with legacy configuration
+#    as well as any datasource provided configuration
+#    c) A cloud object that can be used to access various
+#    datasource and paths for the given distro and data provided
+#    by the various datasource instance types.
+#    d) An argument list, which may or may not be empty, that is
+#    passed to this module. Typically those are from module configuration
+#    where the module is defined with some extra configuration that will
+#    eventually be translated from yaml into arguments to this module.
+# 2. An optional 'frequency' that defines how often this module should be run.
+#    Typically one of PER_INSTANCE, PER_ALWAYS, PER_ONCE. If not
+#    provided PER_INSTANCE will be assumed.
+#    See settings.py for these constants.
+# 3. An optional 'distros' array/set/tuple that defines the known distros
+#    this module will work with (if not all of them). This is used to write
+#    out a warning, for informational purposes, if a module is being run on
+#    an untested distribution. If not present, all distros are assumed and
+#    no warning occurs.
+#    (A minimal sketch of a conforming module follows the handler below.)
+
+frequency = PER_INSTANCE
-def handle(_name, _cfg, _cloud, _log, _args):
- print "hi"
+
+def handle(name, _cfg, _cloud, _log, _args):
+ print("Hi from %s" % (name))
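+
+# As a rough, purely illustrative sketch (the 'example' config key and the
+# log messages below are hypothetical and not part of any real module), a
+# module following the contract described above might look like:
+#
+#   from cloudinit.settings import PER_INSTANCE
+#
+#   frequency = PER_INSTANCE
+#   distros = ['ubuntu', 'debian']
+#
+#   def handle(name, cfg, _cloud, log, _args):
+#       if 'example' not in cfg:
+#           log.debug("Skipping module named %s, no 'example' key", name)
+#           return
+#       log.debug("Module %s was given: %s", name, cfg['example'])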
diff --git a/cloudinit/transforms/cc_grub_dpkg.py b/cloudinit/transforms/cc_grub_dpkg.py
index 9f3a7eaf..c048d5cc 100644
--- a/cloudinit/transforms/cc_grub_dpkg.py
+++ b/cloudinit/transforms/cc_grub_dpkg.py
@@ -18,10 +18,12 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import traceback
import os
+from cloudinit import util
+
+distros = ['ubuntu', 'debian']
+
def handle(_name, cfg, _cloud, log, _args):
idevs = None
@@ -52,13 +54,14 @@ def handle(_name, cfg, _cloud, log, _args):
# now idevs and idevs_empty are set to determined values
# or, those set by user
- dconf_sel = "grub-pc grub-pc/install_devices string %s\n" % idevs + \
- "grub-pc grub-pc/install_devices_empty boolean %s\n" % idevs_empty
- log.debug("setting grub debconf-set-selections with '%s','%s'" %
+    dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
+                  "grub-pc grub-pc/install_devices_empty boolean %s\n") %
+                 (idevs, idevs_empty))
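+    # For example, with idevs="/dev/sda" and idevs_empty="false" (sample
+    # values only), the selections text piped to debconf-set-selections is:
+    #     grub-pc grub-pc/install_devices string /dev/sda
+    #     grub-pc grub-pc/install_devices_empty boolean false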
+
+ log.debug("Setting grub debconf-set-selections with '%s','%s'" %
(idevs, idevs_empty))
try:
- util.subp(('debconf-set-selections'), dconf_sel)
+ util.subp(['debconf-set-selections'], dconf_sel)
except:
- log.error("Failed to run debconf-set-selections for grub-dpkg")
- log.debug(traceback.format_exc())
+ util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
diff --git a/cloudinit/transforms/cc_keys_to_console.py b/cloudinit/transforms/cc_keys_to_console.py
index 73a477c0..2f2a5297 100644
--- a/cloudinit/transforms/cc_keys_to_console.py
+++ b/cloudinit/transforms/cc_keys_to_console.py
@@ -18,11 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit.CloudConfig import per_instance
-import cloudinit.util as util
-import subprocess
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
-frequency = per_instance
+frequency = PER_INSTANCE
def handle(_name, cfg, _cloud, log, _args):
@@ -32,11 +31,10 @@ def handle(_name, cfg, _cloud, log, _args):
key_blacklist = util.get_cfg_option_list_or_str(cfg,
"ssh_key_console_blacklist", ["ssh-dss"])
try:
- confp = open('/dev/console', "wb")
cmd.append(','.join(fp_blacklist))
cmd.append(','.join(key_blacklist))
- subprocess.call(cmd, stdout=confp)
- confp.close()
+ (stdout, stderr) = util.subp(cmd)
+ util.write_file('/dev/console', stdout)
except:
- log.warn("writing keys to console value")
+ log.warn("Writing keys to console failed!")
raise
diff --git a/cloudinit/transforms/cc_landscape.py b/cloudinit/transforms/cc_landscape.py
index a4113cbe..48491992 100644
--- a/cloudinit/transforms/cc_landscape.py
+++ b/cloudinit/transforms/cc_landscape.py
@@ -19,14 +19,24 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import os.path
-from cloudinit.CloudConfig import per_instance
-from configobj import ConfigObj
-frequency = per_instance
+from StringIO import StringIO
+
+try:
+ from configobj import ConfigObj
+except ImportError:
+ ConfigObj = None
+
+from cloudinit import util
+
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
lsc_client_cfg_file = "/etc/landscape/client.conf"
+distros = ['ubuntu']
+
# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
lsc_builtincfg = {
'client': {
@@ -38,36 +48,43 @@ lsc_builtincfg = {
}
-def handle(_name, cfg, _cloud, log, _args):
+def handle(name, cfg, _cloud, log, _args):
"""
Basically turn a top level 'landscape' entry with a 'client' dict
and render it to ConfigObj format under '[client]' section in
/etc/landscape/client.conf
"""
+ if not ConfigObj:
+        log.warn("'ConfigObj' support not available, skipping module %s", name)
+ return
ls_cloudcfg = cfg.get("landscape", {})
if not isinstance(ls_cloudcfg, dict):
- raise(Exception("'landscape' existed in config, but not a dict"))
+        raise Exception(("'landscape' key existed in config,"
+                         " but not a dictionary type,"
+                         " is a %s instead") % util.obj_name(ls_cloudcfg))
- merged = mergeTogether([lsc_builtincfg, lsc_client_cfg_file, ls_cloudcfg])
+ merged = merge_together([lsc_builtincfg, lsc_client_cfg_file, ls_cloudcfg])
if not os.path.isdir(os.path.dirname(lsc_client_cfg_file)):
- os.makedirs(os.path.dirname(lsc_client_cfg_file))
-
- with open(lsc_client_cfg_file, "w") as fp:
- merged.write(fp)
+ util.ensure_dir(os.path.dirname(lsc_client_cfg_file))
- log.debug("updated %s" % lsc_client_cfg_file)
+ contents = StringIO()
+ merged.write(contents)
+ util.write_file(lsc_client_cfg_file, contents.getvalue())
+ log.debug("Wrote landscape config file to %s", lsc_client_cfg_file)
-def mergeTogether(objs):
+def merge_together(objs):
"""
merge together ConfigObj objects or things that ConfigObj() will take in
later entries override earlier
"""
cfg = ConfigObj({})
for obj in objs:
+ if not obj:
+ continue
if isinstance(obj, ConfigObj):
cfg.merge(obj)
else:
diff --git a/cloudinit/transforms/cc_locale.py b/cloudinit/transforms/cc_locale.py
index 2bb22fdb..3fb4c5d9 100644
--- a/cloudinit/transforms/cc_locale.py
+++ b/cloudinit/transforms/cc_locale.py
@@ -18,22 +18,28 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import os.path
-import subprocess
-import traceback
+import os
+from cloudinit import templater
+from cloudinit import util
-def apply_locale(locale, cfgfile):
+
+def apply_locale(locale, cfgfile, cloud, log):
+ # TODO this command might not work on RH...
if os.path.exists('/usr/sbin/locale-gen'):
- subprocess.Popen(['locale-gen', locale]).communicate()
+ util.subp(['locale-gen', locale], capture=False)
if os.path.exists('/usr/sbin/update-locale'):
- subprocess.Popen(['update-locale', locale]).communicate()
-
- util.render_to_file('default-locale', cfgfile, {'locale': locale})
+ util.subp(['update-locale', locale], capture=False)
+ if not cfgfile:
+ return
+ template_fn = cloud.get_template_filename('default-locale')
+ if not template_fn:
+ log.warn("No template filename found to write to %s", cfgfile)
+ else:
+ templater.render_to_file(template_fn, cfgfile, {'locale': locale})
-def handle(_name, cfg, cloud, log, args):
+def handle(name, cfg, cloud, log, args):
if len(args) != 0:
locale = args[0]
else:
@@ -43,12 +49,10 @@ def handle(_name, cfg, cloud, log, args):
"/etc/default/locale")
if not locale:
+ log.debug(("Skipping module named %s, "
+ "no 'locale' configuration found"), name)
return
- log.debug("setting locale to %s" % locale)
+ log.debug("Setting locale to %s", locale)
- try:
- apply_locale(locale, locale_cfgfile)
- except Exception as e:
- log.debug(traceback.format_exc(e))
- raise Exception("failed to apply locale %s" % locale)
+ apply_locale(locale, locale_cfgfile, cloud, log)
diff --git a/cloudinit/transforms/cc_mcollective.py b/cloudinit/transforms/cc_mcollective.py
index a2a6230c..aeeda9d2 100644
--- a/cloudinit/transforms/cc_mcollective.py
+++ b/cloudinit/transforms/cc_mcollective.py
@@ -19,50 +19,53 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from ConfigParser import ConfigParser
+from StringIO import StringIO
+
import os
-import subprocess
-import StringIO
-import ConfigParser
-import cloudinit.CloudConfig as cc
-import cloudinit.util as util
+
+from cloudinit import util
+# Alias the module import so it is not shadowed by the 'cfg' handle argument
+from cloudinit import cfg as config_helper
pubcert_file = "/etc/mcollective/ssl/server-public.pem"
pricert_file = "/etc/mcollective/ssl/server-private.pem"
-# Our fake header section
-class FakeSecHead(object):
- def __init__(self, fp):
- self.fp = fp
- self.sechead = '[nullsection]\n'
-
- def readline(self):
- if self.sechead:
- try:
- return self.sechead
- finally:
- self.sechead = None
- else:
- return self.fp.readline()
+def handle(name, cfg, cloud, log, _args):
-
-def handle(_name, cfg, _cloud, _log, _args):
# If there isn't a mcollective key in the configuration don't do anything
if 'mcollective' not in cfg:
+ log.debug(("Skipping module named %s, "
+ "no 'mcollective' key in configuration"), name)
return
+
mcollective_cfg = cfg['mcollective']
+
# Start by installing the mcollective package ...
- cc.install_packages(("mcollective",))
+ cloud.distro.install_packages(("mcollective",))
# ... and then update the mcollective configuration
if 'conf' in mcollective_cfg:
# Create object for reading server.cfg values
- mcollective_config = ConfigParser.ConfigParser()
+        mcollective_config = config_helper.DefaultingConfigParser()
# Read server.cfg values from original file in order to be able to mix
# the rest up
- mcollective_config.readfp(FakeSecHead(open('/etc/mcollective/'
- 'server.cfg')))
- for cfg_name, cfg in mcollective_cfg['conf'].iteritems():
+ old_contents = util.load_file('/etc/mcollective/server.cfg')
+        # The file doesn't contain any sections, so add one temporarily.
+        # Use a counter-suffixed section name that does not already occur
+        # in the contents (try to avoid any conflicts) so that the config
+        # parser won't error out when reading (since no sections otherwise
+        # exist in the file); see the illustration below.
+ section_tpl = "[nullsection_%s]"
+ attempts = 0
+ section_head = section_tpl % (attempts)
+ while old_contents.find(section_head) != -1:
+ attempts += 1
+ section_head = section_tpl % (attempts)
+ sectioned_contents = "%s\n%s" % (section_head, old_contents)
+ mcollective_config.readfp(StringIO(sectioned_contents),
+ filename='/etc/mcollective/server.cfg')
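+        # As an illustration (hypothetical file contents), a section-less
+        # server.cfg such as:
+        #     topicprefix = /topic/
+        #     loglevel = debug
+        # is read here as:
+        #     [nullsection_0]
+        #     topicprefix = /topic/
+        #     loglevel = debug
+        # and the temporary "[nullsection_N]" header is stripped back out
+        # before the merged config is written below.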
+ for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
if cfg_name == 'public-cert':
util.write_file(pubcert_file, cfg, mode=0644)
mcollective_config.set(cfg_name,
@@ -76,24 +79,19 @@ def handle(_name, cfg, _cloud, _log, _args):
else:
# Iterate throug the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed
- for o, v in cfg.iteritems():
+ for (o, v) in cfg.iteritems():
mcollective_config.set(cfg_name, o, v)
# We got all our config as wanted we'll rename
# the previous server.cfg and create our new one
- os.rename('/etc/mcollective/server.cfg',
- '/etc/mcollective/server.cfg.old')
- outputfile = StringIO.StringIO()
- mcollective_config.write(outputfile)
- # Now we got the whole file, write to disk except first line
+ util.rename('/etc/mcollective/server.cfg',
+ '/etc/mcollective/server.cfg.old')
+ # Now we got the whole file, write to disk except the section
+ # we added so that config parser won't error out when trying to read.
# Note below, that we've just used ConfigParser because it generally
- # works. Below, we remove the initial 'nullsection' header
- # and then change 'key = value' to 'key: value'. The global
- # search and replace of '=' with ':' could be problematic though.
- # this most likely needs fixing.
- util.write_file('/etc/mcollective/server.cfg',
- outputfile.getvalue().replace('[nullsection]\n', '').replace(' =',
- ':'),
- mode=0644)
+ # works. Below, we remove the initial 'nullsection' header.
+ contents = mcollective_config.stringify()
+ contents = contents.replace("%s\n" % (section_head), "")
+ util.write_file('/etc/mcollective/server.cfg', contents, mode=0644)
# Start mcollective
- subprocess.check_call(['service', 'mcollective', 'start'])
+ util.subp(['service', 'mcollective', 'start'], capture=False)
diff --git a/cloudinit/transforms/cc_mounts.py b/cloudinit/transforms/cc_mounts.py
index 6cdd74e8..babcbda1 100644
--- a/cloudinit/transforms/cc_mounts.py
+++ b/cloudinit/transforms/cc_mounts.py
@@ -18,10 +18,17 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
+from string import whitespace # pylint: disable=W0402
+
import os
import re
-from string import whitespace # pylint: disable=W0402
+
+from cloudinit import util
+
+# shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1
+shortname_filter = r"^[x]{0,1}[shv]d[a-z][0-9]*$"
+shortname = re.compile(shortname_filter)
+ws = re.compile("[%s]+" % whitespace)
def is_mdname(name):
@@ -49,38 +56,46 @@ def handle(_name, cfg, cloud, log, _args):
if "mounts" in cfg:
cfgmnt = cfg["mounts"]
- # shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1
- shortname_filter = r"^[x]{0,1}[shv]d[a-z][0-9]*$"
- shortname = re.compile(shortname_filter)
-
+
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
+ log.warn("Mount option %s not a list, got a %s instead",
+ (i + 1), util.obj_name(cfgmnt[i]))
continue
+ startname = str(cfgmnt[i][0])
+        log.debug("Attempting to determine the real name of %s", startname)
+
# workaround, allow user to specify 'ephemeral'
# rather than more ec2 correct 'ephemeral0'
- if cfgmnt[i][0] == "ephemeral":
+ if startname == "ephemeral":
cfgmnt[i][0] = "ephemeral0"
+ log.debug("Adjusted mount option %s name from ephemeral to ephemeral0", (i + 1))
- if is_mdname(cfgmnt[i][0]):
- newname = cloud.device_name_to_device(cfgmnt[i][0])
+ if is_mdname(startname):
+ newname = cloud.device_name_to_device(startname)
if not newname:
- log.debug("ignoring nonexistant named mount %s" % cfgmnt[i][0])
+ log.debug("Ignoring nonexistant named mount %s", startname)
cfgmnt[i][1] = None
else:
- if newname.startswith("/"):
- cfgmnt[i][0] = newname
- else:
- cfgmnt[i][0] = "/dev/%s" % newname
+ renamed = newname
+ if not newname.startswith("/"):
+ renamed = "/dev/%s" % newname
+ cfgmnt[i][0] = renamed
+ log.debug("Mapped metadata name %s to %s", startname, renamed)
else:
- if shortname.match(cfgmnt[i][0]):
- cfgmnt[i][0] = "/dev/%s" % cfgmnt[i][0]
+ if shortname.match(startname):
+ renamed = "/dev/%s" % startname
+ log.debug("Mapped shortname name %s to %s", startname, renamed)
+ cfgmnt[i][0] = renamed
# in case the user did not quote a field (likely fs-freq, fs_passno)
# but do not convert None to 'None' (LP: #898365)
for j in range(len(cfgmnt[i])):
- if isinstance(cfgmnt[i][j], int):
+            if cfgmnt[i][j] is None:
+ continue
+ else:
cfgmnt[i][j] = str(cfgmnt[i][j])
for i in range(len(cfgmnt)):
@@ -102,14 +117,18 @@ def handle(_name, cfg, cloud, log, _args):
# for each of the "default" mounts, add them only if no other
# entry has the same device name
for defmnt in defmnts:
- devname = cloud.device_name_to_device(defmnt[0])
+ startname = defmnt[0]
+ devname = cloud.device_name_to_device(startname)
if devname is None:
+ log.debug("Ignoring nonexistant named default mount %s", startname)
continue
if devname.startswith("/"):
defmnt[0] = devname
else:
defmnt[0] = "/dev/%s" % devname
+ log.debug("Mapped default device %s to %s", startname, defmnt[0])
+
cfgmnt_has = False
for cfgm in cfgmnt:
if cfgm[0] == defmnt[0]:
@@ -117,14 +136,21 @@ def handle(_name, cfg, cloud, log, _args):
break
if cfgmnt_has:
+ log.debug("Not including %s, already previously included", startname)
continue
cfgmnt.append(defmnt)
# now, each entry in the cfgmnt list has all fstab values
# if the second field is None (not the string, the value) we skip it
- actlist = [x for x in cfgmnt if x[1] is not None]
+ actlist = []
+ for x in cfgmnt:
+ if x[1] is None:
+ log.debug("Skipping non-existent device named %s", x[0])
+ else:
+ actlist.append(x)
if len(actlist) == 0:
+ log.debug("No modifications to fstab needed.")
return
comment = "comment=cloudconfig"
@@ -141,8 +167,7 @@ def handle(_name, cfg, cloud, log, _args):
cc_lines.append('\t'.join(line))
fstab_lines = []
- fstab = open("/etc/fstab", "r+")
- ws = re.compile("[%s]+" % whitespace)
+    fstab = util.load_file("/etc/fstab")
+    for line in fstab.splitlines():
try:
toks = ws.split(line)
@@ -153,27 +178,22 @@ def handle(_name, cfg, cloud, log, _args):
fstab_lines.append(line)
fstab_lines.extend(cc_lines)
-
- fstab.seek(0)
- fstab.write("%s\n" % '\n'.join(fstab_lines))
- fstab.truncate()
- fstab.close()
+ contents = "%s\n" % ('\n'.join(fstab_lines))
+ util.write_file("/etc/fstab", contents)
if needswap:
try:
util.subp(("swapon", "-a"))
except:
- log.warn("Failed to enable swap")
+ util.logexc(log, "Activating swap via 'swapon -a' failed")
for d in dirs:
- if os.path.exists(d):
- continue
try:
- os.makedirs(d)
+ util.ensure_dir(d)
except:
- log.warn("Failed to make '%s' config-mount\n", d)
+ util.logexc(log, "Failed to make '%s' config-mount", d)
try:
util.subp(("mount", "-a"))
except:
- log.warn("'mount -a' failed")
+ util.logexc(log, "Activating mounts via 'mount -a' failed")
diff --git a/cloudinit/transforms/cc_phone_home.py b/cloudinit/transforms/cc_phone_home.py
index a7ff74e1..36af6dfa 100644
--- a/cloudinit/transforms/cc_phone_home.py
+++ b/cloudinit/transforms/cc_phone_home.py
@@ -17,13 +17,18 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit.CloudConfig import per_instance
-import cloudinit.util as util
+
+from cloudinit import templater
+from cloudinit import url_helper as uhelp
+from cloudinit import util
+
+from cloudinit.settings import PER_INSTANCE
+
from time import sleep
-frequency = per_instance
-post_list_all = ['pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id',
- 'hostname']
+frequency = PER_INSTANCE
+post_list_all = ['pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa',
+ 'instance_id', 'hostname']
# phone_home:
@@ -35,7 +40,7 @@ post_list_all = ['pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id',
# url: http://my.foo.bar/$INSTANCE_ID/
# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id
#
-def handle(_name, cfg, cloud, log, args):
+def handle(name, cfg, cloud, log, args):
if len(args) != 0:
ph_cfg = util.read_conf(args[0])
else:
@@ -44,7 +49,8 @@ def handle(_name, cfg, cloud, log, args):
ph_cfg = cfg['phone_home']
if 'url' not in ph_cfg:
- log.warn("no 'url' token in phone_home")
+ log.warn(("Skipping module named %s, "
+ "no 'url' found in 'phone_home' configuration"), name)
return
url = ph_cfg['url']
@@ -53,8 +59,8 @@ def handle(_name, cfg, cloud, log, args):
try:
tries = int(tries)
except:
- log.warn("tries is not an integer. using 10")
tries = 10
+ util.logexc(log, "Configuration entry 'tries' is not an integer, using %s", tries)
if post_list == "all":
post_list = post_list_all
@@ -71,11 +77,9 @@ def handle(_name, cfg, cloud, log, args):
for n, path in pubkeys.iteritems():
try:
- fp = open(path, "rb")
- all_keys[n] = fp.read()
- fp.close()
+ all_keys[n] = util.load_file(path)
except:
- log.warn("%s: failed to open in phone_home" % path)
+ util.logexc(log, "%s: failed to open, can not phone home that data", path)
submit_keys = {}
for k in post_list:
@@ -83,24 +87,11 @@ def handle(_name, cfg, cloud, log, args):
submit_keys[k] = all_keys[k]
else:
submit_keys[k] = "N/A"
- log.warn("requested key %s from 'post' list not available")
+ log.warn("Requested key %s from 'post' configuration list not available", k)
- url = util.render_string(url, {'INSTANCE_ID': all_keys['instance_id']})
+ url = templater.render_string(url, {'INSTANCE_ID': all_keys['instance_id']})
- null_exc = object()
- last_e = null_exc
- for i in range(0, tries):
- try:
- util.readurl(url, submit_keys)
- log.debug("succeeded submit to %s on try %i" % (url, i + 1))
- return
- except Exception as e:
- log.debug("failed to post to %s on try %i" % (url, i + 1))
- last_e = e
- sleep(3)
-
- log.warn("failed to post to %s in %i tries" % (url, tries))
- if last_e is not null_exc:
- raise(last_e)
-
- return
+ try:
+ uhelp.readurl(url, data=submit_keys, retries=tries, sec_between=3)
+ except:
+ util.logexc(log, "Failed to post phone home data to %s in %s tries", url, tries)
diff --git a/cloudinit/transforms/cc_puppet.py b/cloudinit/transforms/cc_puppet.py
index 6fc475f6..0a21a929 100644
--- a/cloudinit/transforms/cc_puppet.py
+++ b/cloudinit/transforms/cc_puppet.py
@@ -18,91 +18,85 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from StringIO import StringIO
+
import os
-import os.path
import pwd
import socket
-import subprocess
-import StringIO
-import ConfigParser
-import cloudinit.CloudConfig as cc
-import cloudinit.util as util
+
+from cloudinit import util
+# Alias the module import so it is not shadowed by the 'cfg' handle argument
+from cloudinit import cfg as config_helper
-def handle(_name, cfg, cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
# If there isn't a puppet key in the configuration don't do anything
if 'puppet' not in cfg:
+ log.debug(("Skipping module named %s,"
+ " no 'puppet' configuration found"), name)
return
+
puppet_cfg = cfg['puppet']
+
# Start by installing the puppet package ...
- cc.install_packages(("puppet",))
+ cloud.distro.install_packages(("puppet",))
# ... and then update the puppet configuration
if 'conf' in puppet_cfg:
# Add all sections from the conf object to puppet.conf
- puppet_conf_fh = open('/etc/puppet/puppet.conf', 'r')
+ contents = util.load_file('/etc/puppet/puppet.conf')
# Create object for reading puppet.conf values
- puppet_config = ConfigParser.ConfigParser()
+        puppet_config = config_helper.DefaultingConfigParser()
# Read puppet.conf values from original file in order to be able to
- # mix the rest up
- puppet_config.readfp(StringIO.StringIO(''.join(i.lstrip() for i in
- puppet_conf_fh.readlines())))
- # Close original file, no longer needed
- puppet_conf_fh.close()
- for cfg_name, cfg in puppet_cfg['conf'].iteritems():
+ # mix the rest up. First clean them up (TODO is this really needed??)
+ cleaned_contents = '\n'.join([i.lstrip() for i in contents.splitlines()])
+ puppet_config.readfp(StringIO(cleaned_contents),
+ filename='/etc/puppet/puppet.conf')
+ for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
# ca_cert configuration is a special case
# Dump the puppetmaster ca certificate in the correct place
if cfg_name == 'ca_cert':
# Puppet ssl sub-directory isn't created yet
# Create it with the proper permissions and ownership
- os.makedirs('/var/lib/puppet/ssl')
- os.chmod('/var/lib/puppet/ssl', 0771)
- os.chown('/var/lib/puppet/ssl',
- pwd.getpwnam('puppet').pw_uid, 0)
- os.makedirs('/var/lib/puppet/ssl/certs/')
- os.chown('/var/lib/puppet/ssl/certs/',
- pwd.getpwnam('puppet').pw_uid, 0)
- ca_fh = open('/var/lib/puppet/ssl/certs/ca.pem', 'w')
- ca_fh.write(cfg)
- ca_fh.close()
- os.chown('/var/lib/puppet/ssl/certs/ca.pem',
- pwd.getpwnam('puppet').pw_uid, 0)
- util.restorecon_if_possible('/var/lib/puppet', recursive=True)
+ util.ensure_dir('/var/lib/puppet/ssl', 0771)
+ util.chownbyid('/var/lib/puppet/ssl',
+ pwd.getpwnam('puppet').pw_uid, 0)
+ util.ensure_dir('/var/lib/puppet/ssl/certs/')
+ util.chownbyid('/var/lib/puppet/ssl/certs/',
+ pwd.getpwnam('puppet').pw_uid, 0)
+ util.write_file('/var/lib/puppet/ssl/certs/ca.pem', cfg)
+ util.chownbyid('/var/lib/puppet/ssl/certs/ca.pem',
+ pwd.getpwnam('puppet').pw_uid, 0)
else:
- #puppet_conf_fh.write("\n[%s]\n" % (cfg_name))
- # If puppet.conf already has this section we don't want to
- # write it again
- if puppet_config.has_section(cfg_name) == False:
- puppet_config.add_section(cfg_name)
# Iterate throug the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed
- for o, v in cfg.iteritems():
+ for (o, v) in cfg.iteritems():
if o == 'certname':
# Expand %f as the fqdn
+ # TODO should this use the cloud fqdn??
v = v.replace("%f", socket.getfqdn())
# Expand %i as the instance id
- v = v.replace("%i",
- cloud.datasource.get_instance_id())
- # certname needs to be downcase
+ v = v.replace("%i", cloud.get_instance_id())
+ # certname needs to be downcased
v = v.lower()
puppet_config.set(cfg_name, o, v)
- #puppet_conf_fh.write("%s=%s\n" % (o, v))
# We got all our config as wanted we'll rename
# the previous puppet.conf and create our new one
- os.rename('/etc/puppet/puppet.conf', '/etc/puppet/puppet.conf.old')
- with open('/etc/puppet/puppet.conf', 'wb') as configfile:
- puppet_config.write(configfile)
- util.restorecon_if_possible('/etc/puppet/puppet.conf')
+ util.rename('/etc/puppet/puppet.conf', '/etc/puppet/puppet.conf.old')
+ contents = puppet_config.stringify()
+ util.write_file('/etc/puppet/puppet.conf', contents)
+
# Set puppet to automatically start
if os.path.exists('/etc/default/puppet'):
- subprocess.check_call(['sed', '-i',
- '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'])
+ util.subp(['sed', '-i',
+ '-e', 's/^START=.*/START=yes/',
+ '/etc/default/puppet'], capture=False)
elif os.path.exists('/bin/systemctl'):
- subprocess.check_call(['/bin/systemctl', 'enable', 'puppet.service'])
+ util.subp(['/bin/systemctl', 'enable', 'puppet.service'], capture=False)
elif os.path.exists('/sbin/chkconfig'):
- subprocess.check_call(['/sbin/chkconfig', 'puppet', 'on'])
+ util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
else:
- log.warn("Do not know how to enable puppet service on this system")
+ log.warn(("Sorry we do not know how to enable"
+ " puppet services on this system"))
+
# Start puppetd
- subprocess.check_call(['service', 'puppet', 'start'])
+ util.subp(['service', 'puppet', 'start'], capture=False)
diff --git a/cloudinit/transforms/cc_resizefs.py b/cloudinit/transforms/cc_resizefs.py
index 2dc66def..daaf4da9 100644
--- a/cloudinit/transforms/cc_resizefs.py
+++ b/cloudinit/transforms/cc_resizefs.py
@@ -18,91 +18,117 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import subprocess
+import errno
import os
import stat
import sys
-import time
import tempfile
-from cloudinit.CloudConfig import per_always
-
-frequency = per_always
+import time
+from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
-def handle(_name, cfg, _cloud, log, args):
- if len(args) != 0:
- resize_root = False
- if str(args[0]).lower() in ['true', '1', 'on', 'yes']:
- resize_root = True
- else:
- resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
+frequency = PER_ALWAYS
- if str(resize_root).lower() in ['false', '0']:
- return
+resize_fs_prefixes_cmds = [
+ ('ext', 'resize2fs'),
+ ('xfs', 'xfs_growfs'),
+]
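+# For example, a root filesystem reported as 'ext4' matches the 'ext' prefix
+# above and would be grown with resize2fs, while 'xfs' would use xfs_growfs;
+# any other (unknown) filesystem type is left alone by handle() below.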
- # we use mktemp rather than mkstemp because early in boot nothing
- # else should be able to race us for this, and we need to mknod.
- devpth = tempfile.mktemp(prefix="cloudinit.resizefs.", dir="/run")
+def nodeify_path(devpth, where, log):
try:
- st_dev = os.stat("/").st_dev
+ st_dev = os.stat(where).st_dev
dev = os.makedev(os.major(st_dev), os.minor(st_dev))
os.mknod(devpth, 0400 | stat.S_IFBLK, dev)
+ return st_dev
except:
if util.is_container():
- log.debug("inside container, ignoring mknod failure in resizefs")
+ log.debug("Inside container, ignoring mknod failure in resizefs")
return
- log.warn("Failed to make device node to resize /")
+ log.warn("Failed to make device node to resize %s at %s", where, devpth)
raise
- cmd = ['blkid', '-c', '/dev/null', '-sTYPE', '-ovalue', devpth]
+
+def get_fs_type(st_dev, path, log):
try:
- (fstype, _err) = util.subp(cmd)
- except subprocess.CalledProcessError as e:
- log.warn("Failed to get filesystem type of maj=%s, min=%s via: %s" %
- (os.major(st_dev), os.minor(st_dev), cmd))
- log.warn("output=%s\nerror=%s\n", e.output[0], e.output[1])
- os.unlink(devpth)
+ fs_type = util.find_devs_with(tag='TYPE', oformat='value',
+ no_cache=True, path=path)
+ return fs_type
+ except util.ProcessExecutionError:
+ util.logexc(log, ("Failed to get filesystem type"
+ " of maj=%s, min=%s for path %s"),
+ os.major(st_dev), os.minor(st_dev), path)
raise
- if str(fstype).startswith("ext"):
- resize_cmd = ['resize2fs', devpth]
- elif fstype == "xfs":
- resize_cmd = ['xfs_growfs', devpth]
+
+def handle(name, cfg, _cloud, log, args):
+ if len(args) != 0:
+ resize_root = args[0]
else:
- os.unlink(devpth)
- log.debug("not resizing unknown filesystem %s" % fstype)
+ resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
+
+ if not util.translate_bool(resize_root):
+ log.debug("Skipping module named %s, resizing disabled", name)
return
- if resize_root == "noblock":
- fid = os.fork()
- if fid == 0:
- try:
- do_resize(resize_cmd, devpth, log)
- os._exit(0) # pylint: disable=W0212
- except Exception as exc:
- sys.stderr.write("Failed: %s" % exc)
- os._exit(1) # pylint: disable=W0212
- else:
- do_resize(resize_cmd, devpth, log)
+ # TODO is the directory ok to be used??
+ resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
+ util.ensure_dir(resize_root_d)
+ with util.SilentTemporaryFile(prefix="cloudinit.resizefs.",
+ dir=resize_root_d, delete=True) as tfh:
+ devpth = tfh.name
+
+        # Delete the file so that mknod will work, but keep the
+        # file handle's knowledge of the removal so that if the
+        # path is recreated later the temporary file still
+        # benefits from auto deletion
+ tfh.unlink_now()
+
+ # TODO: allow what is to be resized to
+ # be configurable??
+ st_dev = nodeify_path(devpth, "/", log)
+        fs_type = get_fs_type(st_dev, devpth, log)
+
+ resizer = None
+        fstype_lc = fs_type.lower()
+ for (pfix, root_cmd) in resize_fs_prefixes_cmds:
+ if fstype_lc.startswith(pfix):
+ resizer = root_cmd
+ break
- log.debug("resizing root filesystem (type=%s, maj=%i, min=%i, val=%s)" %
- (str(fstype).rstrip("\n"), os.major(st_dev), os.minor(st_dev),
- resize_root))
+ if not resizer:
+ log.warn("Not resizing unknown filesystem type %s", fs_type)
+ return
+
+ log.debug("Resizing using %s", resizer)
+ resize_cmd = [resizer, devpth]
- return
+ if resize_root == "noblock":
+ # Fork to a child that will run
+ # the resize command
+ util.fork_cb(do_resize, resize_cmd, log)
+ # Don't delete the file now in the parent
+ tfh.delete = False
+ else:
+ do_resize(resize_cmd, log)
+ action = 'Resized'
+ if resize_root == "noblock":
+ action = 'Resizing (via forking)'
+ log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)",
+ action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root)
-def do_resize(resize_cmd, devpth, log):
+
+def do_resize(resize_cmd, log):
+ start = time.time()
try:
- start = time.time()
util.subp(resize_cmd)
- except subprocess.CalledProcessError as e:
- log.warn("Failed to resize filesystem (%s)" % resize_cmd)
- log.warn("output=%s\nerror=%s\n", e.output[0], e.output[1])
- os.unlink(devpth)
+ except util.ProcessExecutionError as e:
+ util.logexc(log, "Failed to resize filesystem (using %s)", resize_cmd)
raise
-
- os.unlink(devpth)
- log.debug("resize took %s seconds" % (time.time() - start))
+ tot_time = int(time.time() - start)
+ log.debug("Resizing took %s seconds", tot_time)
+ # TODO: Should we add a fsck check after this to make
+ # sure we didn't corrupt anything?
diff --git a/cloudinit/transforms/cc_rightscale_userdata.py b/cloudinit/transforms/cc_rightscale_userdata.py
index 5ed0848f..cde11b54 100644
--- a/cloudinit/transforms/cc_rightscale_userdata.py
+++ b/cloudinit/transforms/cc_rightscale_userdata.py
@@ -35,44 +35,64 @@
##
##
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_instance
-from cloudinit import get_ipath_cur
+import os
+
+from cloudinit import url_helper as uhelp
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+
from urlparse import parse_qs
-frequency = per_instance
+frequency = PER_INSTANCE
+
my_name = "cc_rightscale_userdata"
my_hookname = 'CLOUD_INIT_REMOTE_HOOK'
-def handle(_name, _cfg, cloud, log, _args):
+def handle(name, _cfg, cloud, log, _args):
try:
ud = cloud.get_userdata_raw()
except:
- log.warn("failed to get raw userdata in %s" % my_name)
+ log.warn("Failed to get raw userdata in module %s", name)
return
try:
mdict = parse_qs(ud)
- if not my_hookname in mdict:
+ if not mdict or not my_hookname in mdict:
+ log.debug("Skipping module %s, did not find %s in parsed raw userdata", name, my_hookname)
return
except:
- log.warn("failed to urlparse.parse_qa(userdata_raw())")
+ log.warn("Failed to parse query string %s into a dictionary", ud)
raise
- scripts_d = get_ipath_cur('scripts')
- i = 0
- first_e = None
- for url in mdict[my_hookname]:
- fname = "%s/rightscale-%02i" % (scripts_d, i)
- i = i + 1
+ wrote_fns = []
+ captured_excps = []
+
+    # These will eventually be run by the cc_scripts_user module
+ # TODO: maybe this should just be a new user data handler??
+ # Instead of a late transform that acts like a user data handler?
+ scripts_d = cloud.get_ipath_cur('scripts')
+ urls = mdict[my_hookname]
+ for (i, url) in enumerate(urls):
+ fname = os.path.join(scripts_d, "rightscale-%02i" % (i))
try:
- content = util.readurl(url)
- util.write_file(fname, content, mode=0700)
+ (content, st) = uhelp.readurl(url)
+ # Ensure its a valid http response (and something gotten)
+ if uhelp.ok_http_code(st) and content:
+ util.write_file(fname, content, mode=0700)
+ wrote_fns.append(fname)
except Exception as e:
- if not first_e:
- first_e = None
- log.warn("%s failed to read %s: %s" % (my_name, url, e))
+ captured_excps.append(e)
+ util.logexc(log, "%s failed to read %s and write %s", my_name, url, fname)
+
+ if wrote_fns:
+ log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
+
+ if len(wrote_fns) != len(urls):
+ skipped = len(urls) - len(wrote_fns)
+ log.debug("%s urls were skipped or failed", skipped)
+
+ if captured_excps:
+ log.warn("%s failed with exceptions, re-raising the last one", len(captured_excps))
+ raise captured_excps[-1]
- if first_e:
- raise(e)
diff --git a/cloudinit/transforms/cc_rsyslog.py b/cloudinit/transforms/cc_rsyslog.py
index ac7f2c74..ccbe68ff 100644
--- a/cloudinit/transforms/cc_rsyslog.py
+++ b/cloudinit/transforms/cc_rsyslog.py
@@ -18,16 +18,15 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit
-import logging
-import cloudinit.util as util
-import traceback
+import os
+
+from cloudinit import util
DEF_FILENAME = "20-cloud-config.conf"
DEF_DIR = "/etc/rsyslog.d"
-def handle(_name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
# rsyslog:
# - "*.* @@192.158.1.1"
# - content: "*.* @@192.0.2.1:10514"
@@ -37,17 +36,17 @@ def handle(_name, cfg, _cloud, log, _args):
# process 'rsyslog'
if not 'rsyslog' in cfg:
+ log.debug("Skipping module named %s, no 'rsyslog' key in configuration", name)
return
def_dir = cfg.get('rsyslog_dir', DEF_DIR)
def_fname = cfg.get('rsyslog_filename', DEF_FILENAME)
files = []
- elst = []
- for ent in cfg['rsyslog']:
+ for i, ent in enumerate(cfg['rsyslog']):
if isinstance(ent, dict):
if not "content" in ent:
- elst.append((ent, "no 'content' entry"))
+ log.warn("No 'content' entry in config entry %s", i + 1)
continue
content = ent['content']
filename = ent.get("filename", def_fname)
@@ -55,8 +54,13 @@ def handle(_name, cfg, _cloud, log, _args):
content = ent
filename = def_fname
+ filename = filename.strip()
+ if not filename:
+ log.warn("Entry %s has an empty filename", i + 1)
+ continue
+
if not filename.startswith("/"):
- filename = "%s/%s" % (def_dir, filename)
+ filename = os.path.join(def_dir, filename)
omode = "ab"
# truncate filename first time you see it
@@ -67,35 +71,29 @@ def handle(_name, cfg, _cloud, log, _args):
try:
util.write_file(filename, content + "\n", omode=omode)
except Exception as e:
- log.debug(traceback.format_exc(e))
- elst.append((content, "failed to write to %s" % filename))
+ util.logexc(log, "Failed to write to %s", filename)
- # need to restart syslogd
+ # Attempt to restart syslogd
restarted = False
try:
- # if this config module is running at cloud-init time
+ # If this config module is running at cloud-init time
# (before rsyslog is running) we don't actually have to
# restart syslog.
#
- # upstart actually does what we want here, in that it doesn't
+ # Upstart actually does what we want here, in that it doesn't
# start a service that wasn't running already on 'restart'
# it will also return failure on the attempt, so 'restarted'
- # won't get set
- log.debug("restarting rsyslog")
+ # won't get set.
+ log.debug("Restarting rsyslog")
util.subp(['service', 'rsyslog', 'restart'])
restarted = True
-
except Exception as e:
- elst.append(("restart", str(e)))
+        util.logexc(log, "Failed restarting rsyslog")
if restarted:
- # this only needs to run if we *actually* restarted
+ # This only needs to run if we *actually* restarted
# syslog above.
- cloudinit.logging_set_from_cfg_file()
- log = logging.getLogger()
- log.debug("rsyslog configured %s" % files)
-
- for e in elst:
- log.warn("rsyslog error: %s\n" % ':'.join(e))
-
- return
+ cloud.cycle_logging()
+ # This should now use rsyslog if
+ # the logging was setup to use it...
+ log.debug("%s configured %s files", name, files)
diff --git a/cloudinit/transforms/cc_runcmd.py b/cloudinit/transforms/cc_runcmd.py
index f7e8c671..19c0e721 100644
--- a/cloudinit/transforms/cc_runcmd.py
+++ b/cloudinit/transforms/cc_runcmd.py
@@ -18,15 +18,20 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
+import os
+from cloudinit import util
-def handle(_name, cfg, cloud, log, _args):
+
+def handle(name, cfg, cloud, log, _args):
if "runcmd" not in cfg:
+ log.debug("Skipping module named %s, no 'runcmd' key in configuration", name)
return
- outfile = "%s/runcmd" % cloud.get_ipath('scripts')
+
+ outfile = os.path.join(cloud.get_ipath('scripts'), "runcmd")
+ cmd = cfg["runcmd"]
try:
- content = util.shellify(cfg["runcmd"])
+ content = util.shellify(cmd)
util.write_file(outfile, content, 0700)
except:
- log.warn("failed to open %s for runcmd" % outfile)
+ util.logexc(log, "Failed to shellify %s into file %s", cmd, outfile)
diff --git a/cloudinit/transforms/cc_salt_minion.py b/cloudinit/transforms/cc_salt_minion.py
index 1a3b5039..47cbc194 100644
--- a/cloudinit/transforms/cc_salt_minion.py
+++ b/cloudinit/transforms/cc_salt_minion.py
@@ -15,42 +15,43 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import os.path
-import subprocess
-import cloudinit.CloudConfig as cc
-import yaml
+from cloudinit import util
-def handle(_name, cfg, _cloud, _log, _args):
+# Note: see http://saltstack.org/topics/installation/
+
+
+def handle(name, cfg, cloud, log, _args):
# If there isn't a salt key in the configuration don't do anything
if 'salt_minion' not in cfg:
+ log.debug("Skipping module named %s, no 'salt_minion' key in configuration", name)
return
+
salt_cfg = cfg['salt_minion']
+
# Start by installing the salt package ...
- cc.install_packages(("salt",))
- config_dir = '/etc/salt'
- if not os.path.isdir(config_dir):
- os.makedirs(config_dir)
+ cloud.distro.install_packages(("salt",))
+
+ # Ensure we can configure files at the right dir
+ config_dir = salt_cfg.get("config_dir", '/etc/salt')
+ util.ensure_dir(config_dir)
+
# ... and then update the salt configuration
if 'conf' in salt_cfg:
# Add all sections from the conf object to /etc/salt/minion
minion_config = os.path.join(config_dir, 'minion')
- yaml.dump(salt_cfg['conf'],
- file(minion_config, 'w'),
- default_flow_style=False)
+ minion_data = util.yaml_dumps(salt_cfg.get('conf'))
+ util.write_file(minion_config, minion_data)
+
# ... copy the key pair if specified
if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
- pki_dir = '/etc/salt/pki'
- cumask = os.umask(077)
- if not os.path.isdir(pki_dir):
- os.makedirs(pki_dir)
- pub_name = os.path.join(pki_dir, 'minion.pub')
- pem_name = os.path.join(pki_dir, 'minion.pem')
- with open(pub_name, 'w') as f:
- f.write(salt_cfg['public_key'])
- with open(pem_name, 'w') as f:
- f.write(salt_cfg['private_key'])
- os.umask(cumask)
+ pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
+ with util.umask(077):
+ util.ensure_dir(pki_dir)
+ pub_name = os.path.join(pki_dir, 'minion.pub')
+ pem_name = os.path.join(pki_dir, 'minion.pem')
+ util.write_file(pub_name, salt_cfg['public_key'])
+ util.write_file(pem_name, salt_cfg['private_key'])
# Start salt-minion
- subprocess.check_call(['service', 'salt-minion', 'start'])
+ util.subp(['service', 'salt-minion', 'start'], capture=False)
diff --git a/cloudinit/transforms/cc_scripts_per_boot.py b/cloudinit/transforms/cc_scripts_per_boot.py
index 41a74754..bcdf4400 100644
--- a/cloudinit/transforms/cc_scripts_per_boot.py
+++ b/cloudinit/transforms/cc_scripts_per_boot.py
@@ -18,17 +18,23 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_always
-from cloudinit import get_cpath
+import os
-frequency = per_always
-runparts_path = "%s/%s" % (get_cpath(), "scripts/per-boot")
+from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
-def handle(_name, _cfg, _cloud, log, _args):
+frequency = PER_ALWAYS
+
+script_subdir = 'per-boot'
+
+
+def handle(_name, _cfg, cloud, log, _args):
+ # Comes from the following:
+ # https://forums.aws.amazon.com/thread.jspa?threadID=96918
+ runparts_path = os.path.join(cloud.get_cpath(), 'scripts', script_subdir)
try:
util.runparts(runparts_path)
except:
- log.warn("failed to run-parts in %s" % runparts_path)
+ log.warn("Failed to run-parts(%s) in %s", script_subdir, runparts_path)
raise
diff --git a/cloudinit/transforms/cc_scripts_per_instance.py b/cloudinit/transforms/cc_scripts_per_instance.py
index a2981eab..8d6609a1 100644
--- a/cloudinit/transforms/cc_scripts_per_instance.py
+++ b/cloudinit/transforms/cc_scripts_per_instance.py
@@ -18,17 +18,23 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_instance
-from cloudinit import get_cpath
+import os
-frequency = per_instance
-runparts_path = "%s/%s" % (get_cpath(), "scripts/per-instance")
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
-def handle(_name, _cfg, _cloud, log, _args):
+frequency = PER_INSTANCE
+
+script_subdir = 'per-instance'
+
+
+def handle(_name, _cfg, cloud, log, _args):
+ # Comes from the following:
+ # https://forums.aws.amazon.com/thread.jspa?threadID=96918
+ runparts_path = os.path.join(cloud.get_cpath(), 'scripts', script_subdir)
try:
util.runparts(runparts_path)
except:
- log.warn("failed to run-parts in %s" % runparts_path)
+ log.warn("Failed to run-parts(%s) in %s", script_subdir, runparts_path)
raise
diff --git a/cloudinit/transforms/cc_scripts_per_once.py b/cloudinit/transforms/cc_scripts_per_once.py
index a69151da..dbcec05d 100644
--- a/cloudinit/transforms/cc_scripts_per_once.py
+++ b/cloudinit/transforms/cc_scripts_per_once.py
@@ -18,17 +18,23 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_once
-from cloudinit import get_cpath
+import os
-frequency = per_once
-runparts_path = "%s/%s" % (get_cpath(), "scripts/per-once")
+from cloudinit import util
+from cloudinit.settings import PER_ONCE
-def handle(_name, _cfg, _cloud, log, _args):
+frequency = PER_ONCE
+
+script_subdir = 'per-once'
+
+
+def handle(_name, _cfg, cloud, log, _args):
+ # Comes from the following:
+ # https://forums.aws.amazon.com/thread.jspa?threadID=96918
+ runparts_path = os.path.join(cloud.get_cpath(), 'scripts', script_subdir)
try:
util.runparts(runparts_path)
except:
- log.warn("failed to run-parts in %s" % runparts_path)
+ log.warn("Failed to run-parts(%s) in %s", script_subdir, runparts_path)
raise
diff --git a/cloudinit/transforms/cc_scripts_user.py b/cloudinit/transforms/cc_scripts_user.py
index 933aa4e0..1e438ee6 100644
--- a/cloudinit/transforms/cc_scripts_user.py
+++ b/cloudinit/transforms/cc_scripts_user.py
@@ -18,17 +18,22 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_instance
-from cloudinit import get_ipath_cur
+import os
-frequency = per_instance
-runparts_path = "%s/%s" % (get_ipath_cur(), "scripts")
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
-def handle(_name, _cfg, _cloud, log, _args):
+frequency = PER_INSTANCE
+
+
+def handle(_name, _cfg, cloud, log, _args):
+ # This is written to by the user data handlers
+ # Ie, any custom shell scripts that come down
+ # go here...
+ runparts_path = os.path.join(cloud.get_ipath_cur(), "scripts")
try:
util.runparts(runparts_path)
except:
- log.warn("failed to run-parts in %s" % runparts_path)
+ log.warn("Failed to run-parts(%s) in %s", "user-data", runparts_path)
raise
diff --git a/cloudinit/transforms/cc_set_hostname.py b/cloudinit/transforms/cc_set_hostname.py
index acea74d9..fa2b59c2 100644
--- a/cloudinit/transforms/cc_set_hostname.py
+++ b/cloudinit/transforms/cc_set_hostname.py
@@ -18,25 +18,18 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
+from cloudinit import util
-def handle(_name, cfg, cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug("preserve_hostname is set. not setting hostname")
- return(True)
+ log.debug(("Configuration option 'preserve_hostname' is set,"
+ " not setting the hostname in %s"), name)
+ return
(hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
try:
- set_hostname(hostname, log)
+ log.debug("Setting hostname to %s", hostname)
+ cloud.distro.set_hostname(hostname)
except Exception:
- util.logexc(log)
- log.warn("failed to set hostname to %s\n", hostname)
-
- return(True)
-
-
-def set_hostname(hostname, log):
- util.subp(['hostname', hostname])
- util.write_file("/etc/hostname", "%s\n" % hostname, 0644)
- log.debug("populated /etc/hostname with %s on first boot", hostname)
+ util.logexc(log, "Failed to set hostname to %s", hostname)
diff --git a/cloudinit/transforms/cc_set_passwords.py b/cloudinit/transforms/cc_set_passwords.py
index 9d0bbdb8..4f2cdb97 100644
--- a/cloudinit/transforms/cc_set_passwords.py
+++ b/cloudinit/transforms/cc_set_passwords.py
@@ -18,13 +18,18 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
import sys
-import random
-from string import letters, digits # pylint: disable=W0402
+from cloudinit import util
-def handle(_name, cfg, _cloud, log, args):
+from string import letters, digits
+
+# We are removing certain 'painful' letters/numbers
+pw_set = (letters.translate(None, 'loLOI') +
+ digits.translate(None, '01'))
+
+
+def handle(_name, cfg, cloud, log, args):
if len(args) != 0:
# if run from command line, and give args, wipe the chpasswd['list']
password = args[0]
@@ -62,68 +67,83 @@ def handle(_name, cfg, _cloud, log, args):
ch_in = '\n'.join(plist_in)
try:
+ log.debug("Changing password for %s:", users)
util.subp(['chpasswd'], ch_in)
- log.debug("changed password for %s:" % users)
except Exception as e:
errors.append(e)
- log.warn("failed to set passwords with chpasswd: %s" % e)
+ util.logexc(log, "Failed to set passwords with chpasswd for %s", users)
if len(randlist):
- sys.stdout.write("%s\n%s\n" % ("Set the following passwords\n",
+ sys.stderr.write("%s\n%s\n" % ("Set the following 'random' passwords\n",
'\n'.join(randlist)))
if expire:
- enum = len(errors)
+ expired_users = []
for u in users:
try:
util.subp(['passwd', '--expire', u])
+ expired_users.append(u)
except Exception as e:
errors.append(e)
- log.warn("failed to expire account for %s" % u)
- if enum == len(errors):
- log.debug("expired passwords for: %s" % u)
+ util.logexc(log, "Failed to set 'expire' for %s", u)
+ if expired_users:
+ log.debug("Expired passwords for: %s users", expired_users)
+ change_pwauth = False
+ pw_auth = None
if 'ssh_pwauth' in cfg:
- val = str(cfg['ssh_pwauth']).lower()
- if val in ("true", "1", "yes"):
- pw_auth = "yes"
- change_pwauth = True
- elif val in ("false", "0", "no"):
- pw_auth = "no"
- change_pwauth = True
- else:
- change_pwauth = False
+ change_pwauth = True
+ if util.is_true_str(cfg['ssh_pwauth']):
+ pw_auth = 'yes'
+ if util.is_false_str(cfg['ssh_pwauth']):
+ pw_auth = 'no'
if change_pwauth:
- pa_s = "\(#*\)\(PasswordAuthentication[[:space:]]\+\)\(yes\|no\)"
- msg = "set PasswordAuthentication to '%s'" % pw_auth
- try:
- cmd = ['sed', '-i', 's,%s,\\2%s,' % (pa_s, pw_auth),
- '/etc/ssh/sshd_config']
- util.subp(cmd)
- log.debug(msg)
- except Exception as e:
- log.warn("failed %s" % msg)
- errors.append(e)
+ new_lines = []
+ replaced_auth = False
+ replacement = "PasswordAuthentication %s" % (pw_auth)
+
+ # See http://linux.die.net/man/5/sshd_config
+ old_lines = util.load_file('/etc/ssh/sshd_config').splitlines()
+ for i, line in enumerate(old_lines):
+ if not line.strip() or line.startswith("#"):
+ new_lines.append(line)
+ continue
+ splitup = line.split(None, 1)
+ if len(splitup) <= 1:
+ new_lines.append(line)
+ continue
+ (cmd, args) = splitup
+ # Keywords are case-insensitive and arguments are case-sensitive
+ cmd = cmd.lower().strip()
+ if cmd == 'passwordauthentication':
+ log.debug("Replacing auth line %s with %s", i + 1, replacement)
+ replaced_auth = True
+ new_lines.append(replacement)
+ else:
+ new_lines.append(line)
+
+ if not replaced_auth:
+ log.debug("Adding new auth line %s", replacement)
+ replaced_auth = True
+ new_lines.append(replacement)
+
+ new_contents = "\n".join(new_lines)
+ util.write_file('/etc/ssh/sshd_config', new_contents)
try:
- p = util.subp(['service', cfg.get('ssh_svcname', 'ssh'),
- 'restart'])
- log.debug("restarted sshd")
+ cmd = ['service']
+ cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
+ cmd.append('restart')
+ util.subp(cmd)
+ log.debug("Restarted the ssh daemon")
except:
- log.warn("restart of ssh failed")
+ util.logexc(log, "Restarting of the ssh daemon failed")
if len(errors):
- raise(errors[0])
-
- return
-
-
-def rand_str(strlen=32, select_from=letters + digits):
- return("".join([random.choice(select_from) for _x in range(0, strlen)]))
+        log.debug("%s errors occurred, re-raising the last one", len(errors))
+ raise errors[-1]
def rand_user_password(pwlen=9):
- selfrom = (letters.translate(None, 'loLOI') +
- digits.translate(None, '01'))
- return(rand_str(pwlen, select_from=selfrom))
+ return util.rand_str(pwlen, select_from=pw_set)
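The sed-based edit of /etc/ssh/sshd_config is replaced above by an in-Python rewrite. A standalone sketch of that loop, under a hypothetical helper name (the commit inlines this logic in handle()): blank and comment lines pass through, the case-insensitive PasswordAuthentication keyword is replaced, and the setting is appended if it was never present.

    def set_password_auth(old_lines, pw_auth):
        replacement = "PasswordAuthentication %s" % pw_auth
        new_lines = []
        replaced = False
        for line in old_lines:
            if not line.strip() or line.startswith("#"):
                new_lines.append(line)      # keep blanks and comments as-is
                continue
            parts = line.split(None, 1)
            if len(parts) <= 1:
                new_lines.append(line)      # keyword with no argument, leave it
                continue
            # sshd_config keywords are case-insensitive
            if parts[0].lower() == 'passwordauthentication':
                new_lines.append(replacement)
                replaced = True
            else:
                new_lines.append(line)
        if not replaced:
            new_lines.append(replacement)   # keyword was never present, append it
        return "\n".join(new_lines)

For example, set_password_auth(['#PasswordAuthentication yes', 'UsePAM yes'], 'no') keeps the commented line and the UsePAM line and appends 'PasswordAuthentication no'.
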
diff --git a/cloudinit/transforms/cc_ssh.py b/cloudinit/transforms/cc_ssh.py
index 48eb58bc..db6848d9 100644
--- a/cloudinit/transforms/cc_ssh.py
+++ b/cloudinit/transforms/cc_ssh.py
@@ -18,15 +18,34 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import cloudinit.SshUtil as sshutil
import os
import glob
-import subprocess
-DISABLE_ROOT_OPTS = "no-port-forwarding,no-agent-forwarding," \
-"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " \
-"rather than the user \\\"root\\\".\';echo;sleep 10\""
+from cloudinit import util
+from cloudinit import ssh_util
+
+DISABLE_ROOT_OPTS = ( "no-port-forwarding,no-agent-forwarding,"
+"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" "
+"rather than the user \\\"root\\\".\';echo;sleep 10\"")
+
+key2file = {
+ "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600),
+ "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
+ "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600),
+ "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644),
+ "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0600),
+ "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644),
+}
+
+priv2pub = {
+ 'rsa_private': 'rsa_public',
+ 'dsa_private': 'dsa_public',
+ 'ecdsa_private': 'ecdsa_public',
+}
+
+key_gen_tpl = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
+
+generate_keys = ['rsa', 'dsa', 'ecdsa']
def handle(_name, cfg, cloud, log, _args):
@@ -35,72 +54,70 @@ def handle(_name, cfg, cloud, log, _args):
if cfg.get("ssh_deletekeys", True):
for f in glob.glob("/etc/ssh/ssh_host_*key*"):
try:
- os.unlink(f)
+ util.del_file(f)
except:
- pass
-
+ util.logexc(log, "Failed deleting key file %s", f)
+
if "ssh_keys" in cfg:
# if there are keys in cloud-config, use them
- key2file = {
- "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600),
- "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
- "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600),
- "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644),
- "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0600),
- "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644),
- }
-
- for key, val in cfg["ssh_keys"].items():
+ for (key, val) in cfg["ssh_keys"].iteritems():
if key in key2file:
- util.write_file(key2file[key][0], val, key2file[key][1])
-
- priv2pub = {'rsa_private': 'rsa_public', 'dsa_private': 'dsa_public',
- 'ecdsa_private': 'ecdsa_public', }
-
+ tgt_fn = key2file[key][0]
+ tgt_perms = key2file[key][1]
+ util.write_file(tgt_fn, val, tgt_perms)
+
cmd = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
for priv, pub in priv2pub.iteritems():
if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']:
continue
pair = (key2file[priv][0], key2file[pub][0])
- subprocess.call(('sh', '-xc', cmd % pair))
- log.debug("generated %s from %s" % pair)
+ cmd = ['sh', '-xc', key_gen_tpl % pair]
+ try:
+ # TODO: Is this guard needed?
+ with util.SeLinuxGuard("/etc/ssh", recursive=True):
+ util.subp(cmd, capture=False)
+                log.debug("Generated a key for %s from %s", pair[1], pair[0])
+ except:
+                util.logexc(log, "Failed generating a key for %s from %s", pair[1], pair[0])
else:
# if not, generate them
- for keytype in util.get_cfg_option_list_or_str(cfg, 'ssh_genkeytypes',
- ['rsa', 'dsa', 'ecdsa']):
+ for keytype in util.get_cfg_option_list_or_str(cfg, 'ssh_genkeytypes', generate_keys):
keyfile = '/etc/ssh/ssh_host_%s_key' % keytype
if not os.path.exists(keyfile):
- subprocess.call(['ssh-keygen', '-t', keytype, '-N', '',
- '-f', keyfile])
-
- util.restorecon_if_possible('/etc/ssh', recursive=True)
+ cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
+ try:
+ # TODO: Is this guard needed?
+ with util.SeLinuxGuard("/etc/ssh", recursive=True):
+ util.subp(cmd, capture=False)
+ except:
+ util.logexc(log, "Failed generating key type %s to file %s", keytype, keyfile)
try:
user = util.get_cfg_option_str(cfg, 'user')
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
DISABLE_ROOT_OPTS)
- keys = cloud.get_public_ssh_keys()
+ keys = cloud.get_public_ssh_keys() or []
if "ssh_authorized_keys" in cfg:
cfgkeys = cfg["ssh_authorized_keys"]
keys.extend(cfgkeys)
apply_credentials(keys, user, disable_root, disable_root_opts, log)
except:
- util.logexc(log)
- log.warn("applying credentials failed!\n")
+ util.logexc(log, "Applying ssh credentials failed!")
def apply_credentials(keys, user, disable_root,
disable_root_opts=DISABLE_ROOT_OPTS, log=None):
+
keys = set(keys)
if user:
- sshutil.setup_user_keys(keys, user, '', log)
+ ssh_util.setup_user_keys(keys, user, '')
- if disable_root:
+ if disable_root and user:
key_prefix = disable_root_opts.replace('$USER', user)
else:
key_prefix = ''
- sshutil.setup_user_keys(keys, 'root', key_prefix, log)
+ ssh_util.setup_user_keys(keys, 'root', key_prefix)
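When cloud-config supplies only a private host key, the matching public key is derived with ssh-keygen -y via the new module-level tables. A standalone sketch of that step under the names introduced above (the commit performs it inline in handle()):

    from cloudinit import util

    def regen_public_keys(cfg_keys, key2file, priv2pub, key_gen_tpl):
        for (priv, pub) in priv2pub.iteritems():
            if pub in cfg_keys or priv not in cfg_keys:
                continue  # public key supplied, or no private key to derive from
            pair = (key2file[priv][0], key2file[pub][0])
            # e.g. o=$(ssh-keygen -yf "/etc/ssh/ssh_host_rsa_key") && echo "$o" ... > pub
            util.subp(['sh', '-xc', key_gen_tpl % pair], capture=False)
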
diff --git a/cloudinit/transforms/cc_ssh_import_id.py b/cloudinit/transforms/cc_ssh_import_id.py
index bbf5bd83..019413d4 100644
--- a/cloudinit/transforms/cc_ssh_import_id.py
+++ b/cloudinit/transforms/cc_ssh_import_id.py
@@ -18,12 +18,14 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import subprocess
-import traceback
+from cloudinit import util
+# The ssh-import-id only seems to exist on ubuntu (for now)
+# https://launchpad.net/ssh-import-id
+distros = ['ubuntu']
-def handle(_name, cfg, _cloud, log, args):
+
+def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
user = args[0]
ids = []
@@ -34,17 +36,14 @@ def handle(_name, cfg, _cloud, log, args):
ids = util.get_cfg_option_list_or_str(cfg, "ssh_import_id", [])
if len(ids) == 0:
+ log.debug("Skipping module named %s, no ids found to import", name)
return
cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
-
- log.debug("importing ssh ids. cmd = %s" % cmd)
+ log.debug("Importing ssh ids for user %s.", user)
try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError as e:
- log.debug(traceback.format_exc(e))
- raise Exception("Cmd returned %s: %s" % (e.returncode, cmd))
- except OSError as e:
- log.debug(traceback.format_exc(e))
- raise Exception("Cmd failed to execute: %s" % (cmd))
+ util.subp(cmd, capture=False)
+ except util.ProcessExecutionError as e:
+ util.logexc(log, "Failed to run command to import %s ssh ids", user)
+ raise e
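For reference, the command that util.subp() now runs for a configured user and id list (the values below are purely illustrative):

    user = 'ubuntu'                      # hypothetical values, not from the commit
    ids = ['smoser', 'harlowja']
    cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
    # -> ['sudo', '-Hu', 'ubuntu', 'ssh-import-id', 'smoser', 'harlowja']
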
diff --git a/cloudinit/transforms/cc_timezone.py b/cloudinit/transforms/cc_timezone.py
index e5c9901b..6fb5edc0 100644
--- a/cloudinit/transforms/cc_timezone.py
+++ b/cloudinit/transforms/cc_timezone.py
@@ -18,50 +18,19 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit.CloudConfig import per_instance
 from cloudinit import util
-import os.path
-import shutil
+from cloudinit.settings import PER_INSTANCE
-frequency = per_instance
-tz_base = "/usr/share/zoneinfo"
+frequency = PER_INSTANCE
-def handle(_name, cfg, _cloud, log, args):
+def handle(name, cfg, cloud, log, args):
if len(args) != 0:
timezone = args[0]
else:
timezone = util.get_cfg_option_str(cfg, "timezone", False)
if not timezone:
+ log.debug("Skipping module named %s, no 'timezone' specified", name)
return
- tz_file = "%s/%s" % (tz_base, timezone)
-
- if not os.path.isfile(tz_file):
- log.debug("Invalid timezone %s" % tz_file)
- raise Exception("Invalid timezone %s" % tz_file)
-
- try:
- fp = open("/etc/timezone", "wb")
- fp.write("%s\n" % timezone)
- fp.close()
- except:
- log.debug("failed to write to /etc/timezone")
- raise
- if os.path.exists("/etc/sysconfig/clock"):
- try:
- with open("/etc/sysconfig/clock", "w") as fp:
- fp.write('ZONE="%s"\n' % timezone)
- except:
- log.debug("failed to write to /etc/sysconfig/clock")
- raise
-
- try:
- shutil.copy(tz_file, "/etc/localtime")
- except:
- log.debug("failed to copy %s to /etc/localtime" % tz_file)
- raise
-
- log.debug("set timezone to %s" % timezone)
- return
+ cloud.distro.set_timezone(timezone)
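The file handling removed above now lives behind cloud.distro.set_timezone(). A minimal sketch of what that call is assumed to cover, mirroring the removed steps (the real distro code lives elsewhere in this branch and may differ per distro):

    import os.path
    import shutil

    def set_timezone(timezone, tz_base="/usr/share/zoneinfo"):
        tz_file = os.path.join(tz_base, timezone)
        if not os.path.isfile(tz_file):
            raise RuntimeError("Invalid timezone %s" % tz_file)
        with open("/etc/timezone", "w") as fp:    # Debian/Ubuntu style marker
            fp.write("%s\n" % timezone)
        shutil.copy(tz_file, "/etc/localtime")    # make it effective immediately
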
diff --git a/cloudinit/transforms/cc_update_etc_hosts.py b/cloudinit/transforms/cc_update_etc_hosts.py
index 6ad2fca8..361097a6 100644
--- a/cloudinit/transforms/cc_update_etc_hosts.py
+++ b/cloudinit/transforms/cc_update_etc_hosts.py
@@ -18,70 +18,34 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_always
-import StringIO
+from cloudinit import util
+from cloudinit import templater
-frequency = per_always
+from cloudinit.settings import PER_ALWAYS
+frequency = PER_ALWAYS
-def handle(_name, cfg, cloud, log, _args):
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+def handle(name, cfg, cloud, log, _args):
manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
- if manage_hosts in ("True", "true", True, "template"):
- # render from template file
- try:
- if not hostname:
- log.info("manage_etc_hosts was set, but no hostname found")
- return
-
- util.render_to_file('hosts', '/etc/hosts',
+ if util.translate_bool(manage_hosts, addons=['template']):
+ (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ # Render from template file
+ if not hostname:
+            log.warn("Option 'manage_etc_hosts' was set, but no hostname was found")
+            return
+        tpl_fn_name = cloud.get_template_filename("hosts.%s" % (cloud.distro.name()))
+        if not tpl_fn_name:
+            raise Exception("No hosts template could be found for distro %s" % (cloud.distro.name()))
+        templater.render_to_file(tpl_fn_name, '/etc/hosts',
{'hostname': hostname, 'fqdn': fqdn})
- except Exception:
- log.warn("failed to update /etc/hosts")
- raise
elif manage_hosts == "localhost":
- log.debug("managing 127.0.1.1 in /etc/hosts")
- update_etc_hosts(hostname, fqdn, log)
- return
+ log.debug("Managing localhost in /etc/hosts")
+ (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ if not hostname:
+ log.warn("Option 'manage_etc_hosts' was set, but no hostname was found")
+ return
+ cloud.distro.update_etc_hosts(hostname, fqdn)
else:
- if manage_hosts not in ("False", False):
- log.warn("Unknown value for manage_etc_hosts. Assuming False")
- else:
- log.debug("not managing /etc/hosts")
-
-
-def update_etc_hosts(hostname, fqdn, _log):
- with open('/etc/hosts', 'r') as etchosts:
- header = "# Added by cloud-init\n"
- hosts_line = "127.0.1.1\t%s %s\n" % (fqdn, hostname)
- need_write = False
- need_change = True
- new_etchosts = StringIO.StringIO()
- for line in etchosts:
- split_line = [s.strip() for s in line.split()]
- if len(split_line) < 2:
- new_etchosts.write(line)
- continue
- if line == header:
- continue
- ip, hosts = split_line[0], split_line[1:]
- if ip == "127.0.1.1":
- if sorted([hostname, fqdn]) == sorted(hosts):
- need_change = False
- if need_change == True:
- line = "%s%s" % (header, hosts_line)
- need_change = False
- need_write = True
- new_etchosts.write(line)
- etchosts.close()
- if need_change == True:
- new_etchosts.write("%s%s" % (header, hosts_line))
- need_write = True
- if need_write == True:
- new_etcfile = open('/etc/hosts', 'wb')
- new_etcfile.write(new_etchosts.getvalue())
- new_etcfile.close()
- new_etchosts.close()
- return
+ log.debug(("Configuration option 'manage_etc_hosts' is not set,"
+ " not managing /etc/hosts in %s"), name)
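The explicit value lists for manage_etc_hosts are replaced by util.translate_bool(..., addons=['template']). A rough stand-in for how that call is assumed to behave here (not the real helper; only the acceptance of true-ish values plus the extra 'template' token matters to this transform):

    def translate_bool(val, addons=None):
        # True-ish strings and any extra tokens (e.g. 'template') select the
        # template branch; everything else falls through.
        extras = [str(a).lower() for a in (addons or [])]
        return str(val).lower() in (['true', '1', 'yes', 'on'] + extras)
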
diff --git a/cloudinit/transforms/cc_update_hostname.py b/cloudinit/transforms/cc_update_hostname.py
index b9d1919a..439bdcb3 100644
--- a/cloudinit/transforms/cc_update_hostname.py
+++ b/cloudinit/transforms/cc_update_hostname.py
@@ -18,84 +18,22 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import subprocess
-import errno
-from cloudinit.CloudConfig import per_always
+import os
+from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
-frequency = per_always
+frequency = PER_ALWAYS
-def handle(_name, cfg, cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug("preserve_hostname is set. not updating hostname")
+ log.debug(("Configuration option 'preserve_hostname' is set,"
+ " not updating the hostname in %s"), name)
return
(hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
try:
- prev = "%s/%s" % (cloud.get_cpath('data'), "previous-hostname")
- update_hostname(hostname, prev, log)
+ prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
+ cloud.distro.update_hostname(hostname, prev_fn)
except Exception:
- log.warn("failed to set hostname\n")
+ util.logexc(log, "Failed to set the hostname to %s", hostname)
raise
-
-
-# read hostname from a 'hostname' file
-# allow for comments and stripping line endings.
-# if file doesn't exist, or no contents, return default
-def read_hostname(filename, default=None):
- try:
- fp = open(filename, "r")
- lines = fp.readlines()
- fp.close()
- for line in lines:
- hpos = line.find("#")
- if hpos != -1:
- line = line[0:hpos]
- line = line.rstrip()
- if line:
- return line
- except IOError as e:
- if e.errno != errno.ENOENT:
- raise
- return default
-
-
-def update_hostname(hostname, prev_file, log):
- etc_file = "/etc/hostname"
-
- hostname_prev = None
- hostname_in_etc = None
-
- try:
- hostname_prev = read_hostname(prev_file)
- except Exception as e:
- log.warn("Failed to open %s: %s" % (prev_file, e))
-
- try:
- hostname_in_etc = read_hostname(etc_file)
- except:
- log.warn("Failed to open %s" % etc_file)
-
- update_files = []
- if not hostname_prev or hostname_prev != hostname:
- update_files.append(prev_file)
-
- if (not hostname_in_etc or
- (hostname_in_etc == hostname_prev and hostname_in_etc != hostname)):
- update_files.append(etc_file)
-
- try:
- for fname in update_files:
- util.write_file(fname, "%s\n" % hostname, 0644)
- log.debug("wrote %s to %s" % (hostname, fname))
- except:
- log.warn("failed to write hostname to %s" % fname)
-
- if hostname_in_etc and hostname_prev and hostname_in_etc != hostname_prev:
- log.debug("%s differs from %s. assuming user maintained" %
- (prev_file, etc_file))
-
- if etc_file in update_files:
- log.debug("setting hostname to %s" % hostname)
- subprocess.Popen(['hostname', hostname]).communicate()
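The previous-hostname bookkeeping removed above is now expected from cloud.distro.update_hostname(hostname, prev_fn). A sketch of the decision it preserves, taken from the removed helper: the previous-hostname file records what cloud-init last wrote, and /etc/hostname is only rewritten when the user has not changed it by hand.

    def files_to_update(hostname, hostname_prev, hostname_in_etc):
        updates = []
        if not hostname_prev or hostname_prev != hostname:
            updates.append('previous-hostname')   # record the newly seen value
        if (not hostname_in_etc or
                (hostname_in_etc == hostname_prev and
                 hostname_in_etc != hostname)):
            updates.append('/etc/hostname')       # unchanged by the user, safe to overwrite
        return updates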