author     Joshua Harlow <harlowja@yahoo-inc.com>  2012-06-19 17:58:41 -0700
committer  Joshua Harlow <harlowja@yahoo-inc.com>  2012-06-19 17:58:41 -0700
commit     43eb6d5aace53bef2116dde0796807befef1d8ff (patch)
tree       4e19b985cb97fd1293f9617c2b800e5377fc8494 /cloudinit
parent     b492cedaa5a4e66f4f4c589fc73f53afade7b904 (diff)
download   vyos-cloud-init-43eb6d5aace53bef2116dde0796807befef1d8ff.tar.gz
           vyos-cloud-init-43eb6d5aace53bef2116dde0796807befef1d8ff.zip
Make most places use the paths join() function so that testing with non-real read/write roots is easier.
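The idea is that reads go through paths.join(True, ...) and writes through paths.join(False, ...), so a test can set read_root/write_root in the path configuration and have all file access land under a throwaway directory. A minimal standalone sketch of the translation behaviour (an illustration of the logic added to cloudinit/helpers.py, not the actual class; the test root path is made up):

    import os

    def join(cfgs, read_only, *paths):
        # Simplified mirror of the new Paths.join(): pick the read or write
        # root from the path configuration and prefix it onto the joined path.
        root = cfgs.get('read_root') if read_only else cfgs.get('write_root')
        if not paths:
            return root
        joined = os.path.join(*paths) if len(paths) > 1 else paths[0]
        if root:
            # A leading '/' would make os.path.join discard the root prefix.
            joined = os.path.join(root, joined.lstrip('/'))
        return joined

    cfgs = {'write_root': '/tmp/cloud-init-test'}  # hypothetical test root
    print(join(cfgs, False, '/etc/fstab'))  # -> /tmp/cloud-init-test/etc/fstab
    print(join(cfgs, True, '/etc/fstab'))   # -> /etc/fstab (no read_root set)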
Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/distros/rhel.py                     6
-rw-r--r--  cloudinit/distros/ubuntu.py                   7
-rw-r--r--  cloudinit/helpers.py                         34
-rw-r--r--  cloudinit/ssh_util.py                        10
-rw-r--r--  cloudinit/transforms/apt_pipelining.py       15
-rw-r--r--  cloudinit/transforms/apt_update_upgrade.py   21
-rw-r--r--  cloudinit/transforms/ca_certs.py             23
-rw-r--r--  cloudinit/transforms/chef.py                 31
-rw-r--r--  cloudinit/transforms/keys_to_console.py       9
-rw-r--r--  cloudinit/transforms/landscape.py            20
-rw-r--r--  cloudinit/transforms/mcollective.py          25
-rw-r--r--  cloudinit/transforms/mounts.py                6
-rw-r--r--  cloudinit/transforms/phone_home.py            2
-rw-r--r--  cloudinit/transforms/puppet.py               34
-rw-r--r--  cloudinit/transforms/resizefs.py              8
-rw-r--r--  cloudinit/transforms/rightscale_userdata.py   1
-rw-r--r--  cloudinit/transforms/rsyslog.py               3
-rw-r--r--  cloudinit/transforms/runcmd.py                2
-rw-r--r--  cloudinit/transforms/salt_minion.py           2
-rw-r--r--  cloudinit/transforms/set_passwords.py         4
-rw-r--r--  cloudinit/transforms/ssh.py                  13
21 files changed, 167 insertions, 109 deletions
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index e0ca5909..aef7f6f3 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -73,6 +73,7 @@ class Distro(distros.Distro):
lines.insert(0, '# Created by cloud-init')
contents = "\n".join(lines)
net_fn = NETWORK_FN_TPL % (dev)
+ net_fn = self._paths.join(False, net_fn)
util.write_file(net_fn, contents, 0644)
def set_hostname(self, hostname):
@@ -104,6 +105,7 @@ class Distro(distros.Distro):
new_contents.append("# Added by cloud-init")
new_contents.append("HOSTNAME=%s" % (hostname))
contents = "\n".join(new_contents)
+ out_fn = self._paths.join(False, out_fn)
util.write_file(out_fn, contents, 0644)
def update_hostname(self, hostname, prev_file):
@@ -143,6 +145,7 @@ class Distro(distros.Distro):
return default
def _read_conf(self, filename):
+ filename = self._paths.join(True, filename)
contents = util.load_file(filename, quiet=True)
conf_lines = []
for line in contents.splitlines():
@@ -194,7 +197,8 @@ class Distro(distros.Distro):
new_contents.append("# Added by cloud-init")
new_contents.append('ZONE="%s"' % (tz))
tz_contents = "\n".join(new_contents)
- util.write_file("/etc/sysconfig/clock", tz_contents)
+ tz_fn = self._paths.join(False, "/etc/sysconfig/clock")
+ util.write_file(tz_fn, tz_contents)
# This ensures that the correct tz will be used for the system
util.copy(tz_file, "/etc/localtime")
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 24724d83..94565b14 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -46,7 +46,8 @@ class Distro(distros.Distro):
self.package_command('install', pkglist)
def _write_network(self, settings):
- util.write_file("/etc/network/interfaces", settings)
+ n_fn = self._paths.join(False, "/etc/network/interfaces")
+ util.write_file(n_fn, settings)
def set_hostname(self, hostname):
self._write_hostname(hostname, "/etc/hostname")
@@ -84,6 +85,7 @@ class Distro(distros.Distro):
util.subp(['hostname', hostname])
def _read_hostname(self, filename, default=None):
+ filename = self._paths.join(True, filename)
contents = util.load_file(filename, quiet=True)
for line in contents.splitlines():
c_pos = line.find("#")
@@ -105,7 +107,8 @@ class Distro(distros.Distro):
raise Exception(("Invalid timezone %s,"
" no file found at %s") % (tz, tz_file))
tz_contents = "%s\n" % tz
- util.write_file("/etc/timezone", tz_contents)
+ tz_fn = self._paths.join(False, "/etc/timezone")
+ util.write_file(tz_fn, tz_contents)
util.copy(tz_file, "/etc/localtime")
def package_command(self, command, args=None):
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 3938e7ee..9f55a984 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -208,17 +208,18 @@ class Paths(object):
def __init__(self, path_cfgs, ds=None):
self.cfgs = path_cfgs
# Populate all the initial paths
- self.cloud_dir = self.join_paths(False,
- path_cfgs.get('cloud_dir',
- '/var/lib/cloud'))
+ self.cloud_dir = self.join(False,
+ path_cfgs.get('cloud_dir',
+ '/var/lib/cloud'))
self.instance_link = os.path.join(self.cloud_dir, 'instance')
self.boot_finished = os.path.join(self.instance_link, "boot-finished")
self.upstart_conf_d = path_cfgs.get('upstart_dir')
- template_dir = self.join_paths(True,
- path_cfgs.get('templates_dir',
- '/etc/cloud/templates/'))
- self.template_tpl = os.path.join(template_dir, '%s.tmpl')
+ if self.upstart_conf_d:
+ self.upstart_conf_d = self.join(False, self.upstart_conf_d)
self.seed_dir = os.path.join(self.cloud_dir, 'seed')
+ # This one isn't joined, since it should just be read-only
+ template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/')
+ self.template_tpl = os.path.join(template_dir, '%s.tmpl')
self.lookups = {
"handlers": "handlers",
"scripts": "scripts",
@@ -235,16 +236,25 @@ class Paths(object):
# joins the paths but also appends a read
# or write root if available
- def join_paths(self, read_only, *paths):
+ def join(self, read_only, *paths):
if read_only:
- root = self.cfgs.get('read_root', '/')
+ root = self.cfgs.get('read_root')
else:
- root = self.cfgs.get('write_root', '/')
+ root = self.cfgs.get('write_root')
if not paths:
return root
- joined = os.path.join(*paths)
+ if len(paths) > 1:
+ joined = os.path.join(*paths)
+ else:
+ joined = paths[0]
if root:
- joined = os.path.join(root, joined.lstrip("/"))
+ pre_joined = joined
+ # Need to remove any starting '/' since this
+ # will confuse os.path.join
+ joined = joined.lstrip("/")
+ joined = os.path.join(root, joined)
+ LOG.debug("Translated %s to adjusted path %s (%s)",
+ pre_joined, joined, read_only)
return joined
# get_ipath_cur: get the current instance path for an item
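With the reworked helper above, a test can build a Paths object around a fake write_root and check where a transform's write would land. A hedged usage sketch (the scratch directory is made up, and this assumes this revision of cloudinit.helpers is importable):

    from cloudinit import helpers

    # Hypothetical test setup: route all writes under a scratch directory.
    paths = helpers.Paths({'write_root': '/tmp/ci-fake-root'})
    assert paths.join(False, '/etc/hostname') == '/tmp/ci-fake-root/etc/hostname'
    # Reads stay untranslated unless a 'read_root' is also configured.
    assert paths.join(True, '/etc/hostname') == '/etc/hostname'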
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 13adbb09..96143d32 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -210,9 +210,12 @@ def update_authorized_keys(fname, keys):
return '\n'.join(lines)
-def setup_user_keys(keys, user, key_prefix, sshd_config_fn=DEF_SSHD_CFG):
+def setup_user_keys(keys, user, key_prefix, paths):
+
+ # Make sure the user's .ssh dir is set up accordingly
pwent = pwd.getpwnam(user)
ssh_dir = os.path.join(pwent.pw_dir, '.ssh')
+ ssh_dir = paths.join(False, ssh_dir)
if not os.path.exists(ssh_dir):
util.ensure_dir(ssh_dir, mode=0700)
util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
@@ -223,6 +226,7 @@ def setup_user_keys(keys, user, key_prefix, sshd_config_fn=DEF_SSHD_CFG):
for k in keys:
key_entries.append(parser.parse(str(k), def_opt=key_prefix))
+ sshd_conf_fn = paths.join(True, DEF_SSHD_CFG)
with util.SeLinuxGuard(ssh_dir, recursive=True):
try:
# AuthorizedKeysFile may contain tokens
@@ -230,7 +234,7 @@ def setup_user_keys(keys, user, key_prefix, sshd_config_fn=DEF_SSHD_CFG):
# The following tokens are defined: %% is replaced by a literal
# '%', %h is replaced by the home directory of the user being
# authenticated and %u is replaced by the username of that user.
- ssh_cfg = parse_ssh_config(sshd_config_fn)
+ ssh_cfg = parse_ssh_config(sshd_conf_fn)
akeys = ssh_cfg.get("authorizedkeysfile", '')
akeys = akeys.strip()
if not akeys:
@@ -247,7 +251,7 @@ def setup_user_keys(keys, user, key_prefix, sshd_config_fn=DEF_SSHD_CFG):
" in ssh config"
" from %s, using 'AuthorizedKeysFile' file"
" %s instead"),
- sshd_config_fn, authorized_keys)
+ sshd_conf_fn, authorized_keys)
content = update_authorized_keys(authorized_keys, key_entries)
util.ensure_dir(os.path.dirname(authorized_keys), mode=0700)
diff --git a/cloudinit/transforms/apt_pipelining.py b/cloudinit/transforms/apt_pipelining.py
index d8e574b9..f460becb 100644
--- a/cloudinit/transforms/apt_pipelining.py
+++ b/cloudinit/transforms/apt_pipelining.py
@@ -23,7 +23,7 @@ frequency = PER_INSTANCE
distros = ['ubuntu', 'debian']
-default_file = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
+DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
# Acquire::http::Pipeline-Depth can be a value
# from 0 to 5 indicating how many outstanding requests APT should send.
@@ -31,30 +31,27 @@ default_file = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
# on TCP connections - otherwise data corruption will occur.
-def handle(_name, cfg, _cloud, log, _args):
+def handle(_name, cfg, cloud, log, _args):
apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
apt_pipe_value_s = str(apt_pipe_value).lower().strip()
if apt_pipe_value_s == "false":
- write_apt_snippet("0", log)
-
+ write_apt_snippet(cloud, "0", log, DEFAULT_FILE)
elif apt_pipe_value_s in ("none", "unchanged", "os"):
return
-
elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]:
- write_apt_snippet(apt_pipe_value_s, log)
-
+ write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE)
else:
log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
-def write_apt_snippet(setting, log, f_name=default_file):
+def write_apt_snippet(cloud, setting, log, f_name):
""" Writes f_name with apt pipeline depth 'setting' """
file_contents = ("//Written by cloud-init per 'apt_pipelining'\n"
'Acquire::http::Pipeline-Depth "%s";\n') % (setting)
- util.write_file(f_name, file_contents)
+ util.write_file(cloud.paths.join(False, f_name), file_contents)
log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)
diff --git a/cloudinit/transforms/apt_update_upgrade.py b/cloudinit/transforms/apt_update_upgrade.py
index d49d8bd2..29bbc1ae 100644
--- a/cloudinit/transforms/apt_update_upgrade.py
+++ b/cloudinit/transforms/apt_update_upgrade.py
@@ -26,6 +26,8 @@ from cloudinit import util
distros = ['ubuntu', 'debian']
+PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n"
+
def handle(_name, cfg, cloud, log, _args):
update = util.get_cfg_option_bool(cfg, 'apt_update', False)
@@ -44,22 +46,23 @@ def handle(_name, cfg, cloud, log, _args):
"archive.ubuntu.com/ubuntu")
rename_apt_lists(old_mir, mirror)
- # set up proxy
+ # Set up any apt proxy
proxy = cfg.get("apt_proxy", None)
proxy_filename = "/etc/apt/apt.conf.d/95cloud-init-proxy"
if proxy:
try:
- # See http://linux.die.net/man/5/apt.conf
- contents = "Acquire::HTTP::Proxy \"%s\";\n"
- util.write_file(proxy_filename, contents % (proxy))
+ # See man 'apt.conf'
+ contents = PROXY_TPL % (proxy)
+ util.write_file(cloud.paths.join(False, proxy_filename),
+ contents)
except Exception as e:
util.logexc(log, "Failed to write proxy to %s", proxy_filename)
elif os.path.isfile(proxy_filename):
util.del_file(proxy_filename)
- # process 'apt_sources'
+ # Process 'apt_sources'
if 'apt_sources' in cfg:
- errors = add_sources(cfg['apt_sources'],
+ errors = add_sources(cloud, cfg['apt_sources'],
{'MIRROR': mirror, 'RELEASE': release})
for e in errors:
log.warn("Source Error: %s", ':'.join(e))
@@ -138,7 +141,7 @@ def generate_sources_list(codename, mirror, cloud, log):
log.warn("No template found, not rendering /etc/apt/sources.list")
-def add_sources(srclist, template_params=None):
+def add_sources(cloud, srclist, template_params=None):
"""
add entries in /etc/apt/sources.list.d for each abbreviated
sources.list entry in 'srclist'. When rendering template, also
@@ -187,7 +190,9 @@ def add_sources(srclist, template_params=None):
errorlist.append([source, "failed add key"])
try:
- util.write_file(ent['filename'], "%s\n" % (source), omode="ab")
+ contents = "%s\n" % (source)
+ util.write_file(cloud.paths.join(False, ent['filename']),
+ contents, omode="ab")
except:
errorlist.append([source,
"failed write to file %s" % ent['filename']])
diff --git a/cloudinit/transforms/ca_certs.py b/cloudinit/transforms/ca_certs.py
index e0802bfe..56c41561 100644
--- a/cloudinit/transforms/ca_certs.py
+++ b/cloudinit/transforms/ca_certs.py
@@ -33,7 +33,7 @@ def update_ca_certs():
util.subp(["update-ca-certificates"])
-def add_ca_certs(certs):
+def add_ca_certs(cloud, certs):
"""
Adds certificates to the system. To actually apply the new certificates
you must also call L{update_ca_certs}.
@@ -41,26 +41,29 @@ def add_ca_certs(certs):
@param certs: A list of certificate strings.
"""
if certs:
- cert_file_contents = "\n".join(certs)
+ # First ensure they are strings...
+ cert_file_contents = "\n".join([str(c) for c in certs])
cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
+ cert_file_fullpath = cloud.paths.join(False, cert_file_fullpath)
util.write_file(cert_file_fullpath, cert_file_contents, mode=0644)
# Append cert filename to CA_CERT_CONFIG file.
- util.write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="ab")
+ util.write_file(cloud.paths.join(False, CA_CERT_CONFIG),
+ "\n%s" % CA_CERT_FILENAME, omode="ab")
-def remove_default_ca_certs():
+def remove_default_ca_certs(cloud):
"""
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
"""
- util.delete_dir_contents(CA_CERT_PATH)
- util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
- util.write_file(CA_CERT_CONFIG, "", mode=0644)
+ util.delete_dir_contents(cloud.paths.join(False, CA_CERT_PATH))
+ util.delete_dir_contents(cloud.paths.join(False, CA_CERT_SYSTEM_PATH))
+ util.write_file(cloud.paths.join(False, CA_CERT_CONFIG), "", mode=0644)
debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
util.subp(('debconf-set-selections', '-'), debconf_sel)
-def handle(name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
"""
Call to handle ca-cert sections in cloud-config file.
@@ -82,14 +85,14 @@ def handle(name, cfg, _cloud, log, _args):
# default trusted CA certs first.
if ca_cert_cfg.get("remove-defaults", False):
log.debug("Removing default certificates")
- remove_default_ca_certs()
+ remove_default_ca_certs(cloud)
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
if trusted_certs:
log.debug("Adding %d certificates" % len(trusted_certs))
- add_ca_certs(trusted_certs)
+ add_ca_certs(cloud, trusted_certs)
# Update the system with the new cert configuration.
log.debug("Updating certificates")
diff --git a/cloudinit/transforms/chef.py b/cloudinit/transforms/chef.py
index 31bfb85f..4e8ef346 100644
--- a/cloudinit/transforms/chef.py
+++ b/cloudinit/transforms/chef.py
@@ -36,30 +36,40 @@ def handle(name, cfg, cloud, log, _args):
return
chef_cfg = cfg['chef']
- # ensure the chef directories we use exist
- util.ensure_dirs(['/etc/chef', '/var/log/chef', '/var/lib/chef',
- '/var/cache/chef', '/var/backups/chef', '/var/run/chef'])
+ # Ensure the chef directories we use exist
+ c_dirs = [
+ '/etc/chef',
+ '/var/log/chef',
+ '/var/lib/chef',
+ '/var/cache/chef',
+ '/var/backups/chef',
+ '/var/run/chef',
+ ]
+ for d in c_dirs:
+ util.ensure_dir(cloud.paths.join(False, d))
- # set the validation key based on the presence of either 'validation_key'
+ # Set the validation key based on the presence of either 'validation_key'
# or 'validation_cert'. In the case where both exist, 'validation_key'
# takes precedence
for key in ('validation_key', 'validation_cert'):
if key in chef_cfg and chef_cfg[key]:
- util.write_file('/etc/chef/validation.pem', chef_cfg[key])
+ v_fn = cloud.paths.join(False, '/etc/chef/validation.pem')
+ util.write_file(v_fn, chef_cfg[key])
break
- # create the chef config from template
+ # Create the chef config from template
template_fn = cloud.get_template_filename('chef_client.rb')
if template_fn:
+ iid = str(cloud.datasource.get_instance_id())
params = {
'server_url': chef_cfg['server_url'],
- 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
- cloud.datasource.get_instance_id()),
+ 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', iid),
'environment': util.get_cfg_option_str(chef_cfg, 'environment',
'_default'),
'validation_name': chef_cfg['validation_name']
}
- templater.render_to_file(template_fn, '/etc/chef/client.rb', params)
+ out_fn = cloud.paths.join(False, '/etc/chef/client.rb')
+ templater.render_to_file(template_fn, out_fn, params)
else:
log.warn("No template found, not rendering to /etc/chef/client.rb")
@@ -71,7 +81,8 @@ def handle(name, cfg, cloud, log, _args):
initial_attributes = chef_cfg['initial_attributes']
for k in list(initial_attributes.keys()):
initial_json[k] = initial_attributes[k]
- util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json))
+ firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json')
+ util.write_file(firstboot_fn, json.dumps(initial_json))
# If chef is not installed, we install chef based on 'install_type'
if not os.path.isfile('/usr/bin/chef-client'):
diff --git a/cloudinit/transforms/keys_to_console.py b/cloudinit/transforms/keys_to_console.py
index e974375f..40758198 100644
--- a/cloudinit/transforms/keys_to_console.py
+++ b/cloudinit/transforms/keys_to_console.py
@@ -29,23 +29,24 @@ frequency = PER_INSTANCE
helper_tool = '/usr/lib/cloud-init/write-ssh-key-fingerprints'
-def handle(name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
if not os.path.exists(helper_tool):
log.warn(("Unable to activate transform %s,"
" helper tool not found at %s"), name, helper_tool)
return
fp_blacklist = util.get_cfg_option_list(cfg,
- "ssh_fp_console_blacklist", [])
+ "ssh_fp_console_blacklist", [])
key_blacklist = util.get_cfg_option_list(cfg,
- "ssh_key_console_blacklist", ["ssh-dss"])
+ "ssh_key_console_blacklist",
+ ["ssh-dss"])
try:
cmd = [helper_tool]
cmd.append(','.join(fp_blacklist))
cmd.append(','.join(key_blacklist))
(stdout, _stderr) = util.subp(cmd)
- util.write_file('/dev/console', stdout)
+ util.write_file(cloud.paths.join(False, '/dev/console'), stdout)
except:
log.warn("Writing keys to /dev/console failed!")
raise
diff --git a/cloudinit/transforms/landscape.py b/cloudinit/transforms/landscape.py
index 19948d0e..29ce41b9 100644
--- a/cloudinit/transforms/landscape.py
+++ b/cloudinit/transforms/landscape.py
@@ -33,12 +33,12 @@ from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-lsc_client_cfg_file = "/etc/landscape/client.conf"
+LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
distros = ['ubuntu']
# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
-lsc_builtincfg = {
+LSC_BUILTIN_CFG = {
'client': {
'log_level': "info",
'url': "https://landscape.canonical.com/message-system",
@@ -48,7 +48,7 @@ lsc_builtincfg = {
}
-def handle(name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
"""
Basically turn a top level 'landscape' entry with a 'client' dict
and render it to ConfigObj format under '[client]' section in
@@ -66,15 +66,19 @@ def handle(name, cfg, _cloud, log, _args):
" but not a dictionary type,"
" is a %s instead"), util.obj_name(ls_cloudcfg))
- merged = merge_together([lsc_builtincfg, lsc_client_cfg_file, ls_cloudcfg])
+ lsc_client_fn = cloud.paths.join(True, LSC_CLIENT_CFG_FILE)
+ merged = merge_together([LSC_BUILTIN_CFG, lsc_client_fn, ls_cloudcfg])
- if not os.path.isdir(os.path.dirname(lsc_client_cfg_file)):
- util.ensure_dir(os.path.dirname(lsc_client_cfg_file))
+ lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn))
+ if not os.path.isdir(lsc_dir):
+ util.ensure_dir(lsc_dir)
contents = StringIO()
merged.write(contents)
- util.write_file(lsc_client_cfg_file, contents.getvalue())
- log.debug("Wrote landscape config file to %s", lsc_client_cfg_file)
+ contents.flush()
+
+ util.write_file(lsc_client_fn, contents.getvalue())
+ log.debug("Wrote landscape config file to %s", lsc_client_fn)
def merge_together(objs):
diff --git a/cloudinit/transforms/mcollective.py b/cloudinit/transforms/mcollective.py
index 5464fe8c..9754d6b8 100644
--- a/cloudinit/transforms/mcollective.py
+++ b/cloudinit/transforms/mcollective.py
@@ -24,8 +24,8 @@ from StringIO import StringIO
from cloudinit import cfg as config
from cloudinit import util
-pubcert_file = "/etc/mcollective/ssl/server-public.pem"
-pricert_file = "/etc/mcollective/ssl/server-private.pem"
+PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
+PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
def handle(name, cfg, cloud, log, _args):
@@ -47,7 +47,8 @@ def handle(name, cfg, cloud, log, _args):
mcollective_config = config.DefaultingConfigParser()
# Read server.cfg values from original file in order to be able to mix
# the rest up
- old_contents = util.load_file('/etc/mcollective/server.cfg')
+ server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg')
+ old_contents = util.load_file(server_cfg_fn)
# It doesn't contain any sections so just add one temporarily
# Use a hash id based off the contents,
# just incase of conflicts... (try to not have any...)
@@ -61,17 +62,19 @@ def handle(name, cfg, cloud, log, _args):
section_head = section_tpl % (attempts)
sectioned_contents = "%s\n%s" % (section_head, old_contents)
mcollective_config.readfp(StringIO(sectioned_contents),
- filename='/etc/mcollective/server.cfg')
+ filename=server_cfg_fn)
for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
if cfg_name == 'public-cert':
- util.write_file(pubcert_file, cfg, mode=0644)
+ pubcert_fn = cloud.paths.join(False, PUBCERT_FILE)
+ util.write_file(pubcert_fn, cfg, mode=0644)
mcollective_config.set(cfg_name,
- 'plugin.ssl_server_public', pubcert_file)
+ 'plugin.ssl_server_public', pubcert_fn)
mcollective_config.set(cfg_name, 'securityprovider', 'ssl')
elif cfg_name == 'private-cert':
- util.write_file(pricert_file, cfg, mode=0600)
+ pricert_fn = cloud.paths.join(False, PRICERT_FILE)
+ util.write_file(pricert_fn, cfg, mode=0600)
mcollective_config.set(cfg_name,
- 'plugin.ssl_server_private', pricert_file)
+ 'plugin.ssl_server_private', pricert_fn)
mcollective_config.set(cfg_name, 'securityprovider', 'ssl')
else:
# Iterate throug the config items, we'll use ConfigParser.set
@@ -80,15 +83,15 @@ def handle(name, cfg, cloud, log, _args):
mcollective_config.set(cfg_name, o, v)
# We got all our config as wanted we'll rename
# the previous server.cfg and create our new one
- util.rename('/etc/mcollective/server.cfg',
- '/etc/mcollective/server.cfg.old')
+ old_fn = "%s.old" % (server_cfg_fn)
+ util.rename(server_cfg_fn, old_fn)
# Now we got the whole file, write to disk except the section
# we added so that config parser won't error out when trying to read.
# Note below, that we've just used ConfigParser because it generally
# works. Below, we remove the initial 'nullsection' header.
contents = mcollective_config.stringify()
contents = contents.replace("%s\n" % (section_head), "")
- util.write_file('/etc/mcollective/server.cfg', contents, mode=0644)
+ util.write_file(server_cfg_fn, contents, mode=0644)
# Start mcollective
util.subp(['service', 'mcollective', 'start'], capture=False)
diff --git a/cloudinit/transforms/mounts.py b/cloudinit/transforms/mounts.py
index 44182b87..700fbc44 100644
--- a/cloudinit/transforms/mounts.py
+++ b/cloudinit/transforms/mounts.py
@@ -168,7 +168,7 @@ def handle(_name, cfg, cloud, log, _args):
cc_lines.append('\t'.join(line))
fstab_lines = []
- fstab = util.load_file("/etc/fstab")
+ fstab = util.load_file(cloud.paths.join(True, "/etc/fstab"))
for line in fstab.splitlines():
try:
toks = ws.split(line)
@@ -180,7 +180,7 @@ def handle(_name, cfg, cloud, log, _args):
fstab_lines.extend(cc_lines)
contents = "%s\n" % ('\n'.join(fstab_lines))
- util.write_file("/etc/fstab", contents)
+ util.write_file(cloud.paths.join(False, "/etc/fstab"), contents)
if needswap:
try:
@@ -190,7 +190,7 @@ def handle(_name, cfg, cloud, log, _args):
for d in dirs:
try:
- util.ensure_dir(d)
+ util.ensure_dir(cloud.paths.join(False, d))
except:
util.logexc(log, "Failed to make '%s' config-mount", d)
diff --git a/cloudinit/transforms/phone_home.py b/cloudinit/transforms/phone_home.py
index 98ff2b85..a8752527 100644
--- a/cloudinit/transforms/phone_home.py
+++ b/cloudinit/transforms/phone_home.py
@@ -77,7 +77,7 @@ def handle(name, cfg, cloud, log, args):
for (n, path) in pubkeys.iteritems():
try:
- all_keys[n] = util.load_file(path)
+ all_keys[n] = util.load_file(cloud.paths.join(True, path))
except:
util.logexc(log, ("%s: failed to open, can not"
" phone home that data"), path)
diff --git a/cloudinit/transforms/puppet.py b/cloudinit/transforms/puppet.py
index 76cc9732..d55118ea 100644
--- a/cloudinit/transforms/puppet.py
+++ b/cloudinit/transforms/puppet.py
@@ -43,7 +43,8 @@ def handle(name, cfg, cloud, log, _args):
# ... and then update the puppet configuration
if 'conf' in puppet_cfg:
# Add all sections from the conf object to puppet.conf
- contents = util.load_file('/etc/puppet/puppet.conf')
+ puppet_conf_fn = cloud.paths.join(False, '/etc/puppet/puppet.conf')
+ contents = util.load_file(puppet_conf_fn)
# Create object for reading puppet.conf values
puppet_config = config.DefaultingConfigParser()
# Read puppet.conf values from original file in order to be able to
@@ -51,21 +52,27 @@ def handle(name, cfg, cloud, log, _args):
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines)
puppet_config.readfp(StringIO(cleaned_contents),
- filename='/etc/puppet/puppet.conf')
+ filename=puppet_conf_fn)
for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
- # ca_cert configuration is a special case
- # Dump the puppetmaster ca certificate in the correct place
+ # Cert configuration is a special case
+ # Dump the puppet master ca certificate in the correct place
if cfg_name == 'ca_cert':
# Puppet ssl sub-directory isn't created yet
# Create it with the proper permissions and ownership
- util.ensure_dir('/var/lib/puppet/ssl', 0771)
- util.chownbyid('/var/lib/puppet/ssl',
+ pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl')
+ util.ensure_dir(pp_ssl_dir, 0771)
+ util.chownbyid(pp_ssl_dir,
pwd.getpwnam('puppet').pw_uid, 0)
- util.ensure_dir('/var/lib/puppet/ssl/certs/')
- util.chownbyid('/var/lib/puppet/ssl/certs/',
+ pp_ssl_certs = cloud.paths.join(False,
+ '/var/lib/puppet/ssl/certs/')
+ util.ensure_dir(pp_ssl_certs)
+ util.chownbyid(pp_ssl_certs,
pwd.getpwnam('puppet').pw_uid, 0)
- util.write_file('/var/lib/puppet/ssl/certs/ca.pem', cfg)
- util.chownbyid('/var/lib/puppet/ssl/certs/ca.pem',
+ pp_ssl_ca_certs = cloud.paths.join(False,
+ ('/var/lib/puppet/'
+ 'ssl/certs/ca.pem'))
+ util.write_file(pp_ssl_ca_certs, cfg)
+ util.chownbyid(pp_ssl_ca_certs,
pwd.getpwnam('puppet').pw_uid, 0)
else:
# Iterate throug the config items, we'll use ConfigParser.set
@@ -82,10 +89,9 @@ def handle(name, cfg, cloud, log, _args):
puppet_config.set(cfg_name, o, v)
# We got all our config as wanted we'll rename
# the previous puppet.conf and create our new one
- util.rename('/etc/puppet/puppet.conf',
- '/etc/puppet/puppet.conf.old')
- contents = puppet_config.stringify()
- util.write_file('/etc/puppet/puppet.conf', contents)
+ puppet_conf_old_fn = "%s.old" % (puppet_conf_fn)
+ util.rename(puppet_conf_fn, puppet_conf_old_fn)
+ util.write_file(puppet_conf_fn, puppet_config.stringify())
# Set puppet to automatically start
if os.path.exists('/etc/default/puppet'):
diff --git a/cloudinit/transforms/resizefs.py b/cloudinit/transforms/resizefs.py
index fe012417..fd2bb9e1 100644
--- a/cloudinit/transforms/resizefs.py
+++ b/cloudinit/transforms/resizefs.py
@@ -62,7 +62,7 @@ def get_fs_type(st_dev, path, log):
raise
-def handle(name, cfg, _cloud, log, args):
+def handle(name, cfg, cloud, log, args):
if len(args) != 0:
resize_root = args[0]
else:
@@ -74,11 +74,11 @@ def handle(name, cfg, _cloud, log, args):
# TODO is the directory ok to be used??
resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
+ resize_root_d = cloud.paths.join(False, resize_root_d)
util.ensure_dir(resize_root_d)
- # TODO: allow what is to be resized to
- # be configurable??
- resize_what = "/"
+ # TODO: allow what is to be resized to be configurable??
+ resize_what = cloud.paths.join(False, "/")
with util.SilentTemporaryFile(prefix="cloudinit.resizefs.",
dir=resize_root_d, delete=True) as tfh:
devpth = tfh.name
diff --git a/cloudinit/transforms/rightscale_userdata.py b/cloudinit/transforms/rightscale_userdata.py
index 8dfd845f..dc06f9ec 100644
--- a/cloudinit/transforms/rightscale_userdata.py
+++ b/cloudinit/transforms/rightscale_userdata.py
@@ -78,6 +78,7 @@ def handle(name, _cfg, cloud, log, _args):
urls = mdict[my_hookname]
for (i, url) in enumerate(urls):
fname = os.path.join(scripts_d, "rightscale-%02i" % (i))
+ fname = cloud.paths.join(False, fname)
try:
resp = uhelp.readurl(url)
# Ensure its a valid http response (and something gotten)
diff --git a/cloudinit/transforms/rsyslog.py b/cloudinit/transforms/rsyslog.py
index 71b74711..f2c1de1e 100644
--- a/cloudinit/transforms/rsyslog.py
+++ b/cloudinit/transforms/rsyslog.py
@@ -71,7 +71,8 @@ def handle(name, cfg, cloud, log, _args):
try:
contents = "%s\n" % (content)
- util.write_file(filename, contents, omode=omode)
+ util.write_file(cloud.paths.join(False, filename),
+ contents, omode=omode)
except Exception:
util.logexc(log, "Failed to write to %s", filename)
diff --git a/cloudinit/transforms/runcmd.py b/cloudinit/transforms/runcmd.py
index 31a254a5..f121484b 100644
--- a/cloudinit/transforms/runcmd.py
+++ b/cloudinit/transforms/runcmd.py
@@ -33,6 +33,6 @@ def handle(name, cfg, cloud, log, _args):
cmd = cfg["runcmd"]
try:
content = util.shellify(cmd)
- util.write_file(out_fn, content, 0700)
+ util.write_file(cloud.paths.join(False, out_fn), content, 0700)
except:
util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
diff --git a/cloudinit/transforms/salt_minion.py b/cloudinit/transforms/salt_minion.py
index d05d2a1e..16f5286d 100644
--- a/cloudinit/transforms/salt_minion.py
+++ b/cloudinit/transforms/salt_minion.py
@@ -35,6 +35,7 @@ def handle(name, cfg, cloud, log, _args):
# Ensure we can configure files at the right dir
config_dir = salt_cfg.get("config_dir", '/etc/salt')
+ config_dir = cloud.paths.join(False, config_dir)
util.ensure_dir(config_dir)
# ... and then update the salt configuration
@@ -47,6 +48,7 @@ def handle(name, cfg, cloud, log, _args):
# ... copy the key pair if specified
if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
+ pki_dir = cloud.paths.join(False, pki_dir)
with util.umask(077):
util.ensure_dir(pki_dir)
pub_name = os.path.join(pki_dir, 'minion.pub')
diff --git a/cloudinit/transforms/set_passwords.py b/cloudinit/transforms/set_passwords.py
index c0cc4e84..e7049f22 100644
--- a/cloudinit/transforms/set_passwords.py
+++ b/cloudinit/transforms/set_passwords.py
@@ -130,8 +130,8 @@ def handle(_name, cfg, cloud, log, args):
replaced_auth = True
new_lines.append(replacement)
- new_contents = "\n".join(new_lines)
- util.write_file('/etc/ssh/sshd_config', new_contents)
+ util.write_file(cloud.paths.join(False, '/etc/ssh/sshd_config'),
+ "\n".join(new_lines))
try:
cmd = ['service']
diff --git a/cloudinit/transforms/ssh.py b/cloudinit/transforms/ssh.py
index b1f2ce89..33d4bb54 100644
--- a/cloudinit/transforms/ssh.py
+++ b/cloudinit/transforms/ssh.py
@@ -64,7 +64,8 @@ def handle(_name, cfg, cloud, log, _args):
if key in key2file:
tgt_fn = key2file[key][0]
tgt_perms = key2file[key][1]
- util.write_file(tgt_fn, val, tgt_perms)
+ util.write_file(cloud.paths.join(False, tgt_fn),
+ val, tgt_perms)
for (priv, pub) in priv2pub.iteritems():
if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']:
@@ -86,6 +87,7 @@ def handle(_name, cfg, cloud, log, _args):
generate_keys)
for keytype in genkeys:
keyfile = '/etc/ssh/ssh_host_%s_key' % (keytype)
+ keyfile = cloud.paths.join(False, keyfile)
if not os.path.exists(keyfile):
cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
try:
@@ -107,20 +109,21 @@ def handle(_name, cfg, cloud, log, _args):
cfgkeys = cfg["ssh_authorized_keys"]
keys.extend(cfgkeys)
- apply_credentials(keys, user, disable_root, disable_root_opts)
+ apply_credentials(keys, user, cloud.paths,
+ disable_root, disable_root_opts)
except:
util.logexc(log, "Applying ssh credentials failed!")
-def apply_credentials(keys, user, disable_root, disable_root_opts):
+def apply_credentials(keys, user, paths, disable_root, disable_root_opts):
keys = set(keys)
if user:
- ssh_util.setup_user_keys(keys, user, '')
+ ssh_util.setup_user_keys(keys, user, '', paths)
if disable_root and user:
key_prefix = disable_root_opts.replace('$USER', user)
else:
key_prefix = ''
- ssh_util.setup_user_keys(keys, 'root', key_prefix)
+ ssh_util.setup_user_keys(keys, 'root', key_prefix, paths)