summaryrefslogtreecommitdiff
path: root/cloudinit
diff options
context:
space:
mode:
authorVlastimil Holer <vlastimil.holer@gmail.com>2012-12-19 18:03:03 +0100
committerVlastimil Holer <vlastimil.holer@gmail.com>2012-12-19 18:03:03 +0100
commit8dd9678d97a822e477915c150d528096a83c9777 (patch)
treea9b5708edd9d448508abf00abdd9b7bdeba16541 /cloudinit
parenta9939fe768e04d52fe530c7467357d79b78a21f4 (diff)
parent3569e71a1579b97f4e33fb46ab3fcef08a4ddad4 (diff)
downloadvyos-cloud-init-8dd9678d97a822e477915c150d528096a83c9777.tar.gz
vyos-cloud-init-8dd9678d97a822e477915c150d528096a83c9777.zip
Merged trunk lp:cloud-init
Diffstat (limited to 'cloudinit')
-rw-r--r--cloudinit/config/cc_apt_configure.py (renamed from cloudinit/config/cc_apt_update_upgrade.py)68
-rw-r--r--cloudinit/config/cc_apt_pipelining.py12
-rw-r--r--cloudinit/config/cc_byobu.py27
-rw-r--r--cloudinit/config/cc_ca_certs.py31
-rw-r--r--cloudinit/config/cc_chef.py45
-rw-r--r--cloudinit/config/cc_landscape.py19
-rw-r--r--cloudinit/config/cc_mcollective.py22
-rw-r--r--cloudinit/config/cc_migrator.py85
-rw-r--r--cloudinit/config/cc_mounts.py9
-rw-r--r--cloudinit/config/cc_package_update_upgrade_install.py99
-rw-r--r--cloudinit/config/cc_phone_home.py4
-rw-r--r--cloudinit/config/cc_power_state_change.py155
-rw-r--r--cloudinit/config/cc_puppet.py76
-rw-r--r--cloudinit/config/cc_resizefs.py13
-rw-r--r--cloudinit/config/cc_rsyslog.py3
-rw-r--r--cloudinit/config/cc_runcmd.py2
-rw-r--r--cloudinit/config/cc_salt_minion.py11
-rw-r--r--cloudinit/config/cc_set_hostname.py10
-rw-r--r--cloudinit/config/cc_set_passwords.py23
-rw-r--r--cloudinit/config/cc_ssh.py33
-rw-r--r--cloudinit/config/cc_ssh_authkey_fingerprints.py18
-rw-r--r--cloudinit/config/cc_ssh_import_id.py37
-rw-r--r--cloudinit/config/cc_update_etc_hosts.py3
-rw-r--r--cloudinit/config/cc_update_hostname.py8
-rw-r--r--cloudinit/config/cc_users_groups.py66
-rw-r--r--cloudinit/config/cc_yum_add_repo.py106
-rw-r--r--cloudinit/distros/__init__.py626
-rw-r--r--cloudinit/distros/debian.py137
-rw-r--r--cloudinit/distros/parsers/__init__.py28
-rw-r--r--cloudinit/distros/parsers/hostname.py88
-rw-r--r--cloudinit/distros/parsers/hosts.py92
-rw-r--r--cloudinit/distros/parsers/resolv_conf.py169
-rw-r--r--cloudinit/distros/parsers/sys_conf.py113
-rw-r--r--cloudinit/distros/rhel.py287
-rw-r--r--cloudinit/distros/ubuntu.py6
-rw-r--r--cloudinit/ec2_utils.py59
-rw-r--r--cloudinit/handlers/__init__.py15
-rw-r--r--cloudinit/handlers/upstart_job.py4
-rw-r--r--cloudinit/helpers.py49
-rw-r--r--cloudinit/log.py12
-rw-r--r--cloudinit/patcher.py58
-rw-r--r--cloudinit/safeyaml.py32
-rw-r--r--cloudinit/signal_handler.py71
-rw-r--r--cloudinit/sources/DataSourceAltCloud.py21
-rw-r--r--cloudinit/sources/DataSourceCloudStack.py60
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py109
-rw-r--r--cloudinit/sources/DataSourceEc2.py41
-rw-r--r--cloudinit/sources/DataSourceMAAS.py49
-rw-r--r--cloudinit/sources/DataSourceOVF.py5
-rw-r--r--cloudinit/sources/__init__.py79
-rw-r--r--cloudinit/ssh_util.py26
-rw-r--r--cloudinit/stages.py90
-rw-r--r--cloudinit/url_helper.py11
-rw-r--r--cloudinit/user_data.py2
-rw-r--r--cloudinit/util.py77
-rw-r--r--cloudinit/version.py2
56 files changed, 2518 insertions, 885 deletions
diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_configure.py
index 356bb98d..f8664160 100644
--- a/cloudinit/config/cc_apt_update_upgrade.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -20,7 +20,6 @@
import glob
import os
-import time
from cloudinit import templater
from cloudinit import util
@@ -47,9 +46,6 @@ EXPORT_GPG_KEYID = """
def handle(name, cfg, cloud, log, _args):
- update = util.get_cfg_option_bool(cfg, 'apt_update', False)
- upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)
-
release = get_release()
mirrors = find_apt_mirror_info(cloud, cfg)
if not mirrors or "primary" not in mirrors:
@@ -61,7 +57,7 @@ def handle(name, cfg, cloud, log, _args):
mirror = mirrors["primary"]
mirrors["mirror"] = mirror
- log.debug("mirror info: %s" % mirrors)
+ log.debug("Mirror info: %s" % mirrors)
if not util.get_cfg_option_bool(cfg,
'apt_preserve_sources_list', False):
@@ -78,8 +74,7 @@ def handle(name, cfg, cloud, log, _args):
try:
# See man 'apt.conf'
contents = PROXY_TPL % (proxy)
- util.write_file(cloud.paths.join(False, proxy_filename),
- contents)
+ util.write_file(proxy_filename, contents)
except Exception as e:
util.logexc(log, "Failed to write proxy to %s", proxy_filename)
elif os.path.isfile(proxy_filename):
@@ -90,61 +85,18 @@ def handle(name, cfg, cloud, log, _args):
params = mirrors
params['RELEASE'] = release
params['MIRROR'] = mirror
- errors = add_sources(cloud, cfg['apt_sources'], params)
+ errors = add_sources(cfg['apt_sources'], params)
for e in errors:
- log.warn("Source Error: %s", ':'.join(e))
+ log.warn("Add source error: %s", ':'.join(e))
dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
if dconf_sel:
- log.debug("setting debconf selections per cloud config")
+ log.debug("Setting debconf selections per cloud config")
try:
util.subp(('debconf-set-selections', '-'), dconf_sel)
- except:
+ except Exception:
util.logexc(log, "Failed to run debconf-set-selections")
- pkglist = util.get_cfg_option_list(cfg, 'packages', [])
-
- errors = []
- if update or len(pkglist) or upgrade:
- try:
- cloud.distro.update_package_sources()
- except Exception as e:
- util.logexc(log, "Package update failed")
- errors.append(e)
-
- if upgrade:
- try:
- cloud.distro.package_command("upgrade")
- except Exception as e:
- util.logexc(log, "Package upgrade failed")
- errors.append(e)
-
- if len(pkglist):
- try:
- cloud.distro.install_packages(pkglist)
- except Exception as e:
- util.logexc(log, "Failed to install packages: %s ", pkglist)
- errors.append(e)
-
- # kernel and openssl (possibly some other packages)
- # write a file /var/run/reboot-required after upgrading.
- # if that file exists and configured, then just stop right now and reboot
- # TODO(smoser): handle this less voilently
- reboot_file = "/var/run/reboot-required"
- if ((upgrade or pkglist) and cfg.get("apt_reboot_if_required", False) and
- os.path.isfile(reboot_file)):
- log.warn("rebooting after upgrade or install per %s" % reboot_file)
- time.sleep(1) # give the warning time to get out
- util.subp(["/sbin/reboot"])
- time.sleep(60)
- log.warn("requested reboot did not happen!")
- errors.append(Exception("requested reboot did not happen!"))
-
- if len(errors):
- log.warn("%s failed with exceptions, re-raising the last one",
- len(errors))
- raise errors[-1]
-
# get gpg keyid from keyserver
def getkeybyid(keyid, keyserver):
@@ -196,11 +148,10 @@ def generate_sources_list(codename, mirrors, cloud, log):
params = {'codename': codename}
for k in mirrors:
params[k] = mirrors[k]
- out_fn = cloud.paths.join(False, '/etc/apt/sources.list')
- templater.render_to_file(template_fn, out_fn, params)
+ templater.render_to_file(template_fn, '/etc/apt/sources.list', params)
-def add_sources(cloud, srclist, template_params=None):
+def add_sources(srclist, template_params=None):
"""
add entries in /etc/apt/sources.list.d for each abbreviated
sources.list entry in 'srclist'. When rendering template, also
@@ -250,8 +201,7 @@ def add_sources(cloud, srclist, template_params=None):
try:
contents = "%s\n" % (source)
- util.write_file(cloud.paths.join(False, ent['filename']),
- contents, omode="ab")
+ util.write_file(ent['filename'], contents, omode="ab")
except:
errorlist.append([source,
"failed write to file %s" % ent['filename']])
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 02056ee0..e5629175 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -34,26 +34,24 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
# on TCP connections - otherwise data corruption will occur.
-def handle(_name, cfg, cloud, log, _args):
+def handle(_name, cfg, _cloud, log, _args):
apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
apt_pipe_value_s = str(apt_pipe_value).lower().strip()
if apt_pipe_value_s == "false":
- write_apt_snippet(cloud, "0", log, DEFAULT_FILE)
+ write_apt_snippet("0", log, DEFAULT_FILE)
elif apt_pipe_value_s in ("none", "unchanged", "os"):
return
elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]:
- write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE)
+ write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
else:
log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
-def write_apt_snippet(cloud, setting, log, f_name):
+def write_apt_snippet(setting, log, f_name):
"""Writes f_name with apt pipeline depth 'setting'."""
file_contents = APT_PIPE_TPL % (setting)
-
- util.write_file(cloud.paths.join(False, f_name), file_contents)
-
+ util.write_file(f_name, file_contents)
log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 4e2e06bb..92d428b7 100644
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -18,12 +18,17 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit import util
distros = ['ubuntu', 'debian']
-def handle(name, cfg, _cloud, log, args):
+def handle(name, cfg, cloud, log, args):
if len(args) != 0:
value = args[0]
else:
@@ -56,16 +61,20 @@ def handle(name, cfg, _cloud, log, args):
shcmd = ""
if mod_user:
- user = util.get_cfg_option_str(cfg, "user", "ubuntu")
- shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
- shcmd += " || X=$(($X+1)); "
+ (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ (user, _user_config) = ds.extract_default(users)
+ if not user:
+ log.warn(("No default byobu user provided, "
+ "can not launch %s for the default user"), bl_inst)
+ else:
+ shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
+ shcmd += " || X=$(($X+1)); "
if mod_sys:
shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
shcmd += " || X=$(($X+1)); "
- cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
-
- log.debug("Setting byobu to %s", value)
-
- util.subp(cmd, capture=False)
+ if len(shcmd):
+ cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
+ log.debug("Setting byobu to %s", value)
+ util.subp(cmd, capture=False)
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index dc046bda..4f2a46a1 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -22,6 +22,7 @@ CA_CERT_PATH = "/usr/share/ca-certificates/"
CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
CA_CERT_CONFIG = "/etc/ca-certificates.conf"
CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
+CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
distros = ['ubuntu', 'debian']
@@ -33,7 +34,7 @@ def update_ca_certs():
util.subp(["update-ca-certificates"], capture=False)
-def add_ca_certs(paths, certs):
+def add_ca_certs(certs):
"""
Adds certificates to the system. To actually apply the new certificates
you must also call L{update_ca_certs}.
@@ -43,27 +44,31 @@ def add_ca_certs(paths, certs):
if certs:
# First ensure they are strings...
cert_file_contents = "\n".join([str(c) for c in certs])
- cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
- cert_file_fullpath = paths.join(False, cert_file_fullpath)
- util.write_file(cert_file_fullpath, cert_file_contents, mode=0644)
+ util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644)
+
# Append cert filename to CA_CERT_CONFIG file.
- util.write_file(paths.join(False, CA_CERT_CONFIG),
- "\n%s" % CA_CERT_FILENAME, omode="ab")
+ # We have to strip the content because blank lines in the file
+ # causes subsequent entries to be ignored. (LP: #1077020)
+ orig = util.load_file(CA_CERT_CONFIG)
+ cur_cont = '\n'.join([l for l in orig.splitlines()
+ if l != CA_CERT_FILENAME])
+ out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
+ util.write_file(CA_CERT_CONFIG, out, omode="wb")
-def remove_default_ca_certs(paths):
+def remove_default_ca_certs():
"""
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
"""
- util.delete_dir_contents(paths.join(False, CA_CERT_PATH))
- util.delete_dir_contents(paths.join(False, CA_CERT_SYSTEM_PATH))
- util.write_file(paths.join(False, CA_CERT_CONFIG), "", mode=0644)
+ util.delete_dir_contents(CA_CERT_PATH)
+ util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
+ util.write_file(CA_CERT_CONFIG, "", mode=0644)
debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
util.subp(('debconf-set-selections', '-'), debconf_sel)
-def handle(name, cfg, cloud, log, _args):
+def handle(name, cfg, _cloud, log, _args):
"""
Call to handle ca-cert sections in cloud-config file.
@@ -85,14 +90,14 @@ def handle(name, cfg, cloud, log, _args):
# default trusted CA certs first.
if ca_cert_cfg.get("remove-defaults", False):
log.debug("Removing default certificates")
- remove_default_ca_certs(cloud.paths)
+ remove_default_ca_certs()
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
if trusted_certs:
log.debug("Adding %d certificates" % len(trusted_certs))
- add_ca_certs(cloud.paths, trusted_certs)
+ add_ca_certs(trusted_certs)
# Update the system with the new cert configuration.
log.debug("Updating certificates")
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 6f568261..607f789e 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -22,10 +22,22 @@ import json
import os
from cloudinit import templater
+from cloudinit import url_helper
from cloudinit import util
RUBY_VERSION_DEFAULT = "1.8"
+CHEF_DIRS = [
+ '/etc/chef',
+ '/var/log/chef',
+ '/var/lib/chef',
+ '/var/cache/chef',
+ '/var/backups/chef',
+ '/var/run/chef',
+]
+
+OMNIBUS_URL = "https://www.opscode.com/chef/install.sh"
+
def handle(name, cfg, cloud, log, _args):
@@ -37,24 +49,15 @@ def handle(name, cfg, cloud, log, _args):
chef_cfg = cfg['chef']
# Ensure the chef directories we use exist
- c_dirs = [
- '/etc/chef',
- '/var/log/chef',
- '/var/lib/chef',
- '/var/cache/chef',
- '/var/backups/chef',
- '/var/run/chef',
- ]
- for d in c_dirs:
- util.ensure_dir(cloud.paths.join(False, d))
+ for d in CHEF_DIRS:
+ util.ensure_dir(d)
# Set the validation key based on the presence of either 'validation_key'
# or 'validation_cert'. In the case where both exist, 'validation_key'
# takes precedence
for key in ('validation_key', 'validation_cert'):
if key in chef_cfg and chef_cfg[key]:
- v_fn = cloud.paths.join(False, '/etc/chef/validation.pem')
- util.write_file(v_fn, chef_cfg[key])
+ util.write_file('/etc/chef/validation.pem', chef_cfg[key])
break
# Create the chef config from template
@@ -68,8 +71,7 @@ def handle(name, cfg, cloud, log, _args):
'_default'),
'validation_name': chef_cfg['validation_name']
}
- out_fn = cloud.paths.join(False, '/etc/chef/client.rb')
- templater.render_to_file(template_fn, out_fn, params)
+ templater.render_to_file(template_fn, '/etc/chef/client.rb', params)
else:
log.warn("No template found, not rendering to /etc/chef/client.rb")
@@ -81,11 +83,12 @@ def handle(name, cfg, cloud, log, _args):
initial_attributes = chef_cfg['initial_attributes']
for k in list(initial_attributes.keys()):
initial_json[k] = initial_attributes[k]
- firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json')
- util.write_file(firstboot_fn, json.dumps(initial_json))
+ util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json))
# If chef is not installed, we install chef based on 'install_type'
- if not os.path.isfile('/usr/bin/chef-client'):
+ if (not os.path.isfile('/usr/bin/chef-client') or
+ util.get_cfg_option_bool(chef_cfg, 'force_install', default=False)):
+
install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
'packages')
if install_type == "gems":
@@ -101,6 +104,14 @@ def handle(name, cfg, cloud, log, _args):
elif install_type == 'packages':
# this will install and run the chef-client from packages
cloud.distro.install_packages(('chef',))
+ elif install_type == 'omnibus':
+ url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
+ content = url_helper.readurl(url=url, retries=5)
+ with util.tempdir() as tmpd:
+ # use tmpd over tmpfile to avoid 'Text file busy' on execute
+ tmpf = "%s/chef-omnibus-install" % tmpd
+ util.write_file(tmpf, content, mode=0700)
+ util.subp([tmpf], capture=False)
else:
log.warn("Unknown chef install type %s", install_type)
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index d351d941..02610dd0 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -59,27 +59,26 @@ def handle(_name, cfg, cloud, log, _args):
raise RuntimeError(("'landscape' key existed in config,"
" but not a dictionary type,"
" is a %s instead"), util.obj_name(ls_cloudcfg))
+ if not ls_cloudcfg:
+ return
+
+ cloud.distro.install_packages(["landscape-client"])
merge_data = [
LSC_BUILTIN_CFG,
- cloud.paths.join(True, LSC_CLIENT_CFG_FILE),
+ LSC_CLIENT_CFG_FILE,
ls_cloudcfg,
]
merged = merge_together(merge_data)
-
- lsc_client_fn = cloud.paths.join(False, LSC_CLIENT_CFG_FILE)
- lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn))
- if not os.path.isdir(lsc_dir):
- util.ensure_dir(lsc_dir)
-
contents = StringIO()
merged.write(contents)
- contents.flush()
- util.write_file(lsc_client_fn, contents.getvalue())
- log.debug("Wrote landscape config file to %s", lsc_client_fn)
+ util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
+ util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
+ log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
+ util.subp(["service", "landscape-client", "restart"])
def merge_together(objs):
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 2acdbc6f..b670390d 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -29,6 +29,7 @@ from cloudinit import util
PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
+SERVER_CFG = '/etc/mcollective/server.cfg'
def handle(name, cfg, cloud, log, _args):
@@ -48,26 +49,23 @@ def handle(name, cfg, cloud, log, _args):
if 'conf' in mcollective_cfg:
# Read server.cfg values from the
# original file in order to be able to mix the rest up
- server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg')
- mcollective_config = ConfigObj(server_cfg_fn)
+ mcollective_config = ConfigObj(SERVER_CFG)
# See: http://tiny.cc/jh9agw
for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
if cfg_name == 'public-cert':
- pubcert_fn = cloud.paths.join(True, PUBCERT_FILE)
- util.write_file(pubcert_fn, cfg, mode=0644)
- mcollective_config['plugin.ssl_server_public'] = pubcert_fn
+ util.write_file(PUBCERT_FILE, cfg, mode=0644)
+ mcollective_config['plugin.ssl_server_public'] = PUBCERT_FILE
mcollective_config['securityprovider'] = 'ssl'
elif cfg_name == 'private-cert':
- pricert_fn = cloud.paths.join(True, PRICERT_FILE)
- util.write_file(pricert_fn, cfg, mode=0600)
- mcollective_config['plugin.ssl_server_private'] = pricert_fn
+ util.write_file(PRICERT_FILE, cfg, mode=0600)
+ mcollective_config['plugin.ssl_server_private'] = PRICERT_FILE
mcollective_config['securityprovider'] = 'ssl'
else:
if isinstance(cfg, (basestring, str)):
# Just set it in the 'main' section
mcollective_config[cfg_name] = cfg
elif isinstance(cfg, (dict)):
- # Iterate throug the config items, create a section
+ # Iterate through the config items, create a section
# if it is needed and then add/or create items as needed
if cfg_name not in mcollective_config.sections:
mcollective_config[cfg_name] = {}
@@ -78,14 +76,12 @@ def handle(name, cfg, cloud, log, _args):
mcollective_config[cfg_name] = str(cfg)
# We got all our config as wanted we'll rename
# the previous server.cfg and create our new one
- old_fn = cloud.paths.join(False, '/etc/mcollective/server.cfg.old')
- util.rename(server_cfg_fn, old_fn)
+ util.rename(SERVER_CFG, "%s.old" % (SERVER_CFG))
# Now we got the whole file, write to disk...
contents = StringIO()
mcollective_config.write(contents)
contents = contents.getvalue()
- server_cfg_rw = cloud.paths.join(False, '/etc/mcollective/server.cfg')
- util.write_file(server_cfg_rw, contents, mode=0644)
+ util.write_file(SERVER_CFG, contents, mode=0644)
# Start mcollective
util.subp(['service', 'mcollective', 'start'], capture=False)
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
new file mode 100644
index 00000000..facaa538
--- /dev/null
+++ b/cloudinit/config/cc_migrator.py
@@ -0,0 +1,85 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import shutil
+
+from cloudinit import helpers
+from cloudinit import util
+
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+
+
+def _migrate_canon_sems(cloud):
+ paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+ am_adjusted = 0
+ for sem_path in paths:
+ if not sem_path or not os.path.exists(sem_path):
+ continue
+ for p in os.listdir(sem_path):
+ full_path = os.path.join(sem_path, p)
+ if os.path.isfile(full_path):
+ (name, ext) = os.path.splitext(p)
+ canon_name = helpers.canon_sem_name(name)
+ if canon_name != name:
+ new_path = os.path.join(sem_path, canon_name + ext)
+ shutil.move(full_path, new_path)
+ am_adjusted += 1
+ return am_adjusted
+
+
+def _migrate_legacy_sems(cloud, log):
+ legacy_adjust = {
+ 'apt-update-upgrade': [
+ 'apt-configure',
+ 'package-update-upgrade-install',
+ ],
+ }
+ paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+ for sem_path in paths:
+ if not sem_path or not os.path.exists(sem_path):
+ continue
+ sem_helper = helpers.FileSemaphores(sem_path)
+ for (mod_name, migrate_to) in legacy_adjust.items():
+ possibles = [mod_name, helpers.canon_sem_name(mod_name)]
+ old_exists = []
+ for p in os.listdir(sem_path):
+ (name, _ext) = os.path.splitext(p)
+ if name in possibles and os.path.isfile(p):
+ old_exists.append(p)
+ for p in old_exists:
+ util.del_file(os.path.join(sem_path, p))
+ (_name, freq) = os.path.splitext(p)
+ for m in migrate_to:
+ log.debug("Migrating %s => %s with the same frequency",
+ p, m)
+ with sem_helper.lock(m, freq):
+ pass
+
+
+def handle(name, cfg, cloud, log, _args):
+ do_migrate = util.get_cfg_option_str(cfg, "migrate", True)
+ if not util.translate_bool(do_migrate):
+ log.debug("Skipping module named %s, migration disabled", name)
+ return
+ sems_moved = _migrate_canon_sems(cloud)
+ log.debug("Migrated %s semaphore files to there canonicalized names",
+ sems_moved)
+ _migrate_legacy_sems(cloud, log)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 14c965bb..cb772c86 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -28,6 +28,7 @@ from cloudinit import util
SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$"
SHORTNAME = re.compile(SHORTNAME_FILTER)
WS = re.compile("[%s]+" % (whitespace))
+FSTAB_PATH = "/etc/fstab"
def is_mdname(name):
@@ -167,8 +168,7 @@ def handle(_name, cfg, cloud, log, _args):
cc_lines.append('\t'.join(line))
fstab_lines = []
- fstab = util.load_file(cloud.paths.join(True, "/etc/fstab"))
- for line in fstab.splitlines():
+ for line in util.load_file(FSTAB_PATH).splitlines():
try:
toks = WS.split(line)
if toks[3].find(comment) != -1:
@@ -179,7 +179,7 @@ def handle(_name, cfg, cloud, log, _args):
fstab_lines.extend(cc_lines)
contents = "%s\n" % ('\n'.join(fstab_lines))
- util.write_file(cloud.paths.join(False, "/etc/fstab"), contents)
+ util.write_file(FSTAB_PATH, contents)
if needswap:
try:
@@ -188,9 +188,8 @@ def handle(_name, cfg, cloud, log, _args):
util.logexc(log, "Activating swap via 'swapon -a' failed")
for d in dirs:
- real_dir = cloud.paths.join(False, d)
try:
- util.ensure_dir(real_dir)
+ util.ensure_dir(d)
except:
util.logexc(log, "Failed to make '%s' config-mount", d)
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
new file mode 100644
index 00000000..73b0e30d
--- /dev/null
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -0,0 +1,99 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import time
+
+from cloudinit import log as logging
+from cloudinit import util
+
+REBOOT_FILE = "/var/run/reboot-required"
+REBOOT_CMD = ["/sbin/reboot"]
+
+
+def _multi_cfg_bool_get(cfg, *keys):
+ for k in keys:
+ if util.get_cfg_option_bool(cfg, k, False):
+ return True
+ return False
+
+
+def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
+ util.subp(REBOOT_CMD)
+ start = time.time()
+ wait_time = initial_sleep
+ for _i in range(0, wait_attempts):
+ time.sleep(wait_time)
+ wait_time *= backoff
+ elapsed = time.time() - start
+ log.debug("Rebooted, but still running after %s seconds", int(elapsed))
+ # If we got here, not good
+ elapsed = time.time() - start
+ raise RuntimeError(("Reboot did not happen"
+ " after %s seconds!") % (int(elapsed)))
+
+
+def handle(_name, cfg, cloud, log, _args):
+ # Handle the old style + new config names
+ update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update')
+ upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade')
+ reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required',
+ 'package_reboot_if_required')
+ pkglist = util.get_cfg_option_list(cfg, 'packages', [])
+
+ errors = []
+ if update or len(pkglist) or upgrade:
+ try:
+ cloud.distro.update_package_sources()
+ except Exception as e:
+ util.logexc(log, "Package update failed")
+ errors.append(e)
+
+ if upgrade:
+ try:
+ cloud.distro.package_command("upgrade")
+ except Exception as e:
+ util.logexc(log, "Package upgrade failed")
+ errors.append(e)
+
+ if len(pkglist):
+ try:
+ cloud.distro.install_packages(pkglist)
+ except Exception as e:
+ util.logexc(log, "Failed to install packages: %s", pkglist)
+ errors.append(e)
+
+ # TODO(smoser): handle this less violently
+ # kernel and openssl (possibly some other packages)
+ # write a file /var/run/reboot-required after upgrading.
+ # if that file exists and configured, then just stop right now and reboot
+ reboot_fn_exists = os.path.isfile(REBOOT_FILE)
+ if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
+ try:
+ log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE)
+ # Flush the above warning + anything else out...
+ logging.flushLoggers(log)
+ _fire_reboot(log)
+ except Exception as e:
+ util.logexc(log, "Requested reboot did not happen!")
+ errors.append(e)
+
+ if len(errors):
+ log.warn("%s failed with exceptions, re-raising the last one",
+ len(errors))
+ raise errors[-1]
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index ae1349eb..886487f8 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -84,10 +84,10 @@ def handle(name, cfg, cloud, log, args):
for (n, path) in pubkeys.iteritems():
try:
- all_keys[n] = util.load_file(cloud.paths.join(True, path))
+ all_keys[n] = util.load_file(path)
except:
util.logexc(log, ("%s: failed to open, can not"
- " phone home that data"), path)
+ " phone home that data!"), path)
submit_keys = {}
for k in post_list:
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
new file mode 100644
index 00000000..aefa3aff
--- /dev/null
+++ b/cloudinit/config/cc_power_state_change.py
@@ -0,0 +1,155 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
+
+import errno
+import os
+import re
+import subprocess
+import time
+
+frequency = PER_INSTANCE
+
+EXIT_FAIL = 254
+
+
+def handle(_name, cfg, _cloud, log, _args):
+
+ try:
+ (args, timeout) = load_power_state(cfg)
+ if args is None:
+ log.debug("no power_state provided. doing nothing")
+ return
+ except Exception as e:
+ log.warn("%s Not performing power state change!" % str(e))
+ return
+
+ mypid = os.getpid()
+ cmdline = util.load_file("/proc/%s/cmdline" % mypid)
+
+ if not cmdline:
+ log.warn("power_state: failed to get cmdline of current process")
+ return
+
+ devnull_fp = open(os.devnull, "w")
+
+ log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
+
+ util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, execmd,
+ [args, devnull_fp])
+
+
+def load_power_state(cfg):
+ # returns a tuple of shutdown_command, timeout
+ # shutdown_command is None if no config found
+ pstate = cfg.get('power_state')
+
+ if pstate is None:
+ return (None, None)
+
+ if not isinstance(pstate, dict):
+ raise TypeError("power_state is not a dict.")
+
+ opt_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}
+
+ mode = pstate.get("mode")
+ if mode not in opt_map:
+ raise TypeError("power_state[mode] required, must be one of: %s." %
+ ','.join(opt_map.keys()))
+
+ delay = pstate.get("delay", "now")
+ if delay != "now" and not re.match("\+[0-9]+", delay):
+ raise TypeError("power_state[delay] must be 'now' or '+m' (minutes).")
+
+ args = ["shutdown", opt_map[mode], delay]
+ if pstate.get("message"):
+ args.append(pstate.get("message"))
+
+ try:
+ timeout = float(pstate.get('timeout', 30.0))
+ except ValueError:
+ raise ValueError("failed to convert timeout '%s' to float." %
+ pstate['timeout'])
+
+ return (args, timeout)
+
+
+def doexit(sysexit):
+ os._exit(sysexit) # pylint: disable=W0212
+
+
+def execmd(exe_args, output=None, data_in=None):
+ try:
+ proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
+ stdout=output, stderr=subprocess.STDOUT)
+ proc.communicate(data_in)
+ ret = proc.returncode
+ except Exception:
+ doexit(EXIT_FAIL)
+ doexit(ret)
+
+
+def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
+ # wait until pid, with /proc/pid/cmdline contents of pidcmdline
+ # is no longer alive. After it is gone, or timeout has passed
+ # execute func(args)
+ msg = None
+ end_time = time.time() + timeout
+
+ cmdline_f = "/proc/%s/cmdline" % pid
+
+ def fatal(msg):
+ if log:
+ log.warn(msg)
+ doexit(EXIT_FAIL)
+
+ known_errnos = (errno.ENOENT, errno.ESRCH)
+
+ while True:
+ if time.time() > end_time:
+ msg = "timeout reached before %s ended" % pid
+ break
+
+ try:
+ cmdline = ""
+ with open(cmdline_f) as fp:
+ cmdline = fp.read()
+ if cmdline != pidcmdline:
+ msg = "cmdline changed for %s [now: %s]" % (pid, cmdline)
+ break
+
+ except IOError as ioerr:
+ if ioerr.errno in known_errnos:
+ msg = "pidfile '%s' gone [%d]" % (cmdline_f, ioerr.errno)
+ else:
+ fatal("IOError during wait: %s" % ioerr)
+ break
+
+ except Exception as e:
+ fatal("Unexpected Exception: %s" % e)
+
+ time.sleep(.25)
+
+ if not msg:
+ fatal("Unexpected error in run_after_pid_gone")
+
+ if log:
+ log.debug(msg)
+ func(*args)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 74ee18e1..e9a0a0f4 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -21,12 +21,32 @@
from StringIO import StringIO
import os
-import pwd
import socket
from cloudinit import helpers
from cloudinit import util
+PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
+PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/'
+PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
+PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem'
+
+
+def _autostart_puppet(log):
+ # Set puppet to automatically start
+ if os.path.exists('/etc/default/puppet'):
+ util.subp(['sed', '-i',
+ '-e', 's/^START=.*/START=yes/',
+ '/etc/default/puppet'], capture=False)
+ elif os.path.exists('/bin/systemctl'):
+ util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
+ capture=False)
+ elif os.path.exists('/sbin/chkconfig'):
+ util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
+ else:
+ log.warn(("Sorry we do not know how to enable"
+ " puppet services on this system"))
+
def handle(name, cfg, cloud, log, _args):
# If there isn't a puppet key in the configuration don't do anything
@@ -37,14 +57,15 @@ def handle(name, cfg, cloud, log, _args):
puppet_cfg = cfg['puppet']
- # Start by installing the puppet package ...
- cloud.distro.install_packages(["puppet"])
+ # Start by installing the puppet package if necessary...
+ install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
+ if install:
+ cloud.distro.install_packages(["puppet"])
# ... and then update the puppet configuration
if 'conf' in puppet_cfg:
# Add all sections from the conf object to puppet.conf
- puppet_conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf')
- contents = util.load_file(puppet_conf_fn)
+ contents = util.load_file(PUPPET_CONF_PATH)
# Create object for reading puppet.conf values
puppet_config = helpers.DefaultingConfigParser()
# Read puppet.conf values from original file in order to be able to
@@ -53,28 +74,19 @@ def handle(name, cfg, cloud, log, _args):
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines)
puppet_config.readfp(StringIO(cleaned_contents),
- filename=puppet_conf_fn)
+ filename=PUPPET_CONF_PATH)
for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
# Cert configuration is a special case
# Dump the puppet master ca certificate in the correct place
if cfg_name == 'ca_cert':
# Puppet ssl sub-directory isn't created yet
# Create it with the proper permissions and ownership
- pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl')
- util.ensure_dir(pp_ssl_dir, 0771)
- util.chownbyid(pp_ssl_dir,
- pwd.getpwnam('puppet').pw_uid, 0)
- pp_ssl_certs = cloud.paths.join(False,
- '/var/lib/puppet/ssl/certs/')
- util.ensure_dir(pp_ssl_certs)
- util.chownbyid(pp_ssl_certs,
- pwd.getpwnam('puppet').pw_uid, 0)
- pp_ssl_ca_certs = cloud.paths.join(False,
- ('/var/lib/puppet/'
- 'ssl/certs/ca.pem'))
- util.write_file(pp_ssl_ca_certs, cfg)
- util.chownbyid(pp_ssl_ca_certs,
- pwd.getpwnam('puppet').pw_uid, 0)
+ util.ensure_dir(PUPPET_SSL_DIR, 0771)
+ util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
+ util.ensure_dir(PUPPET_SSL_CERT_DIR)
+ util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
+ util.write_file(PUPPET_SSL_CERT_PATH, str(cfg))
+ util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
else:
# Iterate throug the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed
@@ -90,25 +102,11 @@ def handle(name, cfg, cloud, log, _args):
puppet_config.set(cfg_name, o, v)
# We got all our config as wanted we'll rename
# the previous puppet.conf and create our new one
- conf_old_fn = cloud.paths.join(False,
- '/etc/puppet/puppet.conf.old')
- util.rename(puppet_conf_fn, conf_old_fn)
- puppet_conf_rw = cloud.paths.join(False, '/etc/puppet/puppet.conf')
- util.write_file(puppet_conf_rw, puppet_config.stringify())
+ util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH))
+ util.write_file(PUPPET_CONF_PATH, puppet_config.stringify())
- # Set puppet to automatically start
- if os.path.exists('/etc/default/puppet'):
- util.subp(['sed', '-i',
- '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'], capture=False)
- elif os.path.exists('/bin/systemctl'):
- util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
- capture=False)
- elif os.path.exists('/sbin/chkconfig'):
- util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
- else:
- log.warn(("Sorry we do not know how to enable"
- " puppet services on this system"))
+ # Set it up so it autostarts
+ _autostart_puppet(log)
# Start puppetd
util.subp(['service', 'puppet', 'start'], capture=False)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index e7f27944..70294eda 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -32,6 +32,8 @@ RESIZE_FS_PREFIXES_CMDS = [
('xfs', 'xfs_growfs'),
]
+NOBLOCK = "noblock"
+
def nodeify_path(devpth, where, log):
try:
@@ -62,23 +64,22 @@ def get_fs_type(st_dev, path, log):
raise
-def handle(name, cfg, cloud, log, args):
+def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
resize_root = args[0]
else:
resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
- if not util.translate_bool(resize_root):
+ if not util.translate_bool(resize_root, addons=[NOBLOCK]):
log.debug("Skipping module named %s, resizing disabled", name)
return
# TODO(harlowja) is the directory ok to be used??
resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
- resize_root_d = cloud.paths.join(False, resize_root_d)
util.ensure_dir(resize_root_d)
# TODO(harlowja): allow what is to be resized to be configurable??
- resize_what = cloud.paths.join(False, "/")
+ resize_what = "/"
with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.",
dir=resize_root_d, delete=True) as tfh:
devpth = tfh.name
@@ -111,7 +112,7 @@ def handle(name, cfg, cloud, log, args):
log.debug("Resizing %s (%s) using %s", resize_what, fs_type, resizer)
resize_cmd = [resizer, devpth]
- if resize_root == "noblock":
+ if resize_root == NOBLOCK:
# Fork to a child that will run
# the resize command
util.fork_cb(do_resize, resize_cmd, log)
@@ -121,7 +122,7 @@ def handle(name, cfg, cloud, log, args):
do_resize(resize_cmd, log)
action = 'Resized'
- if resize_root == "noblock":
+ if resize_root == NOBLOCK:
action = 'Resizing (via forking)'
log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)",
action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 78327526..0c2c6880 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -71,8 +71,7 @@ def handle(name, cfg, cloud, log, _args):
try:
contents = "%s\n" % (content)
- util.write_file(cloud.paths.join(False, filename),
- contents, omode=omode)
+ util.write_file(filename, contents, omode=omode)
except Exception:
util.logexc(log, "Failed to write to %s", filename)
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 65064cfb..598c3a3e 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -33,6 +33,6 @@ def handle(name, cfg, cloud, log, _args):
cmd = cfg["runcmd"]
try:
content = util.shellify(cmd)
- util.write_file(cloud.paths.join(False, out_fn), content, 0700)
+ util.write_file(out_fn, content, 0700)
except:
util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 79ed8807..f3eede18 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -34,8 +34,7 @@ def handle(name, cfg, cloud, log, _args):
cloud.distro.install_packages(["salt-minion"])
# Ensure we can configure files at the right dir
- config_dir = cloud.paths.join(False, salt_cfg.get("config_dir",
- '/etc/salt'))
+ config_dir = salt_cfg.get("config_dir", '/etc/salt')
util.ensure_dir(config_dir)
# ... and then update the salt configuration
@@ -47,8 +46,7 @@ def handle(name, cfg, cloud, log, _args):
# ... copy the key pair if specified
if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
- pki_dir = cloud.paths.join(False, salt_cfg.get('pki_dir',
- '/etc/salt/pki'))
+ pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
with util.umask(077):
util.ensure_dir(pki_dir)
pub_name = os.path.join(pki_dir, 'minion.pub')
@@ -56,5 +54,6 @@ def handle(name, cfg, cloud, log, _args):
util.write_file(pub_name, salt_cfg['public_key'])
util.write_file(pem_name, salt_cfg['private_key'])
- # Start salt-minion
- util.subp(['service', 'salt-minion', 'start'], capture=False)
+ # restart salt-minion. 'service' will start even if not started. if it
+ # was started, it needs to be restarted for config change.
+ util.subp(['service', 'salt-minion', 'restart'], capture=False)
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index b0f27ebf..2b32fc94 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -27,9 +27,11 @@ def handle(name, cfg, cloud, log, _args):
" not setting the hostname in module %s"), name)
return
- (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
try:
- log.debug("Setting hostname to %s", hostname)
- cloud.distro.set_hostname(hostname)
+ log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
+ cloud.distro.set_hostname(hostname, fqdn)
except Exception:
- util.logexc(log, "Failed to set hostname to %s", hostname)
+ util.logexc(log, "Failed to set the hostname to %s (%s)",
+ fqdn, hostname)
+ raise
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index a017e6b6..c6bf62fd 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -20,6 +20,11 @@
import sys
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit import ssh_util
from cloudinit import util
@@ -50,18 +55,10 @@ def handle(_name, cfg, cloud, log, args):
expire = util.get_cfg_option_bool(chfg, 'expire', expire)
if not plist and password:
- user = cloud.distro.get_default_user()
-
- if 'users' in cfg:
-
- user_zero = cfg['users'][0]
-
- if isinstance(user_zero, dict) and 'name' in user_zero:
- user = user_zero['name']
-
+ (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ (user, _user_config) = ds.extract_default(users)
if user:
plist = "%s:%s" % (user, password)
-
else:
log.warn("No default or defined user to change password for.")
@@ -117,8 +114,7 @@ def handle(_name, cfg, cloud, log, args):
replaced_auth = False
# See: man sshd_config
- conf_fn = cloud.paths.join(True, ssh_util.DEF_SSHD_CFG)
- old_lines = ssh_util.parse_ssh_config(conf_fn)
+ old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
new_lines = []
i = 0
for (i, line) in enumerate(old_lines):
@@ -137,8 +133,7 @@ def handle(_name, cfg, cloud, log, args):
pw_auth))
lines = [str(e) for e in new_lines]
- ssh_rw_fn = cloud.paths.join(False, ssh_util.DEF_SSHD_CFG)
- util.write_file(ssh_rw_fn, "\n".join(lines))
+ util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))
try:
cmd = ['service']
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 0ded62ba..b623d476 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -21,6 +21,11 @@
import glob
import os
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit import ssh_util
from cloudinit import util
@@ -54,7 +59,7 @@ def handle(_name, cfg, cloud, log, _args):
# remove the static keys from the pristine image
if cfg.get("ssh_deletekeys", True):
- key_pth = cloud.paths.join(False, "/etc/ssh/", "ssh_host_*key*")
+ key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
for f in glob.glob(key_pth):
try:
util.del_file(f)
@@ -67,8 +72,7 @@ def handle(_name, cfg, cloud, log, _args):
if key in KEY_2_FILE:
tgt_fn = KEY_2_FILE[key][0]
tgt_perms = KEY_2_FILE[key][1]
- util.write_file(cloud.paths.join(False, tgt_fn),
- val, tgt_perms)
+ util.write_file(tgt_fn, val, tgt_perms)
for (priv, pub) in PRIV_2_PUB.iteritems():
if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']:
@@ -89,7 +93,7 @@ def handle(_name, cfg, cloud, log, _args):
'ssh_genkeytypes',
GENERATE_KEY_NAMES)
for keytype in genkeys:
- keyfile = cloud.paths.join(False, KEY_FILE_TPL % (keytype))
+ keyfile = KEY_FILE_TPL % (keytype)
util.ensure_dir(os.path.dirname(keyfile))
if not os.path.exists(keyfile):
cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
@@ -102,16 +106,8 @@ def handle(_name, cfg, cloud, log, _args):
" %s to file %s"), keytype, keyfile)
try:
- # TODO(utlemming): consolidate this stanza that occurs in:
- # cc_ssh_import_id, cc_set_passwords, maybe cc_users_groups.py
- user = cloud.distro.get_default_user()
-
- if 'users' in cfg:
- user_zero = cfg['users'][0]
-
- if user_zero != "default":
- user = user_zero
-
+ (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ (user, _user_config) = ds.extract_default(users)
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
DISABLE_ROOT_OPTS)
@@ -121,17 +117,16 @@ def handle(_name, cfg, cloud, log, _args):
cfgkeys = cfg["ssh_authorized_keys"]
keys.extend(cfgkeys)
- apply_credentials(keys, user, cloud.paths,
- disable_root, disable_root_opts)
+ apply_credentials(keys, user, disable_root, disable_root_opts)
except:
util.logexc(log, "Applying ssh credentials failed!")
-def apply_credentials(keys, user, paths, disable_root, disable_root_opts):
+def apply_credentials(keys, user, disable_root, disable_root_opts):
keys = set(keys)
if user:
- ssh_util.setup_user_keys(keys, user, '', paths)
+ ssh_util.setup_user_keys(keys, user, '')
if disable_root:
if not user:
@@ -140,4 +135,4 @@ def apply_credentials(keys, user, paths, disable_root, disable_root_opts):
else:
key_prefix = ''
- ssh_util.setup_user_keys(keys, 'root', key_prefix, paths)
+ ssh_util.setup_user_keys(keys, 'root', key_prefix)
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 23f5755a..c38bcea2 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -21,6 +21,11 @@ import hashlib
from prettytable import PrettyTable
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit import ssh_util
from cloudinit import util
@@ -40,8 +45,10 @@ def _gen_fingerprint(b64_text, hash_meth='md5'):
hasher = hashlib.new(hash_meth)
hasher.update(base64.b64decode(b64_text))
return ":".join(_split_hash(hasher.hexdigest()))
- except TypeError:
+ except (TypeError, ValueError):
# Raised when b64 not really b64...
+ # or when the hash type is not really
+ # a known/supported hash type...
return '?'
@@ -89,8 +96,9 @@ def handle(name, cfg, cloud, log, _args):
log.debug(("Skipping module named %s, "
"logging of ssh fingerprints disabled"), name)
- user_name = util.get_cfg_option_str(cfg, "user", "ubuntu")
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
- extract = ssh_util.extract_authorized_keys
- (auth_key_fn, auth_key_entries) = extract(user_name, cloud.paths)
- _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth)
+ (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ for (user_name, _cfg) in users.items():
+ (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
+ _pprint_key_entries(user_name, key_fn,
+ key_entries, hash_meth)
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 08fb63c6..83af36e9 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -18,6 +18,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit import util
import pwd
@@ -39,33 +44,27 @@ def handle(_name, cfg, cloud, log, args):
return
# import for cloudinit created users
+ (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
elist = []
- for user_cfg in cfg['users']:
- user = None
+ for (user, user_cfg) in users.items():
import_ids = []
-
- if isinstance(user_cfg, str) and user_cfg == "default":
- user = cloud.distro.get_default_user()
- if not user:
- continue
-
+ if user_cfg['default']:
import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
-
- elif isinstance(user_cfg, dict):
- user = None
- import_ids = []
-
+ else:
try:
- user = user_cfg['name']
import_ids = user_cfg['ssh_import_id']
-
- if import_ids and isinstance(import_ids, str):
- import_ids = str(import_ids).split(',')
-
except:
- log.debug("user %s is not configured for ssh_import" % user)
+ log.debug("User %s is not configured for ssh_import_id", user)
continue
+ try:
+ import_ids = util.uniq_merge(import_ids)
+ import_ids = [str(i) for i in import_ids]
+ except:
+ log.debug("User %s is not correctly configured for ssh_import_id",
+ user)
+ continue
+
if not len(import_ids):
continue
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 4d75000f..96103615 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -42,8 +42,7 @@ def handle(name, cfg, cloud, log, _args):
raise RuntimeError(("No hosts template could be"
" found for distro %s") % (cloud.distro.name))
- out_fn = cloud.paths.join(False, '/etc/hosts')
- templater.render_to_file(tpl_fn_name, out_fn,
+ templater.render_to_file(tpl_fn_name, '/etc/hosts',
{'hostname': hostname, 'fqdn': fqdn})
elif manage_hosts == "localhost":
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index 1d6679ea..52225cd8 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -32,10 +32,12 @@ def handle(name, cfg, cloud, log, _args):
" not updating the hostname in module %s"), name)
return
- (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
try:
prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
- cloud.distro.update_hostname(hostname, prev_fn)
+ log.debug("Updating hostname to %s (%s)", fqdn, hostname)
+ cloud.distro.update_hostname(hostname, fqdn, prev_fn)
except Exception:
- util.logexc(log, "Failed to set the hostname to %s", hostname)
+ util.logexc(log, "Failed to update the hostname to %s (%s)",
+ fqdn, hostname)
raise
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index 418f3330..bf5b4581 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -16,63 +16,19 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit import distros as ds
+
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-def handle(name, cfg, cloud, log, _args):
- user_zero = None
-
- if 'groups' in cfg:
- for group in cfg['groups']:
- if isinstance(group, dict):
- for name, values in group.iteritems():
- if isinstance(values, list):
- cloud.distro.create_group(name, values)
- elif isinstance(values, str):
- cloud.distro.create_group(name, values.split(','))
- else:
- cloud.distro.create_group(group, [])
-
- if 'users' in cfg:
- user_zero = None
-
- for user_config in cfg['users']:
-
- # Handle the default user creation
- if 'default' in user_config:
- log.info("Creating default user")
-
- # Create the default user if so defined
- try:
- cloud.distro.add_default_user()
-
- if not user_zero:
- user_zero = cloud.distro.get_default_user()
-
- except NotImplementedError:
-
- if user_zero == name:
- user_zero = None
-
- log.warn("Distro has not implemented default user "
- "creation. No default user will be created")
-
- elif isinstance(user_config, dict) and 'name' in user_config:
-
- name = user_config['name']
- if not user_zero:
- user_zero = name
-
- # Make options friendly for distro.create_user
- new_opts = {}
- if isinstance(user_config, dict):
- for opt in user_config:
- new_opts[opt.replace('-', '_')] = user_config[opt]
-
- cloud.distro.create_user(**new_opts)
-
- else:
- # create user with no configuration
- cloud.distro.create_user(user_config)
+def handle(name, cfg, cloud, _log, _args):
+ (users, groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ for (name, members) in groups.items():
+ cloud.distro.create_group(name, members)
+ for (user, config) in users.items():
+ cloud.distro.create_user(user, **config)
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
new file mode 100644
index 00000000..5c273825
--- /dev/null
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -0,0 +1,106 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import util
+
+import configobj
+
+
+def _canonicalize_id(repo_id):
+ repo_id = repo_id.lower().replace("-", "_")
+ repo_id = repo_id.replace(" ", "_")
+ return repo_id
+
+
+def _format_repo_value(val):
+ if isinstance(val, (bool)):
+ # Seems like yum prefers 1/0
+ return str(int(val))
+ if isinstance(val, (list, tuple)):
+ # Can handle 'lists' in certain cases
+ # See: http://bit.ly/Qqrf1t
+ return "\n ".join([_format_repo_value(v) for v in val])
+ if not isinstance(val, (basestring, str)):
+ return str(val)
+ return val
+
+
+## TODO(harlowja): move to distro?
+# See man yum.conf
+def _format_repository_config(repo_id, repo_config):
+ to_be = configobj.ConfigObj()
+ to_be[repo_id] = {}
+ # Do basic translation of the items -> values
+ for (k, v) in repo_config.items():
+ # For now assume that people using this know
+ # the format of yum and don't verify keys/values further
+ to_be[repo_id][k] = _format_repo_value(v)
+ lines = to_be.write()
+ lines.insert(0, "# Created by cloud-init on %s" % (util.time_rfc2822()))
+ return "\n".join(lines)
+
+
+def handle(name, cfg, _cloud, log, _args):
+ repos = cfg.get('yum_repos')
+ if not repos:
+ log.debug(("Skipping module named %s,"
+ " no 'yum_repos' configuration found"), name)
+ return
+ repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir',
+ '/etc/yum.repos.d/')
+ repo_locations = {}
+ repo_configs = {}
+ for (repo_id, repo_config) in repos.items():
+ canon_repo_id = _canonicalize_id(repo_id)
+ repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
+ if os.path.exists(repo_fn_pth):
+ log.info("Skipping repo %s, file %s already exists!",
+ repo_id, repo_fn_pth)
+ continue
+ elif canon_repo_id in repo_locations:
+ log.info("Skipping repo %s, file %s already pending!",
+ repo_id, repo_fn_pth)
+ continue
+ if not repo_config:
+ repo_config = {}
+ # Do some basic sanity checks/cleaning
+ n_repo_config = {}
+ for (k, v) in repo_config.items():
+ k = k.lower().strip().replace("-", "_")
+ if k:
+ n_repo_config[k] = v
+ repo_config = n_repo_config
+ missing_required = 0
+ for req_field in ['baseurl']:
+ if not req_field in repo_config:
+ log.warn(("Repository %s does not contain a %s"
+ " configuration 'required' entry"),
+ repo_id, req_field)
+ missing_required += 1
+ if not missing_required:
+ repo_configs[canon_repo_id] = repo_config
+ repo_locations[canon_repo_id] = repo_fn_pth
+ else:
+ log.warn("Repository %s is missing %s required fields, skipping!",
+ repo_id, missing_required)
+ for (c_repo_id, path) in repo_locations.items():
+ repo_blob = _format_repository_config(c_repo_id,
+ repo_configs.get(c_repo_id))
+ util.write_file(path, repo_blob)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 40c6aa4f..6a684b89 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -24,9 +24,8 @@
from StringIO import StringIO
import abc
-import grp
+import itertools
import os
-import pwd
import re
from cloudinit import importer
@@ -34,54 +33,22 @@ from cloudinit import log as logging
from cloudinit import ssh_util
from cloudinit import util
-# TODO(harlowja): Make this via config??
-IFACE_ACTIONS = {
- 'up': ['ifup', '--all'],
- 'down': ['ifdown', '--all'],
-}
+from cloudinit.distros.parsers import hosts
LOG = logging.getLogger(__name__)
class Distro(object):
-
__metaclass__ = abc.ABCMeta
- default_user = None
- default_user_groups = None
+ hosts_fn = "/etc/hosts"
+ ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users"
+ hostname_conf_fn = "/etc/hostname"
def __init__(self, name, cfg, paths):
self._paths = paths
self._cfg = cfg
self.name = name
- def add_default_user(self):
- # Adds the distro user using the rules:
- # - Password is same as username but is locked
- # - nopasswd sudo access
-
- user = self.get_default_user()
- groups = self.get_default_user_groups()
-
- if not user:
- raise NotImplementedError("No Default user")
-
- user_dict = {
- 'name': user,
- 'plain_text_passwd': user,
- 'home': "/home/%s" % user,
- 'shell': "/bin/bash",
- 'lock_passwd': True,
- 'gecos': "%s%s" % (user[0:1].upper(), user[1:]),
- 'sudo': "ALL=(ALL) NOPASSWD:ALL",
- }
-
- if groups:
- user_dict['groups'] = groups
-
- self.create_user(**user_dict)
-
- LOG.info("Added default '%s' user with passwordless sudo", user)
-
@abc.abstractmethod
def install_packages(self, pkglist):
raise NotImplementedError()
@@ -95,13 +62,10 @@ class Distro(object):
def get_option(self, opt_name, default=None):
return self._cfg.get(opt_name, default)
- @abc.abstractmethod
- def set_hostname(self, hostname):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def update_hostname(self, hostname, prev_hostname_fn):
- raise NotImplementedError()
+ def set_hostname(self, hostname, fqdn=None):
+ writeable_hostname = self._select_hostname(hostname, fqdn)
+ self._write_hostname(writeable_hostname, self.hostname_conf_fn)
+ self._apply_hostname(hostname)
@abc.abstractmethod
def package_command(self, cmd, args=None):
@@ -118,26 +82,25 @@ class Distro(object):
return arch
def _get_arch_package_mirror_info(self, arch=None):
- mirror_info = self.get_option("package_mirrors", None)
- if arch == None:
+ mirror_info = self.get_option("package_mirrors", [])
+ if not arch:
arch = self.get_primary_arch()
return _get_arch_package_mirror_info(mirror_info, arch)
def get_package_mirror_info(self, arch=None,
availability_zone=None):
- # this resolves the package_mirrors config option
+ # This resolves the package_mirrors config option
# down to a single dict of {mirror_name: mirror_url}
arch_info = self._get_arch_package_mirror_info(arch)
-
return _get_package_mirror_info(availability_zone=availability_zone,
mirror_info=arch_info)
def apply_network(self, settings, bring_up=True):
# Write it out
- self._write_network(settings)
+ dev_names = self._write_network(settings)
# Now try to bring them up
if bring_up:
- return self._interface_action('up')
+ return self._bring_up_interfaces(dev_names)
return False
@abc.abstractmethod
@@ -151,51 +114,137 @@ class Distro(object):
def _get_localhost_ip(self):
return "127.0.0.1"
+ @abc.abstractmethod
+ def _read_hostname(self, filename, default=None):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def _write_hostname(self, hostname, filename):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def _read_system_hostname(self):
+ raise NotImplementedError()
+
+ def _apply_hostname(self, hostname):
+ # This really only sets the hostname
+ # temporarily (until reboot so it should
+ # not be depended on). Use the write
+ # hostname functions for 'permanent' adjustments.
+ LOG.debug("Non-persistently setting the system hostname to %s",
+ hostname)
+ try:
+ util.subp(['hostname', hostname])
+ except util.ProcessExecutionError:
+ util.logexc(LOG, ("Failed to non-persistently adjust"
+ " the system hostname to %s"), hostname)
+
+ @abc.abstractmethod
+ def _select_hostname(self, hostname, fqdn):
+ raise NotImplementedError()
+
+ def update_hostname(self, hostname, fqdn, prev_hostname_fn):
+ applying_hostname = hostname
+
+ # Determine what the actual written hostname should be
+ hostname = self._select_hostname(hostname, fqdn)
+
+ # If the previous hostname file exists lets see if we
+ # can get a hostname from it
+ if prev_hostname_fn and os.path.exists(prev_hostname_fn):
+ prev_hostname = self._read_hostname(prev_hostname_fn)
+ else:
+ prev_hostname = None
+
+ # Lets get where we should write the system hostname
+ # and what the system hostname is
+ (sys_fn, sys_hostname) = self._read_system_hostname()
+ update_files = []
+
+ # If there is no previous hostname or it differs
+ # from what we want, lets update it or create the
+ # file in the first place
+ if not prev_hostname or prev_hostname != hostname:
+ update_files.append(prev_hostname_fn)
+
+ # If the system hostname is different than the previous
+ # one or the desired one lets update it as well
+ if (not sys_hostname) or (sys_hostname == prev_hostname
+ and sys_hostname != hostname):
+ update_files.append(sys_fn)
+
+        # Remove duplicates (in case the previous config filename)
+ # is the same as the system config filename, don't bother
+ # doing it twice
+ update_files = set([f for f in update_files if f])
+ LOG.debug("Attempting to update hostname to %s in %s files",
+ hostname, len(update_files))
+
+ for fn in update_files:
+ try:
+ self._write_hostname(hostname, fn)
+ except IOError:
+ util.logexc(LOG, "Failed to write hostname %s to %s",
+ hostname, fn)
+
+ if (sys_hostname and prev_hostname and
+ sys_hostname != prev_hostname):
+ LOG.debug("%s differs from %s, assuming user maintained hostname.",
+ prev_hostname_fn, sys_fn)
+
+ # If the system hostname file name was provided set the
+ # non-fqdn as the transient hostname.
+ if sys_fn in update_files:
+ self._apply_hostname(applying_hostname)
+
def update_etc_hosts(self, hostname, fqdn):
- # Format defined at
- # http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts
- header = "# Added by cloud-init"
- real_header = "%s on %s" % (header, util.time_rfc2822())
+ header = ''
+ if os.path.exists(self.hosts_fn):
+ eh = hosts.HostsConf(util.load_file(self.hosts_fn))
+ else:
+ eh = hosts.HostsConf('')
+ header = util.make_header(base="added")
local_ip = self._get_localhost_ip()
- hosts_line = "%s\t%s %s" % (local_ip, fqdn, hostname)
- new_etchosts = StringIO()
- need_write = False
- need_change = True
- hosts_ro_fn = self._paths.join(True, "/etc/hosts")
- for line in util.load_file(hosts_ro_fn).splitlines():
- if line.strip().startswith(header):
- continue
- if not line.strip() or line.strip().startswith("#"):
- new_etchosts.write("%s\n" % (line))
- continue
- split_line = [s.strip() for s in line.split()]
- if len(split_line) < 2:
- new_etchosts.write("%s\n" % (line))
- continue
- (ip, hosts) = split_line[0], split_line[1:]
- if ip == local_ip:
- if sorted([hostname, fqdn]) == sorted(hosts):
- need_change = False
- if need_change:
- line = "%s\n%s" % (real_header, hosts_line)
- need_change = False
- need_write = True
- new_etchosts.write("%s\n" % (line))
+ prev_info = eh.get_entry(local_ip)
+ need_change = False
+ if not prev_info:
+ eh.add_entry(local_ip, fqdn, hostname)
+ need_change = True
+ else:
+ need_change = True
+ for entry in prev_info:
+ entry_fqdn = None
+ entry_aliases = []
+ if len(entry) >= 1:
+ entry_fqdn = entry[0]
+ if len(entry) >= 2:
+ entry_aliases = entry[1:]
+ if entry_fqdn is not None and entry_fqdn == fqdn:
+ if hostname in entry_aliases:
+ # Exists already, leave it be
+ need_change = False
+ if need_change:
+ # Doesn't exist, add that entry in...
+ new_entries = list(prev_info)
+ new_entries.append([fqdn, hostname])
+ eh.del_entries(local_ip)
+ for entry in new_entries:
+ if len(entry) == 1:
+ eh.add_entry(local_ip, entry[0])
+ elif len(entry) >= 2:
+ eh.add_entry(local_ip, *entry)
if need_change:
- new_etchosts.write("%s\n%s\n" % (real_header, hosts_line))
- need_write = True
- if need_write:
- contents = new_etchosts.getvalue()
- util.write_file(self._paths.join(False, "/etc/hosts"),
- contents, mode=0644)
-
- def _interface_action(self, action):
- if action not in IFACE_ACTIONS:
- raise NotImplementedError("Unknown interface action %s" % (action))
- cmd = IFACE_ACTIONS[action]
+ contents = StringIO()
+ if header:
+ contents.write("%s\n" % (header))
+ contents.write("%s\n" % (eh))
+ util.write_file(self.hosts_fn, contents.getvalue(), mode=0644)
+
+ def _bring_up_interface(self, device_name):
+ cmd = ['ifup', device_name]
+ LOG.debug("Attempting to run bring up interface %s using command %s",
+ device_name, cmd)
try:
- LOG.debug("Attempting to run %s interface action using command %s",
- action, cmd)
(_out, err) = util.subp(cmd)
if len(err):
LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
@@ -204,18 +253,17 @@ class Distro(object):
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
- def isuser(self, name):
- try:
- if pwd.getpwnam(name):
- return True
- except KeyError:
- return False
+ def _bring_up_interfaces(self, device_names):
+ am_failed = 0
+ for d in device_names:
+ if not self._bring_up_interface(d):
+ am_failed += 1
+ if am_failed == 0:
+ return True
+ return False
def get_default_user(self):
- return self.default_user
-
- def get_default_user_groups(self):
- return self.default_user_groups
+ return self.get_option('default_user')
def create_user(self, name, **kwargs):
"""
@@ -231,22 +279,23 @@ class Distro(object):
# inputs. If something goes wrong, we can end up with a system
# that nobody can login to.
adduser_opts = {
- "gecos": '--comment',
- "homedir": '--home',
- "primary_group": '--gid',
- "groups": '--groups',
- "passwd": '--password',
- "shell": '--shell',
- "expiredate": '--expiredate',
- "inactive": '--inactive',
- }
+ "gecos": '--comment',
+ "homedir": '--home',
+ "primary_group": '--gid',
+ "groups": '--groups',
+ "passwd": '--password',
+ "shell": '--shell',
+ "expiredate": '--expiredate',
+ "inactive": '--inactive',
+ "selinux_user": '--selinux-user',
+ }
adduser_opts_flags = {
- "no_user_group": '--no-user-group',
- "system": '--system',
- "no_log_init": '--no-log-init',
- "no_create_home": "-M",
- }
+ "no_user_group": '--no-user-group',
+ "system": '--system',
+ "no_log_init": '--no-log-init',
+ "no_create_home": "-M",
+ }
# Now check the value and create the command
for option in kwargs:
@@ -271,10 +320,10 @@ class Distro(object):
adduser_cmd.append('-m')
# Create the user
- if self.isuser(name):
+ if util.is_user(name):
LOG.warn("User %s already exists, skipping." % name)
else:
- LOG.debug("Creating name %s" % name)
+ LOG.debug("Adding user named %s", name)
try:
util.subp(adduser_cmd, logstring=x_adduser_cmd)
except Exception as e:
@@ -303,7 +352,7 @@ class Distro(object):
# Import SSH keys
if 'ssh_authorized_keys' in kwargs:
keys = set(kwargs['ssh_authorized_keys']) or []
- ssh_util.setup_user_keys(keys, name, None, self._paths)
+ ssh_util.setup_user_keys(keys, name, key_prefix=None)
return True
@@ -322,44 +371,89 @@ class Distro(object):
return True
- def write_sudo_rules(self,
- user,
- rules,
- sudo_file="/etc/sudoers.d/90-cloud-init-users",
- ):
+ def ensure_sudo_dir(self, path, sudo_base='/etc/sudoers'):
+ # Ensure the dir is included and that
+ # it actually exists as a directory
+ sudoers_contents = ''
+ base_exists = False
+ if os.path.exists(sudo_base):
+ sudoers_contents = util.load_file(sudo_base)
+ base_exists = True
+ found_include = False
+ for line in sudoers_contents.splitlines():
+ line = line.strip()
+ include_match = re.search(r"^#includedir\s+(.*)$", line)
+ if not include_match:
+ continue
+ included_dir = include_match.group(1).strip()
+ if not included_dir:
+ continue
+ included_dir = os.path.abspath(included_dir)
+ if included_dir == path:
+ found_include = True
+ break
+ if not found_include:
+ try:
+ if not base_exists:
+ lines = [('# See sudoers(5) for more information'
+ ' on "#include" directives:'), '',
+ util.make_header(base="added"),
+ "#includedir %s" % (path), '']
+ sudoers_contents = "\n".join(lines)
+ util.write_file(sudo_base, sudoers_contents, 0440)
+ else:
+ lines = ['', util.make_header(base="added"),
+ "#includedir %s" % (path), '']
+ sudoers_contents = "\n".join(lines)
+ util.append_file(sudo_base, sudoers_contents)
+ LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base))
+ except IOError as e:
+ util.logexc(LOG, "Failed to write %s" % sudo_base, e)
+ raise e
+ util.ensure_dir(path, 0750)
- content_header = "# user rules for %s" % user
- content = "%s\n%s %s\n\n" % (content_header, user, rules)
+ def write_sudo_rules(self, user, rules, sudo_file=None):
+ if not sudo_file:
+ sudo_file = self.ci_sudoers_fn
- if isinstance(rules, list):
- content = "%s\n" % content_header
+ lines = [
+ '',
+ "# User rules for %s" % user,
+ ]
+ if isinstance(rules, (list, tuple)):
for rule in rules:
- content += "%s %s\n" % (user, rule)
- content += "\n"
+ lines.append("%s %s" % (user, rule))
+ elif isinstance(rules, (basestring, str)):
+ lines.append("%s %s" % (user, rules))
+ else:
+ msg = "Can not create sudoers rule addition with type %r"
+ raise TypeError(msg % (util.obj_name(rules)))
+ content = "\n".join(lines)
+ content += "\n" # trailing newline
+ self.ensure_sudo_dir(os.path.dirname(sudo_file))
if not os.path.exists(sudo_file):
- util.write_file(sudo_file, content, 0644)
-
+ contents = [
+ util.make_header(),
+ content,
+ ]
+ try:
+ util.write_file(sudo_file, "\n".join(contents), 0440)
+ except IOError as e:
+ util.logexc(LOG, "Failed to write sudoers file %s", sudo_file)
+ raise e
else:
try:
- with open(sudo_file, 'a') as f:
- f.write(content)
+ util.append_file(sudo_file, content)
except IOError as e:
- util.logexc(LOG, "Failed to write %s" % sudo_file, e)
+ util.logexc(LOG, "Failed to append sudoers file %s", sudo_file)
raise e
- def isgroup(self, name):
- try:
- if grp.getgrnam(name):
- return True
- except:
- return False
-
def create_group(self, name, members):
group_add_cmd = ['groupadd', name]
# Check if group exists, and then add it doesn't
- if self.isgroup(name):
+ if util.is_group(name):
LOG.warn("Skipping creation of existing group '%s'" % name)
else:
try:
@@ -371,7 +465,7 @@ class Distro(object):
# Add members to the group, if so defined
if len(members) > 0:
for member in members:
- if not self.isuser(member):
+ if not util.is_user(member):
LOG.warn("Unable to add group member '%s' to group '%s'"
"; user does not exist." % (member, name))
continue
@@ -385,6 +479,8 @@ def _get_package_mirror_info(mirror_info, availability_zone=None,
# given a arch specific 'mirror_info' entry (from package_mirrors)
# search through the 'search' entries, and fallback appropriately
# return a dict with only {name: mirror} entries.
+ if not mirror_info:
+ mirror_info = {}
ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" %
"north|northeast|east|southeast|south|southwest|west|northwest")
@@ -429,6 +525,248 @@ def _get_arch_package_mirror_info(package_mirrors, arch):
return default
+# Normalizes an input group configuration
+# which can be a comma separated list of
+# group names, or a list of group names
+# or a python dictionary of group names
+# to a list of members of that group.
+#
+# The output is a dictionary of group
+# names => members of that group which
+# is the standard form used in the rest
+# of cloud-init
+def _normalize_groups(grp_cfg):
+ if isinstance(grp_cfg, (str, basestring)):
+ grp_cfg = grp_cfg.strip().split(",")
+ if isinstance(grp_cfg, (list)):
+ c_grp_cfg = {}
+ for i in grp_cfg:
+ if isinstance(i, (dict)):
+ for k, v in i.items():
+ if k not in c_grp_cfg:
+ if isinstance(v, (list)):
+ c_grp_cfg[k] = list(v)
+ elif isinstance(v, (basestring, str)):
+ c_grp_cfg[k] = [v]
+ else:
+ raise TypeError("Bad group member type %s" %
+ util.obj_name(v))
+ else:
+ if isinstance(v, (list)):
+ c_grp_cfg[k].extend(v)
+ elif isinstance(v, (basestring, str)):
+ c_grp_cfg[k].append(v)
+ else:
+ raise TypeError("Bad group member type %s" %
+ util.obj_name(v))
+ elif isinstance(i, (str, basestring)):
+ if i not in c_grp_cfg:
+ c_grp_cfg[i] = []
+ else:
+ raise TypeError("Unknown group name type %s" %
+ util.obj_name(i))
+ grp_cfg = c_grp_cfg
+ groups = {}
+ if isinstance(grp_cfg, (dict)):
+ for (grp_name, grp_members) in grp_cfg.items():
+ groups[grp_name] = util.uniq_merge_sorted(grp_members)
+ else:
+ raise TypeError(("Group config must be list, dict "
+ " or string types only and not %s") %
+ util.obj_name(grp_cfg))
+ return groups
+
+
+# Normalizes an input user configuration
+# which can be a comma separated list of
+# user names, or a list of string user names
+# or a list of dictionaries with components
+# that define the user config + 'name' (if
+# a 'name' field does not exist then the
+# default user is assumed to 'own' that
+# configuration).
+#
+# The output is a dictionary of user
+# names => user config which is the standard
+# form used in the rest of cloud-init. Note
+# the default user will have a special config
+# entry 'default' which will be marked as true
+# all other users will be marked as false.
+def _normalize_users(u_cfg, def_user_cfg=None):
+ if isinstance(u_cfg, (dict)):
+ ad_ucfg = []
+ for (k, v) in u_cfg.items():
+ if isinstance(v, (bool, int, basestring, str, float)):
+ if util.is_true(v):
+ ad_ucfg.append(str(k))
+ elif isinstance(v, (dict)):
+ v['name'] = k
+ ad_ucfg.append(v)
+ else:
+ raise TypeError(("Unmappable user value type %s"
+ " for key %s") % (util.obj_name(v), k))
+ u_cfg = ad_ucfg
+ elif isinstance(u_cfg, (str, basestring)):
+ u_cfg = util.uniq_merge_sorted(u_cfg)
+
+ users = {}
+ for user_config in u_cfg:
+ if isinstance(user_config, (str, basestring, list)):
+ for u in util.uniq_merge(user_config):
+ if u and u not in users:
+ users[u] = {}
+ elif isinstance(user_config, (dict)):
+ if 'name' in user_config:
+ n = user_config.pop('name')
+ prev_config = users.get(n) or {}
+ users[n] = util.mergemanydict([prev_config,
+ user_config])
+ else:
+ # Assume the default user then
+ prev_config = users.get('default') or {}
+ users['default'] = util.mergemanydict([prev_config,
+ user_config])
+ else:
+ raise TypeError(("User config must be dictionary/list "
+ " or string types only and not %s") %
+ util.obj_name(user_config))
+
+ # Ensure user options are in the right python friendly format
+ if users:
+ c_users = {}
+ for (uname, uconfig) in users.items():
+ c_uconfig = {}
+ for (k, v) in uconfig.items():
+ k = k.replace('-', '_').strip()
+ if k:
+ c_uconfig[k] = v
+ c_users[uname] = c_uconfig
+ users = c_users
+
+ # Fixup the default user into the real
+ # default user name and replace it...
+ def_user = None
+ if users and 'default' in users:
+ def_config = users.pop('default')
+ if def_user_cfg:
+ # Pickup what the default 'real name' is
+ # and any groups that are provided by the
+ # default config
+ def_user_cfg = def_user_cfg.copy()
+ def_user = def_user_cfg.pop('name')
+ def_groups = def_user_cfg.pop('groups', [])
+ # Pickup any config + groups for that user name
+ # that we may have previously extracted
+ parsed_config = users.pop(def_user, {})
+ parsed_groups = parsed_config.get('groups', [])
+ # Now merge our extracted groups with
+ # anything the default config provided
+ users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
+ parsed_config['groups'] = ",".join(users_groups)
+ # The real config for the default user is the
+ # combination of the default user config provided
+ # by the distro, the default user config provided
+ # by the above merging for the user 'default' and
+ # then the parsed config from the user's 'real name'
+ # which does not have to be 'default' (but could be)
+ users[def_user] = util.mergemanydict([def_user_cfg,
+ def_config,
+ parsed_config])
+
+ # Ensure that only the default user that we
+ # found (if any) is actually marked as being
+ # the default user
+ if users:
+ for (uname, uconfig) in users.items():
+ if def_user and uname == def_user:
+ uconfig['default'] = True
+ else:
+ uconfig['default'] = False
+
+ return users
+
+
+# Normalizes a set of user/users and group
+# dictionary configuration into a usable
+# format that the rest of cloud-init can
+# understand using the default user
+# provided by the input distribution (if any)
+# to allow for mapping of the 'default' user.
+#
+# Output is a dictionary of group names -> [member] (list)
+# and a dictionary of user names -> user configuration (dict)
+#
+# If 'user' exists it will override
+# the 'users'[0] entry (if a list) otherwise it will
+# just become an entry in the returned dictionary (no override)
+def normalize_users_groups(cfg, distro):
+ if not cfg:
+ cfg = {}
+ users = {}
+ groups = {}
+ if 'groups' in cfg:
+ groups = _normalize_groups(cfg['groups'])
+
+ # Handle the previous style of doing this...
+ old_user = None
+ if 'user' in cfg and cfg['user']:
+ old_user = str(cfg['user'])
+ if not 'users' in cfg:
+ cfg['users'] = old_user
+ old_user = None
+ if 'users' in cfg:
+ default_user_config = None
+ try:
+ default_user_config = distro.get_default_user()
+ except NotImplementedError:
+ LOG.warn(("Distro has not implemented default user "
+ "access. No default user will be normalized."))
+ base_users = cfg['users']
+ if old_user:
+ if isinstance(base_users, (list)):
+ if len(base_users):
+ # The old user replaces user[0]
+ base_users[0] = {'name': old_user}
+ else:
+ # Just add it on at the end...
+ base_users.append({'name': old_user})
+ elif isinstance(base_users, (dict)):
+ if old_user not in base_users:
+ base_users[old_user] = True
+ elif isinstance(base_users, (str, basestring)):
+ # Just append it on to be re-parsed later
+ base_users += ",%s" % (old_user)
+ users = _normalize_users(base_users, default_user_config)
+ return (users, groups)
+
+
+# Given a user dictionary config it will
+# extract the default user name and user config
+# from that list and return that tuple or
+# return (None, None) if no default user is
+# found in the given input
+def extract_default(users, default_name=None, default_config=None):
+ if not users:
+ users = {}
+
+ def safe_find(entry):
+ config = entry[1]
+ if not config or 'default' not in config:
+ return False
+ else:
+ return config['default']
+
+ tmp_users = users.items()
+ tmp_users = dict(itertools.ifilter(safe_find, tmp_users))
+ if not tmp_users:
+ return (default_name, default_config)
+ else:
+ name = tmp_users.keys()[0]
+ config = tmp_users[name]
+ config.pop('default', None)
+ return (name, config)
+
+
def fetch(name):
locs = importer.find_module(name,
['', __name__],
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index da8c1a5b..7422f4f0 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -27,12 +27,20 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros.parsers.hostname import HostnameConf
+
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
+ hostname_conf_fn = "/etc/hostname"
+ locale_conf_fn = "/etc/default/locale"
+ network_conf_fn = "/etc/network/interfaces"
+ tz_conf_fn = "/etc/timezone"
+ tz_local_fn = "/etc/localtime"
+ tz_zone_dir = "/usr/share/zoneinfo"
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -43,94 +51,95 @@ class Distro(distros.Distro):
def apply_locale(self, locale, out_fn=None):
if not out_fn:
- out_fn = self._paths.join(False, '/etc/default/locale')
+ out_fn = self.locale_conf_fn
util.subp(['locale-gen', locale], capture=False)
util.subp(['update-locale', locale], capture=False)
- contents = [
- "# Created by cloud-init",
+ # "" provides trailing newline during join
+ lines = [
+ util.make_header(),
'LANG="%s"' % (locale),
+ "",
]
- util.write_file(out_fn, "\n".join(contents))
+ util.write_file(out_fn, "\n".join(lines))
def install_packages(self, pkglist):
self.update_package_sources()
self.package_command('install', pkglist)
def _write_network(self, settings):
- net_fn = self._paths.join(False, "/etc/network/interfaces")
- util.write_file(net_fn, settings)
-
- def set_hostname(self, hostname):
- out_fn = self._paths.join(False, "/etc/hostname")
- self._write_hostname(hostname, out_fn)
- if out_fn == '/etc/hostname':
- # Only do this if we are running in non-adjusted root mode
- LOG.debug("Setting hostname to %s", hostname)
- util.subp(['hostname', hostname])
-
- def _write_hostname(self, hostname, out_fn):
- lines = []
- lines.append("# Created by cloud-init")
- lines.append(str(hostname))
- contents = "\n".join(lines)
- util.write_file(out_fn, contents, 0644)
-
- def update_hostname(self, hostname, prev_fn):
- hostname_prev = self._read_hostname(prev_fn)
- read_fn = self._paths.join(True, "/etc/hostname")
- hostname_in_etc = self._read_hostname(read_fn)
- update_files = []
- if not hostname_prev or hostname_prev != hostname:
- update_files.append(prev_fn)
- if (not hostname_in_etc or
- (hostname_in_etc == hostname_prev and
- hostname_in_etc != hostname)):
- write_fn = self._paths.join(False, "/etc/hostname")
- update_files.append(write_fn)
- for fn in update_files:
- try:
- self._write_hostname(hostname, fn)
- except:
- util.logexc(LOG, "Failed to write hostname %s to %s",
- hostname, fn)
- if (hostname_in_etc and hostname_prev and
- hostname_in_etc != hostname_prev):
- LOG.debug(("%s differs from /etc/hostname."
- " Assuming user maintained hostname."), prev_fn)
- if "/etc/hostname" in update_files:
- # Only do this if we are running in non-adjusted root mode
- LOG.debug("Setting hostname to %s", hostname)
- util.subp(['hostname', hostname])
+ util.write_file(self.network_conf_fn, settings)
+ return ['all']
+
+ def _bring_up_interfaces(self, device_names):
+ use_all = False
+ for d in device_names:
+ if d == 'all':
+ use_all = True
+ if use_all:
+ return distros.Distro._bring_up_interface(self, '--all')
+ else:
+ return distros.Distro._bring_up_interfaces(self, device_names)
+
+ def _select_hostname(self, hostname, fqdn):
+ # Prefer the short hostname over the long
+ # fully qualified domain name
+ if not hostname:
+ return fqdn
+ return hostname
+
+ def _write_hostname(self, your_hostname, out_fn):
+ conf = None
+ try:
+ # Try to update the previous one
+ # so lets see if we can read it first.
+ conf = self._read_hostname_conf(out_fn)
+ except IOError:
+ pass
+ if not conf:
+ conf = HostnameConf('')
+ conf.set_hostname(your_hostname)
+ util.write_file(out_fn, str(conf), 0644)
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
+
+ def _read_hostname_conf(self, filename):
+ conf = HostnameConf(util.load_file(filename))
+ conf.parse()
+ return conf
def _read_hostname(self, filename, default=None):
- contents = util.load_file(filename, quiet=True)
- for line in contents.splitlines():
- c_pos = line.find("#")
- # Handle inline comments
- if c_pos != -1:
- line = line[0:c_pos]
- line_c = line.strip()
- if line_c:
- return line_c
- return default
+ hostname = None
+ try:
+ conf = self._read_hostname_conf(filename)
+ hostname = conf.hostname
+ except IOError:
+ pass
+ if not hostname:
+ return default
+ return hostname
def _get_localhost_ip(self):
# Note: http://www.leonardoborda.com/blog/127-0-1-1-ubuntu-debian/
return "127.0.1.1"
def set_timezone(self, tz):
- tz_file = os.path.join("/usr/share/zoneinfo", tz)
+ # TODO(harlowja): move this code into
+ # the parent distro...
+ tz_file = os.path.join(self.tz_zone_dir, str(tz))
if not os.path.isfile(tz_file):
raise RuntimeError(("Invalid timezone %s,"
" no file found at %s") % (tz, tz_file))
+ # Note: "" provides trailing newline during join
tz_lines = [
- "# Created by cloud-init",
+ util.make_header(),
str(tz),
+ "",
]
- tz_contents = "\n".join(tz_lines)
- tz_fn = self._paths.join(False, "/etc/timezone")
- util.write_file(tz_fn, tz_contents)
- util.copy(tz_file, self._paths.join(False, "/etc/localtime"))
+ util.write_file(self.tz_conf_fn, "\n".join(tz_lines))
+ # This ensures that the correct tz will be used for the system
+ util.copy(tz_file, self.tz_local_fn)
def package_command(self, command, args=None):
e = os.environ.copy()
diff --git a/cloudinit/distros/parsers/__init__.py b/cloudinit/distros/parsers/__init__.py
new file mode 100644
index 00000000..1c413eaa
--- /dev/null
+++ b/cloudinit/distros/parsers/__init__.py
@@ -0,0 +1,28 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+def chop_comment(text, comment_chars):
+ comment_locations = [text.find(c) for c in comment_chars]
+ comment_locations = [c for c in comment_locations if c != -1]
+ if not comment_locations:
+ return (text, '')
+ min_comment = min(comment_locations)
+ before_comment = text[0:min_comment]
+ comment = text[min_comment:]
+ return (before_comment, comment)
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
new file mode 100644
index 00000000..617b3c36
--- /dev/null
+++ b/cloudinit/distros/parsers/hostname.py
@@ -0,0 +1,88 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from StringIO import StringIO
+
+from cloudinit.distros.parsers import chop_comment
+
+
+# Parser that knows how to work with /etc/hostname format
+class HostnameConf(object):
+ def __init__(self, text):
+ self._text = text
+ self._contents = None
+
+ def parse(self):
+ if self._contents is None:
+ self._contents = self._parse(self._text)
+
+ def __str__(self):
+ self.parse()
+ contents = StringIO()
+ for (line_type, components) in self._contents:
+ if line_type == 'blank':
+ contents.write("%s\n" % (components[0]))
+ elif line_type == 'all_comment':
+ contents.write("%s\n" % (components[0]))
+ elif line_type == 'hostname':
+ (hostname, tail) = components
+ contents.write("%s%s\n" % (hostname, tail))
+ # Ensure trailing newline
+ contents = contents.getvalue()
+ if not contents.endswith("\n"):
+ contents += "\n"
+ return contents
+
+ @property
+ def hostname(self):
+ self.parse()
+ for (line_type, components) in self._contents:
+ if line_type == 'hostname':
+ return components[0]
+ return None
+
+ def set_hostname(self, your_hostname):
+ your_hostname = your_hostname.strip()
+ if not your_hostname:
+ return
+ self.parse()
+ replaced = False
+ for (line_type, components) in self._contents:
+ if line_type == 'hostname':
+ components[0] = str(your_hostname)
+ replaced = True
+ if not replaced:
+ self._contents.append(('hostname', [str(your_hostname), '']))
+
+ def _parse(self, contents):
+ entries = []
+ hostnames_found = set()
+ for line in contents.splitlines():
+ if not len(line.strip()):
+ entries.append(('blank', [line]))
+ continue
+ (head, tail) = chop_comment(line.strip(), '#')
+ if not len(head):
+ entries.append(('all_comment', [line]))
+ continue
+ entries.append(('hostname', [head, tail]))
+ hostnames_found.add(head)
+ if len(hostnames_found) > 1:
+ raise IOError("Multiple hostnames (%s) found!"
+ % (hostnames_found))
+ return entries
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
new file mode 100644
index 00000000..94c97051
--- /dev/null
+++ b/cloudinit/distros/parsers/hosts.py
@@ -0,0 +1,92 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from StringIO import StringIO
+
+from cloudinit.distros.parsers import chop_comment
+
+
+# See: man hosts
+# or http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts
+# or http://tinyurl.com/6lmox3
+class HostsConf(object):
+ def __init__(self, text):
+ self._text = text
+ self._contents = None
+
+ def parse(self):
+ if self._contents is None:
+ self._contents = self._parse(self._text)
+
+ def get_entry(self, ip):
+ self.parse()
+ options = []
+ for (line_type, components) in self._contents:
+ if line_type == 'option':
+ (pieces, _tail) = components
+ if len(pieces) and pieces[0] == ip:
+ options.append(pieces[1:])
+ return options
+
+ def del_entries(self, ip):
+ self.parse()
+ n_entries = []
+ for (line_type, components) in self._contents:
+ if line_type != 'option':
+ n_entries.append((line_type, components))
+ continue
+ else:
+ (pieces, _tail) = components
+ if len(pieces) and pieces[0] == ip:
+ pass
+ elif len(pieces):
+ n_entries.append((line_type, list(components)))
+ self._contents = n_entries
+
+ def add_entry(self, ip, canonical_hostname, *aliases):
+ self.parse()
+ self._contents.append(('option',
+ ([ip, canonical_hostname] + list(aliases), '')))
+
+ def _parse(self, contents):
+ entries = []
+ for line in contents.splitlines():
+ if not len(line.strip()):
+ entries.append(('blank', [line]))
+ continue
+ (head, tail) = chop_comment(line.strip(), '#')
+ if not len(head):
+ entries.append(('all_comment', [line]))
+ continue
+ entries.append(('option', [head.split(None), tail]))
+ return entries
+
+ def __str__(self):
+ self.parse()
+ contents = StringIO()
+ for (line_type, components) in self._contents:
+ if line_type == 'blank':
+ contents.write("%s\n" % (components[0]))
+ elif line_type == 'all_comment':
+ contents.write("%s\n" % (components[0]))
+ elif line_type == 'option':
+ (pieces, tail) = components
+ pieces = [str(p) for p in pieces]
+ pieces = "\t".join(pieces)
+ contents.write("%s%s\n" % (pieces, tail))
+ return contents.getvalue()
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
new file mode 100644
index 00000000..5733c25a
--- /dev/null
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -0,0 +1,169 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from StringIO import StringIO
+
+from cloudinit import util
+
+from cloudinit.distros.parsers import chop_comment
+
+
+# See: man resolv.conf
+class ResolvConf(object):
+ def __init__(self, text):
+ self._text = text
+ self._contents = None
+
+ def parse(self):
+ if self._contents is None:
+ self._contents = self._parse(self._text)
+
+ @property
+ def nameservers(self):
+ self.parse()
+ return self._retr_option('nameserver')
+
+ @property
+ def local_domain(self):
+ self.parse()
+ dm = self._retr_option('domain')
+ if dm:
+ return dm[0]
+ return None
+
+ @property
+ def search_domains(self):
+ self.parse()
+ current_sds = self._retr_option('search')
+ flat_sds = []
+ for sdlist in current_sds:
+ for sd in sdlist.split(None):
+ if sd:
+ flat_sds.append(sd)
+ return flat_sds
+
+ def __str__(self):
+ self.parse()
+ contents = StringIO()
+ for (line_type, components) in self._contents:
+ if line_type == 'blank':
+ contents.write("\n")
+ elif line_type == 'all_comment':
+ contents.write("%s\n" % (components[0]))
+ elif line_type == 'option':
+ (cfg_opt, cfg_value, comment_tail) = components
+ line = "%s %s" % (cfg_opt, cfg_value)
+ if len(comment_tail):
+ line += comment_tail
+ contents.write("%s\n" % (line))
+ return contents.getvalue()
+
+ def _retr_option(self, opt_name):
+ found = []
+ for (line_type, components) in self._contents:
+ if line_type == 'option':
+ (cfg_opt, cfg_value, _comment_tail) = components
+ if cfg_opt == opt_name:
+ found.append(cfg_value)
+ return found
+
+ def add_nameserver(self, ns):
+ self.parse()
+ current_ns = self._retr_option('nameserver')
+ new_ns = list(current_ns)
+ new_ns.append(str(ns))
+ new_ns = util.uniq_list(new_ns)
+ if len(new_ns) == len(current_ns):
+ return current_ns
+ if len(current_ns) >= 3:
+ # Hard restriction on only 3 name servers
+ raise ValueError(("Adding %r would go beyond the "
+ "'3' maximum name servers") % (ns))
+ self._remove_option('nameserver')
+ for n in new_ns:
+ self._contents.append(('option', ['nameserver', n, '']))
+ return new_ns
+
+ def _remove_option(self, opt_name):
+
+ def remove_opt(item):
+ line_type, components = item
+ if line_type != 'option':
+ return False
+ (cfg_opt, _cfg_value, _comment_tail) = components
+ if cfg_opt != opt_name:
+ return False
+ return True
+
+ new_contents = []
+ for c in self._contents:
+ if not remove_opt(c):
+ new_contents.append(c)
+ self._contents = new_contents
+
+ def add_search_domain(self, search_domain):
+ flat_sds = self.search_domains
+ new_sds = list(flat_sds)
+ new_sds.append(str(search_domain))
+ new_sds = util.uniq_list(new_sds)
+ if len(flat_sds) == len(new_sds):
+ return new_sds
+ if len(flat_sds) >= 6:
+ # Hard restriction on only 6 search domains
+ raise ValueError(("Adding %r would go beyond the "
+ "'6' maximum search domains") % (search_domain))
+ s_list = " ".join(new_sds)
+ if len(s_list) > 256:
+ # Some hard limit on 256 chars total
+ raise ValueError(("Adding %r would go beyond the "
+ "256 maximum search list character limit")
+ % (search_domain))
+ self._remove_option('search')
+ self._contents.append(('option', ['search', s_list, '']))
+ return flat_sds
+
+ @local_domain.setter
+ def local_domain(self, domain):
+ self.parse()
+ self._remove_option('domain')
+ self._contents.append(('option', ['domain', str(domain), '']))
+ return domain
+
+ def _parse(self, contents):
+ entries = []
+ for (i, line) in enumerate(contents.splitlines()):
+ sline = line.strip()
+ if not sline:
+ entries.append(('blank', [line]))
+ continue
+ (head, tail) = chop_comment(line, ';#')
+ if not len(head.strip()):
+ entries.append(('all_comment', [line]))
+ continue
+ if not tail:
+ tail = ''
+ try:
+ (cfg_opt, cfg_values) = head.split(None, 1)
+ except (IndexError, ValueError):
+ raise IOError("Incorrectly formatted resolv.conf line %s"
+ % (i + 1))
+ if cfg_opt not in ['nameserver', 'domain',
+ 'search', 'sortlist', 'options']:
+ raise IOError("Unexpected resolv.conf option %s" % (cfg_opt))
+ entries.append(("option", [cfg_opt, cfg_values, tail]))
+ return entries
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
new file mode 100644
index 00000000..20ca1871
--- /dev/null
+++ b/cloudinit/distros/parsers/sys_conf.py
@@ -0,0 +1,113 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from StringIO import StringIO
+
+import pipes
+import re
+
+# This library is used to parse/write
+# out the various sysconfig files edited (best attempt effort)
+#
+# It has to be slightly modified though
+# to ensure that all values are quoted/unquoted correctly
+# since these configs are usually sourced into
+# bash scripts...
+import configobj
+
+# See: http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html
+# or look at the 'param_expand()' function in the subst.c file in the bash
+# source tarball...
+SHELL_VAR_RULE = r'[a-zA-Z_]+[a-zA-Z0-9_]*'
+SHELL_VAR_REGEXES = [
+ # Basic variables
+ re.compile(r"\$" + SHELL_VAR_RULE),
+ # Things like $?, $0, $-, $@
+ re.compile(r"\$[0-9#\?\-@\*]"),
+ # Things like ${blah:1} - but this one
+ # gets very complex so just try the
+ # simple path
+ re.compile(r"\$\{.+\}"),
+]
+
+
+def _contains_shell_variable(text):
+ for r in SHELL_VAR_REGEXES:
+ if r.search(text):
+ return True
+ return False
+
+
+class SysConf(configobj.ConfigObj):
+ def __init__(self, contents):
+ configobj.ConfigObj.__init__(self, contents,
+ interpolation=False,
+ write_empty_values=True)
+
+ def __str__(self):
+ contents = self.write()
+ out_contents = StringIO()
+ if isinstance(contents, (list, tuple)):
+ out_contents.write("\n".join(contents))
+ else:
+ out_contents.write(str(contents))
+ return out_contents.getvalue()
+
+ def _quote(self, value, multiline=False):
+ if not isinstance(value, (str, basestring)):
+ raise ValueError('Value "%s" is not a string' % (value))
+ if len(value) == 0:
+ return ''
+ quot_func = None
+ if value[0] in ['"', "'"] and value[-1] in ['"', "'"]:
+ if len(value) == 1:
+ quot_func = (lambda x:
+ self._get_single_quote(x) % x)
+ else:
+ # Quote whitespace if it isn't the start + end of a shell command
+ if value.strip().startswith("$(") and value.strip().endswith(")"):
+ pass
+ else:
+ if re.search(r"[\t\r\n ]", value):
+ if _contains_shell_variable(value):
+ # If it contains shell variables then we likely want to
+ # leave it alone since the pipes.quote function likes
+ # to use single quotes which won't get expanded...
+ if re.search(r"[\n\"']", value):
+ quot_func = (lambda x:
+ self._get_triple_quote(x) % x)
+ else:
+ quot_func = (lambda x:
+ self._get_single_quote(x) % x)
+ else:
+ quot_func = pipes.quote
+ if not quot_func:
+ return value
+ return quot_func(value)
+
+ def _write_line(self, indent_string, entry, this_entry, comment):
+ # Ensure it is formatted fine for
+ # how these sysconfig scripts are used
+ val = self._decode_element(self._quote(this_entry))
+ key = self._decode_element(self._quote(entry))
+ cmnt = self._decode_element(comment)
+ return '%s%s%s%s%s' % (indent_string,
+ key,
+ self._a_to_u('='),
+ val,
+ cmnt)
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index d81ee5fb..bc0877d5 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -23,6 +23,10 @@
import os
from cloudinit import distros
+
+from cloudinit.distros.parsers.resolv_conf import ResolvConf
+from cloudinit.distros.parsers.sys_conf import SysConf
+
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
@@ -31,32 +35,24 @@ from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-NETWORK_FN_TPL = '/etc/sysconfig/network-scripts/ifcfg-%s'
-
-# See: http://tiny.cc/6r99fw
-# For what alot of these files that are being written
-# are and the format of them
-
-# This library is used to parse/write
-# out the various sysconfig files edited
-#
-# It has to be slightly modified though
-# to ensure that all values are quoted
-# since these configs are usually sourced into
-# bash scripts...
-from configobj import ConfigObj
-# See: http://tiny.cc/oezbgw
-D_QUOTE_CHARS = {
- "\"": "\\\"",
- "(": "\\(",
- ")": "\\)",
- "$": '\$',
- '`': '\`',
-}
+def _make_sysconfig_bool(val):
+ if val:
+ return 'yes'
+ else:
+ return 'no'
class Distro(distros.Distro):
+ # See: http://tiny.cc/6r99fw
+ clock_conf_fn = "/etc/sysconfig/clock"
+ locale_conf_fn = '/etc/sysconfig/i18n'
+ network_conf_fn = "/etc/sysconfig/network"
+ hostname_conf_fn = "/etc/sysconfig/network"
+ network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s'
+ resolve_conf_fn = "/etc/resolv.conf"
+ tz_local_fn = "/etc/localtime"
+ tz_zone_dir = "/usr/share/zoneinfo"
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -68,95 +64,110 @@ class Distro(distros.Distro):
def install_packages(self, pkglist):
self.package_command('install', pkglist)
+ def _adjust_resolve(self, dns_servers, search_servers):
+ try:
+ r_conf = ResolvConf(util.load_file(self.resolve_conf_fn))
+ r_conf.parse()
+ except IOError:
+ util.logexc(LOG,
+ "Failed at parsing %s reverting to an empty instance",
+ self.resolve_conf_fn)
+ r_conf = ResolvConf('')
+ r_conf.parse()
+ if dns_servers:
+ for s in dns_servers:
+ try:
+ r_conf.add_nameserver(s)
+ except ValueError:
+ util.logexc(LOG, "Failed at adding nameserver %s", s)
+ if search_servers:
+ for s in search_servers:
+ try:
+ r_conf.add_search_domain(s)
+ except ValueError:
+ util.logexc(LOG, "Failed at adding search domain %s", s)
+ util.write_file(self.resolve_conf_fn, str(r_conf), 0644)
+
def _write_network(self, settings):
# TODO(harlowja) fix this... since this is the ubuntu format
entries = translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
# Make the intermediate format as the rhel format...
+ nameservers = []
+ searchservers = []
+ dev_names = entries.keys()
for (dev, info) in entries.iteritems():
- net_fn = NETWORK_FN_TPL % (dev)
- net_ro_fn = self._paths.join(True, net_fn)
- (prev_exist, net_cfg) = self._read_conf(net_ro_fn)
- net_cfg['DEVICE'] = dev
- boot_proto = info.get('bootproto')
- if boot_proto:
- net_cfg['BOOTPROTO'] = boot_proto
- net_mask = info.get('netmask')
- if net_mask:
- net_cfg["NETMASK"] = net_mask
- addr = info.get('address')
- if addr:
- net_cfg["IPADDR"] = addr
- if info.get('auto'):
- net_cfg['ONBOOT'] = 'yes'
- else:
- net_cfg['ONBOOT'] = 'no'
- gtway = info.get('gateway')
- if gtway:
- net_cfg["GATEWAY"] = gtway
- bcast = info.get('broadcast')
- if bcast:
- net_cfg["BROADCAST"] = bcast
- mac_addr = info.get('hwaddress')
- if mac_addr:
- net_cfg["MACADDR"] = mac_addr
- lines = net_cfg.write()
- if not prev_exist:
- lines.insert(0, '# Created by cloud-init')
- w_contents = "\n".join(lines)
- net_rw_fn = self._paths.join(False, net_fn)
- util.write_file(net_rw_fn, w_contents, 0644)
+ net_fn = self.network_script_tpl % (dev)
+ net_cfg = {
+ 'DEVICE': dev,
+ 'NETMASK': info.get('netmask'),
+ 'IPADDR': info.get('address'),
+ 'BOOTPROTO': info.get('bootproto'),
+ 'GATEWAY': info.get('gateway'),
+ 'BROADCAST': info.get('broadcast'),
+ 'MACADDR': info.get('hwaddress'),
+ 'ONBOOT': _make_sysconfig_bool(info.get('auto')),
+ }
+ self._update_sysconfig_file(net_fn, net_cfg)
+ if 'dns-nameservers' in info:
+ nameservers.extend(info['dns-nameservers'])
+ if 'dns-search' in info:
+ searchservers.extend(info['dns-search'])
+ if nameservers or searchservers:
+ self._adjust_resolve(nameservers, searchservers)
+ if dev_names:
+ net_cfg = {
+ 'NETWORKING': _make_sysconfig_bool(True),
+ }
+ self._update_sysconfig_file(self.network_conf_fn, net_cfg)
+ return dev_names
- def set_hostname(self, hostname):
- out_fn = self._paths.join(False, '/etc/sysconfig/network')
- self._write_hostname(hostname, out_fn)
- if out_fn == '/etc/sysconfig/network':
- # Only do this if we are running in non-adjusted root mode
- LOG.debug("Setting hostname to %s", hostname)
- util.subp(['hostname', hostname])
+ def _update_sysconfig_file(self, fn, adjustments, allow_empty=False):
+ if not adjustments:
+ return
+ (exists, contents) = self._read_conf(fn)
+ updated_am = 0
+ for (k, v) in adjustments.items():
+ if v is None:
+ continue
+ v = str(v)
+ if len(v) == 0 and not allow_empty:
+ continue
+ contents[k] = v
+ updated_am += 1
+ if updated_am:
+ lines = [
+ str(contents),
+ ]
+ if not exists:
+ lines.insert(0, util.make_header())
+ util.write_file(fn, "\n".join(lines), 0644)
def apply_locale(self, locale, out_fn=None):
if not out_fn:
- out_fn = self._paths.join(False, '/etc/sysconfig/i18n')
- ro_fn = self._paths.join(True, '/etc/sysconfig/i18n')
- (_exists, contents) = self._read_conf(ro_fn)
- contents['LANG'] = locale
- w_contents = "\n".join(contents.write())
- util.write_file(out_fn, w_contents, 0644)
+ out_fn = self.locale_conf_fn
+ locale_cfg = {
+ 'LANG': locale,
+ }
+ self._update_sysconfig_file(out_fn, locale_cfg)
def _write_hostname(self, hostname, out_fn):
- (_exists, contents) = self._read_conf(out_fn)
- contents['HOSTNAME'] = hostname
- w_contents = "\n".join(contents.write())
- util.write_file(out_fn, w_contents, 0644)
+ host_cfg = {
+ 'HOSTNAME': hostname,
+ }
+ self._update_sysconfig_file(out_fn, host_cfg)
+
+ def _select_hostname(self, hostname, fqdn):
+ # See: http://bit.ly/TwitgL
+ # Should be fqdn if we can use it
+ if fqdn:
+ return fqdn
+ return hostname
- def update_hostname(self, hostname, prev_file):
- hostname_prev = self._read_hostname(prev_file)
- read_fn = self._paths.join(True, "/etc/sysconfig/network")
- hostname_in_sys = self._read_hostname(read_fn)
- update_files = []
- if not hostname_prev or hostname_prev != hostname:
- update_files.append(prev_file)
- if (not hostname_in_sys or
- (hostname_in_sys == hostname_prev
- and hostname_in_sys != hostname)):
- write_fn = self._paths.join(False, "/etc/sysconfig/network")
- update_files.append(write_fn)
- for fn in update_files:
- try:
- self._write_hostname(hostname, fn)
- except:
- util.logexc(LOG, "Failed to write hostname %s to %s",
- hostname, fn)
- if (hostname_in_sys and hostname_prev and
- hostname_in_sys != hostname_prev):
- LOG.debug(("%s differs from /etc/sysconfig/network."
- " Assuming user maintained hostname."), prev_file)
- if "/etc/sysconfig/network" in update_files:
- # Only do this if we are running in non-adjusted root mode
- LOG.debug("Setting hostname to %s", hostname)
- util.subp(['hostname', hostname])
+ def _read_system_hostname(self):
+ return (self.network_conf_fn,
+ self._read_hostname(self.network_conf_fn))
def _read_hostname(self, filename, default=None):
(_exists, contents) = self._read_conf(filename)
@@ -167,27 +178,34 @@ class Distro(distros.Distro):
def _read_conf(self, fn):
exists = False
- if os.path.isfile(fn):
+ try:
contents = util.load_file(fn).splitlines()
exists = True
- else:
+ except IOError:
contents = []
- return (exists, QuotingConfigObj(contents))
+ return (exists,
+ SysConf(contents))
+
+ def _bring_up_interfaces(self, device_names):
+ if device_names and 'all' in device_names:
+ raise RuntimeError(('Distro %s can not translate '
+ 'the device name "all"') % (self.name))
+ return distros.Distro._bring_up_interfaces(self, device_names)
def set_timezone(self, tz):
- tz_file = os.path.join("/usr/share/zoneinfo", tz)
+ # TODO(harlowja): move this code into
+ # the parent distro...
+ tz_file = os.path.join(self.tz_zone_dir, str(tz))
if not os.path.isfile(tz_file):
raise RuntimeError(("Invalid timezone %s,"
" no file found at %s") % (tz, tz_file))
# Adjust the sysconfig clock zone setting
- read_fn = self._paths.join(True, "/etc/sysconfig/clock")
- (_exists, contents) = self._read_conf(read_fn)
- contents['ZONE'] = tz
- tz_contents = "\n".join(contents.write())
- write_fn = self._paths.join(False, "/etc/sysconfig/clock")
- util.write_file(write_fn, tz_contents)
+ clock_cfg = {
+ 'ZONE': str(tz),
+ }
+ self._update_sysconfig_file(self.clock_conf_fn, clock_cfg)
# This ensures that the correct tz will be used for the system
- util.copy(tz_file, self._paths.join(False, "/etc/localtime"))
+ util.copy(tz_file, self.tz_local_fn)
def package_command(self, command, args=None):
cmd = ['yum']
@@ -208,52 +226,7 @@ class Distro(distros.Distro):
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
-
-
-# This class helps adjust the configobj
-# writing to ensure that when writing a k/v
-# on a line, that they are properly quoted
-# and have no spaces between the '=' sign.
-# - This is mainly due to the fact that
-# the sysconfig scripts are often sourced
-# directly into bash/shell scripts so ensure
-# that it works for those types of use cases.
-class QuotingConfigObj(ConfigObj):
- def __init__(self, lines):
- ConfigObj.__init__(self, lines,
- interpolation=False,
- write_empty_values=True)
-
- def _quote_posix(self, text):
- if not text:
- return ''
- for (k, v) in D_QUOTE_CHARS.iteritems():
- text = text.replace(k, v)
- return '"%s"' % (text)
-
- def _quote_special(self, text):
- if text.lower() in ['yes', 'no', 'true', 'false']:
- return text
- else:
- return self._quote_posix(text)
-
- def _write_line(self, indent_string, entry, this_entry, comment):
- # Ensure it is formatted fine for
- # how these sysconfig scripts are used
- val = self._decode_element(self._quote(this_entry))
- # Single quoted strings should
- # always work.
- if not val.startswith("'"):
- # Perform any special quoting
- val = self._quote_special(val)
- key = self._decode_element(self._quote(entry, multiline=False))
- cmnt = self._decode_element(comment)
- return '%s%s%s%s%s' % (indent_string,
- key,
- "=",
- val,
- cmnt)
+ ["makecache"], freq=PER_INSTANCE)
# This is a util function to translate a ubuntu /etc/network/interfaces 'blob'
@@ -314,6 +287,12 @@ def translate_network(settings):
val = info[k].strip().lower()
if val:
iface_info[k] = val
+ # Name server info provided??
+ if 'dns-nameservers' in info:
+ iface_info['dns-nameservers'] = info['dns-nameservers'].split()
+ # Name server search info provided??
+ if 'dns-search' in info:
+ iface_info['dns-search'] = info['dns-search'].split()
# Is any mac address spoofing going on??
if 'hwaddress' in info:
hw_info = info['hwaddress'].lower().strip()
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 22f8c2c5..c527f248 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -28,8 +28,4 @@ LOG = logging.getLogger(__name__)
class Distro(debian.Distro):
-
- distro_name = 'ubuntu'
- default_user = 'ubuntu'
- default_user_groups = ("adm,audio,cdrom,dialout,floppy,video,"
- "plugdev,dip,netdev,sudo")
+ pass
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
new file mode 100644
index 00000000..46b93f39
--- /dev/null
+++ b/cloudinit/ec2_utils.py
@@ -0,0 +1,59 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import boto.utils as boto_utils
+
+# Versions of boto >= 2.6.0 (and possibly 2.5.2)
+# try to lazily load the metadata backing, which
+# doesn't work so well in cloud-init especially
+# since the metadata is serialized and actions are
+# performed where the metadata server may be blocked
+# (thus the datasource will start failing) resulting
+# in url exceptions when fields that do exist (or
+# would have existed) do not exist due to the blocking
+# that occurred.
+
+
+def _unlazy_dict(mp):
+ if not isinstance(mp, (dict)):
+ return mp
+ # Walk over the keys/values which
+ # forces boto to unlazy itself and
+ # has no effect on dictionaries that
+ # already have their items.
+ for (_k, v) in mp.items():
+ _unlazy_dict(v)
+ return mp
+
+
+def get_instance_userdata(api_version, metadata_address):
+ # Note: boto.utils.get_instance_metadata returns '' for empty string
+ # so the change from non-true to '' is not specifically necessary, but
+ # this way cloud-init will get consistent behavior even if boto changed
+ # in the future to return a None on "no user-data provided".
+ ud = boto_utils.get_instance_userdata(api_version, None, metadata_address)
+ if not ud:
+ ud = ''
+ return ud
+
+
+def get_instance_metadata(api_version, metadata_address):
+ metadata = boto_utils.get_instance_metadata(api_version, metadata_address)
+ if not isinstance(metadata, (dict)):
+ metadata = {}
+ return _unlazy_dict(metadata)
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 99caed1f..8d6dcd4d 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -160,6 +160,19 @@ def _extract_first_or_bytes(blob, size):
return start
+def _escape_string(text):
+ try:
+ return text.encode("string-escape")
+ except TypeError:
+ try:
+ # Unicode doesn't support string-escape...
+ return text.encode('unicode-escape')
+ except TypeError:
+ # Give up...
+ pass
+ return text
+
+
def walker_callback(pdata, ctype, filename, payload):
if ctype in PART_CONTENT_TYPES:
walker_handle_handler(pdata, ctype, filename, payload)
@@ -171,7 +184,7 @@ def walker_callback(pdata, ctype, filename, payload):
elif payload:
# Extract the first line or 24 bytes for displaying in the log
start = _extract_first_or_bytes(payload, 24)
- details = "'%s...'" % (start.encode("string-escape"))
+ details = "'%s...'" % (_escape_string(start))
if ctype == NOT_MULTIPART_TYPE:
LOG.warning("Unhandled non-multipart (%s) userdata: %s",
ctype, details)
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 99e0afde..4684f7f2 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -64,3 +64,7 @@ class UpstartJobPartHandler(handlers.Handler):
payload = util.dos2unix(payload)
path = os.path.join(self.upstart_dir, filename)
util.write_file(path, payload, 0644)
+
+ # if inotify support is not present in the root filesystem
+ # (overlayroot) then we need to tell upstart to re-read /etc
+ util.subp(["initctl", "reload-configuration"], capture=False)
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index a4b20208..2077401c 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -71,12 +71,17 @@ class FileLock(object):
return "<%s using file %r>" % (util.obj_name(self), self.fn)
+def canon_sem_name(name):
+ return name.replace("-", "_")
+
+
class FileSemaphores(object):
def __init__(self, sem_path):
self.sem_path = sem_path
@contextlib.contextmanager
def lock(self, name, freq, clear_on_fail=False):
+ name = canon_sem_name(name)
try:
yield self._acquire(name, freq)
except:
@@ -85,6 +90,7 @@ class FileSemaphores(object):
raise
def clear(self, name, freq):
+ name = canon_sem_name(name)
sem_file = self._get_path(name, freq)
try:
util.del_file(sem_file)
@@ -119,11 +125,23 @@ class FileSemaphores(object):
def has_run(self, name, freq):
if not freq or freq == PER_ALWAYS:
return False
- sem_file = self._get_path(name, freq)
+
+ cname = canon_sem_name(name)
+ sem_file = self._get_path(cname, freq)
# This isn't really a good atomic check
# but it suffices for where and when cloudinit runs
if os.path.exists(sem_file):
return True
+
+ # this case could happen if the migrator module hadn't run yet
+ # but the item had run before we did canon_sem_name.
+ if cname != name and os.path.exists(self._get_path(name, freq)):
+ LOG.warn("%s has run without canonicalized name [%s].\n"
+ "likely the migrator has not yet run. It will run next boot.\n"
+ "run manually with: cloud-init single --name=migrator"
+ % (name, cname))
+ return True
+
return False
def _get_path(self, name, freq):
@@ -302,14 +320,10 @@ class Paths(object):
def __init__(self, path_cfgs, ds=None):
self.cfgs = path_cfgs
# Populate all the initial paths
- self.cloud_dir = self.join(False,
- path_cfgs.get('cloud_dir',
- '/var/lib/cloud'))
+ self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud')
self.instance_link = os.path.join(self.cloud_dir, 'instance')
self.boot_finished = os.path.join(self.instance_link, "boot-finished")
self.upstart_conf_d = path_cfgs.get('upstart_dir')
- if self.upstart_conf_d:
- self.upstart_conf_d = self.join(False, self.upstart_conf_d)
self.seed_dir = os.path.join(self.cloud_dir, 'seed')
# This one isn't joined, since it should just be read-only
template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/')
@@ -328,29 +342,6 @@ class Paths(object):
# Set when a datasource becomes active
self.datasource = ds
- # joins the paths but also appends a read
- # or write root if available
- def join(self, read_only, *paths):
- if read_only:
- root = self.cfgs.get('read_root')
- else:
- root = self.cfgs.get('write_root')
- if not paths:
- return root
- if len(paths) > 1:
- joined = os.path.join(*paths)
- else:
- joined = paths[0]
- if root:
- pre_joined = joined
- # Need to remove any starting '/' since this
- # will confuse os.path.join
- joined = joined.lstrip("/")
- joined = os.path.join(root, joined)
- LOG.debug("Translated %s to adjusted path %s (read-only=%s)",
- pre_joined, joined, read_only)
- return joined
-
# get_ipath_cur: get the current instance path for an item
def get_ipath_cur(self, name=None):
ipath = self.instance_link
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 2333e5ee..da6c2851 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -53,6 +53,18 @@ def setupBasicLogging():
root.setLevel(DEBUG)
+def flushLoggers(root):
+ if not root:
+ return
+ for h in root.handlers:
+ if isinstance(h, (logging.StreamHandler)):
+ try:
+ h.flush()
+ except IOError:
+ pass
+ flushLoggers(root.parent)
+
+
def setupLogging(cfg=None):
# See if the config provides any logging conf...
if not cfg:
diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py
new file mode 100644
index 00000000..0f3c034e
--- /dev/null
+++ b/cloudinit/patcher.py
@@ -0,0 +1,58 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import imp
+import logging
+import sys
+
+# Default fallback format
+FALL_FORMAT = ('FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: ' +
+ '%(message)s')
+
+
+class QuietStreamHandler(logging.StreamHandler):
+ def handleError(self, record):
+ pass
+
+
+def _patch_logging():
+ # Replace 'handleError' with one that will be more
+ # tolerant of errors in that it can avoid
+ # re-notifying on exceptions and when errors
+ # do occur, it can at least try to write to
+ # sys.stderr using a fallback logger
+ fallback_handler = QuietStreamHandler(sys.stderr)
+ fallback_handler.setFormatter(logging.Formatter(FALL_FORMAT))
+
+ def handleError(self, record): # pylint: disable=W0613
+ try:
+ fallback_handler.handle(record)
+ fallback_handler.flush()
+ except IOError:
+ pass
+ setattr(logging.Handler, 'handleError', handleError)
+
+
+def patch():
+ imp.acquire_lock()
+ try:
+ _patch_logging()
+ finally:
+ imp.release_lock()
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
new file mode 100644
index 00000000..eba5d056
--- /dev/null
+++ b/cloudinit/safeyaml.py
@@ -0,0 +1,32 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import yaml
+
+
+class _CustomSafeLoader(yaml.SafeLoader):
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+_CustomSafeLoader.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ _CustomSafeLoader.construct_python_unicode)
+
+
+def load(blob):
+ return(yaml.load(blob, Loader=_CustomSafeLoader))
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
new file mode 100644
index 00000000..40b0c94c
--- /dev/null
+++ b/cloudinit/signal_handler.py
@@ -0,0 +1,71 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import inspect
+import signal
+import sys
+
+from StringIO import StringIO
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit import version as vr
+
+LOG = logging.getLogger(__name__)
+
+
+BACK_FRAME_TRACE_DEPTH = 3
+EXIT_FOR = {
+ signal.SIGINT: ('Cloud-init %(version)s received SIGINT, exiting...', 1),
+ signal.SIGTERM: ('Cloud-init %(version)s received SIGTERM, exiting...', 1),
+ # Can't be caught...
+ # signal.SIGKILL: ('Cloud-init killed, exiting...', 1),
+ signal.SIGABRT: ('Cloud-init %(version)s received SIGABRT, exiting...', 1),
+}
+
+
+def _pprint_frame(frame, depth, max_depth, contents):
+ if depth > max_depth or not frame:
+ return
+ frame_info = inspect.getframeinfo(frame)
+ prefix = " " * (depth * 2)
+ contents.write("%sFilename: %s\n" % (prefix, frame_info.filename))
+ contents.write("%sFunction: %s\n" % (prefix, frame_info.function))
+ contents.write("%sLine number: %s\n" % (prefix, frame_info.lineno))
+ _pprint_frame(frame.f_back, depth + 1, max_depth, contents)
+
+
+def _handle_exit(signum, frame):
+ (msg, rc) = EXIT_FOR[signum]
+ msg = msg % ({'version': vr.version()})
+ contents = StringIO()
+ contents.write("%s\n" % (msg))
+ _pprint_frame(frame, 1, BACK_FRAME_TRACE_DEPTH, contents)
+ util.multi_log(contents.getvalue(),
+ console=True, stderr=False, log=LOG)
+ sys.exit(rc)
+
+
+def attach_handlers():
+ sigs_attached = 0
+ for signum in EXIT_FOR.keys():
+ signal.signal(signum, _handle_exit)
+ sigs_attached += len(EXIT_FOR)
+ return sigs_attached
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 69c376a5..9812bdcb 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -47,7 +47,7 @@ META_DATA_NOT_SUPPORTED = {
'instance-id': 455,
'local-hostname': 'localhost',
'placement': {},
- }
+}
def read_user_data_callback(mount_dir):
@@ -73,13 +73,11 @@ def read_user_data_callback(mount_dir):
# First try deltacloud_user_data_file. On failure try user_data_file.
try:
- with open(deltacloud_user_data_file, 'r') as user_data_f:
- user_data = user_data_f.read().strip()
- except:
+ user_data = util.load_file(deltacloud_user_data_file).strip()
+ except IOError:
try:
- with open(user_data_file, 'r') as user_data_f:
- user_data = user_data_f.read().strip()
- except:
+ user_data = util.load_file(user_data_file).strip()
+ except IOError:
util.logexc(LOG, ('Failed accessing user data file.'))
return None
@@ -157,11 +155,10 @@ class DataSourceAltCloud(sources.DataSource):
if os.path.exists(CLOUD_INFO_FILE):
try:
- cloud_info = open(CLOUD_INFO_FILE)
- cloud_type = cloud_info.read().strip().upper()
- cloud_info.close()
- except:
- util.logexc(LOG, 'Unable to access cloud info file.')
+ cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper()
+ except IOError:
+ util.logexc(LOG, 'Unable to access cloud info file at %s.',
+ CLOUD_INFO_FILE)
return False
else:
cloud_type = self.get_cloud_type()
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index f7ffa7cb..82e1e130 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -3,10 +3,12 @@
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Cosmin Luta
# Copyright (C) 2012 Yahoo! Inc.
+# Copyright (C) 2012 Gerard Dethier
#
# Author: Cosmin Luta <q4break@gmail.com>
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Gerard Dethier <g.dethier@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -20,14 +22,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from socket import inet_ntoa
-from struct import pack
-
import os
import time
-import boto.utils as boto_utils
-
+from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper as uhelp
@@ -41,24 +39,12 @@ class DataSourceCloudStack(sources.DataSource):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'cs')
# Cloudstack has its metadata/userdata URLs located at
- # http://<default-gateway-ip>/latest/
+ # http://<virtual-router-ip>/latest/
self.api_ver = 'latest'
- gw_addr = self.get_default_gateway()
- if not gw_addr:
- raise RuntimeError("No default gateway found!")
- self.metadata_address = "http://%s/" % (gw_addr)
-
- def get_default_gateway(self):
- """Returns the default gateway ip address in the dotted format."""
- lines = util.load_file("/proc/net/route").splitlines()
- for line in lines:
- items = line.split("\t")
- if items[1] == "00000000":
- # Found the default route, get the gateway
- gw = inet_ntoa(pack("<L", int(items[2], 16)))
- LOG.debug("Found default route, gateway is %s", gw)
- return gw
- return None
+ vr_addr = get_vr_address()
+ if not vr_addr:
+ raise RuntimeError("No virtual router found!")
+ self.metadata_address = "http://%s/" % (vr_addr)
def __str__(self):
return util.obj_name(self)
@@ -91,7 +77,7 @@ class DataSourceCloudStack(sources.DataSource):
(max_wait, timeout) = self._get_url_settings()
- urls = [self.metadata_address]
+ urls = [self.metadata_address + "/latest/meta-data/instance-id"]
start_time = time.time()
url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
timeout=timeout, status_cb=LOG.warn)
@@ -116,10 +102,10 @@ class DataSourceCloudStack(sources.DataSource):
if not self.wait_for_metadata_service():
return False
start_time = time.time()
- self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
- None, self.metadata_address)
- self.metadata = boto_utils.get_instance_metadata(self.api_ver,
+ self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
self.metadata_address)
+ self.metadata = ec2.get_instance_metadata(self.api_ver,
+ self.metadata_address)
LOG.debug("Crawl of metadata service took %s seconds",
int(time.time() - start_time))
return True
@@ -136,6 +122,28 @@ class DataSourceCloudStack(sources.DataSource):
return self.metadata['availability-zone']
+def get_vr_address():
+ # get the address of the virtual router via dhcp responses
+ # see http://bit.ly/T76eKC for documentation on the virtual router.
+ dhclient_d = "/var/lib/dhclient"
+ addresses = set()
+ dhclient_files = os.listdir(dhclient_d)
+ for file_name in dhclient_files:
+ if file_name.endswith(".lease") or file_name.endswith(".leases"):
+ with open(os.path.join(dhclient_d, file_name), "r") as fd:
+ for line in fd:
+ if "dhcp-server-identifier" in line:
+ words = line.strip(" ;\r\n").split(" ")
+ if len(words) > 2:
+ dhcp = words[2]
+ LOG.debug("Found DHCP identifier %s", dhcp)
+ addresses.add(dhcp)
+ if len(addresses) != 1:
+ # No unique virtual router found
+ return None
+ return addresses.pop()
+
+
# Used to match classes to dependencies
datasources = [
(DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index b8154367..c7826851 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -48,6 +48,7 @@ class DataSourceConfigDrive(sources.DataSource):
self.dsmode = 'local'
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
self.version = None
+ self.ec2_metadata = None
def __str__(self):
mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode,
@@ -55,6 +56,74 @@ class DataSourceConfigDrive(sources.DataSource):
mstr += "[source=%s]" % (self.source)
return mstr
+ def _ec2_name_to_device(self, name):
+ if not self.ec2_metadata:
+ return None
+ bdm = self.ec2_metadata.get('block-device-mapping', {})
+ for (ent_name, device) in bdm.items():
+ if name == ent_name:
+ return device
+ return None
+
+ def _os_name_to_device(self, name):
+ device = None
+ try:
+ criteria = 'LABEL=%s' % (name)
+ if name in ['swap']:
+ criteria = 'TYPE=%s' % (name)
+ dev_entries = util.find_devs_with(criteria)
+ if dev_entries:
+ device = dev_entries[0]
+ except util.ProcessExecutionError:
+ pass
+ return device
+
+ def _validate_device_name(self, device):
+ if not device:
+ return None
+ if not device.startswith("/"):
+ device = "/dev/%s" % device
+ if os.path.exists(device):
+ return device
+ # Durn, try adjusting the mapping
+ remapped = self._remap_device(os.path.basename(device))
+ if remapped:
+ LOG.debug("Remapped device name %s => %s", device, remapped)
+ return remapped
+ return None
+
+ def device_name_to_device(self, name):
+ # Translate a 'name' to a 'physical' device
+ if not name:
+ return None
+ # Try the ec2 mapping first
+ names = [name]
+ if name == 'root':
+ names.insert(0, 'ami')
+ if name == 'ami':
+ names.append('root')
+ device = None
+ LOG.debug("Using ec2 metadata lookup to find device %s", names)
+ for n in names:
+ device = self._ec2_name_to_device(n)
+ device = self._validate_device_name(device)
+ if device:
+ break
+ # Try the openstack way second
+ if not device:
+ LOG.debug("Using os lookup to find device %s", names)
+ for n in names:
+ device = self._os_name_to_device(n)
+ device = self._validate_device_name(device)
+ if device:
+ break
+ # Ok give up...
+ if not device:
+ return None
+ else:
+ LOG.debug("Using cfg drive lookup mapped to device %s", device)
+ return device
+
def get_data(self):
found = None
md = {}
@@ -85,6 +154,16 @@ class DataSourceConfigDrive(sources.DataSource):
md = results['metadata']
md = util.mergedict(md, DEFAULT_METADATA)
+ # Perform some metadata 'fixups'
+ #
+ # OpenStack uses the 'hostname' key
+ # while most of cloud-init uses the metadata
+ # 'local-hostname' key instead so if it doesn't
+ # exist we need to make sure its copied over.
+ for (tgt, src) in [('local-hostname', 'hostname')]:
+ if tgt not in md and src in md:
+ md[tgt] = md[src]
+
user_dsmode = results.get('dsmode', None)
if user_dsmode not in VALID_DSMODES + (None,):
LOG.warn("user specified invalid mode: %s" % user_dsmode)
@@ -133,15 +212,17 @@ class DataSourceConfigDrive(sources.DataSource):
self.source = found
self.metadata = md
+ self.ec2_metadata = results.get('ec2-metadata')
self.userdata_raw = results.get('userdata')
self.version = results['cfgdrive_ver']
return True
def get_public_ssh_keys(self):
- if not 'public-keys' in self.metadata:
- return []
- return self.metadata['public-keys']
+ name = "public_keys"
+ if self.version == 1:
+ name = "public-keys"
+ return sources.normalize_pubkey_data(self.metadata.get(name))
class DataSourceConfigDriveNet(DataSourceConfigDrive):
@@ -217,7 +298,7 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
('metadata',
"openstack/%s/meta_data.json" % version, True, json.loads),
('userdata', "openstack/%s/user_data" % version, False, None),
- ('ec2-metadata', "ec2/latest/metadata.json", False, json.loads),
+ ('ec2-metadata', "ec2/latest/meta-data.json", False, json.loads),
)
results = {'userdata': None}
@@ -227,19 +308,19 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
found = False
if os.path.isfile(fpath):
try:
- with open(fpath) as fp:
- data = fp.read()
- except Exception as exc:
- raise BrokenConfigDriveDir("failed to read: %s" % fpath)
+ data = util.load_file(fpath)
+ except IOError:
+ raise BrokenConfigDriveDir("Failed to read: %s" % fpath)
found = True
elif required:
- raise NonConfigDriveDir("missing mandatory %s" % fpath)
+ raise NonConfigDriveDir("Missing mandatory path: %s" % fpath)
if found and process:
try:
data = process(data)
except Exception as exc:
- raise BrokenConfigDriveDir("failed to process: %s" % fpath)
+ raise BrokenConfigDriveDir(("Failed to process "
+ "path: %s") % fpath)
if found:
results[name] = data
@@ -255,8 +336,7 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
# do not use os.path.join here, as content_path starts with /
cpath = os.path.sep.join((source_dir, "openstack",
"./%s" % item['content_path']))
- with open(cpath) as fp:
- return(fp.read())
+ return util.load_file(cpath)
files = {}
try:
@@ -270,7 +350,7 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
if item:
results['network_config'] = read_content_path(item)
except Exception as exc:
- raise BrokenConfigDriveDir("failed to read file %s: %s" % (item, exc))
+ raise BrokenConfigDriveDir("Failed to read file %s: %s" % (item, exc))
# to openstack, user can specify meta ('nova boot --meta=key=value') and
# those will appear under metadata['meta'].
@@ -385,8 +465,7 @@ def get_previous_iid(paths):
# hasn't declared itself found.
fname = os.path.join(paths.get_cpath('data'), 'instance-id')
try:
- with open(fname) as fp:
- return fp.read()
+ return util.load_file(fname)
except IOError:
return None
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index c7ad6d54..2db53446 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -23,8 +23,7 @@
import os
import time
-import boto.utils as boto_utils
-
+from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper as uhelp
@@ -65,10 +64,10 @@ class DataSourceEc2(sources.DataSource):
if not self.wait_for_metadata_service():
return False
start_time = time.time()
- self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
- None, self.metadata_address)
- self.metadata = boto_utils.get_instance_metadata(self.api_ver,
+ self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
self.metadata_address)
+ self.metadata = ec2.get_instance_metadata(self.api_ver,
+ self.metadata_address)
LOG.debug("Crawl of metadata service took %s seconds",
int(time.time() - start_time))
return True
@@ -86,9 +85,6 @@ class DataSourceEc2(sources.DataSource):
def get_instance_id(self):
return self.metadata['instance-id']
- def get_availability_zone(self):
- return self.metadata['placement']['availability-zone']
-
def _get_url_settings(self):
mcfg = self.ds_cfg
if not mcfg:
@@ -151,22 +147,6 @@ class DataSourceEc2(sources.DataSource):
self.metadata_address = url2base.get(url)
return bool(url)
- def _remap_device(self, short_name):
- # LP: #611137
- # the metadata service may believe that devices are named 'sda'
- # when the kernel named them 'vda' or 'xvda'
- # we want to return the correct value for what will actually
- # exist in this instance
- mappings = {"sd": ("vd", "xvd")}
- for (nfrom, tlist) in mappings.iteritems():
- if not short_name.startswith(nfrom):
- continue
- for nto in tlist:
- cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
- if os.path.exists(cand):
- return cand
- return None
-
def device_name_to_device(self, name):
# Consult metadata service, that has
# ephemeral0: sdb
@@ -214,19 +194,6 @@ class DataSourceEc2(sources.DataSource):
return None
return ofound
- def is_vpc(self):
- # See: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/615545
- # Detect that the machine was launched in a VPC.
- # But I did notice that when in a VPC, meta-data
- # does not have public-ipv4 and public-hostname
- # listed as a possibility.
- ph = "public-hostname"
- p4 = "public-ipv4"
- if ((ph not in self.metadata or self.metadata[ph] == "") and
- (p4 not in self.metadata or self.metadata[p4] == "")):
- return True
- return False
-
@property
def availability_zone(self):
try:
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index c568d365..b55d8a21 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from email.utils import parsedate
import errno
import oauth.oauth as oauth
import os
@@ -46,6 +47,7 @@ class DataSourceMAAS(sources.DataSource):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.base_url = None
self.seed_dir = os.path.join(paths.seed_dir, 'maas')
+ self.oauth_clockskew = None
def __str__(self):
return "%s [%s]" % (util.obj_name(self), self.base_url)
@@ -95,11 +97,17 @@ class DataSourceMAAS(sources.DataSource):
return {}
consumer_secret = mcfg.get('consumer_secret', "")
+
+ timestamp = None
+ if self.oauth_clockskew:
+ timestamp = int(time.time()) + self.oauth_clockskew
+
return oauth_headers(url=url,
consumer_key=mcfg['consumer_key'],
token_key=mcfg['token_key'],
token_secret=mcfg['token_secret'],
- consumer_secret=consumer_secret)
+ consumer_secret=consumer_secret,
+ timestamp=timestamp)
def wait_for_metadata_service(self, url):
mcfg = self.ds_cfg
@@ -124,7 +132,7 @@ class DataSourceMAAS(sources.DataSource):
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn,
+ timeout=timeout, exception_cb=self._except_cb,
headers_cb=self.md_headers)
if url:
@@ -135,6 +143,26 @@ class DataSourceMAAS(sources.DataSource):
return bool(url)
+ def _except_cb(self, msg, exception):
+ if not (isinstance(exception, urllib2.HTTPError) and
+ (exception.code == 403 or exception.code == 401)):
+ return
+ if 'date' not in exception.headers:
+ LOG.warn("date field not in %d headers" % exception.code)
+ return
+
+ date = exception.headers['date']
+
+ try:
+ ret_time = time.mktime(parsedate(date))
+ except:
+            LOG.warn("failed to convert datetime '%s'" % date)
+ return
+
+ self.oauth_clockskew = int(ret_time - time.time())
+ LOG.warn("set oauth clockskew to %d" % self.oauth_clockskew)
+ return
+
def read_maas_seed_dir(seed_d):
"""
@@ -229,13 +257,20 @@ def check_seed_contents(content, seed):
return (userdata, md)
-def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret):
+def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
+ timestamp=None):
consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
token = oauth.OAuthToken(token_key, token_secret)
+
+ if timestamp is None:
+ ts = int(time.time())
+ else:
+ ts = timestamp
+
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
- 'oauth_timestamp': int(time.time()),
+ 'oauth_timestamp': ts,
'oauth_token': token.key,
'oauth_consumer_key': consumer.key,
}
@@ -301,9 +336,7 @@ if __name__ == "__main__":
'token_secret': args.tsec, 'consumer_secret': args.csec}
if args.config:
- import yaml
- with open(args.config) as fp:
- cfg = yaml.safe_load(fp)
+ cfg = util.read_conf(args.config)
if 'datasource' in cfg:
cfg = cfg['datasource']['MAAS']
for key in creds.keys():
@@ -312,7 +345,7 @@ if __name__ == "__main__":
def geturl(url, headers_cb):
req = urllib2.Request(url, data=None, headers=headers_cb(url))
- return(urllib2.urlopen(req).read())
+ return (urllib2.urlopen(req).read())
def printurl(url, headers_cb):
print "== %s ==\n%s\n" % (url, geturl(url, headers_cb))
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 771e64eb..e90150c6 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -204,9 +204,8 @@ def transport_iso9660(require_iso=True):
try:
# See if we can read anything at all...??
- with open(fullp, 'rb') as fp:
- fp.read(512)
- except:
+ util.peek_file(fullp, 512)
+ except IOError:
continue
try:
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index a89f4703..f98493de 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -20,9 +20,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from email.mime.multipart import MIMEMultipart
-
import abc
+import os
from cloudinit import importer
from cloudinit import log as logging
@@ -101,32 +100,23 @@ class DataSource(object):
return {}
def get_public_ssh_keys(self):
- keys = []
-
- if not self.metadata or 'public-keys' not in self.metadata:
- return keys
-
- if isinstance(self.metadata['public-keys'], (basestring, str)):
- return str(self.metadata['public-keys']).splitlines()
-
- if isinstance(self.metadata['public-keys'], (list, set)):
- return list(self.metadata['public-keys'])
-
- if isinstance(self.metadata['public-keys'], (dict)):
- for (_keyname, klist) in self.metadata['public-keys'].iteritems():
- # lp:506332 uec metadata service responds with
- # data that makes boto populate a string for 'klist' rather
- # than a list.
- if isinstance(klist, (str, basestring)):
- klist = [klist]
- if isinstance(klist, (list, set)):
- for pkey in klist:
- # There is an empty string at
- # the end of the keylist, trim it
- if pkey:
- keys.append(pkey)
-
- return keys
+ return normalize_pubkey_data(self.metadata.get('public-keys'))
+
+ def _remap_device(self, short_name):
+ # LP: #611137
+ # the metadata service may believe that devices are named 'sda'
+ # when the kernel named them 'vda' or 'xvda'
+ # we want to return the correct value for what will actually
+ # exist in this instance
+ mappings = {"sd": ("vd", "xvd")}
+ for (nfrom, tlist) in mappings.iteritems():
+ if not short_name.startswith(nfrom):
+ continue
+ for nto in tlist:
+ cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
+ if os.path.exists(cand):
+ return cand
+ return None
def device_name_to_device(self, _name):
# translate a 'name' to a device
@@ -173,6 +163,6 @@ class DataSource(object):
         # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
         lhost = self.metadata['local-hostname']
         if util.is_ipv4(lhost):
             toks = []
             if resolve_ip:
                 toks = util.gethostbyaddr(lhost)
@@ -181,6 +171,6 @@
                 toks = toks.split('.')
             else:
                 toks = ["ip-%s" % lhost.replace(".", "-")]
else:
toks = lhost.split(".")
@@ -200,6 +194,35 @@ class DataSource(object):
availability_zone=self.availability_zone)
+def normalize_pubkey_data(pubkey_data):
+ keys = []
+
+ if not pubkey_data:
+ return keys
+
+ if isinstance(pubkey_data, (basestring, str)):
+ return str(pubkey_data).splitlines()
+
+ if isinstance(pubkey_data, (list, set)):
+ return list(pubkey_data)
+
+ if isinstance(pubkey_data, (dict)):
+ for (_keyname, klist) in pubkey_data.iteritems():
+ # lp:506332 uec metadata service responds with
+ # data that makes boto populate a string for 'klist' rather
+ # than a list.
+ if isinstance(klist, (str, basestring)):
+ klist = [klist]
+ if isinstance(klist, (list, set)):
+ for pkey in klist:
+ # There is an empty string at
+ # the end of the keylist, trim it
+ if pkey:
+ keys.append(pkey)
+
+ return keys
+
+
def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
ds_list = list_sources(cfg_list, ds_deps, pkg_list)
ds_names = [util.obj_name(f) for f in ds_list]
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 88a11a1a..dd6b742f 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -212,17 +212,15 @@ def update_authorized_keys(old_entries, keys):
return '\n'.join(lines)
-def users_ssh_info(username, paths):
+def users_ssh_info(username):
pw_ent = pwd.getpwnam(username)
- if not pw_ent:
+ if not pw_ent or not pw_ent.pw_dir:
raise RuntimeError("Unable to get ssh info for user %r" % (username))
- ssh_dir = paths.join(False, os.path.join(pw_ent.pw_dir, '.ssh'))
- return (ssh_dir, pw_ent)
+ return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent)
-def extract_authorized_keys(username, paths):
- (ssh_dir, pw_ent) = users_ssh_info(username, paths)
- sshd_conf_fn = paths.join(True, DEF_SSHD_CFG)
+def extract_authorized_keys(username):
+ (ssh_dir, pw_ent) = users_ssh_info(username)
auth_key_fn = None
with util.SeLinuxGuard(ssh_dir, recursive=True):
try:
@@ -231,7 +229,7 @@ def extract_authorized_keys(username, paths):
# The following tokens are defined: %% is replaced by a literal
# '%', %h is replaced by the home directory of the user being
# authenticated and %u is replaced by the username of that user.
- ssh_cfg = parse_ssh_config_map(sshd_conf_fn)
+ ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG)
auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
if not auth_key_fn:
auth_key_fn = "%h/.ssh/authorized_keys"
@@ -240,7 +238,6 @@ def extract_authorized_keys(username, paths):
auth_key_fn = auth_key_fn.replace("%%", '%')
if not auth_key_fn.startswith('/'):
auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
- auth_key_fn = paths.join(False, auth_key_fn)
except (IOError, OSError):
# Give up and use a default key filename
auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
@@ -248,14 +245,13 @@ def extract_authorized_keys(username, paths):
" in ssh config"
" from %r, using 'AuthorizedKeysFile' file"
" %r instead"),
- sshd_conf_fn, auth_key_fn)
- auth_key_entries = parse_authorized_keys(auth_key_fn)
- return (auth_key_fn, auth_key_entries)
+ DEF_SSHD_CFG, auth_key_fn)
+ return (auth_key_fn, parse_authorized_keys(auth_key_fn))
-def setup_user_keys(keys, username, key_prefix, paths):
+def setup_user_keys(keys, username, key_prefix):
# Make sure the users .ssh dir is setup accordingly
- (ssh_dir, pwent) = users_ssh_info(username, paths)
+ (ssh_dir, pwent) = users_ssh_info(username)
if not os.path.isdir(ssh_dir):
util.ensure_dir(ssh_dir, mode=0700)
util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
@@ -267,7 +263,7 @@ def setup_user_keys(keys, username, key_prefix, paths):
key_entries.append(parser.parse(str(k), def_opt=key_prefix))
# Extract the old and make the new
- (auth_key_fn, auth_key_entries) = extract_authorized_keys(username, paths)
+ (auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
with util.SeLinuxGuard(ssh_dir, recursive=True):
content = update_authorized_keys(auth_key_entries, key_entries)
util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700)
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index af902925..8d3213b4 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -47,6 +47,8 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
+NULL_DATA_SOURCE = None
+
class Init(object):
def __init__(self, ds_deps=None):
@@ -58,18 +60,32 @@ class Init(object):
self._cfg = None
self._paths = None
self._distro = None
- # Created only when a fetch occurs
- self.datasource = None
+ # Changed only when a fetch occurs
+ self.datasource = NULL_DATA_SOURCE
+
+ def _reset(self, reset_ds=False):
+ # Recreated on access
+ self._cfg = None
+ self._paths = None
+ self._distro = None
+ if reset_ds:
+ self.datasource = NULL_DATA_SOURCE
@property
def distro(self):
if not self._distro:
# Try to find the right class to use
- scfg = self._extract_cfg('system')
- name = scfg.pop('distro', 'ubuntu')
- cls = distros.fetch(name)
- LOG.debug("Using distro class %s", cls)
- self._distro = cls(name, scfg, self.paths)
+ system_config = self._extract_cfg('system')
+ distro_name = system_config.pop('distro', 'ubuntu')
+ distro_cls = distros.fetch(distro_name)
+ LOG.debug("Using distro class %s", distro_cls)
+ self._distro = distro_cls(distro_name, system_config, self.paths)
+ # If we have an active datasource we need to adjust
+ # said datasource and move its distro/system config
+ # from whatever it was to a new set...
+ if self.datasource is not NULL_DATA_SOURCE:
+ self.datasource.distro = self._distro
+ self.datasource.sys_cfg = system_config
return self._distro
@property
@@ -147,27 +163,12 @@ class Init(object):
self._cfg = self._read_cfg(extra_fns)
# LOG.debug("Loaded 'init' config %s", self._cfg)
- def _read_base_cfg(self):
- base_cfgs = []
- default_cfg = util.get_builtin_cfg()
- kern_contents = util.read_cc_from_cmdline()
- # Kernel/cmdline parameters override system config
- if kern_contents:
- base_cfgs.append(util.load_yaml(kern_contents, default={}))
- # Anything in your conf.d location??
- # or the 'default' cloud.cfg location???
- base_cfgs.append(util.read_conf_with_confd(CLOUD_CONFIG))
- # And finally the default gets to play
- if default_cfg:
- base_cfgs.append(default_cfg)
- return util.mergemanydict(base_cfgs)
-
def _read_cfg(self, extra_fns):
no_cfg_paths = helpers.Paths({}, self.datasource)
merger = helpers.ConfigMerger(paths=no_cfg_paths,
datasource=self.datasource,
additional_fns=extra_fns,
- base_cfg=self._read_base_cfg())
+ base_cfg=fetch_base_config())
return merger.cfg
def _restore_from_cache(self):
@@ -191,7 +192,7 @@ class Init(object):
return None
def _write_to_cache(self):
- if not self.datasource:
+ if self.datasource is NULL_DATA_SOURCE:
return False
pickled_fn = self.paths.get_ipath_cur("obj_pkl")
try:
@@ -217,7 +218,7 @@ class Init(object):
return (cfg_list, pkg_list)
def _get_data_source(self):
- if self.datasource:
+ if self.datasource is not NULL_DATA_SOURCE:
return self.datasource
ds = self._restore_from_cache()
if ds:
@@ -236,11 +237,11 @@ class Init(object):
self.datasource = ds
# Ensure we adjust our path members datasource
# now that we have one (thus allowing ipath to be used)
- self.paths.datasource = ds
+ self._reset()
return ds
def _get_instance_subdirs(self):
- return ['handlers', 'scripts', 'sems']
+ return ['handlers', 'scripts', 'sem']
def _get_ipath(self, subname=None):
# Force a check to see if anything
@@ -296,6 +297,10 @@ class Init(object):
util.write_file(iid_fn, "%s\n" % iid)
util.write_file(os.path.join(dp, 'previous-instance-id'),
"%s\n" % (previous_iid))
+ # Ensure needed components are regenerated
+ # after change of instance which may cause
+ # change of configuration
+ self._reset()
return iid
def fetch(self):
@@ -409,6 +414,17 @@ class Init(object):
handlers.call_end(mod, data, frequency)
called.append(mod)
+ # Perform post-consumption adjustments so that
+ # modules that run during the init stage reflect
+ # this consumed set.
+ #
+ # They will be recreated on future access...
+ self._reset()
+ # Note(harlowja): the 'active' datasource will have
+ # references to the previous config, distro, paths
+ # objects before the load of the userdata happened,
+ # this is expected.
+
class Modules(object):
def __init__(self, init, cfg_files=None):
@@ -550,3 +566,23 @@ class Modules(object):
raw_mods = self._read_modules(section_name)
mostly_mods = self._fixup_modules(raw_mods)
return self._run_modules(mostly_mods)
+
+
+def fetch_base_config():
+ base_cfgs = []
+ default_cfg = util.get_builtin_cfg()
+ kern_contents = util.read_cc_from_cmdline()
+
+ # Kernel/cmdline parameters override system config
+ if kern_contents:
+ base_cfgs.append(util.load_yaml(kern_contents, default={}))
+
+ # Anything in your conf.d location??
+ # or the 'default' cloud.cfg location???
+ base_cfgs.append(util.read_conf_with_confd(CLOUD_CONFIG))
+
+ # And finally the default gets to play
+ if default_cfg:
+ base_cfgs.append(default_cfg)
+
+ return util.mergemanydict(base_cfgs)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 732d6aec..f3e3fd7e 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -136,7 +136,8 @@ def readurl(url, data=None, timeout=None,
def wait_for_url(urls, max_wait=None, timeout=None,
- status_cb=None, headers_cb=None, sleep_time=1):
+ status_cb=None, headers_cb=None, sleep_time=1,
+ exception_cb=None):
"""
urls: a list of urls to try
max_wait: roughly the maximum time to wait before giving up
@@ -146,6 +147,8 @@ def wait_for_url(urls, max_wait=None, timeout=None,
status_cb: call method with string message when a url is not available
headers_cb: call method with single argument of url to get headers
for request.
+ exception_cb: call method with 2 arguments 'msg' (per status_cb) and
+ 'exception', the exception that occurred.
the idea of this routine is to wait for the EC2 metdata service to
come up. On both Eucalyptus and EC2 we have seen the case where
@@ -164,7 +167,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
"""
start_time = time.time()
- def log_status_cb(msg):
+ def log_status_cb(msg, exc=None):
LOG.debug(msg)
if status_cb is None:
@@ -196,8 +199,10 @@ def wait_for_url(urls, max_wait=None, timeout=None,
resp = readurl(url, headers=headers, timeout=timeout)
if not resp.contents:
reason = "empty response [%s]" % (resp.code)
+ e = ValueError(reason)
elif not resp.ok():
reason = "bad status code [%s]" % (resp.code)
+ e = ValueError(reason)
else:
return url
except urllib2.HTTPError as e:
@@ -214,6 +219,8 @@ def wait_for_url(urls, max_wait=None, timeout=None,
time_taken,
max_wait, reason)
status_cb(status_msg)
+ if exception_cb:
+ exception_cb(msg=status_msg, exception=e)
if timeup(max_wait, start_time):
break
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 803ffc3a..58827e3d 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -224,7 +224,7 @@ class UserDataProcessor(object):
for header in list(ent.keys()):
if header in ('content', 'filename', 'type', 'launch-index'):
continue
- msg.add_header(header, ent['header'])
+ msg.add_header(header, ent[header])
self._attach_part(append_msg, msg)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index b25ded0d..e334559c 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -50,7 +50,9 @@ import yaml
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import safeyaml
from cloudinit import url_helper as uhelp
+from cloudinit import version
from cloudinit.settings import (CFG_BUILTIN)
@@ -248,6 +250,32 @@ def read_conf(fname):
raise
+# Merges X lists, and then keeps the
+# unique ones, but orders by sort order
+# instead of by the original order
+def uniq_merge_sorted(*lists):
+ return sorted(uniq_merge(*lists))
+
+
+# Merges X lists and then iterates over those
+# and only keeps the unique items (order preserving)
+# and returns that merged and uniqued list as the
+# final result.
+#
+# Note: if any entry is a string it will be
+# split on commas and empty entries will be
+# evicted and merged in accordingly.
+def uniq_merge(*lists):
+ combined_list = []
+ for a_list in lists:
+ if isinstance(a_list, (str, basestring)):
+ a_list = a_list.strip().split(",")
+ # Kickout the empty ones
+ a_list = [a for a in a_list if len(a)]
+ combined_list.extend(a_list)
+ return uniq_list(combined_list)
+
+
def clean_filename(fn):
for (k, v) in FN_REPLACEMENTS.iteritems():
fn = fn.replace(k, v)
@@ -612,7 +640,7 @@ def load_yaml(blob, default=None, allowed=(dict,)):
LOG.debug(("Attempting to load yaml from string "
"of length %s with allowed root types %s"),
len(blob), allowed)
- converted = yaml.safe_load(blob)
+ converted = safeyaml.load(blob)
if not isinstance(converted, allowed):
# Yes this will just be caught, but thats ok for now...
raise TypeError(("Yaml load allows %s root types,"
@@ -959,6 +987,22 @@ def find_devs_with(criteria=None, oformat='device',
return entries
+def peek_file(fname, max_bytes):
+ LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
+ with open(fname, 'rb') as ifh:
+ return ifh.read(max_bytes)
+
+
+def uniq_list(in_list):
+ out_list = []
+ for i in in_list:
+ if i in out_list:
+ continue
+ else:
+ out_list.append(i)
+ return out_list
+
+
def load_file(fname, read_cb=None, quiet=False):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
ofh = StringIO()
@@ -1111,6 +1155,22 @@ def hash_blob(blob, routine, mlen=None):
return digest
+def is_user(name):
+ try:
+ if pwd.getpwnam(name):
+ return True
+ except KeyError:
+ return False
+
+
+def is_group(name):
+ try:
+ if grp.getgrnam(name):
+ return True
+ except KeyError:
+ return False
+
+
def rename(src, dest):
LOG.debug("Renaming %s to %s", src, dest)
# TODO(harlowja) use a se guard here??
@@ -1147,8 +1207,7 @@ def yaml_dumps(obj):
indent=4,
explicit_start=True,
explicit_end=True,
- default_flow_style=False,
- )
+ default_flow_style=False)
return formatted
@@ -1288,6 +1347,10 @@ def uptime():
return uptime_str
+def append_file(path, content):
+ write_file(path, content, omode="ab", mode=None)
+
+
def ensure_file(path, mode=0644):
write_file(path, content='', omode="ab", mode=mode)
@@ -1379,6 +1442,14 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
return (out, err)
+def make_header(comment_char="#", base='created'):
+ ci_ver = version.version_string()
+ header = str(comment_char)
+ header += " %s by cloud-init v. %s" % (base.title(), ci_ver)
+ header += " on %s" % time_rfc2822()
+ return header
+
+
def abs_join(*paths):
return os.path.abspath(os.path.join(*paths))
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 4599910c..024d5118 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -20,7 +20,7 @@ from distutils import version as vr
def version():
- return vr.StrictVersion("0.7.0")
+ return vr.StrictVersion("0.7.2")
def version_string():