summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog50
-rwxr-xr-xcloud-init.py22
-rw-r--r--cloudinit/CloudConfig/__init__.py6
-rw-r--r--cloudinit/CloudConfig/cc_bootcmd.py2
-rw-r--r--cloudinit/CloudConfig/cc_byobu.py43
-rw-r--r--cloudinit/CloudConfig/cc_chef.py10
-rw-r--r--cloudinit/CloudConfig/cc_grub_dpkg.py3
-rw-r--r--cloudinit/CloudConfig/cc_mcollective.py29
-rw-r--r--cloudinit/CloudConfig/cc_mounts.py9
-rw-r--r--cloudinit/CloudConfig/cc_phone_home.py5
-rw-r--r--cloudinit/CloudConfig/cc_resizefs.py3
-rw-r--r--cloudinit/CloudConfig/cc_set_hostname.py11
-rw-r--r--cloudinit/CloudConfig/cc_ssh.py52
-rw-r--r--cloudinit/CloudConfig/cc_timezone.py1
-rw-r--r--cloudinit/CloudConfig/cc_update_etc_hosts.py53
-rw-r--r--cloudinit/CloudConfig/cc_update_hostname.py2
-rw-r--r--cloudinit/DataSource.py50
-rw-r--r--cloudinit/DataSourceEc2.py97
-rw-r--r--cloudinit/DataSourceNoCloud.py3
-rw-r--r--cloudinit/DataSourceOVF.py6
-rw-r--r--cloudinit/SshUtil.py195
-rw-r--r--cloudinit/UserDataHandler.py48
-rw-r--r--cloudinit/__init__.py215
-rw-r--r--cloudinit/boto_utils.py8
-rw-r--r--cloudinit/netinfo.py83
-rw-r--r--cloudinit/util.py66
-rw-r--r--config/cloud.cfg3
-rw-r--r--doc/examples/cloud-config-datasources.txt7
-rw-r--r--doc/examples/cloud-config-mcollective.txt34
-rw-r--r--doc/examples/cloud-config.txt92
-rw-r--r--doc/examples/include-once.txt7
-rw-r--r--doc/examples/part-handler-v2.txt38
-rw-r--r--doc/examples/seed/README4
-rw-r--r--doc/userdata.txt10
-rw-r--r--templates/hosts.tmpl2
-rwxr-xr-xtools/write-mime-multipart.py51
-rw-r--r--upstart/cloud-init-nonet.conf16
37 files changed, 1043 insertions, 293 deletions
diff --git a/ChangeLog b/ChangeLog
index aac22ff4..83c9986a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -5,6 +5,56 @@
(LP: #739694)
- fix bug in resizefs cloud-config that would cause trace based on
failure of 'blkid /dev/root' (LP: #726938)
+ - convert dos formatted files to unix for user-scripts, boothooks,
+ and upstart jobs (LP: #744965)
+ - fix bug in seeding of grub dpkg configuration (LP: #752361) due
+ to renamed devices in newer (natty) kernels (/dev/sda1 -> /dev/xvda1)
+ - make metadata urls configurable, to support eucalyptus in
+ STATIC or SYSTEM modes (LP: #761847)
+ - support disabling byobu in cloud-config
+ - run cc_ssh as a cloud-init module so it is guaranteed to run before
+ ssh starts (LP: #781101)
+ - make prefix for keys added to /root/.ssh/authorized_keys configurable
+ and add 'no-port-forwarding,no-agent-forwarding,no-X11-forwarding'
+ to the default (LP: #798505)
+ - make 'cloud-config ready' command configurable (LP: #785551)
+ - make fstab fields used to 'fill in' shorthand entries configurable
+ This means you do not have to have 'nobootwait' in the values
+ (LP: #785542)
+ - read /etc/ssh/sshd_config for AuthorizedKeysFile rather than
+ assuming ~/.ssh/authorized_keys (LP: #731849)
+ - fix cloud-init in ubuntu lxc containers (LP: #800824)
+ - sanitize hosts file for system's hostname to 127.0.1.1 (LP: #802637)
+ - add chef support (cloudinit/CloudConfig/cc_chef.py) (LP: #798844)
+ - do not give trace on failure to resize in lxc container (LP: #800856)
+ - increase the timeout on url gets for "seedfrom" values (LP: #812646)
+ - do not write entries for ephemeral0 on t1.micro (LP: #744019)
+ - support 'include-once' so that expiring or one-time use urls can
+ be used for '#include' to provide sensitive data.
+ - support for passing public and private keys to mcollective via cloud-config
+ - support multiple statically configured network devices, as long as
+ all of them come up early (LP: #810044)
+ - Changes to handling user data mean that:
+ * boothooks will now run more than once as they were intended (and as bootcmd
+ commands do)
+ * cloud-config and user-scripts will be updated from user data every boot
+ - Fix issue where 'isatty' would return true for apt-add-repository.
+ apt-add-repository would get stdin which was attached to a terminal
+ (/dev/console) and would thus hang when running during boot. (LP: 831505)
+ This was done by changing all users of util.subp to have None input unless specified
+ - Add some debug info to the console when cloud-init runs.
+ This is useful for debugging; IP and route information is printed to the console.
+ - change the mechanism for handling .ssh/authorized_keys, to update entries
+ rather than appending. This ensures that the authorized_keys that are being
+ inserted actually do something (LP: #434076, LP: #833499)
+ - log warning on failure to set hostname (LP: #832175)
+ - upstart/cloud-init-nonet.conf: wait for all network interfaces to be up
+ allow for the possibility of /var/run != /run.
+ - DataSourceNoCloud, DataSourceOVF : do not provide a default hostname.
+ This way the configured hostname of the system will be used if not provided
+ by metadata (LP: #838280)
+ - DataSourceOVF: change the default instance id to 'iid-dsovf' from 'nocloud'
+
0.6.1:
- fix bug in fixing permission on /var/log/cloud-init.log (LP: #704509)
- improve comment strings in rsyslog file tools/21-cloudinit.conf
diff --git a/cloud-init.py b/cloud-init.py
index ee08c191..c38512fe 100755
--- a/cloud-init.py
+++ b/cloud-init.py
@@ -24,6 +24,7 @@ import cloudinit
import cloudinit.util as util
import cloudinit.CloudConfig as CC
import cloudinit.DataSource as ds
+import cloudinit.netinfo as netinfo
import time
import logging
import errno
@@ -87,6 +88,8 @@ def main():
nonet_path = "%s/%s" % (cloudinit.get_cpath("data"), "no-net")
if cmd == "start":
+ print netinfo.debug_info()
+
stop_files = ( cloudinit.get_ipath_cur("obj_pkl"), nonet_path )
# if starting as the network start, there are cases
# where everything is already done for us, and it makes
@@ -140,15 +143,14 @@ def main():
# parse the user data (ec2-run-userdata.py)
try:
- cloud.sem_and_run("consume_userdata", "once-per-instance",
- cloud.consume_userdata,[],False)
+ ran = cloud.sem_and_run("consume_userdata", cloudinit.per_instance,
+ cloud.consume_userdata,[cloudinit.per_instance],False)
+ if not ran:
+ cloud.consume_userdata(cloudinit.per_always)
except:
warn("consuming user data failed!\n")
raise
- # finish, send the cloud-config event
- cloud.initctl_emit()
-
cfg_path = cloudinit.get_ipath_cur("cloud_config")
cc = CC.CloudConfig(cfg_path, cloud)
@@ -163,6 +165,16 @@ def main():
except Exception as e:
warn("Failed to get and set output config: %s\n" % e)
+ # send the cloud-config ready event
+ cc_path = cloudinit.get_ipath_cur('cloud_config')
+ cc_ready = cc.cfg.get("cc_ready_cmd",
+ ['initctl', 'emit', 'cloud-config',
+ '%s=%s' % (cloudinit.cfg_env_name, cc_path) ])
+ if cc_ready:
+ if isinstance(cc_ready,str):
+ cc_ready = [ 'sh', '-c', cc_ready]
+ subprocess.Popen(cc_ready).communicate()
+
module_list = CC.read_cc_modules(cc.cfg,"cloud_init_modules")
failures = []
diff --git a/cloudinit/CloudConfig/__init__.py b/cloudinit/CloudConfig/__init__.py
index 91853dfd..82f422fc 100644
--- a/cloudinit/CloudConfig/__init__.py
+++ b/cloudinit/CloudConfig/__init__.py
@@ -25,9 +25,9 @@ import os
import subprocess
import time
-per_instance="once-per-instance"
-per_always="always"
-per_once="once"
+per_instance= cloudinit.per_instance
+per_always = cloudinit.per_always
+per_once = cloudinit.per_once
class CloudConfig():
cfgfile = None
diff --git a/cloudinit/CloudConfig/cc_bootcmd.py b/cloudinit/CloudConfig/cc_bootcmd.py
index 9eccfd78..11e9938c 100644
--- a/cloudinit/CloudConfig/cc_bootcmd.py
+++ b/cloudinit/CloudConfig/cc_bootcmd.py
@@ -18,6 +18,8 @@
import cloudinit.util as util
import subprocess
import tempfile
+from cloudinit.CloudConfig import per_always
+frequency = per_always
def handle(name,cfg,cloud,log,args):
if not cfg.has_key("bootcmd"):
diff --git a/cloudinit/CloudConfig/cc_byobu.py b/cloudinit/CloudConfig/cc_byobu.py
index 1a4545af..406a1f67 100644
--- a/cloudinit/CloudConfig/cc_byobu.py
+++ b/cloudinit/CloudConfig/cc_byobu.py
@@ -27,19 +27,40 @@ def handle(name,cfg,cloud,log,args):
if not value: return
- if value == "user":
- user = util.get_cfg_option_str(cfg,"user","ubuntu")
- cmd = [ 'sudo', '-Hu', user, 'byobu-launcher-install' ]
- elif value == "system":
- shcmd="echo '%s' | debconf-set-selections && %s" % \
- ( "byobu byobu/launch-by-default boolean true",
- "dpkg-reconfigure byobu --frontend=noninteractive" )
- cmd = [ "/bin/sh", "-c", shcmd ]
- else:
+ if value == "user" or value == "system":
+ value = "enable-%s" % value
+
+ valid = ( "enable-user", "enable-system", "enable",
+ "disable-user", "disable-system", "disable" )
+ if not value in valid:
log.warn("Unknown value %s for byobu_by_default" % value)
- return
- log.debug("enabling byobu for %s" % value)
+ mod_user = value.endswith("-user")
+ mod_sys = value.endswith("-system")
+ if value.startswith("enable"):
+ bl_inst = "install"
+ dc_val = "byobu byobu/launch-by-default boolean true"
+ mod_sys = True
+ else:
+ if value == "disable":
+ mod_user = True
+ mod_sys = True
+ bl_inst = "uninstall"
+ dc_val = "byobu byobu/launch-by-default boolean false"
+
+ shcmd = ""
+ if mod_user:
+ user = util.get_cfg_option_str(cfg,"user","ubuntu")
+ shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
+ shcmd += " || X=$(($X+1)); "
+ if mod_sys:
+ shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
+ shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
+ shcmd += " || X=$(($X+1)); "
+
+ cmd = [ "/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X" ) ]
+
+ log.debug("setting byobu to %s" % value)
try:
subprocess.check_call(cmd)
diff --git a/cloudinit/CloudConfig/cc_chef.py b/cloudinit/CloudConfig/cc_chef.py
index 8b2cfc2a..5f13c77d 100644
--- a/cloudinit/CloudConfig/cc_chef.py
+++ b/cloudinit/CloudConfig/cc_chef.py
@@ -32,8 +32,9 @@ def handle(name,cfg,cloud,log,args):
chef_cfg = cfg['chef']
# Install chef packages from selected source
+ install_type = util.get_cfg_option_str(chef_cfg, "install_type", "packages")
if not os.path.isfile('/usr/bin/chef-client'):
- if chef_cfg['install_type'] == "gems":
+ if install_type == "gems":
if chef_cfg.has_key('version'):
chef_version = chef_cfg['version']
else:
@@ -48,10 +49,12 @@ def handle(name,cfg,cloud,log,args):
if chef_cfg.has_key('validation_cert'):
with open('/etc/chef/validation.pem', 'w') as validation_cert_fh:
validation_cert_fh.write(chef_cfg['validation_cert'])
-
+
+ validation_name = chef_cfg.get('validation_name','chef-validator')
# create the chef config from template
util.render_to_file('chef_client.rb', '/etc/chef/client.rb',
- {'server_url': chef_cfg['server_url'], 'validation_name': chef_cfg['validation_name'] || 'chef-validator'})
+ {'server_url': chef_cfg['server_url'],
+ 'validation_name': chef_cfg['validation_name']})
chef_args = ['-d']
# set the firstboot json
@@ -65,6 +68,7 @@ def handle(name,cfg,cloud,log,args):
chef_args.append('-j /etc/chef/firstboot.json')
# and finally, run chef
+ log.debug("running chef-client %s" % chef_args)
subprocess.check_call(['/usr/bin/chef-client'] + chef_args)
def install_chef_from_gems(ruby_version, chef_version = None):
diff --git a/cloudinit/CloudConfig/cc_grub_dpkg.py b/cloudinit/CloudConfig/cc_grub_dpkg.py
index dafb43cf..b26e90e8 100644
--- a/cloudinit/CloudConfig/cc_grub_dpkg.py
+++ b/cloudinit/CloudConfig/cc_grub_dpkg.py
@@ -31,7 +31,8 @@ def handle(name,cfg,cloud,log,args):
idevs_empty=util.get_cfg_option_str(cfg["grub-dpkg"],
"grub-pc/install_devices_empty",None)
- if os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda"):
+ if (( os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda") ) or
+ ( os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda") )):
if idevs == None: idevs=""
if idevs_empty == None: idevs_empty="true"
else:
diff --git a/cloudinit/CloudConfig/cc_mcollective.py b/cloudinit/CloudConfig/cc_mcollective.py
index 9aae2d64..c7912aa4 100644
--- a/cloudinit/CloudConfig/cc_mcollective.py
+++ b/cloudinit/CloudConfig/cc_mcollective.py
@@ -24,6 +24,10 @@ import fileinput
import StringIO
import ConfigParser
import cloudinit.CloudConfig as cc
+import cloudinit.util as util
+
+pubcert_file = "/etc/mcollective/ssl/server-public.pem"
+pricert_file = "/etc/mcollective/ssl/server-private.pem"
# Our fake header section
class FakeSecHead(object):
@@ -50,24 +54,35 @@ def handle(name,cfg,cloud,log,args):
# Read server.cfg values from original file in order to be able to mix the rest up
mcollective_config.readfp(FakeSecHead(open('/etc/mcollective/server.cfg')))
for cfg_name, cfg in mcollective_cfg['conf'].iteritems():
- # Iterate throug the config items, we'll use ConfigParser.set
- # to overwrite or create new items as needed
- for o, v in cfg.iteritems():
- mcollective_config.set(cfg_name,o,v)
+ if cfg_name == 'public-cert':
+ util.write_file(pubcert_file, cfg, mode=0644)
+ mcollective_config.set(cfg_name,
+ 'plugin.ssl_server_public', pubcert_file)
+ mcollective_config.set(cfg_name,'securityprovider','ssl')
+ elif cfg_name == 'private-cert':
+ util.write_file(pricert_file, cfg, mode=0600)
+ mcollective_config.set(cfg_name,
+ 'plugin.ssl_server_private', pricert_file)
+ mcollective_config.set(cfg_name,'securityprovider','ssl')
+ else:
+ # Iterate through the config items, we'll use ConfigParser.set
+ # to overwrite or create new items as needed
+ for o, v in cfg.iteritems():
+ mcollective_config.set(cfg_name,o,v)
# We got all our config as wanted we'll rename
# the previous server.cfg and create our new one
os.rename('/etc/mcollective/server.cfg','/etc/mcollective/server.cfg.old')
outputfile = StringIO.StringIO()
mcollective_config.write(outputfile)
# Now we got the whole file, write to disk except first line
- final_configfile = open('/etc/mcollective/server.cfg', 'wb')
# Note below, that we've just used ConfigParser because it generally
# works. Below, we remove the initial 'nullsection' header
# and then change 'key = value' to 'key: value'. The global
# search and replace of '=' with ':' could be problematic though.
# this most likely needs fixing.
- final_configfile.write(outputfile.getvalue().replace('[nullsection]\n','').replace(' =',':'))
- final_configfile.close()
+ util.write_file('/etc/mcollective/server.cfg',
+ outputfile.getvalue().replace('[nullsection]\n','').replace(' =',':'),
+ mode=0644)
# Start mcollective
subprocess.check_call(['service', 'mcollective', 'start'])
diff --git a/cloudinit/CloudConfig/cc_mounts.py b/cloudinit/CloudConfig/cc_mounts.py
index 8ee4f718..592a030a 100644
--- a/cloudinit/CloudConfig/cc_mounts.py
+++ b/cloudinit/CloudConfig/cc_mounts.py
@@ -32,12 +32,13 @@ def is_mdname(name):
return False
def handle(name,cfg,cloud,log,args):
- # these are our default set of mounts
- defmnts = [ [ "ephemeral0", "/mnt", "auto", "defaults,nobootwait", "0", "2" ],
- [ "swap", "none", "swap", "sw", "0", "0" ] ]
-
# fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
defvals = [ None, None, "auto", "defaults,nobootwait", "0", "2" ]
+ defvals = cfg.get("mount_default_fields", defvals)
+
+ # these are our default set of mounts
+ defmnts = [ [ "ephemeral0", "/mnt", "auto", defvals[3], "0", "2" ],
+ [ "swap", "none", "swap", "sw", "0", "0" ] ]
cfgmnt = [ ]
if cfg.has_key("mounts"):
diff --git a/cloudinit/CloudConfig/cc_phone_home.py b/cloudinit/CloudConfig/cc_phone_home.py
index be6abfa8..f291e1d4 100644
--- a/cloudinit/CloudConfig/cc_phone_home.py
+++ b/cloudinit/CloudConfig/cc_phone_home.py
@@ -20,7 +20,7 @@ import cloudinit.util as util
from time import sleep
frequency = per_instance
-post_list_all = [ 'pub_key_dsa', 'pub_key_rsa', 'instance_id', 'hostname' ]
+post_list_all = [ 'pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id', 'hostname' ]
# phone_home:
# url: http://my.foo.bar/$INSTANCE/
@@ -29,7 +29,7 @@ post_list_all = [ 'pub_key_dsa', 'pub_key_rsa', 'instance_id', 'hostname' ]
#
# phone_home:
# url: http://my.foo.bar/$INSTANCE_ID/
-# post: [ pub_key_dsa, pub_key_rsa, instance_id
+# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id
#
def handle(name,cfg,cloud,log,args):
if len(args) != 0:
@@ -61,6 +61,7 @@ def handle(name,cfg,cloud,log,args):
pubkeys = {
'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
+ 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
}
for n, path in pubkeys.iteritems():
diff --git a/cloudinit/CloudConfig/cc_resizefs.py b/cloudinit/CloudConfig/cc_resizefs.py
index e396b283..883c269b 100644
--- a/cloudinit/CloudConfig/cc_resizefs.py
+++ b/cloudinit/CloudConfig/cc_resizefs.py
@@ -42,6 +42,9 @@ def handle(name,cfg,cloud,log,args):
dev=os.makedev(os.major(st_dev),os.minor(st_dev))
os.mknod(devpth, 0400 | stat.S_IFBLK, dev)
except:
+ if util.islxc():
+ log.debug("inside lxc, ignoring mknod failure in resizefs")
+ return
log.warn("Failed to make device node to resize /")
raise
diff --git a/cloudinit/CloudConfig/cc_set_hostname.py b/cloudinit/CloudConfig/cc_set_hostname.py
index 49368019..bc190049 100644
--- a/cloudinit/CloudConfig/cc_set_hostname.py
+++ b/cloudinit/CloudConfig/cc_set_hostname.py
@@ -23,21 +23,16 @@ def handle(name,cfg,cloud,log,args):
log.debug("preserve_hostname is set. not setting hostname")
return(True)
+ ( hostname, fqdn ) = util.get_hostname_fqdn(cfg, cloud)
try:
- hostname_prefix = util.get_cfg_option_str(cfg, "hostname_prefix", None)
- hostname_attr = util.get_cfg_option_str(cfg, "hostname_attribute", "hostname")
- hostname_function = getattr(cloud, 'get_' + hostname_attr, None)
- if hostname_fucntion is None: hostname_fucntion = cloud.get_hostname
- hostname = util.get_cfg_option_str(cfg,"hostname", hostname_function)
- if hostname_prefix: hostname = hostname_prefix + "-" + hostname
set_hostname(hostname, log)
except Exception as e:
util.logexc(log)
- log.warn("failed to set hostname\n")
+ log.warn("failed to set hostname to %s\n", hostname)
return(True)
def set_hostname(hostname, log):
- subprocess.Popen(['hostname', hostname]).communicate()
+ util.subp(['hostname', hostname])
util.write_file("/etc/hostname","%s\n" % hostname, 0644)
log.debug("populated /etc/hostname with %s on first boot", hostname)
diff --git a/cloudinit/CloudConfig/cc_ssh.py b/cloudinit/CloudConfig/cc_ssh.py
index c4603d2b..50b6a73c 100644
--- a/cloudinit/CloudConfig/cc_ssh.py
+++ b/cloudinit/CloudConfig/cc_ssh.py
@@ -16,11 +16,20 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cloudinit.util as util
+import cloudinit.SshUtil as sshutil
import os
import glob
import subprocess
+DISABLE_ROOT_OPTS="no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" rather than the user \\\"root\\\".\';echo;sleep 10\""
+
+
+global_log = None
+
def handle(name,cfg,cloud,log,args):
+ global global_log
+ global_log = log
+
# remove the static keys from the pristine image
for f in glob.glob("/etc/ssh/ssh_host_*_key*"):
try: os.unlink(f)
@@ -32,14 +41,18 @@ def handle(name,cfg,cloud,log,args):
"rsa_private" : ("/etc/ssh/ssh_host_rsa_key", 0600),
"rsa_public" : ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
"dsa_private" : ("/etc/ssh/ssh_host_dsa_key", 0600),
- "dsa_public" : ("/etc/ssh/ssh_host_dsa_key.pub", 0644)
+ "dsa_public" : ("/etc/ssh/ssh_host_dsa_key.pub", 0644),
+ "ecdsa_private" : ("/etc/ssh/ssh_host_ecdsa_key", 0600),
+ "ecdsa_public" : ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644),
}
for key,val in cfg["ssh_keys"].items():
if key2file.has_key(key):
util.write_file(key2file[key][0],val,key2file[key][1])
- priv2pub = { 'rsa_private':'rsa_public', 'dsa_private':'dsa_public' }
+ priv2pub = { 'rsa_private':'rsa_public', 'dsa_private':'dsa_public',
+ 'ecdsa_private': 'ecdsa_public', }
+
cmd = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
for priv,pub in priv2pub.iteritems():
if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']: continue
@@ -50,19 +63,23 @@ def handle(name,cfg,cloud,log,args):
# if not, generate them
genkeys ='ssh-keygen -f /etc/ssh/ssh_host_rsa_key -t rsa -N ""; '
genkeys+='ssh-keygen -f /etc/ssh/ssh_host_dsa_key -t dsa -N ""; '
+ genkeys+='ssh-keygen -f /etc/ssh/ssh_host_ecdsa_key -t ecdsa -N ""; '
subprocess.call(('sh', '-c', "{ %s } </dev/null" % (genkeys)))
try:
user = util.get_cfg_option_str(cfg,'user')
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
+ disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
+ DISABLE_ROOT_OPTS)
keys = cloud.get_public_ssh_keys()
if cfg.has_key("ssh_authorized_keys"):
cfgkeys = cfg["ssh_authorized_keys"]
keys.extend(cfgkeys)
- apply_credentials(keys,user,disable_root)
+ apply_credentials(keys,user,disable_root, disable_root_opts)
except:
+ util.logexc(log)
log.warn("applying credentials failed!\n")
send_ssh_keys_to_console()
@@ -70,36 +87,15 @@ def handle(name,cfg,cloud,log,args):
def send_ssh_keys_to_console():
subprocess.call(('/usr/lib/cloud-init/write-ssh-key-fingerprints',))
-def apply_credentials(keys, user, disable_root):
+def apply_credentials(keys, user, disable_root, disable_root_opts=DISABLE_ROOT_OPTS, log=global_log):
keys = set(keys)
if user:
- setup_user_keys(keys, user, '')
+ sshutil.setup_user_keys(keys, user, '', log)
if disable_root:
- key_prefix = 'command="echo \'Please login as the user \\\"%s\\\" rather than the user \\\"root\\\".\';echo;sleep 10" ' % user
+ key_prefix = disable_root_opts.replace('$USER', user)
else:
key_prefix = ''
- setup_user_keys(keys, 'root', key_prefix)
-
-def setup_user_keys(keys, user, key_prefix):
- import pwd
- saved_umask = os.umask(077)
-
- pwent = pwd.getpwnam(user)
-
- ssh_dir = '%s/.ssh' % pwent.pw_dir
- if not os.path.exists(ssh_dir):
- os.mkdir(ssh_dir)
- os.chown(ssh_dir, pwent.pw_uid, pwent.pw_gid)
-
- authorized_keys = '%s/.ssh/authorized_keys' % pwent.pw_dir
- fp = open(authorized_keys, 'a')
- fp.write(''.join(['%s%s\n' % (key_prefix, key) for key in keys]))
- fp.close()
-
- os.chown(authorized_keys, pwent.pw_uid, pwent.pw_gid)
-
- os.umask(saved_umask)
-
+ sshutil.setup_user_keys(keys, 'root', key_prefix, log)
diff --git a/cloudinit/CloudConfig/cc_timezone.py b/cloudinit/CloudConfig/cc_timezone.py
index f221819e..a26df8f9 100644
--- a/cloudinit/CloudConfig/cc_timezone.py
+++ b/cloudinit/CloudConfig/cc_timezone.py
@@ -25,7 +25,6 @@ frequency = per_instance
tz_base = "/usr/share/zoneinfo"
def handle(name,cfg,cloud,log,args):
- print args
if len(args) != 0:
timezone = args[0]
else:
diff --git a/cloudinit/CloudConfig/cc_update_etc_hosts.py b/cloudinit/CloudConfig/cc_update_etc_hosts.py
index 856cbae1..6012b8a3 100644
--- a/cloudinit/CloudConfig/cc_update_etc_hosts.py
+++ b/cloudinit/CloudConfig/cc_update_etc_hosts.py
@@ -17,25 +17,64 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cloudinit.util as util
from cloudinit.CloudConfig import per_always
+import StringIO
frequency = per_always
def handle(name,cfg,cloud,log,args):
- if not util.get_cfg_option_bool(cfg,"manage_etc_hosts",False):
- log.debug("manage_etc_hosts is not set. not modifying /etc/hosts")
+ ( hostname, fqdn ) = util.get_hostname_fqdn(cfg, cloud)
+
+ use_template = util.get_cfg_option_bool(cfg,"manage_etc_hosts", False)
+ if not use_template:
+ # manage_etc_hosts not true, update the 127.0.1.1 entry via update_etc_hosts
+ log.debug("manage_etc_hosts is not set, checking sanity of /etc/hosts")
+ update_etc_hosts(hostname, fqdn, log)
return
+ # manage_etc_hosts is set, render from template file
try:
- hostname = util.get_cfg_option_str(cfg,"hostname",cloud.get_hostname())
- if not hostname:
- hostname = cloud.get_hostname()
-
if not hostname:
log.info("manage_etc_hosts was set, but no hostname found")
return
- util.render_to_file('hosts', '/etc/hosts', { 'hostname' : hostname })
+ util.render_to_file('hosts', '/etc/hosts', \
+ { 'hostname' : hostname, 'fqdn' : fqdn })
except Exception as e:
log.warn("failed to update /etc/hosts")
raise
+
+def update_etc_hosts(hostname, fqdn, log):
+ with open('/etc/hosts', 'r') as etchosts:
+ header = "# Added by cloud-init\n"
+ hosts_line = "127.0.1.1\t%s %s\n" % (fqdn, hostname)
+ need_write = False
+ need_change = True
+ new_etchosts = StringIO.StringIO()
+ for line in etchosts:
+ split_line = [s.strip() for s in line.split()]
+ if len(split_line) < 2:
+ new_etchosts.write(line)
+ continue
+ if line == header:
+ continue
+ ip, hosts = split_line[0], split_line[1:]
+ if ip == "127.0.1.1":
+ if sorted([hostname, fqdn]) == sorted(hosts):
+ need_change = False
+ if need_change == True:
+ line = "%s%s" % (header, hosts_line)
+ need_change = False
+ need_write = True
+ new_etchosts.write(line)
+ etchosts.close()
+ if need_change == True:
+ new_etchosts.write("%s%s" % (header, hosts_line))
+ need_write = True
+ if need_write == True:
+ new_etcfile = open ('/etc/hosts','wb')
+ new_etcfile.write(new_etchosts.getvalue())
+ new_etcfile.close()
+ new_etchosts.close()
+ return
+
diff --git a/cloudinit/CloudConfig/cc_update_hostname.py b/cloudinit/CloudConfig/cc_update_hostname.py
index 9ef02251..3f55c73b 100644
--- a/cloudinit/CloudConfig/cc_update_hostname.py
+++ b/cloudinit/CloudConfig/cc_update_hostname.py
@@ -27,8 +27,8 @@ def handle(name,cfg,cloud,log,args):
log.debug("preserve_hostname is set. not updating hostname")
return
+ ( hostname, fqdn ) = util.get_hostname_fqdn(cfg, cloud)
try:
- hostname = util.get_cfg_option_str(cfg,"hostname",cloud.get_hostname())
prev ="%s/%s" % (cloud.get_cpath('data'),"previous-hostname")
update_hostname(hostname, prev, log)
except Exception as e:
diff --git a/cloudinit/DataSource.py b/cloudinit/DataSource.py
index 350b5015..3f851e14 100644
--- a/cloudinit/DataSource.py
+++ b/cloudinit/DataSource.py
@@ -22,6 +22,7 @@ DEP_NETWORK = "NETWORK"
import UserDataHandler as ud
import cloudinit.util as util
+import platform
class DataSource:
userdata = None
@@ -93,23 +94,58 @@ class DataSource:
def get_instance_id(self):
if 'instance-id' not in self.metadata:
- return "ubuntuhost"
+ return "iid-datasource"
return(self.metadata['instance-id'])
- def get_hostname(self):
+ def get_hostname(self, fqdn=False):
+ defdomain = "localdomain"
+ defhost = "localhost"
+
+ domain = defdomain
if not 'local-hostname' in self.metadata:
- return None
- toks = self.metadata['local-hostname'].split('.')
+ # this is somewhat questionable really.
+ # the cloud datasource was asked for a hostname
+ # and didn't have one. raising error might be more appropriate
+ # but instead, basically look up the existing hostname
+ toks = []
+ pfn = platform.node()
+
+ # platform.node says: Returns the computer's network
+ # name (which may not be fully qualified)
+ toks = pfn.split(".")
+ if pfn.find(".") > 0:
+ toks = pfn.split(".")
+ elif pfn:
+ toks = [ pfn, defdomain ]
+
+ if len(toks) == 0:
+ toks = [ defhost, defdomain ]
+ #log.warn("unable to find hostname, using defaults")
+
+ else:
+ toks = self.metadata['local-hostname'].split('.')
+
# if there is an ipv4 address in 'local-hostname', then
# make up a hostname (LP: #475354)
if len(toks) == 4:
try:
r = filter(lambda x: int(x) < 256 and x > 0, toks)
if len(r) == 4:
- return("ip-%s" % '-'.join(r))
- except: pass
- return toks[0]
+ toks = [ "ip-%s" % '-'.join(r) ]
+ except:
+ pass
+
+ if len(toks) > 1:
+ hostname = toks[0]
+ domain = '.'.join(toks[1:])
+ else:
+ hostname = toks[0]
+
+ if fqdn:
+ return "%s.%s" % (hostname,domain)
+ else:
+ return hostname
# return a list of classes that have the same depends as 'depends'
# iterate through cfg_list, loading "DataSourceCollections" modules
diff --git a/cloudinit/DataSourceEc2.py b/cloudinit/DataSourceEc2.py
index 9f1cf840..890569e3 100644
--- a/cloudinit/DataSourceEc2.py
+++ b/cloudinit/DataSourceEc2.py
@@ -27,10 +27,12 @@ import sys
import boto_utils
import os.path
import errno
+import urlparse
class DataSourceEc2(DataSource.DataSource):
api_ver = '2009-04-04'
seeddir = seeddir + '/ec2'
+ metadata_address = "http://169.254.169.254"
def __str__(self):
return("DataSourceEc2")
@@ -46,8 +48,8 @@ class DataSourceEc2(DataSource.DataSource):
try:
if not self.wait_for_metadata_service():
return False
- self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver)
- self.metadata = boto_utils.get_instance_metadata(self.api_ver)
+ self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver, None, self.metadata_address)
+ self.metadata = boto_utils.get_instance_metadata(self.api_ver, self.metadata_address)
return True
except Exception as e:
print e
@@ -100,30 +102,58 @@ class DataSourceEc2(DataSource.DataSource):
log.warn("Failed to get timeout, using %s" % timeout)
sleeptime = 1
- address = '169.254.169.254'
+
+ def_mdurls = ["http://169.254.169.254", "http://instance-data:8773"]
+ try:
+ mdurls = mcfg.get("metadata_urls", def_mdurls)
+ except Exception as e:
+ mdurls = def_mdurls
+ util.logexc(log)
+ log.warn("Failed to get metadata URLs, using defaults")
+
starttime = time.time()
-
- url="http://%s/%s/meta-data/instance-id" % (address,self.api_ver)
- for x in range(sleeps):
- # given 100 sleeps, this ends up total sleep time of 1050 sec
- sleeptime=int(x/5)+1
- reason = ""
- try:
- req = urllib2.Request(url)
- resp = urllib2.urlopen(req, timeout=timeout)
- if resp.read() != "": return True
- reason = "empty data [%s]" % resp.getcode()
- except urllib2.HTTPError as e:
- reason = "http error [%s]" % e.code
- except urllib2.URLError as e:
- reason = "url error [%s]" % e.reason
-
- if x == 0:
- log.warning("waiting for metadata service at %s\n" % url)
-
- log.warning(" %s [%02s/%s]: %s\n" %
- (time.strftime("%H:%M:%S",time.gmtime()), x+1, sleeps, reason))
+ # Remove addresses from the list that wont resolve.
+ filtered = [x for x in mdurls if try_to_resolve_metadata(x)]
+
+ if set(filtered) != set(mdurls):
+ log.debug("removed the following from metadata urls: %s" %
+ list((set(mdurls) - set(filtered))))
+
+ if len(filtered):
+ mdurls = filtered
+ else:
+ log.warn("Empty metadata url list! using default list")
+ mdurls = def_mdurls
+
+ log.debug("Searching the following metadata urls: %s" % mdurls)
+
+ for x in range(sleeps):
+ for url in mdurls:
+ iurl="%s/%s/meta-data/instance-id" % (url, self.api_ver)
+
+ # given 100 sleeps, this ends up total sleep time of 1050 sec
+ sleeptime=int(x/5)+1
+
+ reason = ""
+ try:
+ req = urllib2.Request(iurl)
+ resp = urllib2.urlopen(req, timeout=timeout)
+ if resp.read() != "":
+ self.metadata_address = url
+ log.debug("Using metadata source: '%s'" % url)
+ return True
+ reason = "empty data [%s]" % resp.getcode()
+ except urllib2.HTTPError as e:
+ reason = "http error [%s]" % e.code
+ except urllib2.URLError as e:
+ reason = "url error [%s]" % e.reason
+
+ #not needed? Addresses being checked are displayed above
+ #if x == 0:
+ # log.warn("waiting for metadata service at %s" % url)
+
+ log.warn("'%s' failed: %s" % (url, reason))
time.sleep(sleeptime)
log.critical("giving up on md after %i seconds\n" %
@@ -146,7 +176,7 @@ class DataSourceEc2(DataSource.DataSource):
if entname == "ephemeral" and name == "ephemeral0":
found = device
if found == None:
- log.warn("unable to convert %s to a device" % name)
+ log.debug("unable to convert %s to a device" % name)
return None
# LP: #611137
@@ -171,6 +201,14 @@ class DataSourceEc2(DataSource.DataSource):
if os.path.exists(cand):
log.debug("remapped device name %s => %s" % (found,cand))
return(cand)
+
+ # on t1.micro, ephemeral0 will appear in block-device-mapping from
+ # metadata, but it will not exist on disk (and never will)
+ # at this point, we've verified that the path did not exist
+ # in the special case of 'ephemeral0' return None to avoid bogus
+ # fstab entry (LP: #744019)
+ if name == "ephemeral0":
+ return None
return ofound
def is_vpc(self):
@@ -181,6 +219,15 @@ class DataSourceEc2(DataSource.DataSource):
return True
return False
+def try_to_resolve_metadata(url):
+ try:
+ addr = urlparse.urlsplit(url).netloc.split(":")[0]
+ socket.getaddrinfo(addr, None)
+ return True
+ except Exception as e:
+ return False
+
+
datasources = [
( DataSourceEc2, ( DataSource.DEP_FILESYSTEM , DataSource.DEP_NETWORK ) ),
]
diff --git a/cloudinit/DataSourceNoCloud.py b/cloudinit/DataSourceNoCloud.py
index 3d429bc5..956b1a5e 100644
--- a/cloudinit/DataSourceNoCloud.py
+++ b/cloudinit/DataSourceNoCloud.py
@@ -41,7 +41,6 @@ class DataSourceNoCloud(DataSource.DataSource):
def get_data(self):
defaults = {
- "local-hostname" : "ubuntuhost",
"instance-id" : "nocloud"
}
@@ -86,7 +85,7 @@ class DataSourceNoCloud(DataSource.DataSource):
# this could throw errors, but the user told us to do it
# so if errors are raised, let them raise
- (md_seed,ud) = util.read_seeded(seedfrom)
+ (md_seed,ud) = util.read_seeded(seedfrom, timeout=None)
log.debug("using seeded cache data from %s" % seedfrom)
# values in the command line override those from the seed
diff --git a/cloudinit/DataSourceOVF.py b/cloudinit/DataSourceOVF.py
index 04fa8da8..3dad4b1e 100644
--- a/cloudinit/DataSourceOVF.py
+++ b/cloudinit/DataSourceOVF.py
@@ -51,8 +51,7 @@ class DataSourceOVF(DataSource.DataSource):
ud = ""
defaults = {
- "local-hostname" : "ubuntuhost",
- "instance-id" : "nocloud"
+ "instance-id" : "iid-dsovf"
}
(seedfile, contents) = get_ovf_env(seeddir)
@@ -109,9 +108,6 @@ class DataSourceOVF(DataSource.DataSource):
if not 'public-keys' in self.metadata: return([])
return([self.metadata['public-keys'],])
- def get_hostname(self):
- return(self.metadata['local-hostname'])
-
# the data sources' config_obj is a cloud-config formated
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
diff --git a/cloudinit/SshUtil.py b/cloudinit/SshUtil.py
new file mode 100644
index 00000000..bc699a61
--- /dev/null
+++ b/cloudinit/SshUtil.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+
+import os
+import os.path
+import cloudinit.util as util
+
+class AuthKeyEntry():
+ # lines are options, keytype, base64-encoded key, comment
+ # man page says the following which I did not understand:
+ # The options field is optional; its presence is determined by whether
+ # the line starts with a number or not (the options field never starts
+ # with a number)
+ options = None
+ keytype = None
+ base64 = None
+ comment = None
+ is_comment = False
+ line_in = ""
+
+ def __init__(self, line, def_opt=None):
+ line=line.rstrip("\n\r")
+ self.line_in = line
+ if line.startswith("#") or line.strip() == "":
+ self.is_comment = True
+ else:
+ ent = line.strip()
+ toks = ent.split(None,3)
+ if len(toks) == 1:
+ self.base64 = toks[0]
+ elif len(toks) == 2:
+ (self.base64, self.comment) = toks
+ elif len(toks) == 3:
+ (self.keytype, self.base64, self.comment) = toks
+ elif len(toks) == 4:
+ i = 0
+ ent = line.strip()
+ quoted = False
+ # taken from auth_rsa_key_allowed in auth-rsa.c
+ try:
+ while (i < len(ent) and
+ ((quoted) or (ent[i] not in (" ", "\t")))):
+ curc = ent[i]
+ nextc = ent[i + 1]
+ if curc == "\\" and nextc == '"':
+ i = i + 1
+ elif curc == '"':
+ quoted = not quoted
+ i = i + 1
+ except IndexError as e:
+ self.is_comment = True
+ return()
+
+ try:
+ self.options = ent[0:i]
+ (self.keytype, self.base64, self.comment) = \
+ ent[i+1:].split(None,3)
+ except ValueError as e:
+ # we did not understand this line
+ self.is_comment = True
+
+ if self.options == None and def_opt:
+ self.options = def_opt
+
+ return
+
+ def debug(self):
+ print("line_in=%s\ncomment: %s\noptions=%s\nkeytype=%s\nbase64=%s\ncomment=%s\n" %
+ (self.line_in, self.is_comment, self.options, self.keytype, self.base64, self.comment)),
+ def __repr__(self):
+ if self.is_comment:
+ return(self.line_in)
+ else:
+ toks = [ ]
+ for e in (self.options, self.keytype, self.base64, self.comment):
+ if e:
+ toks.append(e)
+
+ return(' '.join(toks))
+
+def update_authorized_keys(fname, keys):
+ # keys is a list of AuthKeyEntries
+ # key_prefix is the prefix (options) to prepend
+ try:
+ fp = open(fname, "r")
+ lines = fp.readlines() # lines have carriage return
+ fp.close()
+ except IOError as e:
+ lines = [ ]
+
+ ka_stats = { } # keys_added status
+ for k in keys:
+ ka_stats[k] = False
+
+ to_add = []
+ for key in keys:
+ to_add.append(key)
+
+ for i in range(0,len(lines)):
+ ent = AuthKeyEntry(lines[i])
+ for k in keys:
+ if k.base64 == ent.base64 and not k.is_comment:
+ ent = k
+ try:
+ to_add.remove(k)
+ except ValueError:
+ pass
+ lines[i] = str(ent)
+
+ # now append any entries we did not match above
+ for key in to_add:
+ lines.append(str(key))
+
+ if len(lines) == 0:
+ return("")
+ else:
+ return('\n'.join(lines) + "\n")
+
+
+def setup_user_keys(keys, user, key_prefix, log=None):
+ import pwd
+ saved_umask = os.umask(077)
+
+ pwent = pwd.getpwnam(user)
+
+ ssh_dir = '%s/.ssh' % pwent.pw_dir
+ if not os.path.exists(ssh_dir):
+ os.mkdir(ssh_dir)
+ os.chown(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+
+ try:
+ ssh_cfg = parse_ssh_config()
+ akeys = ssh_cfg.get("AuthorizedKeysFile","%h/.ssh/authorized_keys")
+ akeys = akeys.replace("%h", pwent.pw_dir)
+ akeys = akeys.replace("%u", user)
+ authorized_keys = akeys
+ except Exception as e:
+ authorized_keys = '%s/.ssh/authorized_keys' % pwent.pw_dir
+ if log:
+ util.logexc(log)
+
+ key_entries = []
+ for k in keys:
+ ke = AuthKeyEntry(k, def_opt=key_prefix)
+ key_entries.append(ke)
+
+ content = update_authorized_keys(authorized_keys, key_entries)
+ util.write_file(authorized_keys, content, 0600)
+
+ os.chown(authorized_keys, pwent.pw_uid, pwent.pw_gid)
+
+ os.umask(saved_umask)
+
+if __name__ == "__main__":
+ import sys
+ # usage: orig_file, new_keys, [key_prefix]
+ # prints out merged, where 'new_keys' will trump old
+ ## example
+ ## ### begin authorized_keys ###
+ # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA28CDAGtxSucHezSKqwh1wAs39xdeZTSVmmyMcKDI5Njnd1d/Uhgj/awxP0Whep8eRSm6F+Xgwi0pH1KNPCszPvq+03K+yi3YkYkQIkVBhctK6AP/UmlVQTVmjJdEvgtrppFTjCzf16q0BT0mXX5YFV3csgm8cJn7UveKHkYjJp8= smoser-work
+ # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
+ # ### end authorized_keys ###
+ #
+ # ### begin new_keys ###
+ # ssh-rsa nonmatch smoser@newhost
+ # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA28CDAGtxSucHezSKqwh1wAs39xdeZTSVmmyMcKDI5Njnd1d/Uhgj/awxP0Whep8eRSm6F+Xgwi0pH1KNPCszPvq+03K+yi3YkYkQIkVBhctK6AP/UmlVQTVmjJdEvgtrppFTjCzf16q0BT0mXX5YFV3csgm8cJn7UveKHkYjJp8= new_comment
+ # ### end new_keys ###
+ #
+ # Then run as:
+ # program authorized_keys new_keys 'no-port-forwarding,command=\"echo hi world;\"'
+ def_prefix = None
+ orig_key_file = sys.argv[1]
+ new_key_file = sys.argv[2]
+ if len(sys.argv) > 3:
+ def_prefix = sys.argv[3]
+ fp = open(new_key_file)
+
+ newkeys = [ ]
+ for line in fp.readlines():
+ newkeys.append(AuthKeyEntry(line, def_prefix))
+
+ fp.close()
+ print update_authorized_keys(orig_key_file, newkeys)
+
+def parse_ssh_config(fname="/etc/ssh/sshd_config"):
+ ret = { }
+ fp=open(fname)
+ for l in fp.readlines():
+ l = l.strip()
+ if not l or l.startswith("#"):
+ continue
+ key,val = l.split(None,1)
+ ret[key]=val
+ fp.close()
+ return(ret)
+
diff --git a/cloudinit/UserDataHandler.py b/cloudinit/UserDataHandler.py
index fbb000fc..afd5dd99 100644
--- a/cloudinit/UserDataHandler.py
+++ b/cloudinit/UserDataHandler.py
@@ -22,9 +22,13 @@ from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import yaml
+import cloudinit
+import cloudinit.util as util
+import md5
starts_with_mappings={
'#include' : 'text/x-include-url',
+ '#include-once' : 'text/x-include-once-url',
'#!' : 'text/x-shellscript',
'#cloud-config' : 'text/cloud-config',
'#upstart-job' : 'text/upstart-job',
@@ -45,16 +49,41 @@ def decomp_str(str):
def do_include(str,parts):
import urllib
+ import os
# is just a list of urls, one per line
# also support '#include <url here>'
+ includeonce = False
for line in str.splitlines():
if line == "#include": continue
- if line.startswith("#include"):
+ if line == "#include-once":
+ includeonce = True
+ continue
+ if line.startswith("#include-once"):
+ line = line[len("#include-once"):].lstrip()
+ includeonce = True
+ elif line.startswith("#include"):
line = line[len("#include"):].lstrip()
if line.startswith("#"): continue
- content = urllib.urlopen(line).read()
+
+        # urls cannot have leading or trailing white space
+ msum = md5.new()
+ msum.update(line.strip())
+ includeonce_filename = "%s/urlcache/%s" % (
+ cloudinit.get_ipath_cur("data"), msum.hexdigest())
+ try:
+ if includeonce and os.path.isfile(includeonce_filename):
+ with open(includeonce_filename, "r") as fp:
+ content = fp.read()
+ else:
+ content = urllib.urlopen(line).read()
+ if includeonce:
+ util.write_file(includeonce_filename, content, mode=0600)
+ except Exception as e:
+ raise
+
process_includes(email.message_from_string(decomp_str(content)),parts)
+
def explode_cc_archive(archive,parts):
for ent in yaml.load(archive):
# ent can be one of:
@@ -74,7 +103,6 @@ def explode_cc_archive(archive,parts):
if mtype == None:
mtype = type_from_startswith(payload,def_type)
- print "adding %s,%s" % (filename, mtype)
parts['content'].append(content)
parts['names'].append(filename)
parts['types'].append(mtype)
@@ -114,6 +142,10 @@ def process_includes(msg,parts):
do_include(payload,parts)
continue
+ if ctype == 'text/x-include-once-url':
+ do_include(payload,parts)
+ continue
+
if ctype == "text/cloud-config-archive":
explode_cc_archive(payload,parts)
continue
@@ -158,11 +190,10 @@ def preprocess_userdata(data):
process_includes(email.message_from_string(decomp_str(data)),parts)
return(parts2mime(parts))
-# callbacks is a dictionary with:
-# { 'content-type': handler(data,content_type,filename,payload) }
-def walk_userdata(str, callbacks, data = None):
+# callback is a function that will be called with (data, content_type, filename, payload)
+def walk_userdata(istr, callback, data = None):
partnum = 0
- for part in email.message_from_string(str).walk():
+ for part in email.message_from_string(istr).walk():
# multipart/* are just containers
if part.get_content_maintype() == 'multipart':
continue
@@ -175,8 +206,7 @@ def walk_userdata(str, callbacks, data = None):
if not filename:
filename = 'part-%03d' % partnum
- if callbacks.has_key(ctype):
- callbacks[ctype](data,ctype,filename,part.get_payload())
+ callback(data, ctype, filename, part.get_payload())
partnum = partnum+1
diff --git a/cloudinit/__init__.py b/cloudinit/__init__.py
index 24e12d08..d01d443d 100644
--- a/cloudinit/__init__.py
+++ b/cloudinit/__init__.py
@@ -46,6 +46,10 @@ pathmap = {
None : "",
}
+per_instance="once-per-instance"
+per_always="always"
+per_once="once"
+
parsed_cfgs = { }
import os
@@ -63,6 +67,7 @@ import logging
import logging.config
import StringIO
import glob
+import traceback
class NullHandler(logging.Handler):
def emit(self,record): pass
@@ -111,14 +116,16 @@ class CloudInit:
ds_deps = [ DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK ]
datasource = None
+ builtin_handlers = [ ]
+
def __init__(self, ds_deps = None, sysconfig=system_config):
- self.part_handlers = {
- 'text/x-shellscript' : self.handle_user_script,
- 'text/cloud-config' : self.handle_cloud_config,
- 'text/upstart-job' : self.handle_upstart_job,
- 'text/part-handler' : self.handle_handler,
- 'text/cloud-boothook' : self.handle_cloud_boothook
- }
+ self.builtin_handlers = [
+ [ 'text/x-shellscript', self.handle_user_script, per_always ],
+ [ 'text/cloud-config', self.handle_cloud_config, per_always ],
+ [ 'text/upstart-job', self.handle_upstart_job, per_instance ],
+ [ 'text/cloud-boothook', self.handle_cloud_boothook, per_always ],
+ ]
+
if ds_deps != None:
self.ds_deps = ds_deps
self.sysconfig=sysconfig
@@ -243,18 +250,13 @@ class CloudInit:
util.write_file(self.get_ipath('userdata'),
self.datasource.get_userdata(), 0600)
- def initctl_emit(self):
- cc_path = get_ipath_cur('cloud_config')
- subprocess.Popen(['initctl', 'emit', 'cloud-config',
- '%s=%s' % (cfg_env_name,cc_path)]).communicate()
-
def sem_getpath(self,name,freq):
if freq == 'once-per-instance':
return("%s/%s" % (self.get_ipath("sem"),name))
return("%s/%s.%s" % (get_cpath("sem"), name, freq))
def sem_has_run(self,name,freq):
- if freq == "always": return False
+ if freq == per_always: return False
semfile = self.sem_getpath(name,freq)
if os.path.exists(semfile):
return True
@@ -270,7 +272,7 @@ class CloudInit:
if e.errno != errno.EEXIST:
raise e
- if os.path.exists(semfile) and freq != "always":
+ if os.path.exists(semfile) and freq != per_always:
return False
# race condition
@@ -299,7 +301,7 @@ class CloudInit:
def sem_and_run(self,semname,freq,func,args=[],clear_on_fail=False):
if self.sem_has_run(semname,freq):
log.debug("%s already ran %s", semname, freq)
- return
+ return False
try:
if not self.sem_acquire(semname,freq):
raise Exception("Failed to acquire lock on %s" % semname)
@@ -310,13 +312,15 @@ class CloudInit:
self.sem_clear(semname,freq)
raise
+ return True
+
# get_ipath : get the instance path for a name in pathmap
# (/var/lib/cloud/instances/<instance>/name)<name>)
def get_ipath(self, name=None):
return("%s/instances/%s%s"
% (varlibdir,self.get_instance_id(), pathmap[name]))
- def consume_userdata(self):
+ def consume_userdata(self, frequency=per_instance):
self.get_userdata()
data = self
@@ -328,63 +332,40 @@ class CloudInit:
sys.path.insert(0,cdir)
sys.path.insert(0,idir)
+ part_handlers = { }
# add handlers in cdir
for fname in glob.glob("%s/*.py" % cdir):
if not os.path.isfile(fname): continue
modname = os.path.basename(fname)[0:-3]
try:
mod = __import__(modname)
- lister = getattr(mod, "list_types")
- handler = getattr(mod, "handle_part")
- mtypes = lister()
- for mtype in mtypes:
- self.part_handlers[mtype]=handler
- log.debug("added handler for [%s] from %s" % (mtypes,fname))
+ handler_register(mod, part_handlers, data, frequency)
+ log.debug("added handler for [%s] from %s" % (mod.list_types(), fname))
except:
log.warn("failed to initialize handler in %s" % fname)
util.logexc(log)
-
- # give callbacks opportunity to initialize
- for ctype, func in self.part_handlers.items():
- func(data, "__begin__",None,None)
- UserDataHandler.walk_userdata(self.get_userdata(),
- self.part_handlers, data)
-
- # give callbacks opportunity to finalize
- for ctype, func in self.part_handlers.items():
- func(data,"__end__",None,None)
-
- def handle_handler(self,data,ctype,filename,payload):
- if ctype == "__end__": return
- if ctype == "__begin__" :
- self.handlercount = 0
- return
-
- self.handlercount=self.handlercount+1
- # write content to instance's handlerdir
- handlerdir = self.get_ipath("handlers")
- modname = 'part-handler-%03d' % self.handlercount
- modfname = modname + ".py"
- util.write_file("%s/%s" % (handlerdir,modfname), payload, 0600)
+        # add the internal handlers if their type hasn't been already claimed
+ for (btype, bhand, bfreq) in self.builtin_handlers:
+ if btype in part_handlers:
+ continue
+ handler_register(InternalPartHandler(bhand, [btype], bfreq),
+ part_handlers, data, frequency)
- try:
- mod = __import__(modname)
- lister = getattr(mod, "list_types")
- handler = getattr(mod, "handle_part")
- except:
- import traceback
- traceback.print_exc(file=sys.stderr)
- return
-
- # - call it with '__begin__'
- handler(data, "__begin__", None, None)
+ # walk the data
+ pdata = { 'handlers': part_handlers, 'handlerdir': idir,
+ 'data' : data, 'frequency': frequency }
+ UserDataHandler.walk_userdata(self.get_userdata(),
+ partwalker_callback, data = pdata)
- # - add it self.part_handlers
- for mtype in lister():
- self.part_handlers[mtype]=handler
+ # give callbacks opportunity to finalize
+ called = [ ]
+ for (mtype, mod) in part_handlers.iteritems():
+ if mod in called:
+ continue
+ handler_call_end(mod, data, frequency)
- def handle_user_script(self,data,ctype,filename,payload):
+ def handle_user_script(self,data,ctype,filename,payload, frequency):
if ctype == "__end__": return
if ctype == "__begin__":
# maybe delete existing things here
@@ -393,16 +374,21 @@ class CloudInit:
filename=filename.replace(os.sep,'_')
scriptsdir = get_ipath_cur('scripts')
util.write_file("%s/%s" %
- (scriptsdir,filename), payload, 0700)
+ (scriptsdir,filename), util.dos2unix(payload), 0700)
+
+ def handle_upstart_job(self,data,ctype,filename,payload, frequency):
+ # upstart jobs are only written on the first boot
+ if frequency != per_instance:
+ return
- def handle_upstart_job(self,data,ctype,filename,payload):
if ctype == "__end__" or ctype == "__begin__": return
if not filename.endswith(".conf"):
filename=filename+".conf"
- util.write_file("%s/%s" % ("/etc/init",filename), payload, 0644)
+ util.write_file("%s/%s" % ("/etc/init",filename),
+ util.dos2unix(payload), 0644)
- def handle_cloud_config(self,data,ctype,filename,payload):
+ def handle_cloud_config(self,data,ctype,filename,payload, frequency):
if ctype == "__begin__":
self.cloud_config_str=""
return
@@ -422,31 +408,20 @@ class CloudInit:
self.cloud_config_str+="\n#%s\n%s" % (filename,payload)
- def handle_cloud_boothook(self,data,ctype,filename,payload):
+ def handle_cloud_boothook(self,data,ctype,filename,payload, frequency):
if ctype == "__end__": return
if ctype == "__begin__": return
filename=filename.replace(os.sep,'_')
+ payload = util.dos2unix(payload)
prefix="#cloud-boothook"
- dos=False
start = 0
if payload.startswith(prefix):
- start = len(prefix)
- if payload[start] == '\r':
- start=start+1
- dos = True
- else:
- if payload.find('\r\n',0,100) >= 0:
- dos = True
-
- if dos:
- payload=payload[start:].replace('\r\n','\n')
- elif start != 0:
- payload=payload[start:]
+ start = len(prefix) + 1
boothooks_dir = self.get_ipath("boothooks")
filepath = "%s/%s" % (boothooks_dir,filename)
- util.write_file(filepath, payload, 0700)
+ util.write_file(filepath, payload[start:], 0700)
try:
env=os.environ.copy()
env['INSTANCE_ID']= self.datasource.get_instance_id()
@@ -467,8 +442,8 @@ class CloudInit:
def get_mirror(self):
return(self.datasource.get_local_mirror())
- def get_hostname(self):
- return(self.datasource.get_hostname())
+ def get_hostname(self, fqdn=False):
+ return(self.datasource.get_hostname(fqdn=fqdn))
def device_name_to_device(self,name):
return(self.datasource.device_name_to_device(name))
@@ -535,3 +510,81 @@ class DataSourceNotFoundException(Exception):
def list_sources(cfg_list, depends):
return(DataSource.list_sources(cfg_list,depends, ["cloudinit", "" ]))
+
+def handler_register(mod, part_handlers, data, frequency=per_instance):
+ if not hasattr(mod, "handler_version"):
+ setattr(mod, "handler_version", 1)
+
+ for mtype in mod.list_types():
+ part_handlers[mtype] = mod
+
+ handler_call_begin(mod, data, frequency)
+ return(mod)
+
+def handler_call_begin(mod, data, frequency):
+ handler_handle_part(mod, data, "__begin__", None, None, frequency)
+
+def handler_call_end(mod, data, frequency):
+ handler_handle_part(mod, data, "__end__", None, None, frequency)
+
+def handler_handle_part(mod, data, ctype, filename, payload, frequency):
+ # only add the handler if the module should run
+ modfreq = getattr(mod, "frequency", per_instance)
+ if not ( modfreq == per_always or
+ ( frequency == per_instance and modfreq == per_instance)):
+ return
+ if mod.handler_version == 1:
+ mod.handle_part(data, ctype, filename, payload)
+ else:
+ mod.handle_part(data, ctype, filename, payload, frequency)
+
+def partwalker_handle_handler(pdata, ctype, filename, payload):
+
+ curcount = pdata['handlercount']
+ modname = 'part-handler-%03d' % curcount
+ frequency = pdata['frequency']
+
+ modfname = modname + ".py"
+ util.write_file("%s/%s" % (pdata['handlerdir'], modfname), payload, 0600)
+
+ pdata['handlercount'] = curcount + 1
+
+ try:
+ mod = __import__(modname)
+ handler_register(mod, pdata['handlers'], pdata['data'], frequency)
+ except:
+ util.logexc(log)
+ traceback.print_exc(file=sys.stderr)
+ return
+
+def partwalker_callback(pdata, ctype, filename, payload):
+ # data here is the part_handlers array and then the data to pass through
+ if ctype == "text/part-handler":
+ if 'handlercount' not in pdata:
+ pdata['handlercount'] = 0
+ partwalker_handle_handler(pdata, ctype, filename, payload)
+ return
+ if ctype not in pdata['handlers']:
+ return
+ handler_handle_part(pdata['handlers'][ctype], pdata['data'],
+ ctype, filename, payload, pdata['frequency'])
+
+class InternalPartHandler:
+ freq = per_instance
+ mtypes = [ ]
+ handler_version = 1
+ handler = None
+ def __init__(self, handler, mtypes, frequency, version = 2):
+ self.handler = handler
+ self.mtypes = mtypes
+ self.frequency = frequency
+ self.handler_version = version
+
+ def __repr__():
+ return("InternalPartHandler: [%s]" % self.mtypes)
+
+ def list_types(self):
+ return(self.mtypes)
+
+ def handle_part(self, data, ctype, filename, payload, frequency):
+ return(self.handler(data, ctype, filename, payload, frequency))
diff --git a/cloudinit/boto_utils.py b/cloudinit/boto_utils.py
index b38483fa..a2cb9ca6 100644
--- a/cloudinit/boto_utils.py
+++ b/cloudinit/boto_utils.py
@@ -60,7 +60,7 @@ def retry_url(url, retry_on_404=True):
sys.stderr.write('Caught exception reading instance data, giving up\n')
return ''
-def get_instance_metadata(version='latest'):
+def get_instance_metadata(version='latest',url='http://169.254.169.254'):
"""
Returns the instance metadata as a nested Python dictionary.
Simple values (e.g. local_hostname, hostname, etc.) will be
@@ -68,11 +68,11 @@ def get_instance_metadata(version='latest'):
be stored in the dict as a list of string values. More complex
fields such as public-keys and will be stored as nested dicts.
"""
- url = 'http://169.254.169.254/%s/meta-data/' % version
+ url = '%s/%s/meta-data/' % (url,version)
return _get_instance_metadata(url)
-def get_instance_userdata(version='latest', sep=None):
- url = 'http://169.254.169.254/%s/user-data' % version
+def get_instance_userdata(version='latest', sep=None,url='http://169.254.169.254'):
+ url = '%s/%s/user-data' % (url,version)
user_data = retry_url(url, retry_on_404=False)
if user_data:
if sep:
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
new file mode 100644
index 00000000..1b6af7e2
--- /dev/null
+++ b/cloudinit/netinfo.py
@@ -0,0 +1,83 @@
+import subprocess
+
+def netdev_info():
+ fields = ( "hwaddr", "addr", "bcast", "mask" )
+ ifcfg_out = subprocess.check_output(["ifconfig", "-a"])
+ devs = { }
+ for line in ifcfg_out.splitlines():
+ if len(line) == 0:
+ continue
+ if line[0] not in ("\t", " "):
+ curdev = line.split()[0]
+ devs[curdev] = { "up": False }
+ for field in fields:
+ devs[curdev][field] = ""
+ toks = line.lower().strip().split()
+ if toks[0] == "up":
+ devs[curdev]['up'] = True
+
+ fieldpost = ""
+ if toks[0] == "inet6":
+ fieldpost = "6"
+
+ for i in range(len(toks)):
+ if toks[i] == "hwaddr":
+ try:
+ devs[curdev]["hwaddr"] = toks[i+1]
+ except IndexError as e:
+ pass
+ for field in ("addr", "bcast", "mask"):
+ target = "%s%s" % (field, fieldpost)
+ if devs[curdev].get(target,""):
+ continue
+ if toks[i] == "%s:" % field:
+ try:
+ devs[curdev][target] = toks[i+1]
+ except IndexError as e:
+ pass
+ elif toks[i].startswith("%s:" % field):
+ devs[curdev][target] = toks[i][len(field)+1:]
+ return(devs)
+
+def route_info():
+ route_out = subprocess.check_output(["route", "-n"])
+ routes = [ ]
+ for line in route_out.splitlines()[1:]:
+ if not line:
+ continue
+ toks = line.split()
+ if toks[0] == "Kernel" or toks[0] == "Destination":
+ continue
+ routes.append(toks)
+ return(routes)
+
+def getgateway():
+ for r in route_info():
+ if r[3].find("G") >= 0:
+ return("%s[%s]" % (r[1],r[7]))
+ return(None)
+
+def debug_info(pre="ci-info: "):
+ lines = [ ]
+ try:
+ netdev = netdev_info()
+ except Exception as e:
+ lines.append("netdev_info failed!")
+ netdev = []
+ for (dev, d) in netdev.iteritems():
+ lines.append("%s%-6s: %i %-15s %-15s %s" %
+ (pre, dev, d["up"], d["addr"],d["mask"], d["hwaddr"]))
+ try:
+ routes = route_info()
+ except Exception as e:
+ lines.append("route_info failed")
+ routes = []
+ n = 0
+ for r in routes:
+ lines.append("%sroute-%d: %-15s %-15s %-15s %-6s %s" %
+ (pre, n, r[0], r[1], r[2], r[7], r[3]))
+ n = n+1
+ return('\n'.join(lines))
+
+if __name__ == '__main__':
+ print debug_info()
diff --git a/cloudinit/util.py b/cloudinit/util.py
index fc4233de..bdc1fce2 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -141,10 +141,8 @@ def runparts(dirp, skip_no_exist=True):
return
def subp(args, input=None):
- s_in = None
- if input is not None:
- s_in = subprocess.PIPE
- sp = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=s_in)
+ sp = subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out,err = sp.communicate(input)
if sp.returncode is not 0:
raise subprocess.CalledProcessError(sp.returncode,args, (out,err))
@@ -189,8 +187,12 @@ def read_seeded(base="", ext="", timeout=2):
md_url = "%s%s%s" % (base, "meta-data", ext)
try:
- md_resp = urllib2.urlopen(urllib2.Request(md_url), timeout=timeout)
- ud_resp = urllib2.urlopen(urllib2.Request(ud_url), timeout=timeout)
+ if timeout == None:
+ md_resp = urllib2.urlopen(urllib2.Request(md_url))
+ ud_resp = urllib2.urlopen(urllib2.Request(ud_url))
+ else:
+ md_resp = urllib2.urlopen(urllib2.Request(md_url), timeout=timeout)
+ ud_resp = urllib2.urlopen(urllib2.Request(ud_url), timeout=timeout)
md_str = md_resp.read()
ud = ud_resp.read()
@@ -389,3 +391,55 @@ def shellify(cmdlist):
else:
content="%s%s\n" % ( content, str(args) )
return content
+
+def dos2unix(input):
+ # find first end of line
+ pos = input.find('\n')
+ if pos <= 0 or input[pos-1] != '\r': return(input)
+ return(input.replace('\r\n','\n'))
+
+def islxc():
+ # is this host running lxc?
+ try:
+ with open("/proc/1/cgroup") as f:
+ if f.read() == "/":
+ return True
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ try:
+ # try to run a program named 'lxc-is-container'. if it returns true, then
+ # we're inside a container. otherwise, no
+ sp = subprocess.Popen(['lxc-is-container'], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out,err = sp.communicate(None)
+ return(sp.returncode == 0)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ return False
+
+def get_hostname_fqdn(cfg, cloud):
+ # return the hostname and fqdn from 'cfg'. If not found in cfg,
+ # then fall back to data from cloud
+ if "fqdn" in cfg:
+ # user specified a fqdn. Default hostname then is based off that
+ fqdn = cfg['fqdn']
+ hostname = get_cfg_option_str(cfg,"hostname",fqdn.split('.')[0])
+ else:
+ if "hostname" in cfg and cfg['hostname'].find('.') > 0:
+ # user specified hostname, and it had '.' in it
+ # be nice to them. set fqdn and hostname from that
+ fqdn = cfg['hostname']
+ hostname = cfg['hostname'][:fqdn.find('.')]
+ else:
+ # no fqdn set, get fqdn from cloud.
+ # get hostname from cfg if available otherwise cloud
+ fqdn = cloud.get_hostname(fqdn=True)
+ if "hostname" in cfg:
+ hostname = cfg['hostname']
+ else:
+ hostname = cloud.get_hostname()
+ return(hostname, fqdn)
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 3aaa3eda..6329fd7d 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -10,17 +10,18 @@ cloud_init_modules:
- update_hostname
- update_etc_hosts
- rsyslog
+ - ssh
cloud_config_modules:
- mounts
- ssh-import-id
- locale
- - ssh
- set-passwords
- grub-dpkg
- apt-update-upgrade
- timezone
- puppet
+ - chef
- mcollective
- disable-ec2-metadata
- runcmd
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index 3333792e..e04f8976 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -8,3 +8,10 @@ datasource:
# after each try, a sleep of int(try_number/5)+1 is done
# default sleep is 30
retries : 30
+
+ #metadata_url: a list of URLs to check for metadata services
+ metadata_urls:
+ - http://169.254.169.254:80
+ - http://instance-data:8773
+
+
diff --git a/doc/examples/cloud-config-mcollective.txt b/doc/examples/cloud-config-mcollective.txt
index ca7ba03e..67735682 100644
--- a/doc/examples/cloud-config-mcollective.txt
+++ b/doc/examples/cloud-config-mcollective.txt
@@ -13,3 +13,37 @@ mcollective:
# plugin.stomp.host: dbhost
conf:
plugin.stomp.host: dbhost
+ # This will add ssl certs to mcollective
+ # WARNING WARNING WARNING
+ # The ec2 metadata service is a network service, and thus is readable
+ # by non-root users on the system (ie: 'ec2metadata --user-data')
+ # If you want security for this, please use include-once + SSL urls
+ public-cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
+ private-cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
+
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index c1d0b278..981fadae 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -111,7 +111,7 @@ packages:
# written to /etc/fstab.
# - '/dev' can be ommitted for device names that begin with: xvd, sd, hd, vd
# - if an entry does not have all 6 fields, they will be filled in
-# from the following: [ None, None, "auto", "defaults,nobootwait", "0", "2" ]
+# with values from 'mount_default_fields' below.
#
# Note, that you should set 'nobootwait' (see man fstab) for volumes that may
# not be attached at instance boot (or reboot)
@@ -122,6 +122,11 @@ mounts:
- [ xvdh, /opt/data, "auto", "defaults,nobootwait", "0", "0" ]
- [ dd, /dev/zero ]
+# mount_default_fields
+# These values are used to fill in any entries in 'mounts' that are not
+# complete. This must be an array, and must have 6 fields.
+mount_default_fields: [ None, None, "auto", "defaults,nobootwait", "0", "2" ]
+
# add each entry to ~/.ssh/authorized_keys for the configured user
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host
@@ -130,6 +135,7 @@ ssh_authorized_keys:
# Send pre-generated ssh private keys to the server
# If these are present, they will be written to /etc/ssh and
# new random keys will not be generated
+# in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported
ssh_keys:
rsa_private: |
-----BEGIN RSA PRIVATE KEY-----
@@ -255,9 +261,14 @@ debconf_selections: | # Need to perserve newlines
debconf debconf/frontend seen false
# manage byobu defaults
-# byobu_by_default: ('user'|'system')
-# 'user' will set byobu 'launch-by-default' for the default user
-# 'system' will enable launch-by-default for for all users
+# byobu_by_default:
+# 'user' or 'enable-user': set byobu 'launch-by-default' for the default user
+# 'system' or 'enable-system' or 'enable':
+# enable 'launch-by-default' for all users, do not modify default user
+# 'disable': disable both default user and system
+# 'disable-system': disable system
+# 'disable-user': disable for default user
+# not-set: no changes made
byobu_by_default: system
# disable ssh access as root.
@@ -266,6 +277,15 @@ byobu_by_default: system
# default: true
disable_root: false
+# disable_root_opts: the value of this variable will prefix the
+# respective key in /root/.ssh/authorized_keys if disable_root is true
+# see 'man authorized_keys' for more information on what you can do here
+#
+# The string '$USER' will be replaced with the username of the default user
+#
+# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"root\".';echo;sleep 10"
+
+
# set the locale to a given locale
# default: en_US.UTF-8
locale: en_US.UTF-8
@@ -292,10 +312,49 @@ rsyslog:
# set to 'False' to disable
resize_rootfs: True
-# if hostname is set, cloud-init will set the system hostname
-# appropriately to its value
-# if not set, it will set hostname from the cloud metadata
-# default: None
+## hostname and /etc/hosts management
+# cloud-init will do its best to set up a sane hostname and corresponding
+# entries in /etc/hosts.
+#
+# if you do nothing, you should get the system generally correctly
+# configured.
+# * /etc/hostname (and thus `hostname` output) set with hostname (not fqdn)
+# * an entry in /etc/hosts for both hostname and fqdn
+# that are obtained from the metadata service
+# * On each boot, the above will again be set
+# * cloud-init generally "owns" the 127.0.1.1 entry. The
+# rest of the file will not be modified
+#
+# You can change the above behavior with the following config variables:
+# Remember that these can be set in cloud-config via user-data,
+# /etc/cloud/cloud.cfg or any file in /etc/cloud/cloud.cfg.d/
+#
+# hostname:
+# this option will be used wherever the 'hostname' is needed
+# simply substitute it in the description above.
+# ** If you wish to set your hostname, set it here **
+# default: 'hostname' as returned by the metadata service
+# on EC2, the hostname portion of 'local-hostname' is used
+# which is something like 'ip-10-244-170-199'
+#
+# fqdn:
+# this option will be used wherever 'fqdn' is needed.
+#    simply substitute it in the description above.
+# default: fqdn as returned by the metadata service. on EC2 'hostname'
+# is used, so this is like: ip-10-244-170-199.ec2.internal
+#
+# manage_etc_hosts:
+# default: false
+# Setting this config variable to 'true' will mean that on every
+# boot, /etc/hosts will be re-written from /etc/cloud/templates/hosts.tmpl
+# The strings '$hostname' and '$fqdn' are replaced in the template
+# with the appropriate values.
+#
+# preserve_hostname:
+# default: False
+#    If this option is set to True, then /etc/hostname will never be updated
+# The default behavior is to update it if it has not been modified by
+# the user.
# final_message
# default: cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds
@@ -333,7 +392,7 @@ output:
#
phone_home:
url: http://my.example.com/$INSTANCE_ID/
- post: [ pub_key_dsa, pub_key_rsa, instance_id ]
+ post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
# timezone: set the timezone for this instance
# the value of 'timezone' must exist in /usr/share/zoneinfo
@@ -404,7 +463,14 @@ ssh_pwauth: True
# default is False
manual_cache_clean: False
-# if you wish to have /etc/hosts written from /etc/cloud/templates/hosts.tmpl
-# on a per-always basis (to account for ebs stop/start), then set
-# manage_etc_hosts to True. The default is 'False'
-manage_etc_hosts: False
+# When cloud-init is finished running including having run
+# cloud_init_modules, then it will run this command. The default
+# is to emit an upstart signal as shown below. If the value is a
+# list, it will be passed to Popen. If it is a string, it will be
+# invoked through 'sh -c'.
+#
+# default value:
+#  cc_ready_cmd: [ initctl, emit, cloud-config, CLOUD_CFG=/var/lib/cloud/instance/cloud-config.txt ]
+# example:
+# cc_ready_cmd: [ sh, -c, 'echo HI MOM > /tmp/file' ]
+
diff --git a/doc/examples/include-once.txt b/doc/examples/include-once.txt
new file mode 100644
index 00000000..0cf74e5e
--- /dev/null
+++ b/doc/examples/include-once.txt
@@ -0,0 +1,7 @@
+#include-once
+# entries are one url per line. comment lines beginning with '#' are allowed
+# urls are passed to urllib.urlopen, so the format must be supported there
+# These entries will be processed only ONE TIME by cloud-init; any further
+# iterations won't process this file
+http://www.ubuntu.com/robots.txt
+http://www.w3schools.com/html/lastpage.htm
diff --git a/doc/examples/part-handler-v2.txt b/doc/examples/part-handler-v2.txt
new file mode 100644
index 00000000..554c34a5
--- /dev/null
+++ b/doc/examples/part-handler-v2.txt
@@ -0,0 +1,38 @@
+#part-handler
+# vi: syntax=python ts=4
+# this is an example of a version 2 part handler.
+# the differences between the initial part-handler version
+# and v2 is:
+# * handle_part receives a 5th argument, 'frequency'
+# frequency will be either 'always' or 'per-instance'
+# * handler_version must be set
+#
+# A handler declaring version 2 will be called on all instance boots, with a
+# different 'frequency' argument.
+
+handler_version = 2
+
+def list_types():
+ # return a list of mime-types that are handled by this module
+ return(["text/plain", "text/go-cubs-go"])
+
+def handle_part(data,ctype,filename,payload,frequency):
+ # data: the cloudinit object
+ # ctype: '__begin__', '__end__', or the specific mime-type of the part
+    # filename: the filename for the part, or a dynamically generated name
+    #           if no filename attribute is present
+ # payload: the content of the part (empty for begin or end)
+ # frequency: the frequency that this cloud-init run is running for
+ # this is either 'per-instance' or 'always'. 'per-instance'
+ # will be invoked only on the first boot. 'always' will
+    #            be called on subsequent boots.
+ if ctype == "__begin__":
+ print "my handler is beginning, frequency=%s" % frequency
+ return
+ if ctype == "__end__":
+ print "my handler is ending, frequency=%s" % frequency
+ return
+
+ print "==== received ctype=%s filename=%s ====" % (ctype,filename)
+ print payload
+ print "==== end ctype=%s filename=%s" % (ctype, filename)
diff --git a/doc/examples/seed/README b/doc/examples/seed/README
index 927768f8..cc15839e 100644
--- a/doc/examples/seed/README
+++ b/doc/examples/seed/README
@@ -2,9 +2,9 @@ This directory is an example of a 'seed' directory.
copying these files inside an instance's
- /var/lib/cloud/data/cache/nocloud
+ /var/lib/cloud/seed/nocloud
or
- /var/lib/cloud/data/cache/nocloud-net
+ /var/lib/cloud/seed/nocloud-net
will cause the 'DataSourceNoCloud' and 'DataSourceNoCloudNet' modules
to enable and read the given data.
diff --git a/doc/userdata.txt b/doc/userdata.txt
index 00c16b25..cc691ae6 100644
--- a/doc/userdata.txt
+++ b/doc/userdata.txt
@@ -36,6 +36,16 @@ finds. However, certain types of user-data are handled specially.
will be passed through this same set of rules. Ie, the content
read from the URL can be gzipped, mime-multi-part, or plain text
+* Include File Once
+ begins with #include-once or Content-Type: text/x-include-once-url
+  This content is an "include" file.  The file contains a list of
+ urls, one per line. Each of the URLs will be read, and their content
+ will be passed through this same set of rules. Ie, the content
+ read from the URL can be gzipped, mime-multi-part, or plain text
+ This file will just be downloaded only once per instance, and its
+ contents cached for subsequent boots. This allows you to pass in
+ one-time-use or expiring URLs.
+
* Cloud Config Data
begins with #cloud-config or Content-Type: text/cloud-config
diff --git a/templates/hosts.tmpl b/templates/hosts.tmpl
index 36db43b5..ae120b02 100644
--- a/templates/hosts.tmpl
+++ b/templates/hosts.tmpl
@@ -13,7 +13,7 @@
# /etc/cloud/cloud.cfg or cloud-config from user-data
#
## The value '$hostname' will be replaced with the local-hostname
-127.0.1.1 $hostname
+127.0.1.1 $fqdn $hostname
127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
diff --git a/tools/write-mime-multipart.py b/tools/write-mime-multipart.py
deleted file mode 100755
index 0a67d4c5..00000000
--- a/tools/write-mime-multipart.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#! /usr/bin/env python
-
-import sys, os
-import email
-import mimetypes
-import re
-
-mimetypes.types_map['.sh'] = 'text/x-shellscript'
-cloud_config_mark_strings = { '#!': 'text/x-shellscript', '#include': 'text/x-include-url',
- '#cloud-config': 'text/cloud-config', '#upstart-job': 'text/upstart-job',
- '#cloud-boothook': 'text/cloud-boothook'
- }
-def write_mime_multipart():
- multipart_msg = email.mime.Multipart.MIMEMultipart()
- for arg in sys.argv[1:]:
- if ',' in arg:
- (msg_file, msg_type) = arg.split(',')
- else:
- msg_file = arg
- msg_type = None
-
- msg_file = os.path.expanduser(msg_file)
- if not os.path.isfile(msg_file):
- print >> sys.stderr, "Can't find file %s" % arg
- exit(1)
-
- if not msg_type: msg_type = get_type_from_file(arg)
- msg = email.mime.base.MIMEBase(*msg_type.split('/'))
- msg.set_payload(open(msg_file, 'r').read())
- multipart_msg.attach(msg)
-
- print multipart_msg.as_string()
-
-def get_type_from_file(filename):
- first_line = open(filename).readline()
- m = re.match('Content-Type: (\w+/\w+)', first_line)
- if m:
- return m.groups[1]
- else:
- for mark_string, mime_type in cloud_config_mark_strings.items():
- if first_line.startswith(mark_string):
- return mime_type
- return mimetypes.guess_type(filename)[0] or 'text/plain'
-
-if __name__ == '__main__':
- if len(sys.argv) == 1 or '-h' in sys.argv or '--help' in sys.argv:
- print "Usage: %s file1,application/cloud-config file2.sh ..." % os.path.basename(sys.argv[0])
- print "MIME Multipart message will be written to STDOUT"
- exit(0)
- write_mime_multipart()
-
diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf
index 12f21320..70c7dfea 100644
--- a/upstart/cloud-init-nonet.conf
+++ b/upstart/cloud-init-nonet.conf
@@ -1,19 +1,25 @@
# cloud-init-no-net
# the purpose of this job is
-# * to block running of cloud-init until a non 'lo' interface is up
-# * timeout if one doens't come up in a reasonable amount of time
+# * to block running of cloud-init until all network interfaces
+# configured in /etc/network/interfaces are up
+# * timeout if they all do not come up in a reasonable amount of time
start on mounted MOUNTPOINT=/ and stopped cloud-init-local
-stop on net-device-up IFACE!=lo
+stop on static-network-up
task
console output
script
- # if a non 'lo' interface is up, exit immediately
- grep -qv '^lo' /var/run/network/ifstate >/dev/null 2>&1 && exit 0
+ # /run/network/static-network-up-emitted is written by
+    # upstart (via /etc/network/if-up.d/upstart).  Its presence would
+ # indicate that static-network-up has already fired.
+ EMITTED="/run/network/static-network-up-emitted"
+ [ -e "$EMITTED" -o -e "/var/$EMITTED" ] && exit 0
[ -f /var/lib/cloud/instance/obj.pkl ] && exit 0
+ start networking
+
short=10; long=60;
sleep ${short}
echo $UPSTART_JOB "waiting ${long} seconds for a network device."