author    Scott Moser <smoser@ubuntu.com>  2010-02-03 18:09:48 -0500
committer Scott Moser <smoser@ubuntu.com>  2010-02-03 18:09:48 -0500
commit    aed1c1e9fda1e78d19305e90f554d0dcb5388cd7 (patch)
tree      7f0dd15537201ea49217ba6ec3040ede9b0426f4 /cloudinit
parent    ff522791cb2b58e83da4efea1be76757f05a1f1f (diff)
globally remove ec2init and rename to cloudinit
Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/CloudConfig.py      407
-rw-r--r--  cloudinit/DataSource.py        30
-rw-r--r--  cloudinit/DataSourceEc2.py    132
-rw-r--r--  cloudinit/UserDataHandler.py  130
-rw-r--r--  cloudinit/__init__.py         313
-rw-r--r--  cloudinit/boto_utils.py       106
-rw-r--r--  cloudinit/execute.py           13
-rw-r--r--  cloudinit/util.py              79
8 files changed, 1210 insertions, 0 deletions
diff --git a/cloudinit/CloudConfig.py b/cloudinit/CloudConfig.py
new file mode 100644
index 00000000..4a498866
--- /dev/null
+++ b/cloudinit/CloudConfig.py
@@ -0,0 +1,407 @@
+#
+# Common code for the EC2 configuration files in Ubuntu
+# Copyright (C) 2008-2010 Canonical Ltd.
+#
+# Author: Chuck Short <chuck.short@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import yaml
+import re
+import cloudinit
+import cloudinit.util as util
+import subprocess
+import os
+import glob
+import sys
+
+per_instance="once-per-instance"
+
+class CloudConfig():
+ cfgfile = None
+ handlers = { }
+ cfg = None
+
+ def __init__(self,cfgfile):
+ self.cfgfile = cfgfile
+ self.cloud = cloudinit.EC2Init()
+ self.cfg = self.get_config_obj(cfgfile)
+ self.cloud.get_data_source()
+ self.add_handler('apt-update-upgrade', self.h_apt_update_upgrade)
+ self.add_handler('config-ssh')
+ self.add_handler('disable-ec2-metadata',
+ self.h_disable_ec2_metadata, "always")
+ self.add_handler('config-mounts')
+
+ def get_config_obj(self,cfgfile):
+ f=file(cfgfile)
+ cfg=yaml.load(f.read())
+ f.close()
+ if cfg is None: cfg = { }
+ return(util.mergedict(cfg,self.cloud.cfg))
+
+ def convert_old_config(self):
+ # support reading the old ConfigObj format file and turning it
+ # into a yaml string
+ try:
+ f = file(self.cfgfile)
+ cfgstr = f.read().replace('=',': ')
+ f.close()
+ return(cfgstr)
+ except:
+ return("")
+
+ def add_handler(self, name, handler=None, freq=None):
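+ # e.g. add_handler('config-ssh') resolves the handler to self.h_config_ssh
+ # and defaults freq to once-per-instance (illustrative of the lookup below)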
+ if handler is None:
+ try:
+ handler=getattr(self,'h_%s' % name.replace('-','_'))
+ except:
+ raise Exception("Unknown hander for name %s" %name)
+ if freq is None:
+ freq = per_instance
+
+ self.handlers[name]= { 'handler': handler, 'freq': freq }
+
+ def get_handler_info(self, name):
+ return(self.handlers[name]['handler'], self.handlers[name]['freq'])
+
+ def parse_ssh_keys(self):
+ disableRoot = self.cfg['disable_root']
+ if disableRoot == 'true':
+ value = 'disabled_root'
+ return value
+ else:
+ ec2Key = self.cfg['ec2_fetch_key']
+ if ec2Key != 'none':
+ value = 'default_key'
+ return value
+ else:
+ return ec2Key
+
+ def handle(self, name, args):
+ handler = None
+ freq = None
+ try:
+ (handler, freq) = self.get_handler_info(name)
+ except:
+ raise Exception("Unknown config key %s\n" % name)
+
+ self.cloud.sem_and_run(name, freq, handler, [ name, args ])
+
+ def h_apt_update_upgrade(self,name,args):
+ update = util.get_cfg_option_bool(self.cfg, 'apt_update', False)
+ upgrade = util.get_cfg_option_bool(self.cfg, 'apt_upgrade', False)
+
+ if not util.get_cfg_option_bool(self.cfg, \
+ 'apt_preserve_sources_list', False):
+ if self.cfg.has_key("apt_mirror"):
+ mirror = self.cfg["apt_mirror"]
+ else:
+ mirror = self.cloud.get_mirror()
+ generate_sources_list(mirror)
+
+ # process 'apt_sources'
+ if self.cfg.has_key('apt_sources'):
+ errors = add_sources(self.cfg['apt_sources'])
+ for e in errors:
+ warn("Source Error: %s\n" % ':'.join(e))
+
+ pkglist = []
+ if 'packages' in self.cfg:
+ if isinstance(self.cfg['packages'],list):
+ pkglist = self.cfg['packages']
+ else: pkglist.append(self.cfg['packages'])
+
+ if update or upgrade or pkglist:
+ #retcode = subprocess.call(list)
+ subprocess.Popen(['apt-get', 'update']).communicate()
+
+ e=os.environ.copy()
+ e['DEBIAN_FRONTEND']='noninteractive'
+
+ if upgrade:
+ subprocess.Popen(['apt-get', 'upgrade', '--assume-yes'], env=e).communicate()
+
+ if pkglist:
+ cmd=['apt-get', 'install', '--assume-yes']
+ cmd.extend(pkglist)
+ subprocess.Popen(cmd, env=e).communicate()
+
+ return(True)
+
+ def h_disable_ec2_metadata(self,name,args):
+ if util.get_cfg_option_bool(self.cfg, "disable_ec2_metadata", False):
+ fwall="route add -host 169.254.169.254 reject"
+ subprocess.call(fwall.split(' '))
+
+ def h_config_ssh(self,name,args):
+ # remove the static keys from the pristine image
+ for f in glob.glob("/etc/ssh/ssh_host_*_key*"):
+ try: os.unlink(f)
+ except: pass
+
+ if self.cfg.has_key("ssh_keys"):
+ # if there are keys in cloud-config, use them
+ key2file = {
+ "rsa_private" : ("/etc/ssh/ssh_host_rsa_key", 0600),
+ "rsa_public" : ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
+ "dsa_private" : ("/etc/ssh/ssh_host_dsa_key", 0600),
+ "dsa_public" : ("/etc/ssh/ssh_host_dsa_key.pub", 0644)
+ }
+
+ for key,val in self.cfg["ssh_keys"].items():
+ if key2file.has_key(key):
+ util.write_file(key2file[key][0],val,key2file[key][1])
+ else:
+ # if not, generate them
+ genkeys ='ssh-keygen -f /etc/ssh/ssh_host_rsa_key -t rsa -N ""; '
+ genkeys+='ssh-keygen -f /etc/ssh/ssh_host_dsa_key -t dsa -N ""; '
+ subprocess.call(('sh', '-c', "{ %s } </dev/null" % (genkeys)))
+
+ try:
+ user = util.get_cfg_option_str(self.cfg,'user')
+ disable_root = util.get_cfg_option_bool(self.cfg, "disable_root", True)
+ keys = self.cloud.get_public_ssh_keys()
+
+ if self.cfg.has_key("ssh_authorized_keys"):
+ cfgkeys = self.cfg["ssh_authorized_keys"]
+ keys.extend(cfgkeys)
+
+ apply_credentials(keys,user,disable_root)
+ except:
+ warn("applying credentials failed!\n")
+
+ send_ssh_keys_to_console()
+
+ def h_ec2_ebs_mounts(self,name,args):
+ print "Warning, not doing anything for config %s" % name
+
+ def h_config_setup_raid(self,name,args):
+ print "Warning, not doing anything for config %s" % name
+
+ def h_config_runurl(self,name,args):
+ print "Warning, not doing anything for config %s" % name
+
+ def h_config_mounts(self,name,args):
+ # handle 'mounts'
+
+ # these are our default set of mounts
+ defmnts = [ [ "ephemeral0", "/mnt", "auto", "defaults", "0", "0" ],
+ [ "swap", "none", "swap", "sw", "0", "0" ] ]
+
+ # fs_spec, fs_file, fs_vfstype, fs_mntops, fs_freq, fs_passno
+ defvals = [ None, None, "auto", "defaults", "0", "0" ]
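+ # an illustrative cloud-config fragment this handles (values assumed):
+ #   mounts:
+ #    - [ ephemeral0, /mnt, auto, "defaults,noexec" ]
+ #    - [ sdc, /media/data ]
+ # short entries are padded out with the defaults in defvals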
+
+ cfgmnt = [ ]
+ if self.cfg.has_key("mounts"):
+ cfgmnt = self.cfg["mounts"]
+
+ for i in range(len(cfgmnt)):
+ # skip something that wasn't a list
+ if not isinstance(cfgmnt[i],list): continue
+
+ # workaround: allow the user to specify 'ephemeral'
+ # rather than the more EC2-correct 'ephemeral0'
+ if cfgmnt[i][0] == "ephemeral":
+ cfgmnt[i][0] = "ephemeral0"
+
+ newname = cfgmnt[i][0]
+ if not newname.startswith("/"):
+ newname = self.cloud.device_name_to_device(cfgmnt[i][0])
+ if newname is not None:
+ cfgmnt[i][0] = newname
+ else:
+ # there is no good way of differentiating between
+ # a name that *couldn't* exist in the metadata service
+ # and one that merely didn't. in order to allow the
+ # user to specify 'sda3' rather than '/dev/sda3',
+ # go through some hoops
+ ok = False
+ for f in [ "/", "sd", "hd", "vd", "xvd" ]:
+ if cfgmnt[i][0].startswith(f):
+ ok = True
+ break
+ if not ok:
+ cfgmnt[i][1] = None
+
+ for i in range(len(cfgmnt)):
+ # fill in missing fields with values from defvals
+ for j in range(len(defvals)):
+ if len(cfgmnt[i]) <= j:
+ cfgmnt[i].append(defvals[j])
+ elif cfgmnt[i][j] is None:
+ cfgmnt[i][j] = defvals[j]
+
+ if not cfgmnt[i][0].startswith("/"):
+ cfgmnt[i][0]="/dev/%s" % cfgmnt[i][0]
+
+ # if the second entry in the list is 'None' this
+ # clears all previous entries of that same 'fs_spec'
+ # (fs_spec is the first field in /etc/fstab, ie, that device)
+ if cfgmnt[i][1] is None:
+ for j in range(i):
+ if cfgmnt[j][0] == cfgmnt[i][0]:
+ cfgmnt[j][1] = None
+
+
+ # for each of the "default" mounts, add them only if no other
+ # entry has the same device name
+ for defmnt in defmnts:
+ devname = self.cloud.device_name_to_device(defmnt[0])
+ if devname is None: continue
+ if not devname.startswith("/"):
+ defmnt[0] = "/dev/%s" % devname
+
+ cfgmnt_has = False
+ for cfgm in cfgmnt:
+ if cfgm[0] == defmnt[0]:
+ cfgmnt_has = True
+ break
+
+ if cfgmnt_has: continue
+ cfgmnt.append(defmnt)
+
+
+ # now, each entry in the cfgmnt list has all fstab values
+ # if the second field is None (not the string, the value) we skip it
+ actlist = filter(lambda x: x[1] is not None, cfgmnt)
+
+ if len(actlist) == 0: return
+
+ needswap = False
+ dirs = [ ]
+
+ fstab=file("/etc/fstab","ab")
+ fstab.write("# cloud-config mounts\n")
+ for line in actlist:
+ fstab.write('\t'.join(line) + "\n")
+ if line[2] == "swap": needswap = True
+ if line[1].startswith("/"): dirs.append(line[1])
+ fstab.close()
+
+ if needswap:
+ try: util.subp(("swapon", "-a"))
+ except: warn("Failed to enable swap")
+
+ for d in dirs:
+ if os.path.exists(d): continue
+ try: os.makedirs(d)
+ except: warn("Failed to make '%s' config-mount\n",d)
+
+ try: util.subp(("mount","-a"))
+ except: pass
+
+
+
+
+def apply_credentials(keys, user, disable_root):
+ keys = set(keys)
+ if user:
+ setup_user_keys(keys, user, '')
+
+ if disable_root:
+ key_prefix = 'command="echo \'Please login as the %s user rather than the root user.\';echo;sleep 10" ' % user
+ else:
+ key_prefix = ''
+
+ setup_user_keys(keys, 'root', key_prefix)
+
+def setup_user_keys(keys, user, key_prefix):
+ import pwd
+ saved_umask = os.umask(077)
+
+ pwent = pwd.getpwnam(user)
+
+ ssh_dir = '%s/.ssh' % pwent.pw_dir
+ if not os.path.exists(ssh_dir):
+ os.mkdir(ssh_dir)
+ os.chown(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+
+ authorized_keys = '%s/.ssh/authorized_keys' % pwent.pw_dir
+ fp = open(authorized_keys, 'a')
+ fp.write(''.join(['%s%s\n' % (key_prefix, key) for key in keys]))
+ fp.close()
+
+ os.chown(authorized_keys, pwent.pw_uid, pwent.pw_gid)
+
+ os.umask(saved_umask)
+
+def send_ssh_keys_to_console():
+ send_keys_sh = """
+ {
+ echo
+ echo "#############################################################"
+ echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----"
+ ssh-keygen -l -f /etc/ssh/ssh_host_rsa_key.pub
+ ssh-keygen -l -f /etc/ssh/ssh_host_dsa_key.pub
+ echo "-----END SSH HOST KEY FINGERPRINTS-----"
+ echo "#############################################################"
+ } | logger -p user.info -s -t "ec2"
+ """
+ subprocess.call(('sh', '-c', send_keys_sh))
+
+
+def warn(msg):
+ sys.stderr.write("Warning: %s\n" % msg)
+
+# srclist is a list of dictionaries,
+# each entry must have: 'source'
+# may have: key, ( keyid and keyserver)
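+# an illustrative srclist (shape assumed from the checks below):
+#  [ { 'source': 'ppa:smoser/ppa' },
+#    { 'source': 'deb http://archive.example.com karmic main',
+#      'keyid': 'F430BBA5', 'filename': 'example.list' } ]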
+def add_sources(srclist):
+ elst = []
+
+ for ent in srclist:
+ if not ent.has_key('source'):
+ elst.append([ "", "missing source" ])
+ continue
+
+ source=ent['source']
+ if source.startswith("ppa:"):
+ try: util.subp(["add-apt-repository",source])
+ except:
+ elst.append([source, "add-apt-repository failed"])
+ continue
+
+ if not ent.has_key('filename'):
+ ent['filename']='cloud_config_sources.list'
+
+ if not ent['filename'].startswith("/"):
+ ent['filename'] = "%s/%s" % \
+ ("/etc/apt/sources.list.d/", ent['filename'])
+
+ if ( ent.has_key('keyid') and not ent.has_key('key') ):
+ ks = "keyserver.ubuntu.com"
+ if ent.has_key('keyserver'): ks = ent['keyserver']
+ try:
+ ent['key'] = util.getkeybyid(ent['keyid'], ks)
+ except:
+ elst.append([source,"failed to get key from %s" % ks])
+ continue
+
+ if ent.has_key('key'):
+ try: util.subp(('apt-key', 'add', '-'), ent['key'])
+ except:
+ elst.append([source, "failed add key"])
+
+ try: util.write_file(ent['filename'], source + "\n")
+ except:
+ elst.append([source, "failed write to file %s" % ent['filename']])
+
+ return(elst)
+
+
+def generate_sources_list(mirror):
+ stdout, stderr = subprocess.Popen(['lsb_release', '-cs'], stdout=subprocess.PIPE).communicate()
+ codename = stdout.strip()
+
+ util.render_to_file('sources.list', '/etc/apt/sources.list', \
+ { 'mirror' : mirror, 'codename' : codename })
diff --git a/cloudinit/DataSource.py b/cloudinit/DataSource.py
new file mode 100644
index 00000000..9af2eda4
--- /dev/null
+++ b/cloudinit/DataSource.py
@@ -0,0 +1,30 @@
+
+import cloudinit
+import UserDataHandler as ud
+
+class DataSource:
+ userdata = None
+ metadata = None
+ userdata_raw = None
+
+ def __init__(self):
+ pass
+
+ def get_userdata(self):
+ if self.userdata is None:
+ self.userdata = ud.preprocess_userdata(self.userdata_raw)
+ return self.userdata
+
+ def get_userdata_raw(self):
+ return(self.userdata_raw)
+
+ def get_public_ssh_keys(self):
+ return([])
+
+ def device_name_to_device(self, name):
+ # translate a 'name' to a device
+ # the primary function at this point is on ec2
+ # to consult metadata service, that has
+ # ephemeral0: sdb
+ # and return 'sdb' for input 'ephemeral0'
+ return(None)
diff --git a/cloudinit/DataSourceEc2.py b/cloudinit/DataSourceEc2.py
new file mode 100644
index 00000000..a4906af2
--- /dev/null
+++ b/cloudinit/DataSourceEc2.py
@@ -0,0 +1,132 @@
+import DataSource
+
+import cloudinit
+import socket
+import urllib2
+import time
+import boto_utils
+
+class DataSourceEc2(DataSource.DataSource):
+ api_ver = '2009-04-04'
+ cachedir = cloudinit.cachedir + '/ec2'
+
+ location_locale_map = {
+ 'us' : 'en_US.UTF-8',
+ 'eu' : 'en_GB.UTF-8',
+ 'default' : 'en_US.UTF-8',
+ }
+
+ def __init__(self):
+ pass
+
+ def get_data(self):
+ try:
+ udf = open(self.cachedir + "/user-data.raw")
+ self.userdata_raw = udf.read()
+ udf.close()
+
+ mdf = open(self.cachedir + "/meta-data.raw")
+ data = mdf.read()
+ self.metadata = eval(data)
+ mdf.close()
+
+ return True
+ except:
+ pass
+
+ try:
+ if not self.wait_for_metadata_service():
+ return False
+ self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver)
+ self.metadata = boto_utils.get_instance_metadata(self.api_ver)
+ return True
+ except Exception as e:
+ print e
+ return False
+
+ def get_instance_id(self):
+ return(self.metadata['instance-id'])
+
+ def get_availability_zone(self):
+ return(self.metadata['placement']['availability-zone'])
+
+ def get_local_mirror(self):
+ return(self.get_mirror_from_availability_zone())
+
+ def get_locale(self):
+ az = self.metadata['placement']['availability-zone']
+ if self.location_locale_map.has_key(az[0:2]):
+ return(self.location_locale_map[az[0:2]])
+ else:
+ return(self.location_locale_map["default"])
+
+ def get_hostname(self):
+ toks = self.metadata['local-hostname'].split('.')
+ # if there is an ipv4 address in 'local-hostname', then
+ # make up a hostname (LP: #475354)
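+ # e.g. a 'local-hostname' of '10.1.2.3' yields hostname 'ip-10-1-2-3'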
+ if len(toks) == 4:
+ try:
+ r = filter(lambda x: 0 < int(x) < 256, toks)
+ if len(r) == 4:
+ return("ip-%s" % '-'.join(r))
+ except: pass
+ return toks[0]
+
+ def get_mirror_from_availability_zone(self, availability_zone = None):
+ # availability zone is like 'us-west-1b' or 'eu-west-1a'
+ if availability_zone is None:
+ availability_zone = self.get_availability_zone()
+
+ try:
+ host="%s.ec2.archive.ubuntu.com" % availability_zone[:-1]
+ socket.getaddrinfo(host, None, 0, socket.SOCK_STREAM)
+ return 'http://%s/ubuntu/' % host
+ except:
+ return 'http://archive.ubuntu.com/ubuntu/'
+
+ def wait_for_metadata_service(self, sleeps = 10):
+ sleeptime = 1
+ for x in range(sleeps):
+ s = socket.socket()
+ try:
+ address = '169.254.169.254'
+ port = 80
+ s.connect((address,port))
+ s.close()
+ return True
+ except socket.error, e:
+ print "sleeping %s" % sleeptime
+ time.sleep(sleeptime)
+ #sleeptime = sleeptime * 2
+ return False
+
+ def get_public_ssh_keys(self):
+ keys = []
+ if not self.metadata.has_key('public-keys'): return([])
+ for keyname, klist in self.metadata['public-keys'].items():
+ # lp:506332 uec metadata service responds with
+ # data that makes boto populate a string for 'klist' rather
+ # than a list.
+ if isinstance(klist,str):
+ klist = [ klist ]
+ for pkey in klist:
+ # there is an empty string at the end of the keylist, trim it
+ if pkey:
+ keys.append(pkey)
+
+ return(keys)
+
+ def device_name_to_device(self, name):
+ # consult metadata service, that has
+ # ephemeral0: sdb
+ # and return 'sdb' for input 'ephemeral0'
+ if not self.metadata.has_key('block-device-mapping'):
+ return(None)
+
+ for entname, device in self.metadata['block-device-mapping'].items():
+ if entname == name:
+ return(device)
+ # LP: #513842 mapping in Euca has 'ephemeral' not 'ephemeral0'
+ if entname == "ephemeral" and name == "ephemeral0":
+ return(device)
+ return None
diff --git a/cloudinit/UserDataHandler.py b/cloudinit/UserDataHandler.py
new file mode 100644
index 00000000..56feb0ff
--- /dev/null
+++ b/cloudinit/UserDataHandler.py
@@ -0,0 +1,130 @@
+import email
+
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from email.mime.base import MIMEBase
+from email import encoders
+
+
+starts_with_mappings={
+ '#include' : 'text/x-include-url',
+ '#!' : 'text/x-shellscript',
+ '#cloud-config' : 'text/cloud-config',
+ '#upstart-job' : 'text/upstart-job',
+ '#part-handler' : 'text/part-handler'
+}
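+# e.g. user-data beginning with '#!' is typed as 'text/x-shellscript'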
+
+# if 'str' is compressed return decompressed otherwise return it
+def decomp_str(str):
+ import StringIO
+ import gzip
+ try:
+ uncomp = gzip.GzipFile(None,"rb",1,StringIO.StringIO(str)).read()
+ return(uncomp)
+ except:
+ return(str)
+
+def do_include(str,parts):
+ import urllib
+ # is just a list of urls, one per line
+ for line in str.splitlines():
+ if line == "#include": continue
+ if line.startswith("#"): continue
+ content = urllib.urlopen(line).read()
+ process_includes(email.message_from_string(decomp_str(content)),parts)
+
+def process_includes(msg,parts):
+ # parts is a dictionary of arrays
+ # parts['content']
+ # parts['names']
+ # parts['types']
+ for t in ( 'content', 'names', 'types' ):
+ if not parts.has_key(t):
+ parts[t]=[ ]
+ for part in msg.walk():
+ # multipart/* are just containers
+ if part.get_content_maintype() == 'multipart':
+ continue
+
+ payload = part.get_payload()
+
+ ctype = None
+ for str, gtype in starts_with_mappings.items():
+ if payload.startswith(str):
+ ctype = gtype
+ break
+
+ if ctype is None:
+ ctype = part.get_content_type()
+
+ if ctype == 'text/x-include-url':
+ do_include(payload,parts)
+ continue
+
+ filename = part.get_filename()
+ if not filename:
+ filename = 'part-%03d' % len(parts['content'])
+
+ parts['content'].append(payload)
+ parts['types'].append(ctype)
+ parts['names'].append(filename)
+
+def parts2mime(parts):
+ outer = MIMEMultipart()
+
+ i = 0
+ while i < len(parts['content']):
+ if parts['types'][i] is None:
+ # No guess could be made, or the file is encoded (compressed), so
+ # use a generic bag-of-bits type.
+ ctype = 'application/octet-stream'
+ else: ctype = parts['types'][i]
+ maintype, subtype = ctype.split('/', 1)
+ if maintype == 'text':
+ msg = MIMEText(parts['content'][i], _subtype=subtype)
+ else:
+ msg = MIMEBase(maintype, subtype)
+ msg.set_payload(parts['content'][i])
+ # Encode the payload using Base64
+ encoders.encode_base64(msg)
+ # Set the filename parameter
+ msg.add_header('Content-Disposition', 'attachment',
+ filename=parts['names'][i])
+ outer.attach(msg)
+
+ i=i+1
+ return(outer.as_string())
+
+# this is heavily wasteful: it reads through the userdata string several times
+def preprocess_userdata(data):
+ parts = { }
+ process_includes(email.message_from_string(decomp_str(data)),parts)
+ return(parts2mime(parts))
+
+# callbacks is a dictionary with:
+# { 'content-type': handler(data,content_type,filename,payload) }
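+# illustrative use (handler name assumed):
+#   def my_part(data, ctype, filename, payload): ...
+#   walk_userdata(blob, { 'text/x-shellscript': my_part })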
+def walk_userdata(str, callbacks, data = None):
+ partnum = 0
+ for part in email.message_from_string(str).walk():
+ # multipart/* are just containers
+ if part.get_content_maintype() == 'multipart':
+ continue
+
+ ctype = part.get_content_type()
+ if ctype is None:
+ ctype = 'application/octet-stream'
+
+ filename = part.get_filename()
+ if not filename:
+ filename = 'part-%03d' % partnum
+
+ if callbacks.has_key(ctype):
+ callbacks[ctype](data,ctype,filename,part.get_payload())
+
+ partnum = partnum+1
+
+if __name__ == "__main__":
+ import sys
+ data = decomp_str(file(sys.argv[1]).read())
+ parts = { }
+ process_includes(email.message_from_string(data),parts)
+ print "#found %s parts" % len(parts['content'])
+ print parts2mime(parts)
diff --git a/cloudinit/__init__.py b/cloudinit/__init__.py
new file mode 100644
index 00000000..76aa34f0
--- /dev/null
+++ b/cloudinit/__init__.py
@@ -0,0 +1,313 @@
+#
+# Common code for the EC2 initialisation scripts in Ubuntu
+# Copyright (C) 2008-2009 Canonical Ltd
+#
+# Author: Soren Hansen <soren@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+
+import cPickle
+import sys
+import os.path
+import errno
+import pwd
+import subprocess
+import yaml
+
+datadir = '/var/lib/cloud/data'
+semdir = '/var/lib/cloud/sem'
+pluginsdir = datadir + '/plugins'
+cachedir = datadir + '/cache'
+userdata_raw = datadir + '/user-data.txt'
+userdata = datadir + '/user-data.txt.i'
+user_scripts_dir = datadir + "/scripts"
+cloud_config = datadir + '/cloud-config.txt'
+data_source_cache = cachedir + '/obj.pkl'
+system_config = '/etc/cloud/cloud.cfg'
+cfg_env_name = "CLOUD_CFG"
+
+import DataSourceEc2
+import UserDataHandler
+import util
+
+class EC2Init:
+ datasource_map = {
+ "ec2" : DataSourceEc2.DataSourceEc2,
+ }
+ datasource = None
+ auto_order = [ 'ec2' ]
+
+ cfg = None
+ part_handlers = { }
+ old_conffile = '/etc/ec2-init/ec2-config.cfg'
+
+ def __init__(self):
+ self.part_handlers = {
+ 'text/x-shellscript' : self.handle_user_script,
+ 'text/cloud-config' : self.handle_cloud_config,
+ 'text/upstart-job' : self.handle_upstart_job,
+ 'text/part-handler' : self.handle_handler
+ }
+ self.cfg=self.read_cfg()
+
+ def read_cfg(self):
+ if self.cfg:
+ return(self.cfg)
+
+ conf = { }
+ try:
+ stream = file(system_config)
+ conf = yaml.load(stream)
+ stream.close()
+ except:
+ pass
+
+ if conf is None: conf = { }
+
+ # support reading the old ConfigObj format file and merging
+ # it into the yaml dictionary
+ try:
+ from configobj import ConfigObj
+ oldcfg = ConfigObj(self.old_conffile)
+ if oldcfg is None: oldcfg = { }
+ conf = util.mergedict(conf,oldcfg)
+ except:
+ pass
+
+ if not conf.has_key("cloud_type"):
+ conf["cloud_type"]=None
+
+ return(conf)
+
+ def restore_from_cache(self):
+ try:
+ f=open(data_source_cache, "rb")
+ data = cPickle.load(f)
+ self.datasource = data
+ return True
+ except:
+ return False
+
+ def write_to_cache(self):
+ try:
+ f=open(data_source_cache, "wb")
+ cPickle.dump(self.datasource,f)
+ f.close()
+ return True
+ except:
+ return False
+
+ def get_cloud_type(self):
+ pass
+
+ def get_data_source(self):
+ if self.datasource is not None: return True
+
+ if self.restore_from_cache():
+ return True
+
+ dslist=[ ]
+ cfglist=self.cfg['cloud_type']
+ if cfglist == "auto":
+ dslist = self.auto_order
+ elif cfglist:
+ for ds in cfglist.split(','):
+ dslist.append(ds.strip().lower())
+
+ for ds in dslist:
+ if ds not in self.datasource_map: continue
+ try:
+ s = self.datasource_map[ds]()
+ if s.get_data():
+ self.datasource = s
+ self.datasource_name = ds
+ return True
+ except Exception as e:
+ pass
+ raise Exception("Could not find data source")
+
+ def get_userdata(self):
+ return(self.datasource.get_userdata())
+
+ def update_cache(self):
+ self.write_to_cache()
+ self.store_userdata()
+
+ def store_userdata(self):
+ util.write_file(userdata_raw, self.datasource.get_userdata_raw(), 0600)
+ util.write_file(userdata, self.datasource.get_userdata(), 0600)
+
+ def initctl_emit(self):
+ subprocess.Popen(['initctl', 'emit', 'cloud-config',
+ '%s=%s' % (cfg_env_name,cloud_config)]).communicate()
+
+ def sem_getpath(self,name,freq):
+ freqtok = freq
+ if freq == 'once-per-instance':
+ freqtok = self.datasource.get_instance_id()
+
+ return("%s/%s.%s" % (semdir,name,freqtok))
+
+ def sem_has_run(self,name,freq):
+ if freq is "always": return False
+ semfile = self.sem_getpath(name,freq)
+ if os.path.exists(semfile):
+ return True
+ return False
+
+ def sem_acquire(self,name,freq):
+ from time import time
+ semfile = self.sem_getpath(name,freq)
+
+ try:
+ os.makedirs(os.path.dirname(semfile))
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise e
+
+ if os.path.exists(semfile) and freq != "always":
+ return False
+
+ # race condition: another process could create the file
+ # between the check above and the write below
+ try:
+ f = open(semfile,"w")
+ f.write("%s\n" % str(time()))
+ f.close()
+ except:
+ return(False)
+ return(True)
+
+ def sem_clear(self,name,freq):
+ semfile = self.sem_getpath(name,freq)
+ try:
+ os.unlink(semfile)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ return False
+
+ return True
+
+ # acquire lock on 'name' for given 'freq'
+ # if that does not exist, then call 'func' with given 'args'
+ # if 'clear_on_fail' is True and func throws an exception
+ # then remove the lock (so it would run again)
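+ # e.g. self.sem_and_run("config-ssh", "once-per-instance", func, [name, args])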
+ def sem_and_run(self,semname,freq,func,args=[],clear_on_fail=False):
+ if self.sem_has_run(semname,freq): return
+ try:
+ if not self.sem_acquire(semname,freq):
+ raise Exception("Failed to acquire lock on %s\n" % semname)
+
+ func(*args)
+ except:
+ if clear_on_fail:
+ self.sem_clear(semname,freq)
+ raise
+
+ def consume_userdata(self):
+ self.get_userdata()
+ data = self
+ # give callbacks opportunity to initialize
+ for ctype, func in self.part_handlers.items():
+ func(data, "__begin__",None,None)
+ UserDataHandler.walk_userdata(self.get_userdata(),
+ self.part_handlers, data)
+
+ # give callbacks opportunity to finalize
+ for ctype, func in self.part_handlers.items():
+ func(data,"__end__",None,None)
+
+ def handle_handler(self,data,ctype,filename,payload):
+ if ctype == "__end__": return
+ if ctype == "__begin__" :
+ self.handlercount = 0
+ return
+
+ # add the path to the plugins dir to the top of our list for import
+ if self.handlercount == 0:
+ sys.path.insert(0,pluginsdir)
+
+ self.handlercount=self.handlercount+1
+
+ # write content to pluginsdir
+ modname = 'part-handler-%03d' % self.handlercount
+ modfname = modname + ".py"
+ util.write_file("%s/%s" % (pluginsdir,modfname), payload, 0600)
+
+ try:
+ mod = __import__(modname)
+ lister = getattr(mod, "list_types")
+ handler = getattr(mod, "handle_part")
+ except:
+ import traceback
+ traceback.print_exc(file=sys.stderr)
+ return
+
+ # - call it with '__begin__'
+ handler(data, "__begin__", None, None)
+
+ # - add it to self.part_handlers
+ for mtype in lister():
+ self.part_handlers[mtype]=handler
+
+ def handle_user_script(self,data,ctype,filename,payload):
+ if ctype == "__end__": return
+ if ctype == "__begin__":
+ # maybe delete existing things here
+ return
+
+ filename=filename.replace(os.sep,'_')
+ util.write_file("%s/%s" % (user_scripts_dir,filename), payload, 0700)
+
+ def handle_upstart_job(self,data,ctype,filename,payload):
+ if ctype == "__end__" or ctype == "__begin__": return
+ if not filename.endswith(".conf"):
+ filename=filename+".conf"
+
+ util.write_file("%s/%s" % ("/etc/init",filename), payload, 0644)
+
+ def handle_cloud_config(self,data,ctype,filename,payload):
+ if ctype == "__begin__":
+ self.cloud_config_str=""
+ return
+ if ctype == "__end__":
+ util.write_file(cloud_config, self.cloud_config_str, 0600)
+
+ ## this could merge the cloud config with the system config
+ ## for now, not doing this as it seems somewhat circular
+ ## as CloudConfig does that also, merging it with this cfg
+ ##
+ # ccfg = yaml.load(self.cloud_config_str)
+ # if ccfg is None: ccfg = { }
+ # self.cfg = util.mergedict(ccfg, self.cfg)
+
+ return
+
+ self.cloud_config_str+="\n#%s\n%s" % (filename,payload)
+
+ def get_public_ssh_keys(self):
+ return(self.datasource.get_public_ssh_keys())
+
+ def get_locale(self):
+ return(self.datasource.get_locale())
+
+ def get_mirror(self):
+ return(self.datasource.get_local_mirror())
+
+ def get_hostname(self):
+ return(self.datasource.get_hostname())
+
+ def device_name_to_device(self,name):
+ return(self.datasource.device_name_to_device(name))
diff --git a/cloudinit/boto_utils.py b/cloudinit/boto_utils.py
new file mode 100644
index 00000000..2f9de95f
--- /dev/null
+++ b/cloudinit/boto_utils.py
@@ -0,0 +1,106 @@
+# The contents of this file are taken from boto 1.9b's boto/utils.py
+#
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#
+# Parts of this code were copied or derived from sample code supplied by AWS.
+# The following notice applies to that code.
+#
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices. Your use of this software
+# code is at your own risk and you waive any claim against Amazon
+# Digital Services, Inc. or its affiliates with respect to your use of
+# this software code. (c) 2006 Amazon Digital Services, Inc. or its
+# affiliates.
+import urllib2
+import sys
+import time
+
+def retry_url(url, retry_on_404=True):
+ for i in range(0, 10):
+ try:
+ req = urllib2.Request(url)
+ resp = urllib2.urlopen(req)
+ return resp.read()
+ except urllib2.HTTPError, e:
+ # in 2.6 you use getcode(), in 2.5 and earlier you use code
+ if hasattr(e, 'getcode'):
+ code = e.getcode()
+ else:
+ code = e.code
+ if code == 404 and not retry_on_404:
+ return ''
+ except:
+ #boto.log.exception('Caught exception reading instance data')
+ sys.stderr.write('Caught exception reading instance data\n')
+ time.sleep(2**i)
+ #boto.log.error('Unable to read instance data, giving up')
+ sys.stderr.write('Unable to read instance data, giving up\n')
+ return ''
+
+def get_instance_metadata(version='latest'):
+ """
+ Returns the instance metadata as a nested Python dictionary.
+ Simple values (e.g. local_hostname, hostname, etc.) will be
+ stored as string values. Values such as ancestor-ami-ids will
+ be stored in the dict as a list of string values. More complex
+ fields such as public-keys will be stored as nested dicts.
+ """
+ url = 'http://169.254.169.254/%s/meta-data/' % version
+ return _get_instance_metadata(url)
+
+def get_instance_userdata(version='latest', sep=None):
+ url = 'http://169.254.169.254/%s/user-data' % version
+ user_data = retry_url(url, retry_on_404=False)
+ if user_data:
+ if sep:
+ l = user_data.split(sep)
+ user_data = {}
+ for nvpair in l:
+ t = nvpair.split('=')
+ user_data[t[0].strip()] = t[1].strip()
+ return user_data
+
+
+def _get_instance_metadata(url):
+ d = {}
+ data = retry_url(url)
+ if data:
+ fields = data.split('\n')
+ for field in fields:
+ if field.endswith('/'):
+ d[field[0:-1]] = _get_instance_metadata(url + field)
+ else:
+ p = field.find('=')
+ if p > 0:
+ key = field[p+1:]
+ resource = field[0:p] + '/openssh-key'
+ else:
+ key = resource = field
+ val = retry_url(url + resource)
+ p = val.find('\n')
+ if p > 0:
+ val = val.split('\n')
+ d[key] = val
+ return d
diff --git a/cloudinit/execute.py b/cloudinit/execute.py
new file mode 100644
index 00000000..033da8f7
--- /dev/null
+++ b/cloudinit/execute.py
@@ -0,0 +1,13 @@
+def run(args,cfg):
+ import subprocess
+ retcode = subprocess.call(args)
+
+ if retcode == 0:
+ return
+
+ if retcode < 0:
+ msg="Cmd terminated by signal %s\n" % -retcode
+ else:
+ msg="Cmd returned %s\n" % retcode
+ msg+=' '.join(args)
+ raise Exception(msg)
diff --git a/cloudinit/util.py b/cloudinit/util.py
new file mode 100644
index 00000000..9b9f6f4d
--- /dev/null
+++ b/cloudinit/util.py
@@ -0,0 +1,79 @@
+import yaml
+import os
+import errno
+import subprocess
+from Cheetah.Template import Template
+
+def read_conf(fname):
+ stream = file(fname)
+ conf = yaml.load(stream)
+ stream.close()
+ return conf
+
+def get_cfg_option_bool(yobj, key, default=False):
+ if not yobj.has_key(key): return default
+ val = yobj[key]
+ if val in [ True, '1', 'on', 'yes', 'true' ]:
+ return True
+ return False
+
+def get_cfg_option_str(yobj, key, default=None):
+ if not yobj.has_key(key): return default
+ return yobj[key]
+
+# merge values from src into cand.
+# if src has a key, cand will not override
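+# e.g. mergedict({'a': 1, 'b': {'c': 2}}, {'a': 9, 'b': {'d': 3}, 'e': 4})
+#  returns {'a': 1, 'b': {'c': 2, 'd': 3}, 'e': 4}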
+def mergedict(src,cand):
+ if isinstance(src,dict) and isinstance(cand,dict):
+ for k,v in cand.iteritems():
+ if k not in src:
+ src[k] = v
+ else:
+ src[k] = mergedict(src[k],v)
+ return src
+
+def write_file(file,content,mode=0644):
+ try:
+ os.makedirs(os.path.dirname(file))
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise e
+
+ f=open(file,"wb")
+ os.chmod(file,mode)
+ f.write(content)
+ f.close()
+
+# get keyid from keyserver
+def getkeybyid(keyid,keyserver):
+ shcmd="""
+ k=${1} ks=${2};
+ exec 2>/dev/null
+ [ -n "$k" ] || exit 1;
+ armour=$(gpg --list-keys --armour "${k}")
+ if [ -z "${armour}" ]; then
+ gpg --keyserver ${ks} --recv $k >/dev/null &&
+ armour=$(gpg --export --armour "${k}") &&
+ gpg --batch --yes --delete-keys "${k}"
+ fi
+ [ -n "${armour}" ] && echo "${armour}"
+ """
+ args=['sh', '-c', shcmd, "export-gpg-keyid", keyid, keyserver]
+ return(subp(args)[0])
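+# illustrative call (keyid made up): armour = getkeybyid('F430BBA5', 'keyserver.ubuntu.com')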
+
+def subp(args, input=None):
+ s_in = None
+ if input is not None:
+ s_in = subprocess.PIPE
+ sp = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=s_in)
+ out,err = sp.communicate(input)
+ if sp.returncode != 0:
+ raise subprocess.CalledProcessError(sp.returncode,args)
+ return(out,err)
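+# e.g. (out, err) = subp(('apt-key', 'add', '-'), key)  (as used in CloudConfig.add_sources)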
+
+def render_to_file(template, outfile, searchList):
+ t = Template(file='/etc/cloud/templates/%s.tmpl' % template, searchList=[searchList])
+ f = open(outfile, 'w')
+ f.write(t.respond())
+ f.close()
+