Diffstat (limited to 'cloudinit/__init__.py')
-rw-r--r--  cloudinit/__init__.py  161
1 file changed, 89 insertions(+), 72 deletions(-)
diff --git a/cloudinit/__init__.py b/cloudinit/__init__.py
index d74a0f1f..3bc6516a 100644
--- a/cloudinit/__init__.py
+++ b/cloudinit/__init__.py
@@ -46,9 +46,9 @@ pathmap = {
None : "",
}
-per_instance="once-per-instance"
-per_always="always"
-per_once="once"
+per_instance = "once-per-instance"
+per_always = "always"
+per_once = "once"
parsed_cfgs = { }
@@ -70,24 +70,25 @@ import glob
import traceback
class NullHandler(logging.Handler):
- def emit(self,record): pass
+ def emit(self, record):
+ pass
log = logging.getLogger(logger_name)
log.addHandler(NullHandler())
def logging_set_from_cfg_file(cfg_file=system_config):
- logging_set_from_cfg(util.get_base_cfg(cfg_file,cfg_builtin,parsed_cfgs))
+ logging_set_from_cfg(util.get_base_cfg(cfg_file, cfg_builtin, parsed_cfgs))
def logging_set_from_cfg(cfg):
log_cfgs = []
- logcfg=util.get_cfg_option_str(cfg, "log_cfg", False)
+ logcfg = util.get_cfg_option_str(cfg, "log_cfg", False)
if logcfg:
# if there is a 'logcfg' entry in the config, respect
# it, it is the old keyname
log_cfgs = [ logcfg ]
elif "log_cfgs" in cfg:
for cfg in cfg['log_cfgs']:
- if isinstance(cfg,list):
+ if isinstance(cfg, list):
log_cfgs.append('\n'.join(cfg))
else:
log_cfgs.append()
@@ -130,15 +131,15 @@ class CloudInit:
if ds_deps != None:
self.ds_deps = ds_deps
- self.sysconfig=sysconfig
- self.cfg=self.read_cfg()
+ self.sysconfig = sysconfig
+ self.cfg = self.read_cfg()
def read_cfg(self):
if self.cfg:
return(self.cfg)
try:
- conf = util.get_base_cfg(self.sysconfig,cfg_builtin, parsed_cfgs)
+ conf = util.get_base_cfg(self.sysconfig, cfg_builtin, parsed_cfgs)
except Exception:
conf = get_builtin_cfg()
@@ -147,8 +148,9 @@ class CloudInit:
try:
from configobj import ConfigObj
oldcfg = ConfigObj(self.old_conffile)
- if oldcfg is None: oldcfg = { }
- conf = util.mergedict(conf,oldcfg)
+ if oldcfg is None:
+ oldcfg = { }
+ conf = util.mergedict(conf, oldcfg)
except:
pass
@@ -160,7 +162,7 @@ class CloudInit:
# by using the instance link, if purge_cache was called
# the file wont exist
cache = get_ipath_cur('obj_pkl')
- f=open(cache, "rb")
+ f = open(cache, "rb")
data = cPickle.load(f)
f.close()
self.datasource = data
@@ -177,21 +179,22 @@ class CloudInit:
return False
try:
- f=open(cache, "wb")
- cPickle.dump(self.datasource,f)
+ f = open(cache, "wb")
+ cPickle.dump(self.datasource, f)
f.close()
- os.chmod(cache,0400)
+ os.chmod(cache, 0400)
except:
raise
def get_data_source(self):
- if self.datasource is not None: return True
+ if self.datasource is not None:
+ return True
if self.restore_from_cache():
log.debug("restored from cache type %s" % self.datasource)
return True
- cfglist=self.cfg['datasource_list']
+ cfglist = self.cfg['datasource_list']
dslist = list_sources(cfglist, self.ds_deps)
dsnames = [f.__name__ for f in dslist]
@@ -206,7 +209,7 @@ class CloudInit:
log.debug("found data source %s" % ds)
return True
except Exception as e:
- log.warn("get_data of %s raised %s" % (ds,e))
+ log.warn("get_data of %s raised %s" % (ds, e))
util.logexc(log)
msg = "Did not find data source. searched classes: %s" % dsnames
log.debug(msg)
@@ -216,7 +219,8 @@ class CloudInit:
try:
os.unlink(cur_instance_link)
except OSError as e:
- if e.errno != errno.ENOENT: raise
+ if e.errno != errno.ENOENT:
+ raise
iid = self.get_instance_id()
os.symlink("./instances/%s" % iid, cur_instance_link)
@@ -252,21 +256,22 @@ class CloudInit:
util.write_file(self.get_ipath('userdata'),
self.datasource.get_userdata(), 0600)
- def sem_getpath(self,name,freq):
+ def sem_getpath(self, name, freq):
if freq == 'once-per-instance':
- return("%s/%s" % (self.get_ipath("sem"),name))
+ return("%s/%s" % (self.get_ipath("sem"), name))
return("%s/%s.%s" % (get_cpath("sem"), name, freq))
- def sem_has_run(self,name,freq):
- if freq == per_always: return False
- semfile = self.sem_getpath(name,freq)
+ def sem_has_run(self, name, freq):
+ if freq == per_always:
+ return False
+ semfile = self.sem_getpath(name, freq)
if os.path.exists(semfile):
return True
return False
- def sem_acquire(self,name,freq):
+ def sem_acquire(self, name, freq):
from time import time
- semfile = self.sem_getpath(name,freq)
+ semfile = self.sem_getpath(name, freq)
try:
os.makedirs(os.path.dirname(semfile))
@@ -279,15 +284,15 @@ class CloudInit:
# race condition
try:
- f = open(semfile,"w")
+ f = open(semfile, "w")
f.write("%s\n" % str(time()))
f.close()
except:
return(False)
return(True)
- def sem_clear(self,name,freq):
- semfile = self.sem_getpath(name,freq)
+ def sem_clear(self, name, freq):
+ semfile = self.sem_getpath(name, freq)
try:
os.unlink(semfile)
except OSError as e:
@@ -300,20 +305,20 @@ class CloudInit:
# if that does not exist, then call 'func' with given 'args'
# if 'clear_on_fail' is True and func throws an exception
# then remove the lock (so it would run again)
- def sem_and_run(self,semname,freq,func,args=None,clear_on_fail=False):
+ def sem_and_run(self, semname, freq, func, args=None, clear_on_fail=False):
if args is None:
args = []
- if self.sem_has_run(semname,freq):
+ if self.sem_has_run(semname, freq):
log.debug("%s already ran %s", semname, freq)
return False
try:
- if not self.sem_acquire(semname,freq):
+ if not self.sem_acquire(semname, freq):
raise Exception("Failed to acquire lock on %s" % semname)
func(*args)
except:
if clear_on_fail:
- self.sem_clear(semname,freq)
+ self.sem_clear(semname, freq)
raise
return True
@@ -322,7 +327,7 @@ class CloudInit:
# (/var/lib/cloud/instances/<instance>/<name>)
def get_ipath(self, name=None):
return("%s/instances/%s%s"
- % (varlibdir,self.get_instance_id(), pathmap[name]))
+ % (varlibdir, self.get_instance_id(), pathmap[name]))
def consume_userdata(self, frequency=per_instance):
self.get_userdata()
@@ -333,18 +338,20 @@ class CloudInit:
# add the path to the plugins dir to the top of our list for import
# instance dir should be read before cloud-dir
- sys.path.insert(0,cdir)
- sys.path.insert(0,idir)
+ sys.path.insert(0, cdir)
+ sys.path.insert(0, idir)
part_handlers = { }
# add handlers in cdir
for fname in glob.glob("%s/*.py" % cdir):
- if not os.path.isfile(fname): continue
+ if not os.path.isfile(fname):
+ continue
modname = os.path.basename(fname)[0:-3]
try:
mod = __import__(modname)
handler_register(mod, part_handlers, data, frequency)
- log.debug("added handler for [%s] from %s" % (mod.list_types(), fname))
+ log.debug("added handler for [%s] from %s" % (mod.list_types(),
+ fname))
except:
log.warn("failed to initialize handler in %s" % fname)
util.logexc(log)
@@ -369,32 +376,34 @@ class CloudInit:
continue
handler_call_end(mod, data, frequency)
- def handle_user_script(self,_data,ctype,filename,payload, _frequency):
- if ctype == "__end__": return
+ def handle_user_script(self, _data, ctype, filename, payload, _frequency):
+ if ctype == "__end__":
+ return
if ctype == "__begin__":
# maybe delete existing things here
return
- filename=filename.replace(os.sep,'_')
+ filename = filename.replace(os.sep, '_')
scriptsdir = get_ipath_cur('scripts')
util.write_file("%s/%s" %
- (scriptsdir,filename), util.dos2unix(payload), 0700)
+ (scriptsdir, filename), util.dos2unix(payload), 0700)
- def handle_upstart_job(self,_data,ctype,filename,payload, frequency):
+ def handle_upstart_job(self, _data, ctype, filename, payload, frequency):
# upstart jobs are only written on the first boot
if frequency != per_instance:
return
- if ctype == "__end__" or ctype == "__begin__": return
+ if ctype == "__end__" or ctype == "__begin__":
+ return
if not filename.endswith(".conf"):
- filename=filename+".conf"
+ filename = filename+".conf"
- util.write_file("%s/%s" % ("/etc/init",filename),
+ util.write_file("%s/%s" % ("/etc/init", filename),
util.dos2unix(payload), 0644)
- def handle_cloud_config(self,_data,ctype,filename,payload, _frequency):
+ def handle_cloud_config(self, _data, ctype, filename, payload, _frequency):
if ctype == "__begin__":
- self.cloud_config_str=""
+ self.cloud_config_str = ""
return
if ctype == "__end__":
cloud_config = self.get_ipath("cloud_config")
@@ -410,32 +419,35 @@ class CloudInit:
return
- self.cloud_config_str+="\n#%s\n%s" % (filename,payload)
+ self.cloud_config_str += "\n#%s\n%s" % (filename, payload)
- def handle_cloud_boothook(self,_data,ctype,filename,payload, _frequency):
- if ctype == "__end__": return
- if ctype == "__begin__": return
+ def handle_cloud_boothook(self, _data, ctype, filename, payload,
+ _frequency):
+ if ctype == "__end__":
+ return
+ if ctype == "__begin__":
+ return
- filename=filename.replace(os.sep,'_')
+ filename = filename.replace(os.sep, '_')
payload = util.dos2unix(payload)
- prefix="#cloud-boothook"
+ prefix = "#cloud-boothook"
start = 0
if payload.startswith(prefix):
start = len(prefix) + 1
boothooks_dir = self.get_ipath("boothooks")
- filepath = "%s/%s" % (boothooks_dir,filename)
+ filepath = "%s/%s" % (boothooks_dir, filename)
util.write_file(filepath, payload[start:], 0700)
try:
- env=os.environ.copy()
- env['INSTANCE_ID']= self.datasource.get_instance_id()
+ env = os.environ.copy()
+ env['INSTANCE_ID'] = self.datasource.get_instance_id()
subprocess.check_call([filepath], env=env)
except subprocess.CalledProcessError as e:
log.error("boothooks script %s returned %i" %
- (filepath,e.returncode))
+ (filepath, e.returncode))
except Exception as e:
log.error("boothooks unknown exception %s when running %s" %
- (e,filepath))
+ (e, filepath))
def get_public_ssh_keys(self):
return(self.datasource.get_public_ssh_keys())
@@ -449,14 +461,14 @@ class CloudInit:
def get_hostname(self, fqdn=False):
return(self.datasource.get_hostname(fqdn=fqdn))
- def device_name_to_device(self,name):
+ def device_name_to_device(self, name):
return(self.datasource.device_name_to_device(name))
# I really don't know if this should be here or not, but
# I needed it in cc_update_hostname, where that code had a valid 'cloud'
# reference, but did not have a cloudinit handle
# (ie, no cloudinit.get_cpath())
- def get_cpath(self,name=None):
+ def get_cpath(self, name=None):
return(get_cpath(name))
@@ -468,26 +480,30 @@ def initfs():
dlist.append("%s/%s" % (varlibdir, subd))
util.ensure_dirs(dlist)
- cfg = util.get_base_cfg(system_config,cfg_builtin,parsed_cfgs)
+ cfg = util.get_base_cfg(system_config, cfg_builtin, parsed_cfgs)
log_file = util.get_cfg_option_str(cfg, 'def_log_file', None)
perms = util.get_cfg_option_str(cfg, 'syslog_fix_perms', None)
if log_file:
- fp = open(log_file,"ab")
+ fp = open(log_file, "ab")
fp.close()
if log_file and perms:
- (u,g) = perms.split(':',1)
- if u == "-1" or u == "None": u = None
- if g == "-1" or g == "None": g = None
+ (u, g) = perms.split(':', 1)
+ if u == "-1" or u == "None":
+ u = None
+ if g == "-1" or g == "None":
+ g = None
util.chownbyname(log_file, u, g)
def purge_cache(rmcur=True):
rmlist = [ boot_finished ]
- if rmcur: rmlist.append(cur_instance_link)
+ if rmcur:
+ rmlist.append(cur_instance_link)
for f in rmlist:
try:
os.unlink(f)
except OSError as e:
- if e.errno == errno.ENOENT: continue
+ if e.errno == errno.ENOENT:
+ continue
return(False)
except:
return(False)
@@ -503,8 +519,9 @@ def get_cpath(name=None):
return("%s%s" % (varlibdir, pathmap[name]))
def get_base_cfg(cfg_path=None):
- if cfg_path is None: cfg_path = system_config
- return(util.get_base_cfg(cfg_path,cfg_builtin,parsed_cfgs))
+ if cfg_path is None:
+ cfg_path = system_config
+ return(util.get_base_cfg(cfg_path, cfg_builtin, parsed_cfgs))
def get_builtin_cfg():
return(yaml.load(cfg_builtin))
@@ -513,7 +530,7 @@ class DataSourceNotFoundException(Exception):
pass
def list_sources(cfg_list, depends):
- return(DataSource.list_sources(cfg_list,depends, ["cloudinit", "" ]))
+ return(DataSource.list_sources(cfg_list, depends, ["cloudinit", "" ]))
def handler_register(mod, part_handlers, data, frequency=per_instance):
if not hasattr(mod, "handler_version"):