author     Scott Moser <smoser@ubuntu.com>    2013-08-05 06:58:54 +0100
committer  Scott Moser <smoser@ubuntu.com>    2013-08-05 06:58:54 +0100
commit     65e49111bcea78342289c671376eba85410ea781
tree       64555168d9767f63074729cdbe9d34e5ae37eb01 /cloudinit
parent     696bcc1f0acc67646872cd6ce1b90375ca0ae068
parent     219191673b5491fab683ca5ff1befe845c81f6cf
merge from trunk
Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/config/cc_growpart.py         3
-rw-r--r--  cloudinit/config/cc_resizefs.py        11
-rw-r--r--  cloudinit/sources/DataSourceAzure.py  136
-rw-r--r--  cloudinit/util.py                      35
4 files changed, 147 insertions, 38 deletions
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 4f8c8f80..ba6c58af 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -264,7 +264,8 @@ def handle(_name, cfg, _cloud, log, _args):
             raise e
         return
 
-    resized = resize_devices(resizer, devices)
+    resized = util.log_time(logfunc=log.debug, msg="resize_devices",
+                            func=resize_devices, args=(resizer, devices))
     for (entry, action, msg) in resized:
         if action == RESIZE.CHANGED:
             log.info("'%s' resized: %s" % (entry, msg))
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index b4ee16b2..56040fdd 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -21,7 +21,6 @@
 import errno
 import os
 import stat
-import time
 
 from cloudinit.settings import PER_ALWAYS
 from cloudinit import util
@@ -120,9 +119,12 @@ def handle(name, cfg, _cloud, log, args):
     if resize_root == NOBLOCK:
         # Fork to a child that will run
         # the resize command
-        util.fork_cb(do_resize, resize_cmd, log)
+        util.fork_cb(
+            util.log_time(logfunc=log.debug, msg="backgrounded Resizing",
+                          func=do_resize, args=(resize_cmd, log)))
     else:
-        do_resize(resize_cmd, log)
+        util.log_time(logfunc=log.debug, msg="Resizing",
+                      func=do_resize, args=(resize_cmd, log))
 
     action = 'Resized'
     if resize_root == NOBLOCK:
@@ -132,13 +134,10 @@ def handle(name, cfg, _cloud, log, args):
 
 
 def do_resize(resize_cmd, log):
-    start = time.time()
     try:
         util.subp(resize_cmd)
     except util.ProcessExecutionError:
         util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
         raise
-    tot_time = time.time() - start
-    log.debug("Resizing took %.3f seconds", tot_time)
 # TODO(harlowja): Should we add a fsck check after this to make
 # sure we didn't corrupt anything?
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 0a5caebe..1a74de21 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -31,9 +31,21 @@ LOG = logging.getLogger(__name__)
 DS_NAME = 'Azure'
 DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
 AGENT_START = ['service', 'walinuxagent', 'start']
-BUILTIN_DS_CONFIG = {'datasource': {DS_NAME: {
-    'agent_command': AGENT_START,
-    'data_dir': "/var/lib/waagent"}}}
+BOUNCE_COMMAND = ['sh', '-xc',
+    "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
+
+BUILTIN_DS_CONFIG = {
+    'agent_command': AGENT_START,
+    'data_dir': "/var/lib/waagent",
+    'set_hostname': True,
+    'hostname_bounce': {
+        'interface': 'eth0',
+        'policy': True,
+        'command': BOUNCE_COMMAND,
+        'hostname_command': 'hostname',
+    }
+}
+DS_CFG_PATH = ['datasource', DS_NAME]
 
 
 class DataSourceAzureNet(sources.DataSource):
@@ -42,19 +54,19 @@ class DataSourceAzureNet(sources.DataSource):
         self.seed_dir = os.path.join(paths.seed_dir, 'azure')
         self.cfg = {}
         self.seed = None
+        self.ds_cfg = util.mergemanydict([
+            util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
+            BUILTIN_DS_CONFIG])
 
     def __str__(self):
         root = sources.DataSource.__str__(self)
         return "%s [seed=%s]" % (root, self.seed)
 
     def get_data(self):
-        ddir_cfgpath = ['datasource', DS_NAME, 'data_dir']
         # azure removes/ejects the cdrom containing the ovf-env.xml
         # file on reboot.  So, in order to successfully reboot we
         # need to look in the datadir and consider that valid
-        ddir = util.get_cfg_by_path(self.sys_cfg, ddir_cfgpath)
-        if ddir is None:
-            ddir = util.get_cfg_by_path(BUILTIN_DS_CONFIG, ddir_cfgpath)
+        ddir = self.ds_cfg['data_dir']
 
         candidates = [self.seed_dir]
         candidates.extend(list_possible_azure_ds_devs())
@@ -91,44 +103,46 @@ class DataSourceAzureNet(sources.DataSource):
             return False
 
         if found == ddir:
-            LOG.debug("using cached datasource in %s", ddir)
-
-        fields = [('cmd', ['datasource', DS_NAME, 'agent_command']),
-                  ('datadir', ddir_cfgpath)]
-        mycfg = {}
-        for cfg in (self.cfg, self.sys_cfg, BUILTIN_DS_CONFIG):
-            for name, path in fields:
-                if name in mycfg:
-                    continue
-                value = util.get_cfg_by_path(cfg, keyp=path)
-                if value is not None:
-                    mycfg[name] = value
+            LOG.debug("using files cached in %s", ddir)
+
+        # now update ds_cfg to reflect contents pass in config
+        usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
+        self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg])
+        mycfg = self.ds_cfg
 
         # walinux agent writes files world readable, but expects
         # the directory to be protected.
-        write_files(mycfg['datadir'], files, dirmode=0700)
+        write_files(mycfg['data_dir'], files, dirmode=0700)
+
+        # handle the hostname 'publishing'
+        try:
+            handle_set_hostname(mycfg.get('set_hostname'),
+                                self.metadata.get('local-hostname'),
+                                mycfg['hostname_bounce'])
+        except Exception as e:
+            LOG.warn("Failed publishing hostname: %s" % e)
+            util.logexc(LOG, "handling set_hostname failed")
 
         try:
-            invoke_agent(mycfg['cmd'])
+            invoke_agent(mycfg['agent_command'])
         except util.ProcessExecutionError:
             # claim the datasource even if the command failed
-            util.logexc(LOG, "agent command '%s' failed.", mycfg['cmd'])
+            util.logexc(LOG, "agent command '%s' failed.",
+                        mycfg['agent_command'])
 
-        shcfgxml = os.path.join(mycfg['datadir'], "SharedConfig.xml")
+        shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml")
         wait_for = [shcfgxml]
 
         fp_files = []
         for pk in self.cfg.get('_pubkeys', []):
             bname = pk['fingerprint'] + ".crt"
-            fp_files += [os.path.join(mycfg['datadir'], bname)]
+            fp_files += [os.path.join(mycfg['data_dir'], bname)]
 
-        start = time.time()
-        missing = wait_for_files(wait_for + fp_files)
+        missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+                                func=wait_for_files,
+                                args=(wait_for + fp_files,))
         if len(missing):
             LOG.warn("Did not find files, but going on: %s", missing)
-        else:
-            LOG.debug("waited %.3f seconds for %d files to appear",
-                      time.time() - start, len(wait_for))
 
         if shcfgxml in missing:
             LOG.warn("SharedConfig.xml missing, using static instance-id")
@@ -148,6 +162,56 @@ class DataSourceAzureNet(sources.DataSource):
         return self.cfg
 
 
+def handle_set_hostname(enabled, hostname, cfg):
+    if not util.is_true(enabled):
+        return
+
+    if not hostname:
+        LOG.warn("set_hostname was true but no local-hostname")
+        return
+
+    apply_hostname_bounce(hostname=hostname, policy=cfg['policy'],
+                          interface=cfg['interface'],
+                          command=cfg['command'],
+                          hostname_command=cfg['hostname_command'])
+
+
+def apply_hostname_bounce(hostname, policy, interface, command,
+                          hostname_command="hostname"):
+    # set the hostname to 'hostname' if it is not already set to that.
+    # then, if policy is not off, bounce the interface using command
+    prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
+
+    util.subp([hostname_command, hostname])
+
+    msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
+           (prev_hostname, hostname, policy, interface))
+
+    if util.is_false(policy):
+        LOG.debug("pubhname: policy false, skipping [%s]", msg)
+        return
+
+    if prev_hostname == hostname and policy != "force":
+        LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
+        return
+
+    env = os.environ.copy()
+    env['interface'] = interface
+    env['hostname'] = hostname
+    env['old_hostname'] = prev_hostname
+
+    if command == "builtin":
+        command = BOUNCE_COMMAND
+
+    LOG.debug("pubhname: publishing hostname [%s]", msg)
+    shell = not isinstance(command, (list, tuple))
+    # capture=False, see comments in bug 1202758 and bug 1206164.
+    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
+                  get_uptime=True, func=util.subp,
+                  kwargs={'command': command, 'shell': shell, 'capture': False,
+                          'env': env})
+
+
 def crtfile_to_pubkey(fname):
     pipeline = ('openssl x509 -noout -pubkey < "$0" |'
                 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
@@ -319,15 +383,21 @@ def read_azure_ovf(contents):
         name = child.localName.lower()
 
         simple = False
+        value = ""
         if (len(child.childNodes) == 1 and
                 child.childNodes[0].nodeType == dom.TEXT_NODE):
             simple = True
             value = child.childNodes[0].wholeText
 
+        attrs = {k: v for k, v in child.attributes.items()}
+
         # we accept either UserData or CustomData. If both are present
         # then behavior is undefined.
         if (name == "userdata" or name == "customdata"):
-            ud = base64.b64decode(''.join(value.split()))
+            if attrs.get('encoding') in (None, "base64"):
+                ud = base64.b64decode(''.join(value.split()))
+            else:
+                ud = value
         elif name == "username":
             username = value
         elif name == "userpassword":
@@ -335,7 +405,11 @@ def read_azure_ovf(contents):
         elif name == "hostname":
             md['local-hostname'] = value
         elif name == "dscfg":
-            cfg['datasource'] = {DS_NAME: util.load_yaml(value, default={})}
+            if attrs.get('encoding') in (None, "base64"):
+                dscfg = base64.b64decode(''.join(value.split()))
+            else:
+                dscfg = value
+            cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
         elif name == "ssh":
             cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
         elif name == "disablesshpasswordauthentication":
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 8542fe27..4a74ba57 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1770,3 +1770,38 @@ def which(program):
                 return exe_file
 
     return None
+
+
+def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
+    if args is None:
+        args = []
+    if kwargs is None:
+        kwargs = {}
+
+    start = time.time()
+
+    ustart = None
+    if get_uptime:
+        try:
+            ustart = float(uptime())
+        except ValueError:
+            pass
+
+    try:
+        ret = func(*args, **kwargs)
+    finally:
+        delta = time.time() - start
+        if ustart is not None:
+            try:
+                udelta = float(uptime()) - ustart
+            except ValueError:
+                udelta = "N/A"
+
+        tmsg = " took %0.3f seconds" % delta
+        if get_uptime:
+            tmsg += "(%0.2f)" % udelta
+        try:
+            logfunc(msg + tmsg)
+        except:
+            pass
+    return ret
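
For context, below is a small usage sketch of the util.log_time helper this merge adds, mirroring how the new cc_growpart/cc_resizefs call sites use it. It is not part of the merge: slow_resize and the demo logger are hypothetical stand-ins for resize_devices/do_resize, and the snippet assumes this cloud-init tree is importable as the cloudinit package on a Linux host (get_uptime reads /proc/uptime).

# Usage sketch (not part of this merge): exercising util.log_time the way
# the growpart/resizefs changes above call it.
import logging
import time

from cloudinit import util  # assumes this tree is on PYTHONPATH

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger("log_time_demo")


def slow_resize(device):
    # hypothetical stand-in for the real resize work
    time.sleep(0.2)
    return [(device, "CHANGED", "resized")]


# Runs the wrapped callable, then logs roughly
# "resize_devices took 0.200 seconds" through the supplied logfunc.
resized = util.log_time(logfunc=LOG.debug, msg="resize_devices",
                        func=slow_resize, args=("/dev/sda1",))
print(resized)

# With get_uptime=True (as apply_hostname_bounce uses it) the elapsed
# system uptime is appended to the same message.
util.log_time(logfunc=LOG.debug, msg="publishing hostname", get_uptime=True,
              func=time.sleep, args=(0.1,))

The helper always logs in a finally block, so the timing line is emitted even when the wrapped call raises, and the original return value (or exception) is passed through unchanged.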