author     Joshua Harlow <harlowja@yahoo-inc.com>   2014-11-25 11:46:10 -0800
committer  Joshua Harlow <harlowja@yahoo-inc.com>   2014-11-25 11:46:10 -0800
commit     7b39b3976f94fd9ce9cbe39324ec14ad5a7c334e (patch)
tree       0ebb51599a0024b2507811697bfe965e66ac29e1 /cloudinit
parent     7d20f3843bff0069b1ac9b2f0c6d346889789058 (diff)
parent     1db41a6f5283d38dff6de0b0421d51eac869a39c (diff)
Update with trunk and resolve conflicts
Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/config/cc_chef.py                 | 305
-rw-r--r--  cloudinit/config/cc_debug.py                |  39
-rw-r--r--  cloudinit/config/cc_ubuntu_init_switch.py   |  30
-rw-r--r--  cloudinit/distros/__init__.py               |  16
-rw-r--r--  cloudinit/distros/net_util.py               |  68
-rw-r--r--  cloudinit/distros/rhel.py                   |  24
-rw-r--r--  cloudinit/netinfo.py                        |  60
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 104
-rw-r--r--  cloudinit/ssh_util.py                       |   5
-rw-r--r--  cloudinit/templater.py                      |   4
-rw-r--r--  cloudinit/util.py                           |  22
-rw-r--r--  cloudinit/version.py                        |   2
12 files changed, 548 insertions, 131 deletions
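
Most of the churn below is in cc_chef.py, which gains a module docstring, a template-driven /etc/chef/client.rb, and new install/exec handling. For orientation before the full diff, here is a minimal cloud-config sketch of the options the reworked module recognises; the key names are taken from the patch, while all concrete values (server URL, node name, validator name, run list) are purely illustrative:

    #cloud-config
    chef:
      install_type: omnibus             # 'packages' (default), 'gems' or 'omnibus'
      omnibus_url_retries: 5            # new knob, defaults to OMNIBUS_URL_RETRIES
      force_install: false
      exec: true                        # run chef-client once installed (new 'exec' option)
      server_url: https://chef.example.com      # illustrative value; mandatory
      node_name: web01                           # illustrative; defaults to the instance id
      environment: _default
      validation_name: example-validator         # illustrative value; mandatory
      validation_cert: |
        -----BEGIN RSA PRIVATE KEY-----
        ...
        -----END RSA PRIVATE KEY-----
      delete_validation_post_exec: true          # remove validation.pem after the run
      run_list:
        - recipe[apache2]                        # illustrative run list entry
      initial_attributes:
        apache:
          keepalive: false                       # illustrative attribute
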
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 806deed9..fc837363 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -18,6 +18,57 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. +""" +**Summary:** module that configures, starts and installs chef. + +**Description:** This module enables chef to be installed (from packages or +from gems, or from omnibus). Before this occurs chef configurations are +written to disk (validation.pem, client.pem, firstboot.json, client.rb), +and needed chef folders/directories are created (/etc/chef and /var/log/chef +and so-on). Then once installing proceeds correctly if configured chef will +be started (in daemon mode or in non-daemon mode) and then once that has +finished (if ran in non-daemon mode this will be when chef finishes +converging, if ran in daemon mode then no further actions are possible since +chef will have forked into its own process) then a post run function can +run that can do finishing activities (such as removing the validation pem +file). + +It can be configured with the following option structure:: + + chef: + directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef, + /var/cache/chef, /var/backups/chef, /var/run/chef) + validation_key or validation_cert: (optional string to be written to + /etc/chef/validation.pem) + firstboot_path: (path to write run_list and initial_attributes keys that + should also be present in this configuration, defaults + to /etc/chef/firstboot.json) + exec: boolean to run or not run chef (defaults to false, unless + a gem installed is requested + where this will then default + to true) + + chef.rb template keys (if falsey, then will be skipped and not + written to /etc/chef/client.rb) + + chef: + client_key: + environment: + file_backup_path: + file_cache_path: + json_attribs: + log_level: + log_location: + node_name: + pid_file: + server_url: + show_time: + ssl_verify_mode: + validation_key: + validation_name: +""" + +import itertools import json import os @@ -27,19 +78,112 @@ from cloudinit import util RUBY_VERSION_DEFAULT = "1.8" -CHEF_DIRS = [ +CHEF_DIRS = tuple([ '/etc/chef', '/var/log/chef', '/var/lib/chef', '/var/cache/chef', '/var/backups/chef', '/var/run/chef', -] +]) +REQUIRED_CHEF_DIRS = tuple([ + '/etc/chef', +]) + +# Used if fetching chef from a omnibus style package +OMNIBUS_URL = "https://www.getchef.com/chef/install.sh" +OMNIBUS_URL_RETRIES = 5 + +CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem' +CHEF_FB_PATH = '/etc/chef/firstboot.json' +CHEF_RB_TPL_DEFAULTS = { + # These are ruby symbols... + 'ssl_verify_mode': ':verify_none', + 'log_level': ':info', + # These are not symbols... 
+ 'log_location': '/var/log/chef/client.log', + 'validation_key': CHEF_VALIDATION_PEM_PATH, + 'client_key': "/etc/chef/client.pem", + 'json_attribs': CHEF_FB_PATH, + 'file_cache_path': "/var/cache/chef", + 'file_backup_path': "/var/backups/chef", + 'pid_file': "/var/run/chef/client.pid", + 'show_time': True, +} +CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) +CHEF_RB_TPL_PATH_KEYS = frozenset([ + 'log_location', + 'validation_key', + 'client_key', + 'file_cache_path', + 'json_attribs', + 'file_cache_path', + 'pid_file', +]) +CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) +CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) +CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS) +CHEF_RB_TPL_KEYS.extend([ + 'server_url', + 'node_name', + 'environment', + 'validation_name', +]) +CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS) +CHEF_RB_PATH = '/etc/chef/client.rb' +CHEF_EXEC_PATH = '/usr/bin/chef-client' +CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20']) + + +def is_installed(): + if not os.path.isfile(CHEF_EXEC_PATH): + return False + if not os.access(CHEF_EXEC_PATH, os.X_OK): + return False + return True + + +def post_run_chef(chef_cfg, log): + delete_pem = util.get_cfg_option_bool(chef_cfg, + 'delete_validation_post_exec', + default=False) + if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH): + os.unlink(CHEF_VALIDATION_PEM_PATH) -OMNIBUS_URL = "https://www.opscode.com/chef/install.sh" + +def get_template_params(iid, chef_cfg, log): + params = CHEF_RB_TPL_DEFAULTS.copy() + # Allow users to overwrite any of the keys they want (if they so choose), + # when a value is None, then the value will be set to None and no boolean + # or string version will be populated... + for (k, v) in chef_cfg.items(): + if k not in CHEF_RB_TPL_KEYS: + log.debug("Skipping unknown chef template key '%s'", k) + continue + if v is None: + params[k] = None + else: + # This will make the value a boolean or string... + if k in CHEF_RB_TPL_BOOL_KEYS: + params[k] = util.get_cfg_option_bool(chef_cfg, k) + else: + params[k] = util.get_cfg_option_str(chef_cfg, k) + # These ones are overwritten to be exact values... + params.update({ + 'generated_by': util.make_header(), + 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', + default=iid), + 'environment': util.get_cfg_option_str(chef_cfg, 'environment', + default='_default'), + # These two are mandatory... 
+ 'server_url': chef_cfg['server_url'], + 'validation_name': chef_cfg['validation_name'], + }) + return params def handle(name, cfg, cloud, log, _args): + """Handler method activated by cloud-init.""" # If there isn't a chef key in the configuration don't do anything if 'chef' not in cfg: @@ -49,7 +193,10 @@ def handle(name, cfg, cloud, log, _args): chef_cfg = cfg['chef'] # Ensure the chef directories we use exist - for d in CHEF_DIRS: + chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories') + if not chef_dirs: + chef_dirs = list(CHEF_DIRS) + for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS): util.ensure_dir(d) # Set the validation key based on the presence of either 'validation_key' @@ -57,64 +204,108 @@ def handle(name, cfg, cloud, log, _args): # takes precedence for key in ('validation_key', 'validation_cert'): if key in chef_cfg and chef_cfg[key]: - util.write_file('/etc/chef/validation.pem', chef_cfg[key]) + util.write_file(CHEF_VALIDATION_PEM_PATH, chef_cfg[key]) break # Create the chef config from template template_fn = cloud.get_template_filename('chef_client.rb') if template_fn: iid = str(cloud.datasource.get_instance_id()) - params = { - 'server_url': chef_cfg['server_url'], - 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', iid), - 'environment': util.get_cfg_option_str(chef_cfg, 'environment', - '_default'), - 'validation_name': chef_cfg['validation_name'] - } - templater.render_to_file(template_fn, '/etc/chef/client.rb', params) + params = get_template_params(iid, chef_cfg, log) + # Do a best effort attempt to ensure that the template values that + # are associated with paths have there parent directory created + # before they are used by the chef-client itself. + param_paths = set() + for (k, v) in params.items(): + if k in CHEF_RB_TPL_PATH_KEYS and v: + param_paths.add(os.path.dirname(v)) + util.ensure_dirs(param_paths) + templater.render_to_file(template_fn, CHEF_RB_PATH, params) else: - log.warn("No template found, not rendering to /etc/chef/client.rb") - - # set the firstboot json - initial_json = {} - if 'run_list' in chef_cfg: - initial_json['run_list'] = chef_cfg['run_list'] - if 'initial_attributes' in chef_cfg: - initial_attributes = chef_cfg['initial_attributes'] - for k in list(initial_attributes.keys()): - initial_json[k] = initial_attributes[k] - util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json)) + log.warn("No template found, not rendering to %s", + CHEF_RB_PATH) - # If chef is not installed, we install chef based on 'install_type' - if (not os.path.isfile('/usr/bin/chef-client') or - util.get_cfg_option_bool(chef_cfg, - 'force_install', default=False)): - - install_type = util.get_cfg_option_str(chef_cfg, 'install_type', - 'packages') - if install_type == "gems": - # this will install and run the chef-client from gems - chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) - ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', - RUBY_VERSION_DEFAULT) - install_chef_from_gems(cloud.distro, ruby_version, chef_version) - # and finally, run chef-client - log.debug('Running chef-client') - util.subp(['/usr/bin/chef-client', - '-d', '-i', '1800', '-s', '20'], capture=False) - elif install_type == 'packages': - # this will install and run the chef-client from packages - cloud.distro.install_packages(('chef',)) - elif install_type == 'omnibus': - url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL) - content = url_helper.readurl(url=url, retries=5) - with util.tempdir() as tmpd: - # use 
tmpd over tmpfile to avoid 'Text file busy' on execute - tmpf = "%s/chef-omnibus-install" % tmpd - util.write_file(tmpf, str(content), mode=0700) - util.subp([tmpf], capture=False) + # Set the firstboot json + fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path', + default=CHEF_FB_PATH) + if not fb_filename: + log.info("First boot path empty, not writing first boot json file") + else: + initial_json = {} + if 'run_list' in chef_cfg: + initial_json['run_list'] = chef_cfg['run_list'] + if 'initial_attributes' in chef_cfg: + initial_attributes = chef_cfg['initial_attributes'] + for k in list(initial_attributes.keys()): + initial_json[k] = initial_attributes[k] + util.write_file(fb_filename, json.dumps(initial_json)) + + # Try to install chef, if its not already installed... + force_install = util.get_cfg_option_bool(chef_cfg, + 'force_install', default=False) + if not is_installed() or force_install: + run = install_chef(cloud, chef_cfg, log) + elif is_installed(): + run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + else: + run = False + if run: + run_chef(chef_cfg, log) + post_run_chef(chef_cfg, log) + + +def run_chef(chef_cfg, log): + log.debug('Running chef-client') + cmd = [CHEF_EXEC_PATH] + if 'exec_arguments' in chef_cfg: + cmd_args = chef_cfg['exec_arguments'] + if isinstance(cmd_args, (list, tuple)): + cmd.extend(cmd_args) + elif isinstance(cmd_args, (str, basestring)): + cmd.append(cmd_args) else: - log.warn("Unknown chef install type %s", install_type) + log.warn("Unknown type %s provided for chef" + " 'exec_arguments' expected list, tuple," + " or string", type(cmd_args)) + cmd.extend(CHEF_EXEC_DEF_ARGS) + else: + cmd.extend(CHEF_EXEC_DEF_ARGS) + util.subp(cmd, capture=False) + + +def install_chef(cloud, chef_cfg, log): + # If chef is not installed, we install chef based on 'install_type' + install_type = util.get_cfg_option_str(chef_cfg, 'install_type', + 'packages') + run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + if install_type == "gems": + # This will install and run the chef-client from gems + chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) + ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', + RUBY_VERSION_DEFAULT) + install_chef_from_gems(cloud.distro, ruby_version, chef_version) + # Retain backwards compat, by preferring True instead of False + # when not provided/overriden... 
+ run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True) + elif install_type == 'packages': + # This will install and run the chef-client from packages + cloud.distro.install_packages(('chef',)) + elif install_type == 'omnibus': + # This will install as a omnibus unified package + url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL) + retries = max(0, util.get_cfg_option_int(chef_cfg, + "omnibus_url_retries", + default=OMNIBUS_URL_RETRIES)) + content = url_helper.readurl(url=url, retries=retries) + with util.tempdir() as tmpd: + # Use tmpdir over tmpfile to avoid 'text file busy' on execute + tmpf = "%s/chef-omnibus-install" % tmpd + util.write_file(tmpf, str(content), mode=0700) + util.subp([tmpf], capture=False) + else: + log.warn("Unknown chef install type '%s'", install_type) + run = False + return run def get_ruby_packages(version): @@ -133,9 +324,9 @@ def install_chef_from_gems(ruby_version, chef_version, distro): util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') if chef_version: util.subp(['/usr/bin/gem', 'install', 'chef', - '-v %s' % chef_version, '--no-ri', - '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) + '-v %s' % chef_version, '--no-ri', + '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) else: util.subp(['/usr/bin/gem', 'install', 'chef', - '--no-ri', '--no-rdoc', '--bindir', - '/usr/bin', '-q'], capture=False) + '--no-ri', '--no-rdoc', '--bindir', + '/usr/bin', '-q'], capture=False) diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 7219b0f8..8c489426 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -14,11 +14,33 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -from cloudinit import type_utils -from cloudinit import util +""" +**Summary:** helper to debug cloud-init *internal* datastructures. + +**Description:** This module will enable for outputting various internal +information that cloud-init sources provide to either a file or to the output +console/log location that this cloud-init has been configured with when +running. + +It can be configured with the following option structure:: + + debug: + verbose: (defaulting to true) + output: (location to write output, defaulting to console + log) + +.. note:: + + Log configurations are not output. +""" + import copy from StringIO import StringIO +from cloudinit import type_utils +from cloudinit import util + +SKIP_KEYS = frozenset(['log_cfgs']) + def _make_header(text): header = StringIO() @@ -31,7 +53,14 @@ def _make_header(text): return header.getvalue() +def _dumps(obj): + text = util.yaml_dumps(obj, explicit_start=False, explicit_end=False) + return text.rstrip() + + def handle(name, cfg, cloud, log, args): + """Handler method activated by cloud-init.""" + verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True) if args: # if args are provided (from cmdline) then explicitly set verbose @@ -46,7 +75,7 @@ def handle(name, cfg, cloud, log, args): return # Clean out some keys that we just don't care about showing... dump_cfg = copy.deepcopy(cfg) - for k in ['log_cfgs']: + for k in SKIP_KEYS: dump_cfg.pop(k, None) all_keys = list(dump_cfg.keys()) for k in all_keys: @@ -55,10 +84,10 @@ def handle(name, cfg, cloud, log, args): # Now dump it... 
to_print = StringIO() to_print.write(_make_header("Config")) - to_print.write(util.yaml_dumps(dump_cfg)) + to_print.write(_dumps(dump_cfg)) to_print.write("\n") to_print.write(_make_header("MetaData")) - to_print.write(util.yaml_dumps(cloud.datasource.metadata)) + to_print.write(_dumps(cloud.datasource.metadata)) to_print.write("\n") to_print.write(_make_header("Misc")) to_print.write("Datasource: %s\n" % diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py index 6f994bff..7e88ed85 100644 --- a/cloudinit/config/cc_ubuntu_init_switch.py +++ b/cloudinit/config/cc_ubuntu_init_switch.py @@ -17,30 +17,27 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. """ -ubuntu_init_switch: reboot system into another init +**Summary:** reboot system into another init. -This provides a way for the user to boot with systemd even if the -image is set to boot with upstart. It should be run as one of the first -cloud_init_modules, and will switch the init system and then issue a reboot. -The next boot will come up in the target init system and no action will +**Description:** This module provides a way for the user to boot with systemd +even if the image is set to boot with upstart. It should be run as one of the +first ``cloud_init_modules``, and will switch the init system and then issue a +reboot. The next boot will come up in the target init system and no action will be taken. This should be inert on non-ubuntu systems, and also exit quickly. -config is comes under the top level 'init_switch' dictionary. +It can be configured with the following option structure:: -#cloud-config -init_switch: - target: systemd - reboot: true + init_switch: + target: systemd (can be 'systemd' or 'upstart') + reboot: true (reboot if a change was made, or false to not reboot) -'target' can be 'systemd' or 'upstart'. Best effort is made, but its possible -this system will break, and probably won't interact well with any other -mechanism you've used to switch the init system. +.. note:: -'reboot': [default=true]. - true: reboot if a change was made. - false: do not reboot. + Best effort is made, but it's possible + this system will break, and probably won't interact well with any other + mechanism you've used to switch the init system. """ from cloudinit.settings import PER_INSTANCE @@ -91,6 +88,7 @@ fi def handle(name, cfg, cloud, log, args): + """Handler method activated by cloud-init.""" if not isinstance(cloud.distro, ubuntu.Distro): log.debug("%s: distro is '%s', not ubuntu. returning", diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 49014477..bf465442 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -388,8 +388,20 @@ class Distro(object): # Import SSH keys if 'ssh_authorized_keys' in kwargs: - keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, options=None) + # Try to handle this in a smart manner. 
+ keys = kwargs['ssh_authorized_keys'] + if isinstance(keys, (basestring, str)): + keys = [keys] + if isinstance(keys, dict): + keys = list(keys.values()) + if keys is not None: + if not isinstance(keys, (tuple, list, set)): + LOG.warn("Invalid type '%s' detected for" + " 'ssh_authorized_keys', expected list," + " string, dict, or set.", type(keys)) + else: + keys = set(keys) or [] + ssh_util.setup_user_keys(keys, name, options=None) return True diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py index b9bcfd8b..dd63a6a3 100644 --- a/cloudinit/distros/net_util.py +++ b/cloudinit/distros/net_util.py @@ -114,6 +114,10 @@ def translate_network(settings): if 'iface' not in info: continue iface_details = info['iface'].split(None) + # Check if current device *may* have an ipv6 IP + use_ipv6 = False + if 'inet6' in iface_details: + use_ipv6 = True dev_name = None if len(iface_details) >= 1: dev = iface_details[0].strip().lower() @@ -122,6 +126,7 @@ def translate_network(settings): if not dev_name: continue iface_info = {} + iface_info['ipv6'] = {} if len(iface_details) >= 3: proto_type = iface_details[2].strip().lower() # Seems like this can be 'loopback' which we don't @@ -129,35 +134,50 @@ def translate_network(settings): if proto_type in ['dhcp', 'static']: iface_info['bootproto'] = proto_type # These can just be copied over - for k in ['netmask', 'address', 'gateway', 'broadcast']: - if k in info: - val = info[k].strip().lower() - if val: - iface_info[k] = val - # Name server info provided?? - if 'dns-nameservers' in info: - iface_info['dns-nameservers'] = info['dns-nameservers'].split() - # Name server search info provided?? - if 'dns-search' in info: - iface_info['dns-search'] = info['dns-search'].split() - # Is any mac address spoofing going on?? - if 'hwaddress' in info: - hw_info = info['hwaddress'].lower().strip() - hw_split = hw_info.split(None, 1) - if len(hw_split) == 2 and hw_split[0].startswith('ether'): - hw_addr = hw_split[1] - if hw_addr: - iface_info['hwaddress'] = hw_addr - real_ifaces[dev_name] = iface_info + if use_ipv6: + for k in ['address', 'gateway']: + if k in info: + val = info[k].strip().lower() + if val: + iface_info['ipv6'][k] = val + else: + for k in ['netmask', 'address', 'gateway', 'broadcast']: + if k in info: + val = info[k].strip().lower() + if val: + iface_info[k] = val + # Name server info provided?? + if 'dns-nameservers' in info: + iface_info['dns-nameservers'] = info['dns-nameservers'].split() + # Name server search info provided?? + if 'dns-search' in info: + iface_info['dns-search'] = info['dns-search'].split() + # Is any mac address spoofing going on?? + if 'hwaddress' in info: + hw_info = info['hwaddress'].lower().strip() + hw_split = hw_info.split(None, 1) + if len(hw_split) == 2 and hw_split[0].startswith('ether'): + hw_addr = hw_split[1] + if hw_addr: + iface_info['hwaddress'] = hw_addr + # If ipv6 is enabled, device will have multiple IPs, so we need to + # update the dictionary instead of overwriting it... 
+ if dev_name in real_ifaces: + real_ifaces[dev_name].update(iface_info) + else: + real_ifaces[dev_name] = iface_info # Check for those that should be started on boot via 'auto' for (cmd, args) in entries: + args = args.split(None) + if not args: + continue + dev_name = args[0].strip().lower() if cmd == 'auto': # Seems like auto can be like 'auto eth0 eth0:1' so just get the # first part out as the device name - args = args.split(None) - if not args: - continue - dev_name = args[0].strip().lower() if dev_name in real_ifaces: real_ifaces[dev_name]['auto'] = True + if cmd == 'iface' and 'inet6' in args: + real_ifaces[dev_name]['inet6'] = True return real_ifaces + diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index d01124e3..d5cc15fe 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -72,6 +72,7 @@ class Distro(distros.Distro): nameservers = [] searchservers = [] dev_names = entries.keys() + use_ipv6 = False for (dev, info) in entries.iteritems(): net_fn = self.network_script_tpl % (dev) net_cfg = { @@ -84,6 +85,13 @@ class Distro(distros.Distro): 'MACADDR': info.get('hwaddress'), 'ONBOOT': _make_sysconfig_bool(info.get('auto')), } + if info.get('inet6'): + use_ipv6 = True + net_cfg.update({ + 'IPV6INIT': _make_sysconfig_bool(True), + 'IPV6ADDR': info.get('ipv6').get('address'), + 'IPV6_DEFAULTGW': info.get('ipv6').get('gateway'), + }) rhel_util.update_sysconfig_file(net_fn, net_cfg) if 'dns-nameservers' in info: nameservers.extend(info['dns-nameservers']) @@ -96,10 +104,14 @@ class Distro(distros.Distro): net_cfg = { 'NETWORKING': _make_sysconfig_bool(True), } + # If IPv6 interface present, enable ipv6 networking + if use_ipv6: + net_cfg['NETWORKING_IPV6'] = _make_sysconfig_bool(True) + net_cfg['IPV6_AUTOCONF'] = _make_sysconfig_bool(False) rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg) return dev_names - def _dist_uses_systemd(self): + def uses_systemd(self): # Fedora 18 and RHEL 7 were the first adopters in their series (dist, vers) = util.system_info()['dist'][:2] major = (int)(vers.split('.')[0]) @@ -107,7 +119,7 @@ class Distro(distros.Distro): or (dist.startswith('Fedora') and major >= 18)) def apply_locale(self, locale, out_fn=None): - if self._dist_uses_systemd(): + if self.uses_systemd(): if not out_fn: out_fn = self.systemd_locale_conf_fn out_fn = self.systemd_locale_conf_fn @@ -120,7 +132,7 @@ class Distro(distros.Distro): rhel_util.update_sysconfig_file(out_fn, locale_cfg) def _write_hostname(self, hostname, out_fn): - if self._dist_uses_systemd(): + if self.uses_systemd(): util.subp(['hostnamectl', 'set-hostname', str(hostname)]) else: host_cfg = { @@ -136,14 +148,14 @@ class Distro(distros.Distro): return hostname def _read_system_hostname(self): - if self._dist_uses_systemd(): + if self.uses_systemd(): host_fn = self.systemd_hostname_conf_fn else: host_fn = self.hostname_conf_fn return (host_fn, self._read_hostname(host_fn)) def _read_hostname(self, filename, default=None): - if self._dist_uses_systemd(): + if self.uses_systemd(): (out, _err) = util.subp(['hostname']) if len(out): return out @@ -164,7 +176,7 @@ class Distro(distros.Distro): def set_timezone(self, tz): tz_file = self._find_tz_file(tz) - if self._dist_uses_systemd(): + if self.uses_systemd(): # Currently, timedatectl complains if invoked during startup # so for compatibility, create the link manually. 
util.del_file(self.tz_local_fn) diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 8d4df342..d891315b 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -72,6 +72,7 @@ def netdev_info(empty=""): "bcast:": "bcast", "broadcast": "bcast", "mask:": "mask", "netmask": "mask", "hwaddr": "hwaddr", "ether": "hwaddr", + "scope": "scope", } for origfield, field in ifconfigfields.items(): target = "%s%s" % (field, fieldpost) @@ -96,7 +97,12 @@ def netdev_info(empty=""): def route_info(): (route_out, _err) = util.subp(["netstat", "-rn"]) - routes = [] + (route_out6, _err6) = util.subp(["netstat", "-A inet6", "-n"]) + + routes = {} + routes['ipv4'] = [] + routes['ipv6'] = [] + entries = route_out.splitlines()[1:] for line in entries: if not line: @@ -132,7 +138,26 @@ def route_info(): 'iface': toks[7], } - routes.append(entry) + routes['ipv4'].append(entry) + + entries6 = route_out6.splitlines()[1:] + for line in entries6: + if not line: + continue + toks = line.split() + + if (len(toks) < 6 or toks[0] == "Kernel" or + toks[0] == "Proto" or toks[0] == "Active"): + continue + entry = { + 'proto': toks[0], + 'recv-q': toks[1], + 'send-q': toks[2], + 'local address': toks[3], + 'foreign address': toks[4], + 'state': toks[5], + } + routes['ipv6'].append(entry) return routes @@ -156,10 +181,12 @@ def netdev_pformat(): lines.append(util.center("Net device info failed", '!', 80)) netdev = None if netdev is not None: - fields = ['Device', 'Up', 'Address', 'Mask', 'Hw-Address'] + fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address'] tbl = PrettyTable(fields) for (dev, d) in netdev.iteritems(): - tbl.add_row([dev, d["up"], d["addr"], d["mask"], d["hwaddr"]]) + tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]]) + if d["addr6"]: + tbl.add_row([dev, d["up"], d["addr6"], ".", d["scope6"], d["hwaddr"]]) netdev_s = tbl.get_string() max_len = len(max(netdev_s.splitlines(), key=len)) header = util.center("Net device info", "+", max_len) @@ -176,15 +203,30 @@ def route_pformat(): util.logexc(LOG, "Route info failed: %s" % e) routes = None if routes is not None: - fields = ['Route', 'Destination', 'Gateway', + fields_v4 = ['Route', 'Destination', 'Gateway', 'Genmask', 'Interface', 'Flags'] - tbl = PrettyTable(fields) - for (n, r) in enumerate(routes): + + if routes.get('ipv6') is not None: + fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q', 'Local Address', + 'Foreign Address', 'State'] + + tbl_v4 = PrettyTable(fields_v4) + for (n, r) in enumerate(routes.get('ipv4')): route_id = str(n) - tbl.add_row([route_id, r['destination'], + tbl_v4.add_row([route_id, r['destination'], r['gateway'], r['genmask'], r['iface'], r['flags']]) - route_s = tbl.get_string() + route_s = tbl_v4.get_string() + if fields_v6: + tbl_v6 = PrettyTable(fields_v6) + for (n, r) in enumerate(routes.get('ipv6')): + route_id = str(n) + tbl_v6.add_row([route_id, r['proto'], + r['recv-q'], r['send-q'], + r['local address'], r['foreign address'], + r['state']]) + route_s = route_s + tbl_v6.get_string() + max_len = len(max(route_s.splitlines(), key=len)) header = util.center("Route info", "+", max_len) lines.extend([header, route_s]) diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py new file mode 100644 index 00000000..069bdb41 --- /dev/null +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -0,0 +1,104 @@ +# vi: ts=4 expandtab +# +# Author: Neal Shrader <neal@digitalocean.com> +# +# This program is free software: you can redistribute it and/or 
modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +from cloudinit import log as logging +from cloudinit import util +from cloudinit import sources +from cloudinit import ec2_utils +from types import StringType +import functools + + +LOG = logging.getLogger(__name__) + +BUILTIN_DS_CONFIG = { + 'metadata_url': 'http://169.254.169.254/metadata/v1/', + 'mirrors_url': 'http://mirrors.digitalocean.com/' +} +MD_RETRIES = 0 +MD_TIMEOUT = 1 + +class DataSourceDigitalOcean(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.metadata = dict() + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}), + BUILTIN_DS_CONFIG]) + self.metadata_address = self.ds_cfg['metadata_url'] + + if self.ds_cfg.get('retries'): + self.retries = self.ds_cfg['retries'] + else: + self.retries = MD_RETRIES + + if self.ds_cfg.get('timeout'): + self.timeout = self.ds_cfg['timeout'] + else: + self.timeout = MD_TIMEOUT + + def get_data(self): + caller = functools.partial(util.read_file_or_url, timeout=self.timeout, + retries=self.retries) + md = ec2_utils.MetadataMaterializer(str(caller(self.metadata_address)), + base_url=self.metadata_address, + caller=caller) + + self.metadata = md.materialize() + + if self.metadata.get('id'): + return True + else: + return False + + def get_userdata_raw(self): + return "\n".join(self.metadata['user-data']) + + def get_vendordata_raw(self): + return "\n".join(self.metadata['vendor-data']) + + def get_public_ssh_keys(self): + if type(self.metadata['public-keys']) is StringType: + return [self.metadata['public-keys']] + else: + return self.metadata['public-keys'] + + @property + def availability_zone(self): + return self.metadata['region'] + + def get_instance_id(self): + return self.metadata['id'] + + def get_hostname(self, fqdn=False): + return self.metadata['hostname'] + + def get_package_mirror_info(self): + return self.ds_cfg['mirrors_url'] + + @property + def launch_index(self): + return None + +# Used to match classes to dependencies +datasources = [ + (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + ] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 70a577bc..14d0cb0f 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -293,7 +293,10 @@ def parse_ssh_config(fname): if not line or line.startswith("#"): lines.append(SshdConfigLine(line)) continue - (key, val) = line.split(None, 1) + try: + key, val = line.split(None, 1) + except ValueError: + key, val = line.split('=', 1) lines.append(SshdConfigLine(line, key, val)) return lines diff --git a/cloudinit/templater.py b/cloudinit/templater.py index 02f6261d..4cd3f13d 100644 --- a/cloudinit/templater.py +++ b/cloudinit/templater.py @@ -89,9 +89,11 @@ def detect_template(text): return CTemplate(content, searchList=[params]).respond() def 
jinja_render(content, params): + # keep_trailing_newline is in jinja2 2.7+, not 2.6 + add = "\n" if content.endswith("\n") else "" return JTemplate(content, undefined=jinja2.StrictUndefined, - trim_blocks=True).render(**params) + trim_blocks=True).render(**params) + add if text.find("\n") != -1: ident, rest = text.split("\n", 1) diff --git a/cloudinit/util.py b/cloudinit/util.py index f236d0bf..ee5e5c0a 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -399,6 +399,10 @@ def get_cfg_option_str(yobj, key, default=None): return val +def get_cfg_option_int(yobj, key, default=0): + return int(get_cfg_option_str(yobj, key, default=default)) + + def system_info(): return { 'platform': platform.platform(), @@ -1146,7 +1150,7 @@ def chownbyname(fname, user=None, group=None): # this returns the specific 'mode' entry, cleanly formatted, with value def get_output_cfg(cfg, mode): ret = [None, None] - if cfg or 'output' not in cfg: + if not cfg or 'output' not in cfg: return ret outcfg = cfg['output'] @@ -1270,14 +1274,14 @@ def read_write_cmdline_url(target_fn): logexc(LOG, "Failed writing url content to %s", target_fn) -def yaml_dumps(obj): - formatted = yaml.dump(obj, - line_break="\n", - indent=4, - explicit_start=True, - explicit_end=True, - default_flow_style=False) - return formatted +def yaml_dumps(obj, explicit_start=True, explicit_end=True): + return yaml.safe_dump(obj, + line_break="\n", + indent=4, + explicit_start=explicit_start, + explicit_end=explicit_end, + default_flow_style=False, + allow_unicode=True) def ensure_dir(path, mode=None): diff --git a/cloudinit/version.py b/cloudinit/version.py index edb651a9..3d1d1d23 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -20,7 +20,7 @@ from distutils import version as vr def version(): - return vr.StrictVersion("0.7.6") + return vr.StrictVersion("0.7.7") def version_string(): |
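
The new DigitalOcean datasource above reads its settings from the 'DigitalOcean' entry of the datasource configuration (typically system config, e.g. a file under /etc/cloud/cloud.cfg.d/, rather than user-data). A sketch of the knobs it recognises, with illustrative values; the defaults in the comments come from the patch:

    datasource:
      DigitalOcean:
        metadata_url: http://169.254.169.254/metadata/v1/   # built-in default
        retries: 3                                          # default 0 (MD_RETRIES)
        timeout: 2                                          # default 1 (MD_TIMEOUT)
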
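Similarly, cc_debug.py now documents its two options in a module docstring; a minimal cloud-config sketch of that structure (the output path is an arbitrary example):

    #cloud-config
    debug:
      verbose: true                            # default
      output: /var/log/cloud-init-debug.log    # example path; defaults to console + log
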